Take down test cases that almost never fail, so as to save time on the gate

Author: yanghaoran (2022-09-27 22:36:45 +08:00)
Parent: 9ec380970b
Commit: 164fada9f5
306 changed files with 1020 additions and 1064 deletions
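For context, every hunk below makes the same change: a test is demoted from the level0 tier to level1 by swapping its pytest marker, so a gate job that only selects level0 tests no longer runs it. Below is a minimal sketch of what such a test looks like after this commit and how a marker-based gate selection could work; the marker names are taken from this diff, but the conftest registration and the pytest invocation are illustrative assumptions, not part of this commit.

# test_example.py -- hypothetical test using the markers seen in this diff
import pytest

@pytest.mark.level1  # was level0 before this commit, so a level0-only gate skips it
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_forward():
    assert 1 + 1 == 2

# conftest.py -- assumed marker registration (avoids PytestUnknownMarkWarning)
# def pytest_configure(config):
#     for name in ("level0", "level1", "platform_x86_gpu_training",
#                  "platform_arm_ascend_training", "platform_x86_ascend_training",
#                  "env_onecard"):
#         config.addinivalue_line("markers", name)

# A gate job restricted to the fast/critical tier could then run, for example:
#     pytest -m "level0 and platform_x86_gpu_training" tests/st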

View File

@@ -57,7 +57,7 @@ class BackwardNet(nn.Cell):
 return grads
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -73,7 +73,7 @@ def test_forward():
 assert graph_mode_out == Tensor(21, mstype.int32)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training

View File

@@ -79,7 +79,7 @@ class BackwardNetReplaceBreak(nn.Cell):
 return grads
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -95,7 +95,7 @@ def test_forward():
 # Problem: Exceed function call depth limit 1000.
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard

View File

@@ -156,7 +156,7 @@ def test_if_after_if_in_if():
 control_flow_if_after_if_in_if(IfAfterIfInIfNet, x, expect1, expect2)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training

View File

@@ -70,7 +70,7 @@ def test_forward():
 assert graph_out == Tensor(np.array(18), mstype.int32)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training

View File

@@ -75,7 +75,7 @@ def test_forward():
 assert graph_mode_out == (Tensor(np.array(9), mstype.int32), Tensor(np.array(4), mstype.int32))
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training

View File

@@ -71,7 +71,7 @@ def test_forward():
 assert graph_out == Tensor(np.array(36), mstype.int32)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training

View File

@@ -24,7 +24,7 @@ from mindspore.common import dtype as mstype
 grad_all = C.GradOperation(get_all=True)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training

View File

@@ -29,7 +29,7 @@ grad_by_list = C.GradOperation(get_by_list=True)
 grad_all = C.GradOperation(get_all=True)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard

View File

@@ -156,7 +156,7 @@ def test_cpu_e2e_dump():
 run_e2e_dump()
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 @security_off_wrap
@@ -178,7 +178,7 @@ def test_gpu_e2e_dump():
 run_e2e_dump()
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
 @security_off_wrap

View File

@@ -35,7 +35,7 @@ class NetWithEmbeddingLookUp(nn.Cell):
 return out
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.platform_x86_gpu_training

View File

@@ -74,7 +74,7 @@ class UniqueReshapeAdd(nn.Cell):
 return self.add(x, y)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -94,7 +94,7 @@ def test_unique():
 assert (output[1].asnumpy() == expect2).all()
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -130,7 +130,7 @@ def test_unique_square_relu():
 assert (output.asnumpy() == expect).all()
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training

View File

@@ -112,7 +112,7 @@ def test_getnext_sink_size_dynamic_pipeline():
 for output, (_, last_input) in zip(outputs, last_inputs):
 assert output.shape == last_input.shape
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard
@@ -125,7 +125,7 @@ def test_getnext_sink_size_dynamic_pipeline_ascend():
 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
 test_getnext_sink_size_dynamic_pipeline()
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
 def test_getnext_sink_size_dynamic_pipeline_gpu():

View File

@@ -39,7 +39,7 @@ class ControlNet(nn.Cell):
 return self.inner_function_2(a, b)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -57,7 +57,7 @@ def test_fallback_control_sink_tensor():
 assert output == output_expect
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -84,7 +84,7 @@ def test_np_tensor_list():
 assert len(tensor_list) == 3
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -103,7 +103,7 @@ def test_list_count():
 assert list_count() == 1
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -131,7 +131,7 @@ def np_fallback_func_tensor_index(x):
 return me_x[x]
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -148,7 +148,7 @@ def test_np_fallback_func_tensor_index():
 assert output == output_expect
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -168,7 +168,7 @@ def test_np_calculate():
 assert np.all(np_calculate().asnumpy() == np.array([1, 1, 0, 0, 1]))
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -186,7 +186,7 @@ def test_fallback_tensor_array_astype():
 print(foo())
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -211,7 +211,7 @@ def test_fallback_tuple_with_mindspore_function():
 assert foo()
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training

View File

@@ -20,7 +20,7 @@ from mindspore import ms_function, context, Tensor
 context.set_context(mode=context.GRAPH_MODE)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -40,7 +40,7 @@ def test_and_int_tensor():
 assert ret == 0
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -62,7 +62,7 @@ def test_and_int_tensor_2():
 assert ret == 0
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -85,7 +85,7 @@ def test_and_multi_int_tensor():
 assert "can be converted to bool, but" in str(error_info.value)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -106,7 +106,7 @@ def test_and_multi_int_tensor_2():
 assert "can be converted to bool, but" in str(error_info.value)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -126,7 +126,7 @@ def test_and_bool_tensor():
 assert ret == 0
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -148,7 +148,7 @@ def test_and_bool_tensor_2():
 assert ret == 1
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -169,7 +169,7 @@ def test_and_different_type_variable_tensor():
 assert "Cannot join the return values of different branches" in str(error_info.value)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -191,7 +191,7 @@ def test_and_different_type_constant_tensor():
 assert res == 2.0
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -212,7 +212,7 @@ def test_and_constant_and_variable_tensor():
 assert res == 2
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -234,7 +234,7 @@ def test_and_constant_and_variable_tensor_2():
 assert "Cannot join the return values of different branches" in str(error_info.value)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -255,7 +255,7 @@ def test_and_constant_and_variable_tensor_3():
 assert res == 1
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training

View File

@@ -23,7 +23,7 @@ from mindspore import Tensor, context, ms_class
 context.set_context(mode=context.GRAPH_MODE)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -53,7 +53,7 @@ def test_fallback_class_attr():
 assert out.asnumpy() == 1
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -88,7 +88,7 @@ def test_fallback_class_method():
 assert out.asnumpy() == 10
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -124,7 +124,7 @@ def test_fallback_class_call():
 assert out.asnumpy() == 14
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -155,7 +155,7 @@ def test_fallback_class_input_attr():
 assert np.all(out.asnumpy() == expect_res)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -188,7 +188,7 @@ def test_fallback_class_input_method():
 assert out.asnumpy() == 6
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -223,7 +223,7 @@ def test_fallback_class_class_nested():
 assert out.asnumpy() == 1
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -273,7 +273,7 @@ def test_fallback_class_cell_nested():
 assert out == 25
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -307,7 +307,7 @@ def test_fallback_class_type_attr():
 assert out == 2
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -337,7 +337,7 @@ def test_fallback_class_create_instance_attr():
 assert out == 5
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -373,7 +373,7 @@ def test_fallback_class_create_instance_method():
 assert out.asnumpy() == 10
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -410,7 +410,7 @@ def test_fallback_class_create_instance_call():
 assert out == 10
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -429,7 +429,7 @@ def test_fallback_raise_error_not_class_type():
 func(1, 2)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training

View File

@@ -20,7 +20,7 @@ from mindspore import context, Tensor, Parameter
 context.set_context(mode=context.GRAPH_MODE)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -50,7 +50,7 @@ def test_issubclass_list():
 assert (res.asnumpy() == [2, 3, 4]).all()
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training

View File

@@ -20,7 +20,7 @@ from mindspore import ms_function, context, Tensor
 context.set_context(mode=context.GRAPH_MODE)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -47,7 +47,7 @@ def test_np_linspace():
 print("e:", e)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -76,7 +76,7 @@ def test_np_arange_slice_1():
 assert np.all(e.asnumpy() == np.array([2, 3, 4]))
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -102,7 +102,7 @@ def test_np_arange_slice_2():
 assert np.all(d.asnumpy() == np.array([[2, 3], [4, 5], [5, 6]]))
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -155,7 +155,7 @@ def test_np_array_advanced_index_2():
 assert np.all(c.asnumpy() == np.array([2. + 6.j, 3.5 + 5.j]))
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -181,7 +181,7 @@ def test_np_array_advanced_index_3():
 print("c:", c)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -200,7 +200,7 @@ def test_np_reshape():
 assert np.all(np_reshape().asnumpy() == np.array([[0, 1, 2, 3], [4, 5, 6, 7]]))
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -219,7 +219,7 @@ def test_np_ndarray_flatten():
 assert np.all(np_ndarray_flatten().asnumpy() == np.array([0, 1, 2, 3, 4, 5, 6, 7]))
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -238,7 +238,7 @@ def test_np_ravel():
 assert np.all(np_ravel().asnumpy() == np.array([0, 4, 1, 5, 2, 6, 3, 7]))
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -257,7 +257,7 @@ def test_np_transpose():
 assert np.all(np_transpose().asnumpy() == np.array([0, 1, 2, 3]))
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -279,7 +279,7 @@ def test_np_rollaxis():
 assert x == 6 and y == 5
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -301,7 +301,7 @@ def test_np_swapaxes():
 assert x == 6 and y == 3
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -321,7 +321,7 @@ def test_np_broadcast():
 assert np.all(np_broadcast().asnumpy() == np.array([3, 3]))
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -340,7 +340,7 @@ def test_np_broadcast_to():
 assert np.all(np_broadcast_to().asnumpy() == np.array([[0, 1, 2, 3], [0, 1, 2, 3]]))
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -359,7 +359,7 @@ def test_np_expand_dims():
 assert np.all(np_expand_dims().asnumpy() == np.array([[[1, 2], [3, 4]]]))
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -378,7 +378,7 @@ def test_np_squeeze():
 assert np.all(np_squeeze().asnumpy() == np.array([[0, 1], [2, 3]]))
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -406,7 +406,7 @@ def test_np_concat():
 assert np.all(out_vstack.asnumpy() == np.array([[1, 2], [3, 4], [5, 6], [7, 8]]))
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -431,7 +431,7 @@ def test_np_split():
 assert np.all(out_vsplit.asnumpy() == np.array([[[0, 1]], [[2, 3]]]))
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -459,7 +459,7 @@ def test_np_element():
 assert np.all(out_unique.asnumpy() == np.array([2, 5, 6, 7, 8, 9]))
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -487,7 +487,7 @@ def test_np_bitwise():
 assert right_shift.asnumpy() == 10
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -520,7 +520,7 @@ def test_np_char_1():
 assert char_upper.asnumpy() == 'FALLBACK'
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -553,7 +553,7 @@ def test_np_char_2():
 assert char_decode.asnumpy() == 'runoob'
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -584,7 +584,7 @@ def test_np_degree():
 assert np.isclose(out_arctan.asnumpy(), 45)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -617,7 +617,7 @@ def test_np_math_1():
 assert np.all(out_remainder.asnumpy() == np.array([0, 2]))
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -646,7 +646,7 @@ def test_np_math_2():
 assert np.allclose(out_power.asnumpy(), np.array([1, 4, 9]))
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -684,7 +684,7 @@ def test_np_statistic():
 assert np.isclose(out_var.asnumpy(), 2.0)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -716,7 +716,7 @@ def test_np_sort():
 assert np.all(out_where.asnumpy() == np.array([4]))
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -738,7 +738,7 @@ def test_np_extract():
 assert np.all(out_extract.asnumpy() == np.array([2, 4]))
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training

View File

@@ -424,7 +424,7 @@ def test_print_dict():
 check_output(cap.output, patterns)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard

View File

@@ -105,7 +105,7 @@ def test_jet_single_input_single_output_graph_mode():
 assert np.allclose(out_primals.asnumpy(), expected_primals, atol=1.e-4)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.platform_x86_gpu_training
@@ -128,7 +128,7 @@ def test_jet_single_input_single_output_with_scalar_graph_mode():
 assert np.allclose(out_primals.asnumpy(), expected_primals, atol=1.e-4)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.platform_x86_gpu_training
@@ -150,7 +150,7 @@ def test_derivative_single_input_single_output_graph_mode():
 assert np.allclose(out_series.asnumpy(), expected_series, atol=1.e-4)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.platform_x86_gpu_training
@@ -172,7 +172,7 @@ def test_jet_multiple_input_single_output_graph_mode():
 assert np.allclose(out_series.asnumpy(), expected_series, atol=1.e-4)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.platform_x86_gpu_training
@@ -194,7 +194,7 @@ def test_derivative_multiple_input_single_output_graph_mode():
 assert np.allclose(out_series.asnumpy(), expected_series, atol=1.e-4)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.platform_x86_gpu_training
@@ -228,7 +228,7 @@ def test_jet_construct_graph_mode():
 assert np.allclose(out_series.asnumpy(), expected_series, atol=1.e-4)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.platform_x86_gpu_training
@@ -266,7 +266,7 @@ def test_derivative_construct_graph_mode():
 assert np.allclose(out_series[1].asnumpy(), expected_series_y, atol=1.e-4)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_jet_function_graph_mode():

View File

@@ -76,7 +76,7 @@ def test_jet_multiple_input_single_output_pynative_mode():
 assert np.allclose(out_primals.asnumpy(), expected_primals, atol=1.e-4)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.platform_x86_gpu_training
@@ -98,7 +98,7 @@ def test_derivative_multiple_input_single_output_pynative_mode():
 assert np.allclose(out_series.asnumpy(), expected_series, atol=1.e-4)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.platform_x86_gpu_training
@@ -120,7 +120,7 @@ def test_jet_single_input_single_output_pynative_mode():
 assert np.allclose(out_series.asnumpy(), expected_series, atol=1.e-4)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.platform_x86_gpu_training

View File

@@ -48,7 +48,7 @@ class HyperMapNet(nn.Cell):
 return output
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.env_onecard
@@ -71,7 +71,7 @@ def test_single_element_hypermap_with_tensor_input():
 assert np.allclose(output[1].asnumpy(), expect_output_2)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.env_onecard
@@ -95,7 +95,7 @@ def test_double_elements_hypermap_tensor_tuple_inputs():
 assert np.allclose(output[1].asnumpy(), expect_output_2)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.env_onecard
@@ -119,7 +119,7 @@ def test_double_elements_hypermap_tensor_list_inputs():
 assert np.allclose(output[1].asnumpy(), expect_output_2)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.env_onecard
@@ -145,7 +145,7 @@ def test_doubel_elements_hypermap_correct_mix_inputs():
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.env_onecard
@@ -162,7 +162,7 @@ def test_double_elements_hypermap_inputs_length_mismatch():
 common_map((x, y))
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.env_onecard

View File

@@ -1102,7 +1102,7 @@ def pad_with_npfunc(vector, pad_width, iaxis, kwargs):
 vector[-pad_width[1]:] = pad_value
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
 def test_pad_gpu():

View File

@@ -203,7 +203,7 @@ def onp_isnan(x):
 return onp.isnan(x)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
@@ -219,7 +219,7 @@ def onp_isinf(x):
 return onp.isinf(x)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard

View File

@@ -919,7 +919,7 @@ def onp_maximum(x1, x2):
 return onp.maximum(x1, x2)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
@@ -1303,7 +1303,7 @@ def test_kron():
 match_res(mnp.kron, onp.kron, x, y)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.platform_x86_gpu_training
@@ -1952,7 +1952,7 @@ def onp_nanmean(x):
 return a, b, c
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
@@ -2034,7 +2034,7 @@ def test_multi_dot():
 match_all_arrays(mnp.multi_dot(mnp_arrays[1:-1]), onp.linalg.multi_dot(arrays[1:-1]))
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.platform_x86_gpu_training
@@ -2148,7 +2148,7 @@ def test_bincount():
 onp.bincount(x, weights, minlength=25), error=3)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.platform_x86_gpu_training
@@ -2472,7 +2472,7 @@ def test_result_type():
 assert actual == expected
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.platform_x86_gpu_training
@@ -2555,7 +2555,7 @@ def test_ravel_multi_index():
 match_array(actual.asnumpy(), expected, error=5)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.platform_x86_gpu_training

View File

@@ -61,7 +61,7 @@ def test_index_add_float32():
 index_add_forward(np.float32)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard
@@ -77,7 +77,7 @@ def test_index_add_float16():
 index_add_forward(np.float16)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard
@@ -181,7 +181,7 @@ def test_index_add_grad_float32():
 index_add_grad_with_type(np.float32)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard
@@ -245,7 +245,7 @@ def test_index_add_grad_uint8():
 index_add_grad_with_type(np.uint8)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard
@@ -269,7 +269,7 @@ def test_index_add_dynamic_y():
 assert (output.asnumpy() == expect).all()
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard

View File

@@ -48,7 +48,7 @@ def scattre_add_forward(nptype):
 np.testing.assert_array_almost_equal(output.asnumpy(), expected)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard
@@ -64,7 +64,7 @@ def test_scattre_add_forward_float16():
 scattre_add_forward(np.float16)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard
@@ -94,7 +94,7 @@ def scattre_add_dynamic_indices():
 np.testing.assert_array_almost_equal(output.asnumpy(), expected)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard
@@ -123,7 +123,7 @@ def scattre_add_dynamic_updates():
 np.testing.assert_array_almost_equal(output.asnumpy(), expected)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard

View File

@@ -76,7 +76,7 @@ def scattre_div_dynamic_indices():
 np.testing.assert_array_almost_equal(output.asnumpy(), expected)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard
@@ -95,7 +95,7 @@ def test_scattre_div_forward_float16():
 scattre_div_forward(np.float16, expected)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard
@@ -114,7 +114,7 @@ def test_scattre_div_forward_float32():
 scattre_div_forward(np.float32, expected)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard
@@ -130,7 +130,7 @@ def test_scattre_div_dynamic_indices():
 scattre_div_dynamic_indices()
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard

View File

@@ -317,7 +317,7 @@ def scatter_func_updates_nest_vmap():
 np.testing.assert_array_almost_equal(output.asnumpy(), expected)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard
@@ -333,7 +333,7 @@ def test_scatter_func_forward_float16():
 scatter_func_forward(np.float16)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard
@@ -349,7 +349,7 @@ def test_scatter_func_forward_float32():
 scatter_func_forward(np.float32)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard
@@ -365,7 +365,7 @@ def test_scatter_func_forward_int32():
 scatter_func_forward(np.int32)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard
@@ -381,7 +381,7 @@ def test_scatter_func_dynamic_indices():
 scatter_func_dynamic_indices()
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard
@@ -397,7 +397,7 @@ def test_scatter_func_dynamic_updates():
 scatter_func_dynamic_updates()
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard
@@ -413,7 +413,7 @@ def test_scatter_func_grad_float16():
 scatter_func_grad(np.float16)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard
@@ -429,7 +429,7 @@ def test_scatter_func_grad_float32():
 scatter_func_grad(np.float32)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard
@@ -445,7 +445,7 @@ def test_scatter_func_grad_int32():
 scatter_func_grad(np.int32)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard
@@ -461,7 +461,7 @@ def test_scatter_func_indices_vmap():
 scatter_func_indices_vmap()
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard
@@ -477,7 +477,7 @@ def test_scatter_func_updates_vmap():
 scatter_func_updates_vmap()
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard

View File

@@ -84,7 +84,7 @@ def scatter_mul_dynamic_indices():
 np.testing.assert_array_almost_equal(output.asnumpy(), expected)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard
@@ -100,7 +100,7 @@ def test_scatter_mul_forward_float16():
 scatter_mul_forward(np.float16)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard
@@ -116,7 +116,7 @@ def test_scatter_mul_forward_float32():
 scatter_mul_forward(np.float32)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard
@@ -132,7 +132,7 @@ def test_scatter_mul_forward_int32():
 scatter_mul_forward(np.int32)
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard
@@ -148,7 +148,7 @@ def test_scatter_mul_dynamic_indices():
 scatter_mul_dynamic_indices()
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard
@@ -164,7 +164,7 @@ def test_scatter_mul_dynamic_updates():
 scatter_mul_dynamic_updates()
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard
View File
@ -104,7 +104,7 @@ def test_tensor_scatter_arithmetic_small_float(func, data_type, index_type):
compare_with_numpy(func, input_x, indices, updates) compare_with_numpy(func, input_x, indices, updates)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_arm_ascend_training @pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training @pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
@ -124,7 +124,7 @@ def test_tensor_scatter_arithmetic_small_int(func, data_type, index_type):
compare_with_numpy(func, input_x, indices, updates) compare_with_numpy(func, input_x, indices, updates)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_arm_ascend_training @pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training @pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
@ -152,7 +152,7 @@ def test_tensor_scatter_arithmetic_multi_dims(func, data_type, index_type):
compare_with_numpy(func, input_x, indices, updates) compare_with_numpy(func, input_x, indices, updates)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_arm_ascend_training @pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training @pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
@ -176,7 +176,7 @@ def test_tensor_scatter_arithmetic_function_op(func, data_type, index_type):
np.testing.assert_allclose(output.asnumpy(), expected, rtol=1e-6) np.testing.assert_allclose(output.asnumpy(), expected, rtol=1e-6)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_arm_ascend_training @pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training @pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
@ -204,7 +204,7 @@ def test_tensor_scatter_arithmetic_tensor_op(func, data_type, index_type):
np.testing.assert_allclose(output.asnumpy(), expected, rtol=1e-6) np.testing.assert_allclose(output.asnumpy(), expected, rtol=1e-6)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_arm_ascend_training @pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training @pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
View File
@ -53,7 +53,7 @@ def test_random_shuffle_op_dtype(mode, dtype):
assert output.shape == expect_shape assert output.shape == expect_shape
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.env_onecard @pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu @pytest.mark.platform_x86_cpu
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE]) @pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
@ -74,7 +74,7 @@ def test_random_shuffle_op_tensor(mode, shape):
assert output.shape == expect_shape assert output.shape == expect_shape
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.env_onecard @pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu @pytest.mark.platform_x86_cpu
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE]) @pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
@ -91,7 +91,7 @@ def test_random_shuffle_op_scalar(mode):
assert output == x assert output == x
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.env_onecard @pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu @pytest.mark.platform_x86_cpu
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE]) @pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
@ -112,7 +112,7 @@ def test_random_shuffle_op_dynamic_shape(mode):
assert (output_dyn.asnumpy() == out.asnumpy()).all() assert (output_dyn.asnumpy() == out.asnumpy()).all()
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.env_onecard @pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu @pytest.mark.platform_x86_cpu
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE]) @pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
View File
@ -44,7 +44,7 @@ class NetAcoshGradGrad(nn.Cell):
return backward_net(y, grad, dout) return backward_net(y, grad, dout)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def acosh_grad_grad_base(dtype, loss): def acosh_grad_grad_base(dtype, loss):
View File
@ -63,7 +63,7 @@ def main_test(var_numpy, m_numpy, v_numpy, beta1_power_numpy, lr_numpy, beta1_nu
return (expect_m, res_m_mindspore), (expect_v, res_v_mindspore), (expect_var, res_var_mindspore) return (expect_m, res_m_mindspore), (expect_v, res_v_mindspore), (expect_var, res_var_mindspore)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_apply_adamax_fff(): def test_apply_adamax_fff():
@ -93,7 +93,7 @@ def test_apply_adamax_fff():
assert numpy.all(var[0] - var[1] < eps_f32) assert numpy.all(var[0] - var[1] < eps_f32)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_apply_adamax_ddd(): def test_apply_adamax_ddd():
View File
@ -63,7 +63,7 @@ def main_test(var_np, accum_np, accum_update_np, lr_np, rho_np, epsilon_np, grid
(expect_var_np, res_var_mindspore) (expect_var_np, res_var_mindspore)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_apply_adadelta_fff(): def test_apply_adadelta_fff():
@ -90,7 +90,7 @@ def test_apply_adadelta_fff():
assert np.all(abs(var[0] - var[1]) < eps_f32) assert np.all(abs(var[0] - var[1]) < eps_f32)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_apply_adadelta_ddd(): def test_apply_adadelta_ddd():
View File
@ -58,7 +58,7 @@ def main_test(var_np, accum_np, lr_np, grident_np, epsilon_np, update_slots):
return (expect_var_np, res_var_mindspore), (expect_accum_np, res_accum_mindspore) return (expect_var_np, res_var_mindspore), (expect_accum_np, res_accum_mindspore)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_apply_adagradv2_fff(): def test_apply_adagradv2_fff():
@ -83,7 +83,7 @@ def test_apply_adagradv2_fff():
assert np.all(abs(var[0] - var[1]) < eps_f32) assert np.all(abs(var[0] - var[1]) < eps_f32)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_apply_adagradv2_ddd(): def test_apply_adagradv2_ddd():
View File
@ -29,7 +29,7 @@ def adaptive_avg_pool1d_forward_functional(nptype):
np.testing.assert_array_almost_equal(output.asnumpy(), expected) np.testing.assert_array_almost_equal(output.asnumpy(), expected)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_adaptive_avg_pool1d_forward_float32_functional(): def test_adaptive_avg_pool1d_forward_float32_functional():
View File
@ -112,7 +112,7 @@ def test_net_value():
assert (dx.asnumpy() == expect_dx).all assert (dx.asnumpy() == expect_dx).all
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_net_graph_mode(): def test_net_graph_mode():
@ -144,7 +144,7 @@ def test_net_graph_mode():
assert (dx.asnumpy() == expect_dx).all assert (dx.asnumpy() == expect_dx).all
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_net_graph_mode_fp64(): def test_net_graph_mode_fp64():
@ -176,7 +176,7 @@ def test_net_graph_mode_fp64():
assert (dx.asnumpy() == expect_dx).all assert (dx.asnumpy() == expect_dx).all
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_adaptive_avgpool_vmap(): def test_adaptive_avgpool_vmap():
View File
@ -50,7 +50,7 @@ class GradNet(nn.Cell):
return self.adaptive_avg_pool3d_grad(x, dy) return self.adaptive_avg_pool3d_grad(x, dy)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE]) @pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
@ -74,7 +74,7 @@ def test_net_normal_with_functional(mode, shape):
assert output.asnumpy().shape == expect_shape assert output.asnumpy().shape == expect_shape
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE]) @pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
@ -99,7 +99,7 @@ def test_net_normal_with_nn(mode, shape):
assert output.asnumpy().shape == expect_shape assert output.asnumpy().shape == expect_shape
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_net_normal(): def test_net_normal():
@ -115,7 +115,7 @@ def test_net_normal():
assert output.asnumpy().shape == expect_shape assert output.asnumpy().shape == expect_shape
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_net_graph_mode_fp64(): def test_net_graph_mode_fp64():
View File
@ -27,7 +27,7 @@ def adaptive_max_pool1d_forward_functional(nptype):
np.testing.assert_array_almost_equal(output.asnumpy(), expected) np.testing.assert_array_almost_equal(output.asnumpy(), expected)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_adaptive_max_pool1d_forward_float32_functional(): def test_adaptive_max_pool1d_forward_float32_functional():
View File
@ -36,7 +36,7 @@ class NetP(nn.Cell):
return self.adaptive_max_pool2d(x) return self.adaptive_max_pool2d(x)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_net_normal(): def test_net_normal():
@ -52,7 +52,7 @@ def test_net_normal():
assert output.asnumpy().shape == expect_shape assert output.asnumpy().shape == expect_shape
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_net_h_none(): def test_net_h_none():
@ -68,7 +68,7 @@ def test_net_h_none():
assert output.asnumpy().shape == expect_shape assert output.asnumpy().shape == expect_shape
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_net_hxh(): def test_net_hxh():
@ -94,7 +94,7 @@ class NetWithIndices(nn.Cell):
return self.adaptive_max_pool2d(x) return self.adaptive_max_pool2d(x)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_net_with_indices(): def test_net_with_indices():
@ -110,7 +110,7 @@ def test_net_with_indices():
assert output[1].asnumpy().shape == expect_shape assert output[1].asnumpy().shape == expect_shape
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_net_f(): def test_net_f():
@ -135,7 +135,7 @@ class Netnn(nn.Cell):
return self.adaptive_max_pool2d(x) return self.adaptive_max_pool2d(x)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_net_nn(): def test_net_nn():
@ -176,7 +176,7 @@ def test_tensor_interface_graph():
assert y.asnumpy().shape == expect_shape assert y.asnumpy().shape == expect_shape
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_dynamic_shape(): def test_dynamic_shape():
View File
@ -66,7 +66,7 @@ class RankDynamicNet(nn.Cell):
return out, in_x return out, in_x
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_rank_dynamic(): def test_rank_dynamic():
@ -86,7 +86,7 @@ def test_rank_dynamic():
assert (dyn_output[1].asnumpy() == output[1].asnumpy()).all() assert (dyn_output[1].asnumpy() == output[1].asnumpy()).all()
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_net_4d(): def test_net_4d():
@ -103,7 +103,7 @@ def test_net_4d():
assert output[0].asnumpy().shape == expect_shape assert output[0].asnumpy().shape == expect_shape
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_net_5d(): def test_net_5d():
@ -120,7 +120,7 @@ def test_net_5d():
assert output[0].asnumpy().shape == expect_shape assert output[0].asnumpy().shape == expect_shape
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_net_dynamic_shape(): def test_net_dynamic_shape():
@ -138,7 +138,7 @@ def test_net_dynamic_shape():
assert output[0].asnumpy().shape == expect_shape assert output[0].asnumpy().shape == expect_shape
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_net_vmap(): def test_net_vmap():
View File
@ -227,14 +227,14 @@ def add_dynamic(nptype):
assert (output2.asnumpy() == expect2).all() assert (output2.asnumpy() == expect2).all()
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_add_dynamic_float64(): def test_add_dynamic_float64():
add_dynamic(np.float64) add_dynamic(np.float64)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_add_dynamic_float32(): def test_add_dynamic_float32():
@ -275,7 +275,7 @@ def test_add_tensor_api(nptype):
np.testing.assert_array_almost_equal(output.asnumpy(), expected) np.testing.assert_array_almost_equal(output.asnumpy(), expected)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_add_float32_tensor_api(): def test_add_float32_tensor_api():
View File
@ -29,7 +29,7 @@ class NetAddcdiv(nn.Cell):
return self.addcdiv(input_data, x1, x2, value) return self.addcdiv(input_data, x1, x2, value)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_addcdiv_float32_graph(type_s=np.float32): def test_addcdiv_float32_graph(type_s=np.float32):
@ -54,7 +54,7 @@ def test_addcdiv_float32_graph(type_s=np.float32):
assert np.all(abs(diff) < error) assert np.all(abs(diff) < error)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_addcdiv_float64_pynative_value(type_s=np.float64): def test_addcdiv_float64_pynative_value(type_s=np.float64):
View File
@ -29,7 +29,7 @@ class NetAddcmul(nn.Cell):
return self.addcmul(input_data, x1, x2, value) return self.addcmul(input_data, x1, x2, value)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_addcmul_float32_graph(type_s=np.float32): def test_addcmul_float32_graph(type_s=np.float32):
@ -55,7 +55,7 @@ def test_addcmul_float32_graph(type_s=np.float32):
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_addcmul_float64_pynative_value(type_s=np.float64): def test_addcmul_float64_pynative_value(type_s=np.float64):
View File
@ -33,7 +33,7 @@ def test_addmv_forward_tensor_api(nptype):
np.testing.assert_array_almost_equal(output.asnumpy(), expected) np.testing.assert_array_almost_equal(output.asnumpy(), expected)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_addmv_forward_float32_tensor_api(): def test_addmv_forward_float32_tensor_api():
View File
@ -33,7 +33,7 @@ def test_addr_tensor_api(nptype):
np.testing.assert_array_almost_equal(output.asnumpy(), expected) np.testing.assert_array_almost_equal(output.asnumpy(), expected)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_addr_float32_tensor_api(): def test_addr_float32_tensor_api():
View File
@ -97,7 +97,7 @@ def np_affine_grid_5d(theta, size, align_corners=False):
return grid.reshape((n, d, h, w, 3)) return grid.reshape((n, d, h, w, 3))
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.env_onecard @pytest.mark.env_onecard
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.parametrize("net", [AffineGridNet, AffineGridDynamicShapeNet]) @pytest.mark.parametrize("net", [AffineGridNet, AffineGridDynamicShapeNet])
@ -130,7 +130,7 @@ def test_affine_grid_4d(net, align, dtype):
assert np.allclose(np_grid, ms_grid.asnumpy(), atol=atol) assert np.allclose(np_grid, ms_grid.asnumpy(), atol=atol)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.env_onecard @pytest.mark.env_onecard
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.parametrize("net", [AffineGridNet, AffineGridDynamicShapeNet]) @pytest.mark.parametrize("net", [AffineGridNet, AffineGridDynamicShapeNet])
View File
@ -141,7 +141,7 @@ def ms_forward_impl_vmap2(grad, np_lr, np_l1, np_l2, np_global_step, data_type):
return output return output
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.env_onecard @pytest.mark.env_onecard
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
def test_apply_adagrad_da_float(): def test_apply_adagrad_da_float():
@ -170,7 +170,7 @@ def test_apply_adagrad_da_float():
np.testing.assert_allclose(np_out[i], ms_out[i].asnumpy(), rtol=error, atol=error) np.testing.assert_allclose(np_out[i], ms_out[i].asnumpy(), rtol=error, atol=error)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.env_onecard @pytest.mark.env_onecard
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
def test_apply_adagrad_da_float16(): def test_apply_adagrad_da_float16():
@ -201,7 +201,7 @@ def test_apply_adagrad_da_float16():
np.testing.assert_allclose(np_out[i], ms_out[i].asnumpy(), rtol=error, atol=error) np.testing.assert_allclose(np_out[i], ms_out[i].asnumpy(), rtol=error, atol=error)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.env_onecard @pytest.mark.env_onecard
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
def test_apply_adagrad_da_float16_vmap(): def test_apply_adagrad_da_float16_vmap():
@ -231,7 +231,7 @@ def test_apply_adagrad_da_float16_vmap():
np.testing.assert_allclose(np_out[i], ms_out[i].asnumpy(), rtol=error, atol=error) np.testing.assert_allclose(np_out[i], ms_out[i].asnumpy(), rtol=error, atol=error)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.env_onecard @pytest.mark.env_onecard
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
def test_apply_adagrad_da_float_vmap(): def test_apply_adagrad_da_float_vmap():
@ -260,7 +260,7 @@ def test_apply_adagrad_da_float_vmap():
np.testing.assert_allclose(np_out[i], ms_out[i].asnumpy(), rtol=error, atol=error) np.testing.assert_allclose(np_out[i], ms_out[i].asnumpy(), rtol=error, atol=error)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.env_onecard @pytest.mark.env_onecard
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
def test_apply_adagrad_da_float16_vmap2(): def test_apply_adagrad_da_float16_vmap2():
@ -290,7 +290,7 @@ def test_apply_adagrad_da_float16_vmap2():
np.testing.assert_allclose(np_out[i], ms_out[i].asnumpy(), rtol=error, atol=error) np.testing.assert_allclose(np_out[i], ms_out[i].asnumpy(), rtol=error, atol=error)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.env_onecard @pytest.mark.env_onecard
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
def test_apply_adagrad_da_float_vmap2(): def test_apply_adagrad_da_float_vmap2():
View File
@ -52,7 +52,7 @@ def numpy_apply_adam_with_amsgrad(var, m, v, vhat, grad, beta1=0.9, beta2=0.999,
return var return var
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.env_onecard @pytest.mark.env_onecard
@pytest.mark.parametrize("data_type", [np.float32, np.float16]) @pytest.mark.parametrize("data_type", [np.float32, np.float16])
def test_apply_adam_with_amsgrad_op(data_type): def test_apply_adam_with_amsgrad_op(data_type):
@ -98,7 +98,7 @@ class AmsgradNetVmap(nn.Cell):
return self.vmap_amsgrad(self.var, self.m, self.v, self.vhat, beta1_power, beta2_power, lr, grad) return self.vmap_amsgrad(self.var, self.m, self.v, self.vhat, beta1_power, beta2_power, lr, grad)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_apply_adam_witm_amsgrad_op_vmap(): def test_apply_adam_witm_amsgrad_op_vmap():
""" """
@ -144,7 +144,7 @@ class AmsgradNetVmap2(nn.Cell):
return self.vmap_amsgrad(self.var, self.m, self.v, self.vhat, beta1_power, beta2_power, lr, grad) return self.vmap_amsgrad(self.var, self.m, self.v, self.vhat, beta1_power, beta2_power, lr, grad)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_apply_adam_with_amsgrad_grad_op_vmap2(): def test_apply_adam_with_amsgrad_grad_op_vmap2():
""" """
View File
@ -35,7 +35,7 @@ class Net(nn.Cell):
return z return z
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_apply_addsign_graph_float32(): def test_apply_addsign_graph_float32():
View File
@ -35,7 +35,7 @@ class ApplyProximalAdagradTEST(nn.Cell):
return self.apply_proximal_adagrad(self.var, self.accum, lr, l1, l2, grad) return self.apply_proximal_adagrad(self.var, self.accum, lr, l1, l2, grad)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.env_onecard @pytest.mark.env_onecard
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.parametrize("data_type", [np.float32, np.float16]) @pytest.mark.parametrize("data_type", [np.float32, np.float16])
@ -86,7 +86,7 @@ class AdgradNetVmap(nn.Cell):
return self.vmap_adagrad(self.var, self.accum, lr, l1, l2, grad) return self.vmap_adagrad(self.var, self.accum, lr, l1, l2, grad)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.env_onecard @pytest.mark.env_onecard
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
def test_apply_proximal_adagrad_op_vmap(): def test_apply_proximal_adagrad_op_vmap():
@ -139,7 +139,7 @@ class AdgradNetVmap2(nn.Cell):
return self.vmap_adagrad(self.var, self.accum, lr, l1, l2, grad) return self.vmap_adagrad(self.var, self.accum, lr, l1, l2, grad)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.env_onecard @pytest.mark.env_onecard
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
def test_apply_proximal_adagrad_op_vmap2(): def test_apply_proximal_adagrad_op_vmap2():
View File
@ -41,7 +41,7 @@ class NetApproximateEqual(nn.Cell):
return output return output
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_approximate_equal_fp16(): def test_approximate_equal_fp16():
@ -59,7 +59,7 @@ def test_approximate_equal_fp16():
assert (output.asnumpy() == expect).all() assert (output.asnumpy() == expect).all()
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_approximate_equal_fp32(): def test_approximate_equal_fp32():
View File
@ -84,7 +84,7 @@ def test_argmax_2d():
assert (output.asnumpy() == expect).all() assert (output.asnumpy() == expect).all()
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_argmax_high_dims(): def test_argmax_high_dims():
@ -109,7 +109,7 @@ def adaptive_argmax_functional(nptype):
np.testing.assert_array_almost_equal(output.asnumpy(), expected) np.testing.assert_array_almost_equal(output.asnumpy(), expected)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_argmax_float32_functional(): def test_argmax_float32_functional():
View File
@ -34,7 +34,7 @@ class NetArgmin(nn.Cell):
return self.argmin(x) return self.argmin(x)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_argmin_1d(): def test_argmin_1d():
@ -54,7 +54,7 @@ def test_argmin_1d():
assert (output.asnumpy() == expect).all() assert (output.asnumpy() == expect).all()
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_argmin_2d(): def test_argmin_2d():
@ -82,7 +82,7 @@ def test_argmin_2d():
assert (output.asnumpy() == expect).all() assert (output.asnumpy() == expect).all()
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_argmin_high_dims(): def test_argmin_high_dims():
View File
@ -52,7 +52,7 @@ def test_asin_tensor_api(nptype):
np.testing.assert_array_almost_equal(output.asnumpy(), expected) np.testing.assert_array_almost_equal(output.asnumpy(), expected)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_asin_float32_tensor_api(): def test_asin_float32_tensor_api():
View File
@ -44,7 +44,7 @@ class NetAsinhGradGrad(nn.Cell):
return backward_net(y, grad, dout) return backward_net(y, grad, dout)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def asinh_grad_grad_base(dtype, loss): def asinh_grad_grad_base(dtype, loss):
View File
@ -32,7 +32,7 @@ def test_asinhgrad_fp32():
output_np = dout_np / np.cosh(y_np) output_np = dout_np / np.cosh(y_np)
assert np.allclose(output_ms.asnumpy(), output_np, 1e-4, 1e-4) assert np.allclose(output_ms.asnumpy(), output_np, 1e-4, 1e-4)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_asinhgrad_fp16(): def test_asinhgrad_fp16():
View File
@ -53,7 +53,7 @@ def test_asinh_forward_tensor_api(nptype):
np.testing.assert_array_almost_equal(output.asnumpy(), expected) np.testing.assert_array_almost_equal(output.asnumpy(), expected)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_asinh_forward_float32_tensor_api(): def test_asinh_forward_float32_tensor_api():
View File
@ -31,7 +31,7 @@ class AssertTEST(nn.Cell):
return self.assert1(cond, x) return self.assert1(cond, x)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.env_onecard @pytest.mark.env_onecard
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
def test_assert_op(): def test_assert_op():
View File
@ -33,7 +33,7 @@ class AssignSub(nn.Cell):
return res return res
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_assign_sub(): def test_assign_sub():
@ -76,7 +76,7 @@ def test_assign_sub():
assert (output2.asnumpy() == expect2).all() assert (output2.asnumpy() == expect2).all()
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_assign_sub_float16(): def test_assign_sub_float16():
View File
@ -44,7 +44,7 @@ class NetAtanGradGrad(nn.Cell):
return backward_net(x, grad, dout) return backward_net(x, grad, dout)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def atan_grad_grad_base(dtype, loss): def atan_grad_grad_base(dtype, loss):
View File
@ -53,7 +53,7 @@ def test_atan_forward_tensor_api(nptype):
np.testing.assert_array_almost_equal(output.asnumpy(), expected) np.testing.assert_array_almost_equal(output.asnumpy(), expected)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_atan_forward_float32_tensor_api(): def test_atan_forward_float32_tensor_api():
View File
@ -26,7 +26,7 @@ def atanh(x):
return 0.5 * np.log((1. + x) / (1. - x)) return 0.5 * np.log((1. + x) / (1. - x))
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_atanh_fp64(): def test_atanh_fp64():
@ -43,7 +43,7 @@ def test_atanh_fp64():
assert np.allclose(output_ms.asnumpy(), expect) assert np.allclose(output_ms.asnumpy(), expect)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_atanh_fp32(): def test_atanh_fp32():
@ -60,7 +60,7 @@ def test_atanh_fp32():
assert np.allclose(output_ms.asnumpy(), expect) assert np.allclose(output_ms.asnumpy(), expect)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_atanh_fp16(): def test_atanh_fp16():
@ -77,7 +77,7 @@ def test_atanh_fp16():
assert np.allclose(output_ms.asnumpy(), expect, 1e-3, 1e-3) assert np.allclose(output_ms.asnumpy(), expect, 1e-3, 1e-3)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_atanh_complex64(): def test_atanh_complex64():
@ -94,7 +94,7 @@ def test_atanh_complex64():
assert np.allclose(output_ms.asnumpy(), expect) assert np.allclose(output_ms.asnumpy(), expect)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_atanh_complex128(): def test_atanh_complex128():
@ -123,7 +123,7 @@ def test_atanh_forward_tensor_api(nptype):
np.testing.assert_array_almost_equal(output.asnumpy(), expected) np.testing.assert_array_almost_equal(output.asnumpy(), expected)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_atanh_forward_float32_tensor_api(): def test_atanh_forward_float32_tensor_api():
View File
@ -51,7 +51,7 @@ class AvgPoolGrad(nn.Cell):
return self.grad(self.forward)(x, sens) return self.grad(self.forward)(x, sens)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_avgpool2d_valid(): def test_avgpool2d_valid():
@ -85,7 +85,7 @@ def test_avgpool2d_valid():
assert np.allclose(actual_grad[0].asnumpy(), expect_grad) assert np.allclose(actual_grad[0].asnumpy(), expect_grad)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_avgpool2d_same(): def test_avgpool2d_same():
@ -119,7 +119,7 @@ def test_avgpool2d_same():
assert np.allclose(actual_grad[0].asnumpy(), expect_grad) assert np.allclose(actual_grad[0].asnumpy(), expect_grad)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_avgpool3d_1(): def test_avgpool3d_1():
@ -164,7 +164,7 @@ def test_avgpool3d_1():
assert np.allclose(actual_grad[0].asnumpy(), expect_grad) assert np.allclose(actual_grad[0].asnumpy(), expect_grad)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_avgpool3d_2(): def test_avgpool3d_2():
@ -209,7 +209,7 @@ def test_avgpool3d_2():
assert np.allclose(actual_grad[0].asnumpy(), expect_grad) assert np.allclose(actual_grad[0].asnumpy(), expect_grad)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_avgpool3d_3(): def test_avgpool3d_3():
@ -251,7 +251,7 @@ def test_avgpool3d_3():
assert np.allclose(actual_grad[0].asnumpy(), expect_grad) assert np.allclose(actual_grad[0].asnumpy(), expect_grad)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_avgpool3d_4(): def test_avgpool3d_4():
@ -308,7 +308,7 @@ def test_avgpool3d_4():
assert np.allclose(actual_grad[0].asnumpy(), expect_grad) assert np.allclose(actual_grad[0].asnumpy(), expect_grad)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_avgpool_vmap(): def test_avgpool_vmap():
@ -325,7 +325,7 @@ def test_avgpool_vmap():
assert out.shape == (6, 3, 1, 1, 3, 3) assert out.shape == (6, 3, 1, 1, 3, 3)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_avgpool_grad_vmap(): def test_avgpool_grad_vmap():
@ -358,7 +358,7 @@ class DynamicShapeAvgPool3DGrad(nn.Cell):
return self.net(x_shape, sens) return self.net(x_shape, sens)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_avgpool3d_grad_dynamic_shape(): def test_avgpool3d_grad_dynamic_shape():
View File
@ -75,7 +75,7 @@ def bartlett_window_pynative(periodic, dtype, loss):
assert np.allclose(bartlett_window_output.asnumpy(), bartlett_window_expect.numpy().astype(nptype), loss, loss) assert np.allclose(bartlett_window_output.asnumpy(), bartlett_window_expect.numpy().astype(nptype), loss, loss)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bartlett_window_graph_int32_true_float32(): def test_bartlett_window_graph_int32_true_float32():
@ -87,7 +87,7 @@ def test_bartlett_window_graph_int32_true_float32():
bartlett_window(periodic=True, dtype="float32", loss=1.0e-4) bartlett_window(periodic=True, dtype="float32", loss=1.0e-4)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bartlett_window_pynative_int64_false_float64(): def test_bartlett_window_pynative_int64_false_float64():
View File
@ -253,7 +253,7 @@ def test_bmm_forward_tensor_api(nptype):
np.testing.assert_array_almost_equal(output.asnumpy(), expected) np.testing.assert_array_almost_equal(output.asnumpy(), expected)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bmm_forward_float32_tensor_api(): def test_bmm_forward_float32_tensor_api():
@ -281,7 +281,7 @@ def test_bmm_forward_functional_api(nptype):
np.testing.assert_array_almost_equal(output.asnumpy(), expected) np.testing.assert_array_almost_equal(output.asnumpy(), expected)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bmm_forward_float32_functional_api(): def test_bmm_forward_float32_functional_api():
View File
@ -92,7 +92,7 @@ def get_nc_inputs(np_type):
return x, scale, dy, dout_dx, dout_dscale, dout_dbias return x, scale, dy, dout_dx, dout_dscale, dout_dbias
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_batchnorm_grad_grad_training_nhwc(): def test_batchnorm_grad_grad_training_nhwc():
@ -121,7 +121,7 @@ def test_batchnorm_grad_grad_training_nhwc():
assert np.allclose(dscale.asnumpy(), expect_dscale) assert np.allclose(dscale.asnumpy(), expect_dscale)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_batchnorm_grad_grad_training_nchw(): def test_batchnorm_grad_grad_training_nchw():
@ -150,7 +150,7 @@ def test_batchnorm_grad_grad_training_nchw():
assert np.allclose(dscale.asnumpy(), expect_dscale) assert np.allclose(dscale.asnumpy(), expect_dscale)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_batchnorm_grad_grad_inference_nhwc(): def test_batchnorm_grad_grad_inference_nhwc():
@ -179,7 +179,7 @@ def test_batchnorm_grad_grad_inference_nhwc():
assert np.allclose(dscale.asnumpy(), expect_dscale) assert np.allclose(dscale.asnumpy(), expect_dscale)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_batchnorm_grad_grad_inference_nchw(): def test_batchnorm_grad_grad_inference_nchw():
@ -208,7 +208,7 @@ def test_batchnorm_grad_grad_inference_nchw():
assert np.allclose(dscale.asnumpy(), expect_dscale) assert np.allclose(dscale.asnumpy(), expect_dscale)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_batchnorm_grad_grad_training_nc(): def test_batchnorm_grad_grad_training_nc():
@ -233,7 +233,7 @@ def test_batchnorm_grad_grad_training_nc():
assert np.allclose(dscale.asnumpy(), expect_dscale) assert np.allclose(dscale.asnumpy(), expect_dscale)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_batchnorm_grad_grad_inference_nc(): def test_batchnorm_grad_grad_inference_nc():
@ -258,7 +258,7 @@ def test_batchnorm_grad_grad_inference_nc():
assert np.allclose(dscale.asnumpy(), expect_dscale) assert np.allclose(dscale.asnumpy(), expect_dscale)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_batchnorm_grad_grad_training_nchw_2(): def test_batchnorm_grad_grad_training_nchw_2():
@ -311,7 +311,7 @@ def test_batchnorm_grad_grad_training_nchw_2():
assert np.allclose(dscale.asnumpy(), expect_dscale) assert np.allclose(dscale.asnumpy(), expect_dscale)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_batchnorm_grad_grad_training_nhwc_2(): def test_batchnorm_grad_grad_training_nhwc_2():
@ -356,7 +356,7 @@ def test_batchnorm_grad_grad_training_nhwc_2():
assert np.allclose(dscale.asnumpy(), expect_dscale) assert np.allclose(dscale.asnumpy(), expect_dscale)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_batchnorm_grad_grad_training_nchw_dynamic_shape(): def test_batchnorm_grad_grad_training_nchw_dynamic_shape():
@ -415,7 +415,7 @@ def test_batchnorm_grad_grad_training_nchw_dynamic_shape():
assert np.allclose(dscale.asnumpy(), expect_dscale) assert np.allclose(dscale.asnumpy(), expect_dscale)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_batchnorm_grad_grad_training_nhwc_dynamic_shape(): def test_batchnorm_grad_grad_training_nhwc_dynamic_shape():
@ -466,7 +466,7 @@ def test_batchnorm_grad_grad_training_nhwc_dynamic_shape():
assert np.allclose(dscale.asnumpy(), expect_dscale) assert np.allclose(dscale.asnumpy(), expect_dscale)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_batchnorm_grad_grad_training_nchw_vmap(): def test_batchnorm_grad_grad_training_nchw_vmap():
@ -505,7 +505,7 @@ def test_batchnorm_grad_grad_training_nchw_vmap():
assert np.allclose(dscale.asnumpy(), expect_dscale) assert np.allclose(dscale.asnumpy(), expect_dscale)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_batchnorm_grad_grad_inference_nhwc_vmap(): def test_batchnorm_grad_grad_inference_nhwc_vmap():
View File
@ -358,7 +358,7 @@ def test_batch_norm_forward_functional(nptype):
np.testing.assert_array_almost_equal(output.asnumpy(), expected) np.testing.assert_array_almost_equal(output.asnumpy(), expected)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_batch_norm_forward_float32_functional(): def test_batch_norm_forward_float32_functional():
View File
@ -71,13 +71,13 @@ def BatchToSpace_pynative(nptype, block_size=2, input_shape=(4, 1, 2, 2)):
assert (output.asnumpy() == expect).all() assert (output.asnumpy() == expect).all()
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_batchtospace_graph_float32(): def test_batchtospace_graph_float32():
BatchToSpace(np.float32) BatchToSpace(np.float32)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_batchtospace_graph_float16(): def test_batchtospace_graph_float16():
View File
@ -86,7 +86,7 @@ def test_reduction_mean_testcases():
assert math.isclose(output.asnumpy().tolist(), expected, abs_tol=0.00001) assert math.isclose(output.asnumpy().tolist(), expected, abs_tol=0.00001)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_reduction_sum_testcases(): def test_reduction_sum_testcases():
View File
@ -31,7 +31,7 @@ class Net(nn.Cell):
return self.bernoulli(x, p) return self.bernoulli(x, p)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bernoulli(): def test_bernoulli():
@ -65,7 +65,7 @@ class BernoulliDynamic(nn.Cell):
return self.bernoulli(x, p) return self.bernoulli(x, p)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bernoulli_dynamic(): def test_bernoulli_dynamic():
View File
@ -266,7 +266,7 @@ class BesselY1GradNet(Cell):
return gout return gout
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bessel_y0_fp32(): def test_bessel_y0_fp32():
@ -288,7 +288,7 @@ def test_bessel_y0_fp32():
assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output) assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bessel_y0_fp16(): def test_bessel_y0_fp16():
@ -310,7 +310,7 @@ def test_bessel_y0_fp16():
assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output) assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bessel_y1_fp32(): def test_bessel_y1_fp32():
@ -332,7 +332,7 @@ def test_bessel_y1_fp32():
assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output) assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bessel_y1_fp16(): def test_bessel_y1_fp16():
@ -354,7 +354,7 @@ def test_bessel_y1_fp16():
assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output) assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bessel_j0_fp32(): def test_bessel_j0_fp32():
@ -376,7 +376,7 @@ def test_bessel_j0_fp32():
assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output) assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bessel_j0_fp16(): def test_bessel_j0_fp16():
@ -398,7 +398,7 @@ def test_bessel_j0_fp16():
assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output) assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bessel_j1_fp32(): def test_bessel_j1_fp32():
@ -420,7 +420,7 @@ def test_bessel_j1_fp32():
assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output) assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bessel_j1_fp16(): def test_bessel_j1_fp16():
@ -442,7 +442,7 @@ def test_bessel_j1_fp16():
assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output) assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bessel_k0_fp32(): def test_bessel_k0_fp32():
@ -463,7 +463,7 @@ def test_bessel_k0_fp32():
assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output) assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bessel_k0_fp16(): def test_bessel_k0_fp16():
@ -484,7 +484,7 @@ def test_bessel_k0_fp16():
assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output) assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bessel_k0e_fp32(): def test_bessel_k0e_fp32():
@ -505,7 +505,7 @@ def test_bessel_k0e_fp32():
assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output) assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bessel_k0e_fp16(): def test_bessel_k0e_fp16():
@ -526,7 +526,7 @@ def test_bessel_k0e_fp16():
assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output) assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bessel_k1_fp32(): def test_bessel_k1_fp32():
@ -547,7 +547,7 @@ def test_bessel_k1_fp32():
assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output) assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bessel_k1_fp16(): def test_bessel_k1_fp16():
@ -568,7 +568,7 @@ def test_bessel_k1_fp16():
assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output) assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bessel_k1e_fp32(): def test_bessel_k1e_fp32():
@ -589,7 +589,7 @@ def test_bessel_k1e_fp32():
assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output) assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bessel_k1e_fp16(): def test_bessel_k1e_fp16():
@ -610,7 +610,7 @@ def test_bessel_k1e_fp16():
assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output) assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bessel_i0_fp32(): def test_bessel_i0_fp32():
@ -632,7 +632,7 @@ def test_bessel_i0_fp32():
assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output) assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bessel_i0_fp16(): def test_bessel_i0_fp16():
@ -654,7 +654,7 @@ def test_bessel_i0_fp16():
assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output) assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bessel_i0e_fp32(): def test_bessel_i0e_fp32():
@ -676,7 +676,7 @@ def test_bessel_i0e_fp32():
assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output) assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bessel_i0e_fp16(): def test_bessel_i0e_fp16():
@ -698,7 +698,7 @@ def test_bessel_i0e_fp16():
assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output) assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bessel_i1_fp32(): def test_bessel_i1_fp32():
@ -720,7 +720,7 @@ def test_bessel_i1_fp32():
assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output) assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bessel_i1_fp16(): def test_bessel_i1_fp16():
@ -742,7 +742,7 @@ def test_bessel_i1_fp16():
assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output) assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bessel_i1e_fp32(): def test_bessel_i1e_fp32():
@ -764,7 +764,7 @@ def test_bessel_i1e_fp32():
assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output) assert np.allclose(output_grad_ms[0].asnumpy(), expect_grad_output)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bessel_i1e_fp16(): def test_bessel_i1e_fp16():

View File

@ -33,7 +33,7 @@ def test_bias_add_forward_functional(nptype):
np.testing.assert_array_almost_equal(output.asnumpy(), expected) np.testing.assert_array_almost_equal(output.asnumpy(), expected)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bias_add_forward_float32_functional(): def test_bias_add_forward_float32_functional():

View File

@ -33,7 +33,7 @@ class Net(nn.Cell):
return self.bidense(x1, x2) return self.bidense(x1, x2)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_net(): def test_net():
@ -50,7 +50,7 @@ def test_net():
assert output.shape == (128, 40) assert output.shape == (128, 40)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_net_nd(): def test_net_nd():
@ -67,7 +67,7 @@ def test_net_nd():
assert output.shape == (128, 4, 40) assert output.shape == (128, 4, 40)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_net_1d(): def test_net_1d():

View File

@ -109,7 +109,7 @@ def test_binary_cross_entropy_forward_functional(nptype):
np.testing.assert_array_almost_equal(output.asnumpy(), expected) np.testing.assert_array_almost_equal(output.asnumpy(), expected)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_binary_cross_entropy_forward_float32_functional(): def test_binary_cross_entropy_forward_float32_functional():

View File

@ -30,7 +30,7 @@ class NetBitwiseAnd(nn.Cell):
return self.bitwiseand(x1, x2) return self.bitwiseand(x1, x2)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bitwiseand_graph(): def test_bitwiseand_graph():
@ -50,7 +50,7 @@ def test_bitwiseand_graph():
assert np.all(abs(result_expect - result) < eps) assert np.all(abs(result_expect - result) < eps)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bitwiseand_graph1(): def test_bitwiseand_graph1():
@ -71,7 +71,7 @@ def test_bitwiseand_graph1():
assert np.all(abs(result_expect - result) < eps) assert np.all(abs(result_expect - result) < eps)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bitwiseand_graph2(): def test_bitwiseand_graph2():

View File

@ -30,7 +30,7 @@ class NetBitwiseOr(nn.Cell):
return self.bitwiseor(x1, x2) return self.bitwiseor(x1, x2)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bitwiseor_graph(): def test_bitwiseor_graph():
@ -51,7 +51,7 @@ def test_bitwiseor_graph():
assert np.all(abs(result_expect - result) < eps) assert np.all(abs(result_expect - result) < eps)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bitwiseor_graph2(): def test_bitwiseor_graph2():
@ -72,7 +72,7 @@ def test_bitwiseor_graph2():
assert np.all(abs(result_expect - result) < eps) assert np.all(abs(result_expect - result) < eps)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bitwiseor_graph3(): def test_bitwiseor_graph3():

View File

@ -30,7 +30,7 @@ class NetBitwiseXor(nn.Cell):
return self.bitwisexor(x1, x2) return self.bitwisexor(x1, x2)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bitwisexor_graph(): def test_bitwisexor_graph():
@ -51,7 +51,7 @@ def test_bitwisexor_graph():
assert np.all(abs(result_expect - result) < eps) assert np.all(abs(result_expect - result) < eps)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bitwisexor_graph1(): def test_bitwisexor_graph1():
@ -72,7 +72,7 @@ def test_bitwisexor_graph1():
assert np.all(abs(result_expect - result) < eps) assert np.all(abs(result_expect - result) < eps)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bitwisexor_graph2(): def test_bitwisexor_graph2():

View File

@ -76,7 +76,7 @@ def blackman_window_pynative(periodic, dtype, loss):
assert np.allclose(blackman_window_output.asnumpy(), blackman_window_expect.numpy().astype(nptype), loss, loss) assert np.allclose(blackman_window_output.asnumpy(), blackman_window_expect.numpy().astype(nptype), loss, loss)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_blackman_window_graph_int32_true_float32(): def test_blackman_window_graph_int32_true_float32():
@ -88,7 +88,7 @@ def test_blackman_window_graph_int32_true_float32():
blackman_window(periodic=True, dtype="float32", loss=1.0e-4) blackman_window(periodic=True, dtype="float32", loss=1.0e-4)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_blackman_window_pynative_int64_false_float64(): def test_blackman_window_pynative_int64_false_float64():

View File

@ -297,7 +297,7 @@ def test_broadcast_diff_dims():
assert np.allclose(output_ms.asnumpy(), output_np) assert np.allclose(output_ms.asnumpy(), output_np)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_broadcast_diff_dims_float64(): def test_broadcast_diff_dims_float64():

View File

@ -125,7 +125,7 @@ def test_broadcast_dyn_init():
assert np.allclose(output.asnumpy(), expect) assert np.allclose(output.asnumpy(), expect)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_broadcast_dyn_invalid_init(): def test_broadcast_dyn_invalid_init():

View File

@ -32,7 +32,7 @@ class BucketizeNet(Cell):
return self.bucketize(x) return self.bucketize(x)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bucketize_4x5_float64(): def test_bucketize_4x5_float64():
@ -55,7 +55,7 @@ def test_bucketize_4x5_float64():
assert np.allclose(output_ms.asnumpy(), expect_output) assert np.allclose(output_ms.asnumpy(), expect_output)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_bucketize_4x5x6_int32(): def test_bucketize_4x5x6_int32():

View File

@ -36,7 +36,7 @@ class NetCeil(nn.Cell):
return self.ceil(x) return self.ceil(x)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_ceil_fp32(): def test_ceil_fp32():
@ -52,7 +52,7 @@ def test_ceil_fp32():
assert np.allclose(output.asnumpy(), expect) assert np.allclose(output.asnumpy(), expect)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_ceil_fp16(): def test_ceil_fp16():
@ -68,7 +68,7 @@ def test_ceil_fp16():
assert np.allclose(output.asnumpy(), expect) assert np.allclose(output.asnumpy(), expect)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_tensor_ceil(): def test_tensor_ceil():
@ -85,7 +85,7 @@ def test_tensor_ceil():
assert np.allclose(output.asnumpy(), expect) assert np.allclose(output.asnumpy(), expect)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_func_ceil(): def test_func_ceil():
@ -103,7 +103,7 @@ def test_func_ceil():
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_vmap(): def test_vmap():
@ -129,7 +129,7 @@ def test_vmap():
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_vmap2(): def test_vmap2():

View File

@ -33,7 +33,7 @@ class CeluTEST(nn.Cell):
return self.celu(x) return self.celu(x)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.env_onecard @pytest.mark.env_onecard
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.parametrize("data_type", [np.float32, np.float16]) @pytest.mark.parametrize("data_type", [np.float32, np.float16])
@ -57,7 +57,7 @@ def test_celu_op(data_type):
np.testing.assert_allclose(output.asnumpy(), expect, rtol=error) np.testing.assert_allclose(output.asnumpy(), expect, rtol=error)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.env_onecard @pytest.mark.env_onecard
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.parametrize("data_type", [np.float32, np.float16]) @pytest.mark.parametrize("data_type", [np.float32, np.float16])
@ -80,7 +80,7 @@ def test_celu_func(data_type):
np.testing.assert_allclose(output.asnumpy(), expect, rtol=error) np.testing.assert_allclose(output.asnumpy(), expect, rtol=error)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.env_onecard @pytest.mark.env_onecard
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
def test_celu_vmap(): def test_celu_vmap():

View File

@ -35,7 +35,7 @@ class Col2ImTest(nn.Cell):
return self.c2i(x, output_size) return self.c2i(x, output_size)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
@pytest.mark.parametrize("mode, input_type", @pytest.mark.parametrize("mode, input_type",

View File

@ -19,7 +19,7 @@ from mindspore.ops.operations import math_ops as P
import mindspore.common.dtype as ms import mindspore.common.dtype as ms
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_complex_abs_complex64_3x3(): def test_complex_abs_complex64_3x3():

View File

@ -47,7 +47,7 @@ def complex_compare(complex1, complex2):
return np.allclose(real1, real2, rtol=5e-03, atol=5e-03) and np.allclose(imag1, imag2, rtol=5e-03, atol=5e-03) return np.allclose(real1, real2, rtol=5e-03, atol=5e-03) and np.allclose(imag1, imag2, rtol=5e-03, atol=5e-03)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_complex_elemwise(): def test_complex_elemwise():
@ -89,7 +89,7 @@ def test_complex_elemwise():
assert complex_compare(res_ms, res_to) assert complex_compare(res_ms, res_to)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_complex_broadcast(): def test_complex_broadcast():

View File

@ -44,7 +44,7 @@ class NetConv3d(nn.Cell):
return self.conv(x, w) return self.conv(x, w)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_conv3d_dshape_1(): def test_conv3d_dshape_1():
@ -65,7 +65,7 @@ def test_conv3d_dshape_1():
assert output.asnumpy().shape == expect_shape assert output.asnumpy().shape == expect_shape
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_conv3d_dshape_2(): def test_conv3d_dshape_2():
@ -86,7 +86,7 @@ def test_conv3d_dshape_2():
assert output.asnumpy().shape == expect_shape assert output.asnumpy().shape == expect_shape
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_conv3d(): def test_conv3d():
@ -153,7 +153,7 @@ class MSGradNet(nn.Cell):
return output return output
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_conv3d_grad(): def test_conv3d_grad():

View File

@ -42,7 +42,7 @@ class NetConv3dTranspose(nn.Cell):
return self.conv_trans(x, w) return self.conv_trans(x, w)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_conv3dtranspose_dshape_1(): def test_conv3dtranspose_dshape_1():
@ -63,7 +63,7 @@ def test_conv3dtranspose_dshape_1():
assert output.asnumpy().shape == expect_shape assert output.asnumpy().shape == expect_shape
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_conv3dtranspose_dshape_2(): def test_conv3dtranspose_dshape_2():

View File

@ -29,7 +29,7 @@ class NetCosh(nn.Cell):
return ops.cosh(x) return ops.cosh(x)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
@pytest.mark.parametrize('dtype, tol', [(np.float16, 1.e-3), (np.float32, 1.e-5), (np.float64, 1.e-8), @pytest.mark.parametrize('dtype, tol', [(np.float16, 1.e-3), (np.float32, 1.e-5), (np.float64, 1.e-8),
@ -49,7 +49,7 @@ def test_cosh_graph(dtype, tol):
assert np.allclose(output.asnumpy(), expect, atol=tol, rtol=tol) assert np.allclose(output.asnumpy(), expect, atol=tol, rtol=tol)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
@pytest.mark.parametrize('dtype, tol', [(np.float16, 1.e-3), (np.float32, 1.e-5), (np.float64, 1.e-8), @pytest.mark.parametrize('dtype, tol', [(np.float16, 1.e-3), (np.float32, 1.e-5), (np.float64, 1.e-8),
@ -68,7 +68,7 @@ def test_cosh_py(dtype, tol):
assert np.allclose(output.asnumpy(), expect, atol=tol, rtol=tol) assert np.allclose(output.asnumpy(), expect, atol=tol, rtol=tol)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_cosh_dynamic_shape(): def test_cosh_dynamic_shape():
@ -92,7 +92,7 @@ def test_cosh_dynamic_shape():
assert np.allclose(output.asnumpy(), expect, atol=tol, rtol=tol) assert np.allclose(output.asnumpy(), expect, atol=tol, rtol=tol)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
@pytest.mark.parametrize('dtype, tol', [(np.float16, 1.e-3), (np.float32, 1.e-5), (np.float64, 1.e-8)]) @pytest.mark.parametrize('dtype, tol', [(np.float16, 1.e-3), (np.float32, 1.e-5), (np.float64, 1.e-8)])

View File

@ -28,7 +28,7 @@ class NetCropAndResizeGradBoxes(nn.Cell):
return self.op(grads, images, boxes, box_index) return self.op(grads, images, boxes, box_index)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
@pytest.mark.parametrize("image_type", [np.uint8, np.uint16, np.int8, np.int16, np.int32, np.int64, np.float16, @pytest.mark.parametrize("image_type", [np.uint8, np.uint16, np.int8, np.int16, np.int32, np.int64, np.float16,
@ -74,7 +74,7 @@ def test_crop_and_resize_grad_boxes_float32(image_type):
assert np.all(abs(diff) < error) assert np.all(abs(diff) < error)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
@pytest.mark.parametrize("image_type", [np.uint8, np.uint16, np.int8, np.int16, np.int32, np.int64, np.float16, @pytest.mark.parametrize("image_type", [np.uint8, np.uint16, np.int8, np.int16, np.int32, np.int64, np.float16,

View File

@ -29,7 +29,7 @@ class NetCropAndResizeGradImage(nn.Cell):
return self.op(grads, boxes, box_index, image_size) return self.op(grads, boxes, box_index, image_size)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
@pytest.mark.parametrize("input_type", [np.float32, np.float64]) @pytest.mark.parametrize("input_type", [np.float32, np.float64])
@ -68,7 +68,7 @@ def test_crop_and_resize_grad_image_bilinear(input_type, output_type):
assert np.all(abs(diff) < error) assert np.all(abs(diff) < error)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
@pytest.mark.parametrize("input_type", [np.float32, np.float64]) @pytest.mark.parametrize("input_type", [np.float32, np.float64])

View File

@ -118,7 +118,7 @@ def compare_to_numpy(method, input_matrix, target, input_lengths, target_lengths
assert np.allclose(loss.asnumpy(), expected) assert np.allclose(loss.asnumpy(), expected)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.env_onecard @pytest.mark.env_onecard
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.parametrize("batch", [1, 10]) @pytest.mark.parametrize("batch", [1, 10])
@ -242,7 +242,7 @@ def test_ctc_loss_v2_un_padded_grad():
np.allclose(grad.asnumpy(), expected_grad) np.allclose(grad.asnumpy(), expected_grad)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.env_onecard @pytest.mark.env_onecard
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.parametrize("batch", [1, 10]) @pytest.mark.parametrize("batch", [1, 10])

View File

@ -60,7 +60,7 @@ def cum_minmax_compare(op, x, expected, axis, data_type, is_vmap=False):
assert np.allclose(output[1].asnumpy(), expected[1]) assert np.allclose(output[1].asnumpy(), expected[1])
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.env_onecard @pytest.mark.env_onecard
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.parametrize("data_type", [np.uint8, np.int8, np.int32, np.float16]) @pytest.mark.parametrize("data_type", [np.uint8, np.int8, np.int32, np.float16])
@ -86,7 +86,7 @@ def test_cummin_multi_dims(data_type):
cum_minmax_compare(op, x, cummin_output, axis, data_type) cum_minmax_compare(op, x, cummin_output, axis, data_type)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.env_onecard @pytest.mark.env_onecard
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.parametrize("data_type", [np.uint8, np.uint32, np.int8, np.int32, np.int64, np.float16, np.float32]) @pytest.mark.parametrize("data_type", [np.uint8, np.uint32, np.int8, np.int32, np.int64, np.float16, np.float32])
@ -114,7 +114,7 @@ def test_cummax_multi_dims(data_type):
cum_minmax_compare(op, x, cummax_output, axis, data_type) cum_minmax_compare(op, x, cummax_output, axis, data_type)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.env_onecard @pytest.mark.env_onecard
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.parametrize("data_type", [np.float16, np.float32]) @pytest.mark.parametrize("data_type", [np.float16, np.float32])
@ -145,7 +145,7 @@ class VmapNet(nn.Cell):
return self.ops(x) return self.ops(x)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_cummin_vmap_net(): def test_cummin_vmap_net():
@ -170,7 +170,7 @@ def test_cummin_vmap_net():
cum_minmax_compare(op, x, cummin_output, axis, np.float32, is_vmap=True) cum_minmax_compare(op, x, cummin_output, axis, np.float32, is_vmap=True)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_cummax_vmap_net(): def test_cummax_vmap_net():

View File

@ -172,7 +172,7 @@ class Net(nn.Cell):
return self.op(x, 0) return self.op(x, 0)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_cumprod_dshape(): def test_cumprod_dshape():

View File

@ -31,7 +31,7 @@ class DataFormatVecPermuteNet(nn.Cell):
return self.op(x) return self.op(x)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_data_format_vec_permute_1d_input_int32(): def test_data_format_vec_permute_1d_input_int32():
@ -50,7 +50,7 @@ def test_data_format_vec_permute_1d_input_int32():
assert (z_ms.asnumpy() == expect).all() assert (z_ms.asnumpy() == expect).all()
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_data_format_vec_permute_2d_input_int64(): def test_data_format_vec_permute_2d_input_int64():

View File

@ -30,7 +30,7 @@ def np_all_close_with_loss(out, expect):
return np.allclose(out, expect, 0.0005, 0.0005, equal_nan=True) return np.allclose(out, expect, 0.0005, 0.0005, equal_nan=True)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
@pytest.mark.parametrize("data_type", [np.int32, np.int64]) @pytest.mark.parametrize("data_type", [np.int32, np.int64])
@ -54,7 +54,7 @@ def test_data_formata_dim_map_gpu(data_type):
assert np.allclose(output_3_gpu.asnumpy(), output_3_expect_gpu) assert np.allclose(output_3_gpu.asnumpy(), output_3_expect_gpu)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
@pytest.mark.parametrize("data_type", [np.int32, np.int64]) @pytest.mark.parametrize("data_type", [np.int32, np.int64])

View File

@ -23,7 +23,7 @@ from mindspore import Tensor
context.set_context(device_target="GPU") context.set_context(device_target="GPU")
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
def test_deformable_conv2d(): def test_deformable_conv2d():

View File

@ -34,7 +34,7 @@ class NetDeformableOffsetsGrad(nn.Cell):
return self.grad_op(grad, input_x, offsets) return self.grad_op(grad, input_x, offsets)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
@pytest.mark.parametrize('dtype', [np.float16, np.float32]) @pytest.mark.parametrize('dtype', [np.float16, np.float32])
@ -68,7 +68,7 @@ def test_deformable_offsets_grad_nchw(dtype):
assert np.allclose(output[1].asnumpy(), expect_grad_offset, rtol) assert np.allclose(output[1].asnumpy(), expect_grad_offset, rtol)
@pytest.mark.level0 @pytest.mark.level1
@pytest.mark.platform_x86_gpu_training @pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard @pytest.mark.env_onecard
@pytest.mark.parametrize('dtype', [np.float16, np.float32]) @pytest.mark.parametrize('dtype', [np.float16, np.float32])

View File

@ -1,44 +0,0 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.common import dtype as mstype
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
class Net(nn.Cell):
    def __init__(self, fill_mode=0):
        super(Net, self).__init__()
        self.det_triangle = P.DetTriangle(fill_mode=fill_mode)

    def construct(self, x):
        return self.det_triangle(x)
@pytest.mark.level2
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_net_1D():
    fill_mode = 0
    input_x = np.array([[1, 0, 0], [2, 3, 0], [4, 5, 6]]).astype(np.float32)
    net = Net(fill_mode=fill_mode)
    tx = Tensor(input_x, mstype.float32)
    output = net(tx)
    assert output == 18

Some files were not shown because too many files have changed in this diff