move stable testcases from level0 to level1 to speed up the gate test

yanghaoran 2023-09-19 19:43:34 +08:00
parent b36a978d32
commit 7f1339455e
553 changed files with 1227 additions and 1283 deletions
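The change is mechanical throughout the diff below: each affected test keeps its platform and environment markers, and only its CI priority marker moves from `level0` to `level1`, so a marker-filtered gate run selects fewer cases. As a rough sketch of what that looks like in practice (the test name, test body, directory layout, and the exact `-m` expressions used by the gate are assumptions for illustration, not taken from this commit):

```python
import pytest

# After this commit a demoted case carries level1 instead of level0;
# the platform/env markers stay exactly as they were.
@pytest.mark.level1                      # was @pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_example_stable_case():          # hypothetical test, for illustration only
    assert 1 + 1 == 2

# A gate job that selects only level0 cases would now skip this test,
# while a scheduled/full job could still run it, e.g. (assumed invocations):
#   pytest -m level0 tests/st    # fast per-PR gate
#   pytest -m level1 tests/st    # slower scheduled run
```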

View File

@ -1652,7 +1652,7 @@ def test_auto_mixed_precision_controlflow_auto(pynative_save_graphs):
# op_cast should be located in order_list after abstract_specialize.
# Besides Ascend, it can work on CPU.
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -1866,7 +1866,7 @@ def test_print_assign_print():
np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_print_in_constant_returned_func():
@ -1893,7 +1893,7 @@ def test_print_in_constant_returned_func():
check_output(cap.output, patterns)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
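The `level*`, `platform_*`, and `env_*` decorators that recur in every hunk of this commit are plain pytest markers; nothing in the test bodies changes. In a typical setup such markers are registered so strict marker checking accepts them. Below is a minimal sketch of that registration, assuming a `conftest.py` at the test root; the marker descriptions are illustrative and not taken from this repository.

```python
# conftest.py -- assumed sketch of how the CI markers could be registered
# so that `pytest --strict-markers` accepts the decorators used above.

def pytest_configure(config):
    config.addinivalue_line(
        "markers", "level0: highest-priority cases run in the per-PR gate")
    config.addinivalue_line(
        "markers", "level1: stable cases moved out of the gate to scheduled runs")
    config.addinivalue_line(
        "markers", "platform_x86_gpu_training: requires an x86 GPU training environment")
    config.addinivalue_line(
        "markers", "platform_arm_ascend_training: requires an ARM Ascend training environment")
    config.addinivalue_line(
        "markers", "platform_x86_ascend_training: requires an x86 Ascend training environment")
    config.addinivalue_line(
        "markers", "env_onecard: runs on a single device")
```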

View File

@ -167,7 +167,7 @@ class BackwardNet(Cell):
return grads
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -217,7 +217,7 @@ def test_load_convert_tensormove_2():
assert forward_res == 3
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -248,7 +248,7 @@ def test_load_eliminate():
assert out == 5
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -278,7 +278,7 @@ def test_parameter_tuple_assign():
assert out[1] == 0
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@ -314,7 +314,7 @@ def test_parameter_tuple_assign_addn():
assert out == (3, 5, 9, 9)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@ -408,7 +408,7 @@ def test_parameter_tuple_assign_addn_inner_net_control_flow():
assert out == (9, 15)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training

View File

@ -108,7 +108,7 @@ class SideEffectControlFlowAssignDependWhileNet(Cell):
return grad_out
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_side_effect_control_flow_assign_depend_while_net():

View File

@ -123,7 +123,7 @@ class ControlGraphSupportNotEqual(Cell):
return out, out2, out3
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -149,7 +149,7 @@ def test_ctrl_if_while_graph_support_not_equal_true():
allclose_nparray(out3, out_me[2].asnumpy(), 0.0001, 0.0001)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -435,7 +435,7 @@ class SideEffectReturnParameterNet(Cell):
return grad_out
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -23,7 +23,7 @@ from mindspore.ops import functional as F
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_monad_vmap():

View File

@ -482,7 +482,7 @@ class ApplyProximalGradientDescentNet(nn.Cell):
return self.var
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -269,7 +269,7 @@ def run_lenet_ps_twice(file_name, cache_path, log_file_name_first, log_file_name
shutil.rmtree(cache_path, ignore_errors=True)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard
@ -309,7 +309,7 @@ def test_compile_cache_net_with_control_flow():
"control_net_second.txt")
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard

View File

@ -38,7 +38,7 @@ def run_same_network_twice_in_one_process(file_name, log_file_name):
os.remove(log_file_name)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_mutable_compile_repeat():

View File

@ -174,7 +174,7 @@ class GradCell(nn.Cell):
return self.grad_all(self.net)(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -200,7 +200,7 @@ def test_grad_parameter_input(mode):
# PyNative run error.
# Support context.PYNATIVE_MODE later.
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -227,7 +227,7 @@ def test_grad_parameter_as_input_and_fv(mode):
# PyNative run error.
# Support context.PYNATIVE_MODE later.
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -251,7 +251,7 @@ def test_grad_same_parameter_both_input_and_fv(mode):
assert np.array_equal(a[1].asnumpy(), b[1].asnumpy())
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -295,7 +295,7 @@ class GradCellWithParameterTuple(nn.Cell):
return self.grad(self.net, self.params)(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -322,7 +322,7 @@ def test_grad_parameter_as_input_and_fv2(mode):
assert np.array_equal(a[1][1].asnumpy(), b[1][1].asnumpy())
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cell_mixed_arguments():
@ -343,7 +343,7 @@ def test_cell_mixed_arguments():
assert net(1, 2, 3, d=Tensor([6])).asnumpy() == [12]
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cell_mixed_arguments_with_grad():
@ -373,7 +373,7 @@ def test_cell_mixed_arguments_with_grad():
assert grad_net(Tensor([1]), Tensor([2]), Tensor([3]), d=Tensor([4]), e=Tensor([5])).asnumpy() == [1]
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_grad_for_kwargs_with_scalar():

View File

@ -31,7 +31,7 @@ def test_level0_ascend_cases():
case_register.check_and_run("Ascend", 0)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_level0_gpu_cases():

View File

@ -149,7 +149,7 @@ def test_chinese_path_on_windows():
os.chdir(current_pwd)
add_and_remove_cv_file(dir_path + mindrecord_path)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_backslash_path_on_windows():

View File

@ -74,7 +74,7 @@ def create_dataset(data_path, batch_size=32, num_parallel_workers=1):
return mnist_ds
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.forked

View File

@ -65,7 +65,7 @@ def create_model():
return model_
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.forked

View File

@ -24,7 +24,7 @@ import numpy as np
import mindspore.dataset as ds
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize("num_epochs", (-1, 10))

View File

@ -70,7 +70,7 @@ def run_async_dump(test_name):
del os.environ['MINDSPORE_DUMP_CONFIG']
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -228,7 +228,7 @@ class OperateSymbolNet(Cell):
return x
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -169,7 +169,7 @@ def test_cpu_e2e_dump_with_hccl_set():
del os.environ['RANK_ID']
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@security_off_wrap
@ -295,7 +295,7 @@ def run_e2e_dump_execution_graph():
del os.environ['MINDSPORE_DUMP_CONFIG']
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -387,7 +387,7 @@ def run_not_overflow_dump():
assert not os.path.exists(exe_graph_path)
del os.environ['MINDSPORE_DUMP_CONFIG']
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -402,7 +402,7 @@ def test_ascend_overflow_dump():
run_overflow_dump()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -455,7 +455,7 @@ def run_saved_data_dump_test(scenario, saved_data):
del os.environ['MINDSPORE_DUMP_CONFIG']
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@security_off_wrap
@ -469,7 +469,7 @@ def test_gpu_e2e_statistic_dump():
run_saved_data_dump_test('test_gpu_e2e_dump', 'statistic')
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@security_off_wrap
@ -536,7 +536,7 @@ def test_stat_dump_nulls():
assert output['Avg Value'] == 'null'
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -551,7 +551,7 @@ def test_ascend_statistic_dump():
run_saved_data_dump_test('test_async_dump', 'statistic')
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -630,7 +630,7 @@ class ConstantNet(nn.Cell):
return self.relu(construct_tensor(ops.shape(x_)))
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -719,7 +719,7 @@ def test_constant_ascend_e2e_dump():
run_constant_e2e_dump()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -740,7 +740,7 @@ def test_save_cce_graph():
del os.environ["MS_COMPILER_OP_LEVEL"]
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -80,7 +80,7 @@ def run_trans_flag(test_name):
del os.environ['MINDSPORE_DUMP_CONFIG']
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -110,7 +110,7 @@ def test_ascend_e2e_trans_false():
run_trans_flag("test_e2e_dump_trans_false")
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -279,7 +279,7 @@ def run_e2e_dump_execution_graph():
del os.environ['MINDSPORE_DUMP_CONFIG']
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -139,7 +139,7 @@ def test_GPU_e2e_multi_root_graph_dump():
del os.environ['MS_DEV_FALLBACK_SUPPORT_LIST_DICT_INPLACE']
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -177,7 +177,7 @@ def test_Ascend_async_multi_root_graph_dump():
del os.environ['MS_DEV_FALLBACK_SUPPORT_LIST_DICT_INPLACE']
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -40,7 +40,7 @@ class ArgMinV2DynatimicShape(nn.Cell):
return y
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -213,7 +213,7 @@ def get_train_loss(numeric_columns, sparse_columns, data_list, mode):
return loss_list
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_train():

View File

@ -30,7 +30,7 @@ class Net(nn.Cell):
return self.expand_dims(tensor, -1)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -61,7 +61,7 @@ def test_sqeeze_net_ascend(data_type):
assert np.all(output.asnumpy() == expected)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize("data_type",
@ -91,7 +91,7 @@ def test_sqeeze_net_cpu(data_type):
assert np.all(output.asnumpy() == expected)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize("data_type",

View File

@ -31,7 +31,7 @@ class Net(nn.Cell):
return self.squeeze(tensor)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -61,7 +61,7 @@ def test_sqeeze_net_ascend(data_type):
assert np.all(output.asnumpy() == expected)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize("data_type",
@ -90,7 +90,7 @@ def test_sqeeze_net_cpu(data_type):
assert np.all(output.asnumpy() == expected)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize("data_type",

View File

@ -54,7 +54,7 @@ class UniqueWhile(nn.Cell):
return self.shape(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -73,7 +73,7 @@ def test_unique_if():
assert x_shape == 5
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -112,7 +112,7 @@ def test_unique_square():
assert (output.asnumpy() == expect).all()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -47,7 +47,7 @@ class StridedSliceDynamicRank(nn.Cell):
return self.stridedslice(x, begin, end, strides)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard

View File

@ -211,7 +211,7 @@ def test_train_graph_mode_gpu():
assert np.allclose(graph_loss, expect_loss, 5e-3, 5e-3)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_train_pynative_mode_gpu():

View File

@ -41,7 +41,7 @@ def lp_norm_np_bencmark(data_type):
return result
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -79,7 +79,7 @@ def case_dyn_shape():
check_result(output, expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@ -96,7 +96,7 @@ def test_dynamic_scatternd_dyn_input():
case_dyn_input()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -56,7 +56,7 @@ def test_unqiue():
assert (output[1].asnumpy() == expect2).all()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -75,7 +75,7 @@ def test_unqiue_func_1d():
assert (output[1].asnumpy() == expect2).all()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -77,7 +77,7 @@ def test_unique_square():
assert (output.asnumpy() == expect1).all()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_unqiue_func_1d():
@ -95,7 +95,7 @@ def test_unqiue_func_1d():
assert (output[1].asnumpy() == expect2).all()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_unqiue_func_2d():

View File

@ -19,7 +19,7 @@ from subprocess import Popen
import pytest
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_single
@ -31,7 +31,7 @@ def test_hccl_init_fail():
assert grep_ret == 0
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_single

View File

@ -20,7 +20,7 @@ from subprocess import Popen
import pytest
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_single

View File

@ -329,7 +329,7 @@ def encrypt_func(model_stream, key):
return plain_data.getvalue()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard

View File

@ -21,7 +21,7 @@ import mindspore.nn as nn
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -44,7 +44,7 @@ def test_fallback_runtime_abs():
assert res == 21
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -21,7 +21,7 @@ import mindspore as ms
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -45,7 +45,7 @@ def test_fallback_all_dict():
assert out[0] and out[1]
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -20,7 +20,7 @@ from mindspore import Tensor, jit, context
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -24,7 +24,7 @@ from mindspore.ops import operations as P
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -47,7 +47,7 @@ def test_fallback_getattr_asnumpy():
assert out[0] == out[1] == 4
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -79,7 +79,7 @@ def test_fallback_getattr_asnumpy_custom_class():
assert out[0] == out[1] == 4
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -21,7 +21,7 @@ import mindspore as ms
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -49,7 +49,7 @@ def test_fallback_hasattr_custom_class():
assert res
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -70,7 +70,7 @@ def test_fallback_hasattr_asnumpy():
assert res
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -20,7 +20,7 @@ from mindspore import Tensor, jit, context
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -39,7 +39,7 @@ def test_fallback_int():
assert res == 2
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -23,7 +23,7 @@ import mindspore as ms
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -50,7 +50,7 @@ def test_fallback_isinstance():
assert not out[1]
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -95,7 +95,7 @@ def test_fallback_isinstance_numpy_type():
assert out
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -120,7 +120,7 @@ def test_fallback_isinstance_parameter():
assert out[0], out[1]
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -164,7 +164,7 @@ class NetIsinstanceClass2:
return out
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -220,7 +220,7 @@ def test_fallback_isinstance_jit_class_type_tuple():
assert not res[0] and not res[1]
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -20,7 +20,7 @@ from mindspore import Tensor, jit, context, mutable
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -62,7 +62,7 @@ def test_len_numpy_string():
assert out[0] == 2, out[1] == 4
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -21,7 +21,7 @@ from mindspore import Tensor, jit, context
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -44,7 +44,7 @@ def test_fallback_runtime_map():
assert isinstance(out, map)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -61,7 +61,7 @@ def check_output(output, patterns):
assert output.find(pattern) != -1, "Unexpected output:\n" + output + "\n--- pattern ---\n" + pattern
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -89,7 +89,7 @@ def test_fallback_print_asnumpy():
check_output(cap.output, patterns)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -20,7 +20,7 @@ from mindspore import context, Tensor
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -53,7 +53,7 @@ def test_round_cust_class():
assert out[0] == 100.91, out[1] == 100.91
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -19,7 +19,7 @@ from mindspore import jit, context
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -20,7 +20,7 @@ from mindspore import Tensor, jit, context
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -40,7 +40,7 @@ def test_fallback_runtime_str():
assert foo(x) == "[1 2 3]"
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -20,7 +20,7 @@ from mindspore import Tensor, jit, context
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -40,7 +40,7 @@ def test_fallback_runtime_type_numpy():
assert str(out) == "<class 'numpy.ndarray'>"
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -20,7 +20,7 @@ from mindspore import Tensor, jit, context
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -269,7 +269,7 @@ def test_print_validate_tuple():
@security_off_wrap
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -27,7 +27,7 @@ from mindspore import mutable, jit
ms.set_context(mode=ms.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -71,7 +71,7 @@ def test_getattr_list_with_wrong_attr():
assert "object has no attribute" in str(err.value)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -93,7 +93,7 @@ def test_getattr_tuple_with_wrong_attr():
assert "object has no attribute" in str(err.value)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -115,7 +115,7 @@ def test_getattr_dict_with_wrong_attr():
assert "object has no attribute" in str(err.value)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -218,7 +218,7 @@ def test_pyexecute_with_scalar_input_4():
assert ret
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -260,7 +260,7 @@ def reduce_user_mul(x):
return out
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -319,7 +319,7 @@ class CreateDynTensor(nn.Cell):
return output1 + output2
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -376,7 +376,7 @@ class CreateDynTensorWithInputDtype(nn.Cell):
return output1 + output2
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -403,7 +403,7 @@ class MakeTensorAsConstant(ms.nn.Cell):
return output1 + output2
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -431,7 +431,7 @@ class MakeTensorWithShapeDtype(nn.Cell):
return output1 + output2
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -492,7 +492,7 @@ def test_gelu():
assert np.all(res1.asnumpy() == res3.asnumpy())
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -515,7 +515,7 @@ def test_np_save():
os.remove("x_data.npy")
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -544,7 +544,7 @@ def test_np_save_with_args():
os.remove("data_from_args.npy")
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -601,7 +601,7 @@ def test_np_save_with_call_kw2():
os.remove("data_from_kw_with_if.npy")
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -664,7 +664,7 @@ def test_pyexecute_raise_error_with_dynamic_length_sequence_2():
assert "does not match the shape" in str(err.value)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -46,7 +46,7 @@ class Net(ms.nn.Cell):
return self.np_function(a, b)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -64,7 +64,7 @@ def test_fallback_np():
np.testing.assert_almost_equal(output, const_output, 3)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -126,7 +126,7 @@ def test_fallback_np_asnumpy_grad():
assert output == 0
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -22,7 +22,7 @@ from mindspore import mutable
ms.set_context(mode=ms.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -89,7 +89,7 @@ class SelfObjectGetattrNet(ms.nn.Cell):
return x
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -135,7 +135,7 @@ class GlobalObjectGetattrNet(ms.nn.Cell):
return x
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -156,7 +156,7 @@ def test_call_no_self_other_object_method_runtime():
assert np.all(result == z)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -219,7 +219,7 @@ class UNet(ms.nn.Cell):
return out
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -237,7 +237,7 @@ def test_resolve_cust_class():
assert output == 200
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -274,7 +274,7 @@ class UserDefinedTupleNet:
return self.value * x, 100
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -302,7 +302,7 @@ class UserDefinedListNet:
return [self.value * x, 100]
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -330,7 +330,7 @@ class UserDefinedDictNet:
return {"100": self.value * x}
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -348,7 +348,7 @@ def test_pyexecute_with_stub_tensor_3():
assert output["100"] == 100
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -384,7 +384,7 @@ def test_parser_fallback_nested_class_outer_grad():
assert output == 0
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -414,7 +414,7 @@ def test_create_custom_class_default():
assert out == 6
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -444,7 +444,7 @@ def test_create_custom_class_args():
assert out == 12
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_getattr_cust_class_const():

View File

@ -23,7 +23,7 @@ import mindspore.common.dtype as mstype
ms.set_context(mode=ms.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -90,7 +90,7 @@ def test_dict_return_2():
assert out == {'a': ms.Tensor(np.array(1), ms.int64)}
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -138,7 +138,7 @@ def test_dict_get_3():
assert out == {'y': ms.Tensor(np.array(1), ms.int64), 'a': 'a', 'b': 'c'}
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -483,7 +483,7 @@ def test_net_dict_2_grad():
assert np.allclose(outputs1.asnumpy(), outputs2.asnumpy())
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -613,7 +613,7 @@ def test_nested_dict_with_parameter():
assert out2 == 2
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -641,7 +641,7 @@ def test_return_nested_dict_with_parameter1():
@pytest.mark.skip('Not support list to PyExecute yet.')
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -668,7 +668,7 @@ def test_return_nested_dict_with_parameter2():
assert out == [{'params': [net.x, net.y], 'a': 1, 'b': False}, {'params': net.x, 'a': 2}]
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -728,7 +728,7 @@ def test_nested_dict_with_parameter_constant2():
assert out2 == net.x
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -836,7 +836,7 @@ def test_return_nested_dict_with_parameter_constant4():
assert out == {'params': [net.x, net.y], 'a': 1, 'b': {'params': net.x, 'a': 2}}
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -860,7 +860,7 @@ def test_return_dict_with_dict_values():
assert out['x'] == x
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -932,7 +932,7 @@ def test_return_dict_in_if_else():
assert out == {"cba": x, "number": [3, 2, 1]}
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -20,7 +20,7 @@ import mindspore.common.dtype as mstype
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -36,7 +36,7 @@ class NumberNet(Cell):
@pytest.mark.skip(reason="Not support to change attribute of cell object")
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -54,7 +54,7 @@ def test_change_net_number():
@pytest.mark.skip(reason="Not support to change attribute of cell object")
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -84,7 +84,7 @@ class ListNet(Cell):
@pytest.mark.skip(reason="Not support to change attribute of cell object")
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -102,7 +102,7 @@ def test_change_net_list():
@pytest.mark.skip(reason="Not support to change attribute of cell object")
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -21,7 +21,7 @@ context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.skip(reason="No support yet.")
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -27,7 +27,7 @@ context.set_context(mode=context.GRAPH_MODE)
global_list_1 = [1, 2, 3, 4]
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -71,7 +71,7 @@ def test_global_list_used_in_graph_2():
global_numpy_list = [np.array([1, 2, 3]), np.array([4, 5, 6])]
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -93,7 +93,7 @@ def test_global_numpy_list_used_in_graph():
global_list_2 = [1, 2, 3, 4, [3, 4], None]
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -112,7 +112,7 @@ def test_global_nested_list_getitem_in_graph():
assert id(res) == id(global_list_2[4])
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -195,7 +195,7 @@ def test_global_nested_list_return_in_graph_3():
assert id(res[1]) == id(global_list_3[3][1])
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -342,7 +342,7 @@ def test_local_sequence_used_in_graph_with_operator():
global_list_for_reverse = [1, 2, 3, 4]
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -419,7 +419,7 @@ def test_list_inplace_reverse_3():
global_list_for_reverse_4 = [[1, 2], [3, 4]]
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -441,7 +441,7 @@ def test_list_inplace_reverse_element():
assert global_list_for_reverse_4 == [[2, 1], [3, 4]]
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -489,7 +489,7 @@ def test_list_inplace_reverse_local_list_2():
global_list_for_pop = [1, 2, 3, 4]
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -538,7 +538,7 @@ def test_list_inplace_pop_2():
assert id(out[1]) == id(global_list_for_pop_2)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -612,7 +612,7 @@ def test_list_inplace_pop_local_3():
global_list_for_pop_extend = [1, 2, 3, 4]
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -635,7 +635,7 @@ def test_list_inplace_extend():
global_list_for_pop_extend_2 = [1, 2, 3, 4]
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -653,7 +653,7 @@ def test_list_inplace_extend_no_return():
assert global_list_for_pop_extend_2 == [1, 2, 3, 4, 1, 2, 3]
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -680,7 +680,7 @@ def test_list_inplace_extend_local_list():
global_list_for_pop_insert = [1, 2, 3, 4]
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -700,7 +700,7 @@ def test_list_inplace_insert():
assert id(out) == id(global_list_for_pop_insert)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -749,7 +749,7 @@ def test_list_inplace_insert_local_list_2():
assert out[1] == [("a", "b"), 1, 2, 3]
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -814,7 +814,7 @@ def test_list_inplace_reverse_with_variable_3():
assert out == [3, 2, 1]
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -860,7 +860,7 @@ def test_dynamic_len_list_inplace_op_2():
global_list_all_str = ['a', 'b', 'c']
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -904,7 +904,7 @@ def test_list_inplace_with_all_str_2():
assert global_tuple_with_list_all_str == (['a', 'b', 'c', 'd', 'e'], 1, 2)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -23,7 +23,7 @@ from mindspore import Tensor, mutable
ms.set_context(mode=ms.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -78,7 +78,7 @@ def test_fallback_add_meta_2():
assert ret == 7
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -217,7 +217,7 @@ def test_fallback_mul_meta():
assert np.all(ret.asnumpy() == np.array([10, 20, 30]))
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -271,7 +271,7 @@ def test_fallback_negative_meta_2():
assert np.allclose(ret.asnumpy(), Tensor([-11, -12, -13]).asnumpy())
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -334,7 +334,7 @@ def test_fallback_compare_meta_2():
assert ret == [True, True, True, True]
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -420,7 +420,7 @@ def test_fallback_in_meta():
assert ret == (True, True)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -477,7 +477,7 @@ def test_fallback_meta_fg_not_support_type_in_3():
assert net()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -500,7 +500,7 @@ def test_fallback_meta_fg_not_support_type_add():
assert "unsupported operand type" in str(err.value)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -523,7 +523,7 @@ def test_fallback_meta_fg_not_support_type_bitwise_and():
assert "unsupported operand type" in str(err.value)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -546,7 +546,7 @@ def test_fallback_meta_fg_not_support_type_bitwise_or():
assert "unsupported operand type" in str(err.value)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -592,7 +592,7 @@ def test_fallback_meta_fg_not_support_type_div():
assert "unsupported operand type" in str(err.value)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -614,7 +614,7 @@ def test_fallback_meta_fg_not_support_type_equal():
assert res
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -637,7 +637,7 @@ def test_fallback_meta_fg_not_support_type_floordiv():
assert "unsupported operand type" in str(err.value)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -706,7 +706,7 @@ def test_fallback_meta_fg_not_support_type_less_equal():
assert "'<=' not supported between" in str(err.value)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -730,7 +730,7 @@ def test_fallback_meta_fg_not_support_type_aug_assign():
assert "For 'Sub', the 2th input var can not be implicitly converted" in str(err.value)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -753,7 +753,7 @@ def test_fallback_meta_fg_not_support_type_less():
assert "'<' not supported between" in str(err.value)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -775,7 +775,7 @@ def test_fallback_meta_fg_not_support_type_or():
assert res == [1, 2]
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -798,7 +798,7 @@ def test_fallback_meta_fg_not_support_type_mod():
assert "unsupported operand type" in str(err.value)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -844,7 +844,7 @@ def test_fallback_meta_fg_not_support_type_right_shift():
assert "unsupported operand type" in str(err.value)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -867,7 +867,7 @@ def test_fallback_meta_fg_not_support_type_sub():
assert "unsupported operand type" in str(err.value)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -890,7 +890,7 @@ def test_fallback_meta_fg_not_support_type_uadd():
assert "unsupported operand type" in str(err.value)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -911,7 +911,7 @@ def test_fallback_meta_fg_not_support_type_not():
assert res
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -933,7 +933,7 @@ def test_fallback_meta_fg_not_support_type_and():
assert res == 2
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -1014,7 +1014,7 @@ def test_fallback_setitem_meta_2():
assert ret == [10, 2, 3, 4]
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -64,7 +64,7 @@ def check_output(output, patterns):
assert output.find(pattern) != -1, "Unexpected output:\n" + output + "\n--- pattern ---\n" + pattern
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_none_compare():
@ -96,7 +96,7 @@ def test_none_compare():
check_output(cap.output, patterns)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -128,7 +128,7 @@ def test_none_is_sequence_input():
assert res == 7
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_inner_function_has_not_return():
@ -189,7 +189,7 @@ def test_inner_function_has_not_return_2():
assert res == 0
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_none_is_default_value_of_parameter():
@ -251,7 +251,7 @@ def test_none_is_default_value_of_parameter_2():
check_output(cap.output, patterns)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -282,7 +282,7 @@ def test_none_is_slice_in_list():
assert res == 0
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_none_assign_print():
@ -309,7 +309,7 @@ def test_none_assign_print():
check_output(cap.output, patterns)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -329,7 +329,7 @@ def test_none_is_input():
assert res is None
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -360,7 +360,7 @@ def test_none_is_condition():
check_output(cap.output, patterns)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -467,7 +467,7 @@ def test_none_is_output_of_function_with_side_effect_equal():
assert res == 4
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -490,7 +490,7 @@ def test_none_is_input_of_dict_return():
assert out == {'y': 'a', 'u': 9, 'v': False, 'w': None}
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -513,7 +513,7 @@ def test_none_nested_input_of_dict_return():
assert out == {'y': 'a', 'u': 9, 'v': False, 'w': (None, None), 'q': [1, (2, None), None]}
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -575,7 +575,7 @@ def test_none_is_input_of_tuple_return_2():
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -609,7 +609,7 @@ def test_none_is_return_of_sub_graph_control_flow():
check_output(cap.output, patterns)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -636,7 +636,7 @@ def test_none_is_return_of_sub_graph_control_flow_raise():
assert res.asnumpy() == 1
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -25,7 +25,7 @@ from mindspore import Tensor, jit, context
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -165,7 +165,7 @@ def test_return_constant_list_7():
assert res == [1, "a", True, None, Tensor([2])]
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -225,7 +225,7 @@ def test_return_make_list_node_3():
@pytest.mark.skip('backend not support different type in value tuple')
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -251,7 +251,7 @@ def test_return_make_list_node_4():
assert res[2] == (0, 1.0)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -308,7 +308,7 @@ def test_return_list_with_nest_3():
assert res == (([1, 1], [2, 2], (3, [4, 4])), [4, 5, 6])
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -346,7 +346,7 @@ def test_return_make_list_with_nest_2():
assert res == ([Tensor([0]), ([Tensor([0]), 1],)], (Tensor([1]), Tensor([2])))
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -384,7 +384,7 @@ def test_return_buildin_list_func_2():
assert res == [Tensor([1]), Tensor([2]), Tensor([3])]
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -424,7 +424,7 @@ def test_return_dynamic_length_list_2():
assert res == [Tensor([0]), Tensor([1])]
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -530,7 +530,7 @@ def test_return_list_from_dict_attribute_2():
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -551,7 +551,7 @@ def test_grad_for_return_list_graph():
assert np.allclose(res.asnumpy(), np.array([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]).astype(np.float32))
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -582,7 +582,7 @@ def test_grad_for_graph_with_list_input():
assert np.allclose(output[1].asnumpy(), expect[1])
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -30,7 +30,7 @@ class GradNet(ms.nn.Cell):
return grads
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_return_scalar():
@ -51,7 +51,7 @@ def test_return_scalar():
assert isinstance(out3, float) and abs(out3 - 11) < 1e-6
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_return_scalar_tuple():
@ -70,7 +70,7 @@ def test_return_scalar_tuple():
assert isinstance(out[2], int) and out[2] == 24
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -91,7 +91,7 @@ def test_builtin_int():
assert isinstance(out, int) and out == 1
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -112,7 +112,7 @@ def test_builtin_float():
assert isinstance(out, float) and math.isclose(out, 1, abs_tol=1e-5)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -133,7 +133,7 @@ def test_builtin_bool():
assert out is True
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -156,7 +156,7 @@ def test_builtin_scalar_grad():
assert out_grad == 0
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -178,7 +178,7 @@ def test_scalar_in_tuple_output():
assert isinstance(out[1], float) and math.isclose(out[1], 10, abs_tol=1e-5)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -199,7 +199,7 @@ def test_int_asnumpy():
assert out == 5
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -220,7 +220,7 @@ def test_int_asnumpy_calculation():
assert out == 6
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -241,7 +241,7 @@ def test_int_tensor_asnumpy_calculation():
assert out == 6
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -261,7 +261,7 @@ def test_int_mutable():
assert isinstance(out, int) and out == 1
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -281,7 +281,7 @@ def test_float_mutable():
assert isinstance(out, float) and math.isclose(out, 1, abs_tol=1e-5)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -304,7 +304,7 @@ def test_bool_condition():
assert out == 6
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -327,7 +327,7 @@ def test_int_condition():
assert out == 7
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -350,7 +350,7 @@ def test_float_condition():
assert out == 3
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -373,7 +373,7 @@ def test_tensor_condition():
assert out == 10
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -393,7 +393,7 @@ def test_bool_asnumpy():
assert out is True
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -416,7 +416,7 @@ def test_bool_asnumpy_condition():
assert out == 10
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -437,7 +437,7 @@ def test_scalar_int_calculation():
assert isinstance(out, int) and out == 6
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -460,7 +460,7 @@ def test_combine_calculation():
assert isinstance(out[2], float) and abs(out[2] - 6.5) < 1e-6
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -486,7 +486,7 @@ def test_scalar_in_inner_function():
assert out == 11
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -512,7 +512,7 @@ def test_scalar_tuple_in_inner_function():
assert out[0] == 11
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_scalar_in_list():
@ -530,7 +530,7 @@ def test_scalar_in_list():
assert out == [1, 2, 3, 4]
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_scalar_in_dict():
@ -549,7 +549,7 @@ def test_scalar_in_dict():
assert isinstance(out.get('y'), int)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_scalar_in_dict_with_int_value():
@ -567,7 +567,7 @@ def test_scalar_in_dict_with_int_value():
assert out == {'x': 1, 'y': 2}
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_scalar_in_dict_with_tuple_value():
@ -585,7 +585,7 @@ def test_scalar_in_dict_with_tuple_value():
assert out == {'x': (1, 2), 'y': (3, 4)}
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_scalar_in_dict_with_empty_tuple():

View File

@ -24,7 +24,7 @@ from mindspore import Tensor, Parameter, jit, jit_class
ms.set_context(mode=ms.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -102,7 +102,7 @@ def test_setattr_self_non_param_3():
assert np.all(test_net.data == np.array([1, 2, 3, 4]))
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -131,7 +131,7 @@ def test_setattr_self_repeat():
assert test_net.data == 3
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -157,7 +157,7 @@ def test_setattr_self_non_param_not_used():
assert test_net.data == 2
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -214,7 +214,7 @@ class AssignTarget:
self.x = 1
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -298,7 +298,7 @@ class OuterAssignTarget:
self.a = NestedAssignTarget()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -371,7 +371,7 @@ def test_setattr_local_object_attr():
assert np.all(res[1] == np.array([[1, 2], [3, 4]]))
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -458,7 +458,7 @@ def test_setattr_run_multiple_times_3():
assert np.all(ret3 == np.array([4, 5, 6]))
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -492,7 +492,7 @@ def test_setattr_with_augassign():
assert ret == 13
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -547,7 +547,7 @@ def test_setattr_in_control_flow_2():
assert ret == 4
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -18,7 +18,7 @@ from mindspore import context, jit
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -51,7 +51,7 @@ class UNet(ms.nn.Cell):
return out, self.para + 10
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fallback_side_effect_assign():
@ -133,7 +133,7 @@ def test_fallback_side_effect_dict_2():
assert out[1] == {'a': 1, 'b': 4, 'c': 3, 'd': 22}
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fallback_side_effect_nested_net():
@ -175,7 +175,7 @@ def test_fallback_side_effect_nested_net():
assert output == 52
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fallback_control_flow():
@ -207,7 +207,7 @@ def test_fallback_control_flow():
assert out[2] == {'a': 1, 'b': 2}
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fallback_side_effect_asnumpy():
@ -330,7 +330,7 @@ class PrintPyExecuteNet(ms.nn.Cell):
return out
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -348,7 +348,7 @@ def test_print_pyexecute():
assert output == 200
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -371,7 +371,7 @@ def test_fallback_dtype_is_cond():
assert out == 0
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_if_after_for_in_if_numpy():

View File

@ -29,7 +29,7 @@ from . import utils
ms.set_context(mode=ms.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -49,7 +49,7 @@ def test_relative_import():
assert func(x, y) == 5
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -69,7 +69,7 @@ def test_operator_add():
assert func(x, y) == 3
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -92,7 +92,7 @@ def add_func(x, y):
return x + y
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -116,7 +116,7 @@ def test_functools_partial():
assert np.all(out.asnumpy() == np.array([5, 7, 9]))
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -136,7 +136,7 @@ def test_np_logspace_func():
assert np.allclose(out, expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -158,7 +158,7 @@ def test_scipy_concatenate():
assert np.all(out == expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -26,7 +26,7 @@ from mindspore.common import mutable
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -49,7 +49,7 @@ def test_return_interpret_object():
assert np.all(output[2] == np.array([1, 2, 3, 4]))
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -71,7 +71,7 @@ def test_raise_error_in_variable_scene():
assert output == Tensor([1])
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -91,7 +91,7 @@ def test_str_format_in_variable_scene():
assert output == "[2], [1]"
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -112,7 +112,7 @@ def test_numpy_asarray_with_variable_scene():
assert np.all(output == np.array([1, 2]))
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -132,7 +132,7 @@ def test_in_with_none():
assert foo()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -217,7 +217,7 @@ def test_slice_with_mutable_input():
assert foo() == 1
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -264,7 +264,7 @@ def test_star_to_compress_input():
assert ret[1] == [2, 3, 4]
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -333,7 +333,7 @@ def test_starred_to_unpack_input():
assert ret == "output is (1, 2, 3, 4)"
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -357,7 +357,7 @@ def test_call_third_party_class():
assert out == deque([4, 3, 2, 1])
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -384,7 +384,7 @@ def test_np_ix_with_variable():
assert (ret == [[0], [1], [2], [3], [4]]).all()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -406,7 +406,7 @@ def test_generate_tensor_using_variable_numpy_array():
assert (out == Tensor([0, 1])).all()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -46,7 +46,7 @@ def check_fusion_op_in_ir(ir_dir):
return False
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -487,7 +487,7 @@ def test_forward_with_parameter():
assert np.allclose(out[1].asnumpy(), expect_dy)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_forward_with_parameter_in_sub_cell():
@ -650,7 +650,7 @@ def test_pynative_forward_with_parameter():
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_pynative_forward_with_parameter_in_sub_cell():
@ -768,7 +768,7 @@ def test_pynative_forward_with_parameter_in_sub_cell_get_by_list():
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dde_self_define_cell_output_not_use():
@ -811,7 +811,7 @@ def test_dde_self_define_cell_output_not_use():
assert out[0] == ms.Tensor([3])
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_bprop_defined_in_cell_attr_register():

View File

@ -366,7 +366,7 @@ def test_custom_vjp_fn_with_net():
assert np.allclose(out[1].asnumpy(), expect_dy)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_custom_vjp_forward_net_call_fn():

View File

@ -25,7 +25,7 @@ from mindspore import dtype as mstype
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -66,7 +66,7 @@ def test_backward_return_dict():
assert np.allclose(output['a'].asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -107,7 +107,7 @@ def test_forward_contain_make_dict_and_dict_getitem():
assert np.allclose(output.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -147,7 +147,7 @@ def test_forward_return_dict():
assert np.allclose(output.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -189,7 +189,7 @@ def test_forward_return_dict_backward_return_dict():
assert np.allclose(output['a'].asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -232,7 +232,7 @@ def test_forward_contain_make_dict_and_dict_getitem_backward_return_dict():
assert np.allclose(output['a'].asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -276,7 +276,7 @@ def test_forward_contain_make_dict_and_dict_setitem_backward_return_dict1():
assert np.allclose(output['a'].asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -64,7 +64,7 @@ def test_jvp_single_input_single_output_default_v_graph(mode):
assert np.allclose(grad.asnumpy(), expect_grad.asnumpy())
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
@ -114,7 +114,7 @@ def test_jvp_single_input_multiple_outputs_default_v_graph(mode):
assert np.allclose(grad[1].asnumpy(), expect_grad_1.asnumpy())
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
@ -165,7 +165,7 @@ def test_jvp_multiple_inputs_single_output_default_v_graph(mode):
assert np.allclose(grad.asnumpy(), expect_grad.asnumpy())
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])

View File

@ -32,7 +32,7 @@ class MultipleInputsOutputNet(nn.Cell):
return 2 * x, y ** 3
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
@ -170,7 +170,7 @@ def test_vjp_construct_single_input_single_output_default_v_graph(mode):
assert np.allclose(gradient[0].asnumpy(), expect_grad.asnumpy())
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])

View File

@ -79,7 +79,7 @@ def grad_wrap_with_msfunction_get_grad(x, y, z):
return output
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_grad_single_input_single_output_cell_graph():
@ -95,7 +95,7 @@ def test_grad_single_input_single_output_cell_graph():
assert np.allclose(real_grad.asnumpy(), expect_grad.asnumpy())
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_grad_single_input_multiple_outputs_cell_graph():
@ -111,7 +111,7 @@ def test_grad_single_input_multiple_outputs_cell_graph():
assert np.allclose(real_grad.asnumpy(), expect_grad.asnumpy())
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_grad_multiple_inputs_single_output_cell_graph():
@ -133,7 +133,7 @@ def test_grad_multiple_inputs_single_output_cell_graph():
assert np.allclose(real_grad[1].asnumpy(), expect_grad2.asnumpy())
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_grad_multiple_inputs_multiple_outputs_cell_graph():
@ -155,7 +155,7 @@ def test_grad_multiple_inputs_multiple_outputs_cell_graph():
assert np.allclose(real_grad[1].asnumpy(), expect_grad2.asnumpy())
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_grad_iteration_function_graph():
@ -176,7 +176,7 @@ def test_grad_iteration_function_graph():
assert np.allclose(real_grad[1].asnumpy(), expect_grad2.asnumpy())
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_grad_wrap_with_msfunction_graph():
@ -193,7 +193,7 @@ def test_grad_wrap_with_msfunction_graph():
assert np.allclose(real_grad.asnumpy(), expect_grad.asnumpy())
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_grad_with_grad_position_twice_graph():
@ -212,7 +212,7 @@ def test_grad_with_grad_position_twice_graph():
assert isinstance(out2, tuple)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_grad_with_weights_twice_graph():
@ -234,7 +234,7 @@ def test_grad_with_weights_twice_graph():
assert np.allclose(out2[0].asnumpy(), expect2)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_grad_with_weights_has_aux_graph():
@ -310,7 +310,7 @@ def test_jit_function_grad_with_weights_has_aux_graph():
assert np.allclose(aux[1].asnumpy(), expect_aux2)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_construct_grad_with_weights_has_aux_graph():
@ -355,7 +355,7 @@ def test_construct_grad_with_weights_has_aux_graph():
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_grad_if_with_weights_has_aux_graph():
@ -394,7 +394,7 @@ def test_grad_if_with_weights_has_aux_graph():
assert np.allclose(aux[1].asnumpy(), expect_aux2)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@ -438,7 +438,7 @@ def test_grad_nest_with_weights_has_aux_graph():
assert np.allclose(aux[0].asnumpy(), expect_aux)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_grad_if_ith_train_one_step():
@ -500,7 +500,7 @@ def test_grad_if_ith_train_one_step():
train_one_if_net(x, y)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_grad_net_d_net_g():
@ -594,7 +594,7 @@ def test_grad_net_d_net_g():
train_one_net(x, y)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_value_and_grad_with_weights_has_aux_graph():
@ -632,7 +632,7 @@ def test_value_and_grad_with_weights_has_aux_graph():
assert np.allclose(gradient[1][1].asnumpy(), expect_grad_weight2)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_construct_value_and_grad_with_weights_has_aux_graph():
@ -678,7 +678,7 @@ def test_construct_value_and_grad_with_weights_has_aux_graph():
assert np.allclose(gradient[1][0].asnumpy(), expect_grad_weight1)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_value_and_grad_nest_with_weights_graph():
@ -767,7 +767,7 @@ def test_value_and_grad_nest_with_weights_has_aux_graph():
assert np.allclose(gradient[1][1].asnumpy(), expect_grad_weight2)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_construct_grad_single_position_with_return_ids():
@ -848,7 +848,7 @@ def test_construct_grad_multiplt_positions_with_return_ids():
assert np.allclose(res[1][0], expect_grad_input2[0])
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_construct_grad_with_weights_with_return_ids():
@ -890,7 +890,7 @@ def test_construct_grad_with_weights_with_return_ids():
assert res[1][0][0] == inner_net.w.name
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_construct_get_grad_by_position():
@ -968,7 +968,7 @@ def test_construct_get_grad_by_weight():
assert np.allclose(grad_out.asnumpy(), expect_grad_input)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_construct_get_grad_not_found():
@ -1044,7 +1044,7 @@ def test_construct_get_grad_not_found_from_empty_tuple():
grad_net(x, y)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_get_grad_wrap_with_msfunction_graph():
@ -1061,7 +1061,7 @@ def test_get_grad_wrap_with_msfunction_graph():
assert np.allclose(real_grad.asnumpy(), expect_grad.asnumpy())
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_grad_primal_graph_call_others():
@ -1089,7 +1089,7 @@ def test_grad_primal_graph_call_others():
assert np.allclose(output.asnumpy(), expected.asnumpy())
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_get_grad_outer_list_weight():

View File

@ -117,7 +117,7 @@ def test_grad_single_input_single_output_cell_pynative():
assert np.allclose(real_grad.asnumpy(), expect_grad.asnumpy())
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_grad_single_input_multiple_outputs_cell_pynative():
@ -133,7 +133,7 @@ def test_grad_single_input_multiple_outputs_cell_pynative():
assert np.allclose(real_grad.asnumpy(), expect_grad.asnumpy())
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_grad_multiple_inputs_single_output_cell_pynative():
@ -155,7 +155,7 @@ def test_grad_multiple_inputs_single_output_cell_pynative():
assert np.allclose(real_grad[1].asnumpy(), expect_grad2.asnumpy())
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_grad_multiple_inputs_multiple_outputs_cell_pynative():
@ -177,7 +177,7 @@ def test_grad_multiple_inputs_multiple_outputs_cell_pynative():
assert np.allclose(real_grad[1].asnumpy(), expect_grad2.asnumpy())
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_grad_iteration_function_pynative():
@ -198,7 +198,7 @@ def test_grad_iteration_function_pynative():
assert np.allclose(real_grad[1].asnumpy(), expect_grad2.asnumpy())
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_grad_wrap_with_msfunction_pynative():
@ -215,7 +215,7 @@ def test_grad_wrap_with_msfunction_pynative():
assert np.allclose(real_grad.asnumpy(), expect_grad.asnumpy())
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_grad_vmap_pynative():
@ -244,7 +244,7 @@ def test_grad_vmap_pynative():
assert np.allclose(outputs.asnumpy(), expect_value)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_grad_with_grad_position_twice_pynative():
@ -285,7 +285,7 @@ def test_grad_with_weights_twice_pynative():
assert np.allclose(out2[0].asnumpy(), expect2)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_grad_with_weights_has_aux_pynative():
@ -321,7 +321,7 @@ def test_grad_with_weights_has_aux_pynative():
assert np.allclose(aux[1].asnumpy(), expect_aux2)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_grad_if_with_weights_has_aux_pynative():
@ -360,7 +360,7 @@ def test_grad_if_with_weights_has_aux_pynative():
assert np.allclose(aux[1].asnumpy(), expect_aux2)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@ -404,7 +404,7 @@ def test_grad_nest_with_weights_has_aux_pynative():
assert np.allclose(aux[0].asnumpy(), expect_aux)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_value_and_grad_with_weights_has_aux_pynative():
@ -442,7 +442,7 @@ def test_value_and_grad_with_weights_has_aux_pynative():
assert np.allclose(gradient[1][1].asnumpy(), expect_grad_weight2)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_value_and_grad_nest_with_weights_pynative():
@ -485,7 +485,7 @@ def test_value_and_grad_nest_with_weights_pynative():
assert np.allclose(gradient[1][1].asnumpy(), expect_grad_weight2)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@ -531,7 +531,7 @@ def test_value_and_grad_nest_with_weights_has_aux_pynative():
assert np.allclose(gradient[1][1].asnumpy(), expect_grad_weight2)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_grad_single_input_single_output_cell_graph_with_return_ids_pynative():
@ -548,7 +548,7 @@ def test_grad_single_input_single_output_cell_graph_with_return_ids_pynative():
assert np.allclose(real_grad[0], expect_grad[0])
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_grad_single_input_multiple_outputs_cell_graph_with_return_ids_pynative():
@ -565,7 +565,7 @@ def test_grad_single_input_multiple_outputs_cell_graph_with_return_ids_pynative():
assert np.allclose(real_grad[0], expect_grad[0])
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_grad_multiple_inputs_single_output_cell_graph_with_return_ids_pynative():
@ -589,7 +589,7 @@ def test_grad_multiple_inputs_single_output_cell_graph_with_return_ids_pynative():
assert np.allclose(real_grad[1][0], expect_grad2[0])
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_grad_multiple_inputs_multiple_outputs_cell_graph_with_return_ids_pynative():
@ -614,7 +614,7 @@ def test_grad_multiple_inputs_multiple_outputs_cell_graph_with_return_ids_pynative():
assert np.allclose(real_grad[1][0], expect_grad2[0])
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_grad_iteration_function_graph_with_return_ids_pynative():
@ -640,7 +640,7 @@ def test_grad_iteration_function_graph_with_return_ids_pynative():
assert np.allclose(real_grad[1][0], expect_grad2[0])
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_grad_with_weights_with_return_ids_pynative():
@ -713,7 +713,7 @@ def test_jit_function_grad_with_weights_return_ids():
assert res[1][0][0] == weights[0].name
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_construct_grad_with_weights_with_return_ids():
@ -755,7 +755,7 @@ def test_construct_grad_with_weights_with_return_ids():
assert res[1][0][0] == weights[0].name
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_get_grad_by_position_pynative():
@ -784,7 +784,7 @@ def test_get_grad_by_position_pynative():
assert np.allclose(grad_out.asnumpy(), expect_grad_input)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_get_grad_with_parameter_pynative():
@ -813,7 +813,7 @@ def test_get_grad_with_parameter_pynative():
assert np.allclose(grad_out.asnumpy(), expect_grad_weight1)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_get_grad_not_found_pynative():
@ -841,7 +841,7 @@ def test_get_grad_not_found_pynative():
get_grad(res, 1)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_grad_outer_list_weight():
@ -883,7 +883,7 @@ def test_grad_outer_list_weight():
assert np.allclose(out[0].asnumpy(), expect_value.asnumpy())
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_grad_with_only_input():
@ -921,7 +921,7 @@ def test_grad_with_only_parameter():
np.testing.assert_almost_equal(gradients[0].asnumpy(), expect_grad.asnumpy())
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_grad_squence_out():

View File

@ -113,7 +113,7 @@ def test_jvp_single_input_multiple_outputs_default_v_graph(mode):
assert np.allclose(grad[1].asnumpy(), expect_grad_1.asnumpy())
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
@ -217,7 +217,7 @@ def test_jvp_multiple_inputs_multiple_outputs_default_v_graph(mode):
assert np.allclose(grad[1].asnumpy(), expect_grad_1.asnumpy())
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])

View File

@ -19,7 +19,7 @@ from mindspore import context, jit
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -19,7 +19,7 @@ import mindspore as ms
from mindspore import jit, nn, Tensor, context
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_if_branch_have_two_return():
@ -71,7 +71,7 @@ def test_if_branch_has_one_return():
assert "return 0" in str(ex.value)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_if_branch_has_no_return():
@ -97,7 +97,7 @@ def test_if_branch_has_no_return():
assert "a = 0" in str(ex.value)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_while_body_has_return():
@ -120,7 +120,7 @@ def test_while_body_has_return():
assert "return 0" in str(ex.value)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_switch_layer_join_failed():

View File

@ -25,7 +25,7 @@ class Net2(Cell):
return tuple(a)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_pynative_list_slice_tensor_no_step():
@ -53,7 +53,7 @@ def test_pynative_list_slice_tensor_no_step():
assert graph_out == python_out
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_pynative_list_slice_tensor_with_step():
@ -81,7 +81,7 @@ def test_pynative_list_slice_tensor_with_step():
assert python_out == graph_out
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_graph_list_slice_assign_extended_number():

View File

@ -20,7 +20,7 @@ from mindspore import context, jit
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_list_mul_non_integer_number():

View File

@ -104,7 +104,7 @@ def test_and_bool_tensor_2():
assert ret == 1
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -22,7 +22,7 @@ from mindspore import context
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@ -88,7 +88,7 @@ def test_bitwise_and_3():
assert np.allclose(result.asnumpy(), np.array([1, 0, 0]))
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@ -220,7 +220,7 @@ def test_bitwise_xor_3():
assert np.allclose(result.asnumpy(), np.array([0, 3, -3]))
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard

View File

@ -20,7 +20,7 @@ from mindspore import Tensor, context, nn
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -20,7 +20,7 @@ from mindspore import Tensor, context, nn, jit
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -66,7 +66,7 @@ def test_fallback_all_list_hybrid():
assert x and (not y) and (not z)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -89,7 +89,7 @@ def test_fallback_any_tensor():
assert (not out1) and out2
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -21,7 +21,7 @@ from mindspore import Tensor, jit, context, jit_class
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -42,7 +42,7 @@ def test_getattr_tensor():
assert np.all(out.asnumpy() == np.array([1, 2, 3]))
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -74,7 +74,7 @@ class MSClass1:
self.none = None
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -129,7 +129,7 @@ def test_getattr_cell_obj_2():
assert out is None
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -154,7 +154,7 @@ def test_getattr_for_classtype_object():
assert out2 == 10
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -20,7 +20,7 @@ from mindspore import jit, context, Tensor
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -41,7 +41,7 @@ def test_len_numpy_with_variable():
assert out == 14
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -83,7 +83,7 @@ def test_len_dict_with_variable():
assert out == 14
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -20,7 +20,7 @@ from mindspore import Tensor, context, jit
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -76,7 +76,7 @@ def test_fallback_list_with_input_constant_tensor_2():
assert np.allclose(out[2].asnumpy(), np.array([5, 6]))
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -97,7 +97,7 @@ def test_builtin_function_list_with_non_constant_tensor():
assert np.all(ret[1].asnumpy() == np.array([4, 5, 6]))
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -147,7 +147,7 @@ def test_fallback_tuple_with_input_constant_tensor_2():
assert np.allclose(out[1].asnumpy(), np.array([3, 4]))
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -22,7 +22,7 @@ from mindspore.common import mutable
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -182,7 +182,7 @@ def test_builtin_function_min_with_tensor_1d(mode):
assert res == 1
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -206,7 +206,7 @@ def test_builtin_function_max_min_with_tuple_with_variable(mode):
assert res[1] == 1
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -21,7 +21,7 @@ from mindspore import nn, context
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_partial_key_ward_arg_and_pos_arg_const_multi_assign_x():

View File

@ -21,7 +21,7 @@ from mindspore import dtype as mstype
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -44,7 +44,7 @@ def test_fallback_round_tensor():
np.testing.assert_almost_equal(out.asnumpy(), expect.asnumpy())
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -21,7 +21,7 @@ from mindspore import dtype as mstype
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -63,7 +63,7 @@ def test_fallback_sum_tensor_n_default_2():
assert np.allclose(out.asnumpy(), np.array([4, 6]))
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -105,7 +105,7 @@ def test_fallback_sum_with_x_tensor_n_not_default_2():
assert np.allclose(out.asnumpy(), np.array([9, 12]))
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -126,7 +126,7 @@ def test_fallback_sum_with_x_list_of_tensor():
assert np.allclose(out.asnumpy(), np.array([[4, 6], [8, 10]]))
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -148,7 +148,7 @@ def test_fallback_sum_with_tensor_0d(mode):
net()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -20,7 +20,7 @@ from mindspore import jit, context, Tensor
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -39,7 +39,7 @@ def test_fallback_type():
assert str(out) == "<class 'dict'>"
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -59,7 +59,7 @@ def test_fallback_type_with_input_int():
assert str(out) == "<class 'int'>"
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -79,7 +79,7 @@ def test_fallback_type_with_input_float():
assert str(out) == "<class 'float'>"
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -99,7 +99,7 @@ def test_fallback_type_with_input_list():
assert str(out) == "<class 'list'>"
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -119,7 +119,7 @@ def test_fallback_type_with_input_tuple():
assert str(out) == "<class 'tuple'>"
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -139,7 +139,7 @@ def test_fallback_type_with_input_dict():
assert str(out) == "<class 'dict'>"
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -159,7 +159,7 @@ def test_fallback_type_with_input_numpy_array():
assert str(out) == "<class 'numpy.ndarray'>"
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -19,7 +19,7 @@ from mindspore import nn, context
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_assert1():

View File

@ -26,7 +26,7 @@ from mindspore.ops.operations._inner_ops import TopTypeof
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_raise_with_variable_1():
@ -169,7 +169,7 @@ def test_raise_with_variable_tuple_2():
raise_info_string_tuple.value)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_raise_with_variable_joinedstr_tensor():
@ -293,7 +293,7 @@ class CheckNet(ms.nn.Cell):
return x
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_isolated_raise():
@ -310,7 +310,7 @@ def test_isolated_raise():
assert "Check failed. Wrong shape," in str(err.value)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_list_in_control_flow():
@ -408,7 +408,7 @@ def test_raise_parse_with_interpret():
assert "x:" in str(raise_info_joinedstr_tensor.value)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_raise_parse_with_interpret_2():
@ -430,7 +430,7 @@ def test_raise_parse_with_interpret_2():
assert net(input_x, input_y, input_z) is None
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_raise_with_input_error_type_1():
@ -475,7 +475,7 @@ def test_raise_with_input_error_type_2():
assert "The input can not be 11." in str(raise_info.value)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -546,7 +546,7 @@ class CellInList(nn.Cell):
return self.cell_list[index](x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -567,7 +567,7 @@ def test_cell_in_list():
assert ret
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -591,7 +591,7 @@ def test_raise_constant_folding():
assert "The input can not be 11." in str(raise_info_constant.value)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -615,7 +615,7 @@ def test_raise_constant_folding_int64():
assert "The input can not be 11." in str(raise_info_constant_int64.value)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -662,7 +662,7 @@ def judge_tuple_index_dim(data, tuple_index, x):
judge_tuple_index_dim_check_error(index_dim, data_dim, x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -22,7 +22,7 @@ from mindspore.ops import operations as P
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -46,7 +46,7 @@ def test_list_comprehension_with_variable_tensor():
assert res[2] == 4
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -70,7 +70,7 @@ def test_list_comprehension_with_variable_dict():
assert res[1] == 1
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -52,7 +52,7 @@ class Sample2:
return self.num * 5
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -22,7 +22,7 @@ from mindspore import Tensor, context
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_tensorarray_clear():

View File

@ -22,7 +22,7 @@ import mindspore.ops.operations as P
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_joinedstr_basic_variable_gpu():
@ -45,7 +45,7 @@ def test_joinedstr_basic_variable_gpu():
assert out == f"res: {result_tensor}"
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -70,7 +70,7 @@ def test_joinedstr_basic_variable_ascend():
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -94,7 +94,7 @@ def test_joinedstr_basic_variable_2():
assert str(out) == "[1 2 3 4 5]"
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -19,7 +19,7 @@ from mindspore import Tensor, Parameter
from mindspore.nn import Cell
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -53,7 +53,7 @@ def test_hyper_param():
assert output == output_expect
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

Some files were not shown because too many files have changed in this diff.