Take down test cases that are passing all the time

This commit is contained in:
yanghaoran 2022-05-27 20:51:54 +08:00
parent 4cbfd148eb
commit e1d3e6edac
116 changed files with 214 additions and 215 deletions

View File

@ -144,6 +144,7 @@
"mindspore/tests/st/pynative/parser/test_parser_construct.py" "bad-super-call"
"mindspore/tests/ut/python/optimizer/test_auto_grad.py" "broad-except"
"mindspore/tests/st/fallback/control_flow/test_fallback_100_if_after_if.py" "unused-variable"
"mindspore/tests/st/numpy_native/test_array_ops.py" "useless-super-delegation"
#MindSpore Lite
"mindspore/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/experimental/HPC-generator/generator.py" "redefined-builtin"

View File

@ -98,7 +98,7 @@ class ImageInversionAttack:
return x_grad
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training

View File

@ -108,7 +108,7 @@ class GradWrapWithLoss(Cell):
return gout[0]
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -127,7 +127,7 @@ def test_grad_values_and_infer_shape():
assert out_shape == (64, 10), 'output shape should be (64, 10)'
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -308,7 +308,7 @@ def test_print_assign_if():
check_output(cap.output, patterns)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -408,7 +408,7 @@ def test_assign_read_after_write():
np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -639,7 +639,7 @@ def _check_shape(shape):
raise ValueError(f"Invalid shape {shape}")
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -669,7 +669,7 @@ def test_constexpr_check():
print(out)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -854,7 +854,7 @@ def test_assign_while_if():
np.testing.assert_almost_equal(out.asnumpy(), expect.asnumpy())
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -920,7 +920,7 @@ def test_assign_return_true():
np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -957,7 +957,7 @@ def test_unpack_call():
np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -996,7 +996,7 @@ def test_tuple_of_tuple():
np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -1025,7 +1025,7 @@ def test_write_read_write():
np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -1410,7 +1410,7 @@ def test_if_cast():
np.testing.assert_array_equal(r1.asnumpy(), expect.asnumpy())
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -1437,7 +1437,7 @@ def test_while_forward():
assert np.allclose(output.asnumpy(), expect, 0.0001, 0.0001)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -1477,7 +1477,7 @@ def test_multi_add_assign():
np.testing.assert_array_equal(outputs, expects)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -60,7 +60,7 @@ def allclose_nparray(data_expected, data_me, rtol, atol, equal_nan=True):
assert True
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -114,7 +114,7 @@ class AutoMonadTwoAssignTwoAddnDependencyBenchmarkNet(Cell):
return out
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -270,7 +270,7 @@ class ControlOneIfOneParaOneAddn(Cell):
return out
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -626,7 +626,7 @@ class HighGrad(Cell):
return self.final_grad(*inputs)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -675,7 +675,7 @@ class SideEffectControlFlowAssignDependWhileNet(Cell):
return grad_out
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard

View File

@ -400,7 +400,7 @@ def test_summary():
assert tags == {'tensor', 'histogram', 'scalar', 'image'}
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -539,7 +539,7 @@ class FusedSparseLazyAdamNet(nn.Cell):
return self.var, self.m, self.v
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -371,7 +371,7 @@ class DropoutGenMaskNet(nn.Cell):
return s1, s2, s3
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -73,7 +73,7 @@ def test_float_not_overflow():
np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -43,7 +43,7 @@ class BackwardNet(nn.Cell):
return grads
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -57,7 +57,7 @@ def test_forward():
assert expect == output
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -170,7 +170,7 @@ def test_if_in_if():
control_flow_if_in_if(IfInIfNet, x, expect1, expect2)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -182,7 +182,7 @@ def test_if_in_if_01():
control_flow_if_in_if(IfInIfNet1, x, expect1, expect2)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -194,7 +194,7 @@ def test_if_in_if_02():
control_flow_if_in_if(IfInIfNet2, x, expect1, expect2)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -206,7 +206,7 @@ def test_if_in_if_03():
control_flow_if_in_if(IfInIfNet3, x, expect1, expect2)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -110,7 +110,7 @@ def test_backward():
assert graph_grads == Tensor(np.array(21), mstype.int32)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -126,7 +126,7 @@ def test_forward_replace_break():
# Problem: Exceed function call depth limit 1000.
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -49,7 +49,7 @@ class BackwardNet(nn.Cell):
return grads
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -64,7 +64,7 @@ def test_forward():
assert graph_out == Tensor(np.array(9), mstype.int32)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -57,7 +57,7 @@ class BackwardNet(nn.Cell):
return grads
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_forward_gpu():
@ -71,7 +71,7 @@ def test_forward_gpu():
assert graph_mode_out == Tensor(np.array(9), mstype.int32)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -24,7 +24,7 @@ from mindspore.common import dtype as mstype
grad_all = C.GradOperation(get_all=True)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -71,7 +71,7 @@ def test_for_in_if_01():
assert graph_backward_res == (Tensor([64], mstype.int32),)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -122,7 +122,7 @@ def test_for_in_if_02():
assert graph_backward_res == (Tensor([1], mstype.float32),)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -138,7 +138,7 @@ def control_flow_if_after_if(input_net, x, y, expect1, expect2):
assert graph_backward_res == expect2
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -151,7 +151,7 @@ def test_if_after_if():
control_flow_if_after_if(IfAfterIfNet, x, y, expect1, expect2)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -164,7 +164,7 @@ def test_if_after_if_01():
control_flow_if_after_if(IfAfterIfNet1, x, y, expect1, expect2)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -177,7 +177,7 @@ def test_if_after_if_02():
control_flow_if_after_if(IfAfterIfNet2, x, y, expect1, expect2)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -141,7 +141,7 @@ def control_flow_if_after_if_in_if(input_net, x, expect1, expect2):
assert graph_backward_res == expect2
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -176,7 +176,7 @@ def test_if_after_if_in_if_02():
control_flow_if_after_if_in_if(IfAfterIfInIfNet2, x, expect1, expect2)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -187,7 +187,7 @@ def test_if_after_if_in_if_02_ascend():
control_flow_if_after_if_in_if(IfAfterIfInIfNet2, x, expect1, expect2)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -58,7 +58,7 @@ class BackwardNet(nn.Cell):
return grads
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -74,7 +74,7 @@ def test_forward():
assert graph_mode_out == Tensor(np.array(21), mstype.int32)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -58,7 +58,7 @@ class BackwardNet(nn.Cell):
return grads
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -74,7 +74,7 @@ def test_forward():
assert graph_mode_out == (Tensor(np.array(-11), mstype.int32), Tensor(np.array(9), mstype.int32))
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -61,7 +61,7 @@ class BackwardNet(nn.Cell):
return grads
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -77,7 +77,7 @@ def test_forward():
assert graph_mode_out == (Tensor(np.array(-11), mstype.int32), Tensor(np.array(9), mstype.int32))
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -57,7 +57,7 @@ class BackwardNet(nn.Cell):
return grads
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -73,7 +73,7 @@ def test_forward():
assert graph_mode_out == Tensor(np.array(9), mstype.int32)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -22,7 +22,7 @@ from mindspore.common.parameter import Parameter
grad_all = C.GradOperation(get_all=True)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -22,7 +22,7 @@ from mindspore.common.parameter import Parameter
grad_all = C.GradOperation(get_all=True)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -56,7 +56,7 @@ class BackwardNet(nn.Cell):
return grads
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -71,7 +71,7 @@ def test_forward():
assert graph_mode_out == Tensor(np.array(30), mstype.int32)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -53,7 +53,7 @@ class BackwardNet(nn.Cell):
return grads
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -55,7 +55,7 @@ class BackwardNet(nn.Cell):
return grads
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -58,7 +58,7 @@ class BackwardNet(nn.Cell):
return grads
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -61,7 +61,7 @@ class BackwardNet(nn.Cell):
return grads
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -77,7 +77,7 @@ def test_forward():
assert graph_mode_out == (Tensor(np.array(121), mstype.int32), Tensor(np.array(10), mstype.int32))
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -58,7 +58,7 @@ class BackwardNet(nn.Cell):
return grads
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -59,7 +59,7 @@ class BackwardNet(nn.Cell):
return grads
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -58,7 +58,7 @@ class BackwardNet(nn.Cell):
return grads
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -56,7 +56,7 @@ class BackwardNet(nn.Cell):
return grads
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -56,7 +56,7 @@ class BackwardNet(nn.Cell):
return grads
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -55,7 +55,7 @@ class BackwardNet(nn.Cell):
return grads
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -22,7 +22,7 @@ from mindspore.common.parameter import Parameter
grad_all = C.GradOperation(get_all=True)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -53,7 +53,7 @@ class BackwardNet(nn.Cell):
return grads
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -68,7 +68,7 @@ def test_forward():
assert graph_out == Tensor(np.array(18), mstype.int32)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -22,7 +22,7 @@ from mindspore.common.parameter import Parameter
grad_all = C.GradOperation(get_all=True)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -70,7 +70,7 @@ def test_for_after_if_in_if():
assert graph_backward_res == (Tensor(55, mstype.int32),)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -24,7 +24,7 @@ from mindspore.common import dtype as mstype
grad_all = C.GradOperation(get_all=True)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -93,7 +93,7 @@ def test_for_after_while_in_if_01():
assert graph_backward_res == (Tensor([0], mstype.int32), Tensor([0], mstype.int32))
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -24,7 +24,7 @@ from mindspore.common import dtype as mstype
grad_all = C.GradOperation(get_all=True)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@ -85,7 +85,7 @@ def test_for_after_for_in_for_01():
assert graph_backward_res == (Tensor([0], mstype.int32), Tensor([1], mstype.int32))
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@ -296,7 +296,7 @@ def test_if_by_if_by_if():
assert np.allclose(expect, output.asnumpy(), 0.0001, 0.0001)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -329,7 +329,7 @@ def test_mixed_while_if():
assert np.allclose(expect, output.asnumpy(), 0.0001, 0.0001)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -73,7 +73,7 @@ def test_while_grad():
assert np.allclose(graph_output[2].asnumpy(), expect_two, 0.0001, 0.0001)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@ -110,7 +110,7 @@ def test_while_with_const_param_grad():
assert np.allclose(graph_output[1].asnumpy(), expect_two, 0.0001, 0.0001)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@ -180,7 +180,7 @@ def test_while_with_param_forward():
assert np.allclose(graph_output.asnumpy(), expect, 0.0001, 0.0001)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@ -256,7 +256,7 @@ def test_while_with_param_grad():
assert np.allclose(graph_output[0].asnumpy(), expect, 0.0001, 0.0001)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@ -344,7 +344,7 @@ def test_while_opt_endless():
assert np.allclose(graph_output[2].asnumpy(), expect3, 0.0001, 0.0001)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -379,7 +379,7 @@ def test_no_while_call():
assert np.allclose(graph_output.asnumpy(), expect, 0.0001, 0.0001)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@ -425,7 +425,7 @@ def test_while_with_param_grad_with_const_branch():
assert np.allclose(graph_output[0].asnumpy(), expect, 0.0001, 0.0001)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -519,7 +519,7 @@ def test_for_while_with_param_grad_basic():
assert np.allclose(graph_output[0].asnumpy(), expect, 0.0001, 0.0001)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@ -564,7 +564,7 @@ def test_for_while_with_param_grad_normal():
assert np.allclose(graph_output[0].asnumpy(), expect, 0.0001, 0.0001)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@ -648,7 +648,7 @@ def test_while_with_param_basic_grad_mul():
assert np.allclose(graph_output[0].asnumpy(), expect, 0.0001, 0.0001)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@ -789,7 +789,7 @@ def test_while_if_with_param_grad():
assert np.allclose(graph_output[0].asnumpy(), expect, 0.0001, 0.0001)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -829,7 +829,7 @@ def test_while_with_param_grad_not_enter_while():
assert np.allclose(graph_output[0].asnumpy(), 1, 0.0001, 0.0001)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@ -866,7 +866,7 @@ def test_with_param_if_by_if_forward():
assert np.allclose(graph_output.asnumpy(), expect, 0.0001, 0.0001)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@ -911,7 +911,7 @@ def test_with_param_if_by_if_grad_inputs():
assert np.allclose(graph_output[2].asnumpy(), expect3, 0.0001, 0.0001)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@ -954,7 +954,7 @@ def test_with_param_if_by_if_grad_parameter():
assert np.allclose(graph_output[0].asnumpy(), expect, 0.0001, 0.0001)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@ -1080,7 +1080,7 @@ def test_if_by_if_forward():
assert np.allclose(graph_output.asnumpy(), expect, 0.0001, 0.0001)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@ -1149,7 +1149,7 @@ def test_if_by_if_forward_control_tuple_switch():
assert np.allclose(graph_output.asnumpy(), expect, 0.0001, 0.0001)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard

View File

@ -22,7 +22,7 @@ from mindspore.common import dtype as mstype
grad_all = C.GradOperation(get_all=True)
# Although we don't transform for to while any more, we keep this test case.
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -34,7 +34,7 @@ def hof(x):
ret = g(x)(x)
return ret
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -109,7 +109,7 @@ class SingleIfNet(nn.Cell):
return y
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@ -126,7 +126,7 @@ def test_export_lenet_grad_mindir():
assert os.path.exists(verify_name)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@ -150,7 +150,7 @@ def test_load_mindir_and_run():
assert np.allclose(outputs0.asnumpy(), outputs_after_load.asnumpy())
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training

View File

@ -44,7 +44,7 @@ class RecrusiveNet(nn.Cell):
return f(x, z)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training

View File

@ -38,7 +38,7 @@ class CaseNet(nn.Cell):
return x
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@ -65,7 +65,7 @@ def test_mindir_switch_layer():
@pytest.mark.skip(reason="depend on export")
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@ -84,7 +84,7 @@ def test_mindir_export():
@pytest.mark.skip(reason="depend on export")
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard

View File

@ -30,7 +30,7 @@ class SingleWhileNet(nn.Cell):
return y
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@ -94,7 +94,7 @@ class SingleWhileInlineNet(nn.Cell):
return y
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@ -112,7 +112,7 @@ def test_single_while_inline_export():
assert os.path.exists(mindir_name)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training

View File

@ -126,7 +126,7 @@ def op_network_with_step_num(dataset, step_num):
return op_network_with_epoch(net_with_dataset, step_num)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -148,7 +148,7 @@ def test_tdt_consume_beyond_produce():
assert True
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -37,7 +37,7 @@ def test_train(device_type):
test_tdt_consume_beyond_produce()
@security_off_wrap
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard
def test_train_with_Ascend():

View File

@ -122,7 +122,7 @@ def run_e2e_dump():
del os.environ['MINDSPORE_DUMP_CONFIG']
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -249,7 +249,7 @@ def test_async_dump_net_multi_layer_mode1():
del os.environ['MINDSPORE_DUMP_CONFIG']
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -38,7 +38,7 @@ class Net(nn.Cell):
return self.concat((out1_shape, out2_shape))
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training

View File

@ -36,9 +36,7 @@ class Net(nn.Cell):
out2_shape = self.reshape(out2_unique, (1, -1, 2))
return self.concat((out1_shape, out2_shape))
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.level1
@pytest.mark.env_onecard
def test_dynamic_concat_cpu():
x1 = Tensor(np.array([1, 2, 3, 1, 4, 2]), mstype.int32)

View File

@ -36,7 +36,7 @@ class NetWithSparseGatherV2(nn.Cell):
return self.gather(self.weight1, indices, self.axis) + self.weight2
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -72,7 +72,7 @@ class Net(nn.Cell):
return x1, x2, x3, x4, x5
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -34,7 +34,7 @@ class Net(nn.Cell):
return self.map_uniform(x, self.per_group_size, self.group_num)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -33,7 +33,7 @@ class Net(nn.Cell):
return self.pad_and_shift(x, y, self.shift_idx)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -34,7 +34,7 @@ class Net(nn.Cell):
return self.sub_and_filter(x, self.max_num, self.offset)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -42,7 +42,7 @@ class NetFunc(nn.Cell):
return self.unique(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -53,7 +53,7 @@ class UniqueSquare(nn.Cell):
return self.square(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -67,7 +67,7 @@ def test_unique_cpu():
assert (output[1].asnumpy() == expect2).all()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -191,7 +191,7 @@ class ForwardBGCF(nn.Cell):
items, i_neighs, i_gnew_neighs, 1)
return user_rep, item_rep
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard

View File

@ -35,7 +35,7 @@ class SliceNet(nn.Cell):
return x
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard

View File

@ -322,7 +322,7 @@ def resnet50(num_classes):
return ResNet(ResidualBlock, [3, 4, 6, 3], num_classes)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard

View File

@ -98,7 +98,7 @@ class TrainOneStepCell(nn.Cell):
return self.optimizer(grads)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard
@ -114,7 +114,7 @@ def test_export_lenet_grad_mindir():
assert os.path.exists(verify_name)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard

View File

@ -90,7 +90,7 @@ class WithLossCellLocal(nn.Cell):
return self.loss(out, label)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training

View File

@ -156,7 +156,7 @@ def test_ascend_lenet():
return loss_output
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -328,7 +328,7 @@ def test_arange():
match_array(actual, expected, error=6)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@ -595,7 +595,7 @@ def onp_trace(arr):
return onp.trace(arr, offset=4, axis1=1, axis2=2)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@ -626,7 +626,7 @@ def onp_meshgrid(*xi):
return a, b, c, d
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@ -763,7 +763,7 @@ def test_vander():
match_all_arrays(mnp_vander, onp_vander, error=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@ -884,7 +884,7 @@ def test_tril_indices_from():
match_all_arrays(mnp_res, onp_res)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@ -1050,7 +1050,7 @@ def test_empty_like_exception():
mnp.empty_like([[1, 2, 3], [4, 5]])
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training

View File

@ -610,7 +610,7 @@ def onp_where(condition, x, y):
return onp.where(condition, x, y)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@ -890,7 +890,7 @@ def onp_broadcast_to(x):
return a, b
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@ -1592,7 +1592,7 @@ def test_piecewise():
match_all_arrays(mnp_res, onp_res)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training

View File

@ -185,7 +185,7 @@ def onp_isfinite(x):
return onp.isfinite(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training

View File

@ -830,7 +830,7 @@ def onp_logaddexp(x1, x2):
return onp.logaddexp(x1, x2)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@ -957,7 +957,7 @@ def onp_clip(x):
return a, b, c, d, e, f
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@ -1129,7 +1129,7 @@ def onp_remainder(x, y):
return onp.remainder(x, y)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@ -1764,7 +1764,7 @@ def test_cov():
match_all_arrays(mnp_res, onp_res, error=1e-5)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@ -1806,7 +1806,7 @@ def onp_lcm(x, y):
return onp.lcm(x, y)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@ -2082,7 +2082,7 @@ def test_argmin():
match_res(mnp.argmin, onp.argmin, x, axis=i)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training

View File

@ -89,7 +89,7 @@ def test_tensor_copy_slices_bprop():
assert np.allclose(grad_output[0].asnumpy(), np.array([-6.75]))
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -97,7 +97,7 @@ def test_tensor_copy_slices_ascend_graph():
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
test_tensor_copy_slices()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -47,7 +47,7 @@ def test_conv2d_depthwiseconv2d_initializer():
assert output.shape == (3, 128, 32, 28)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard

View File

@ -88,7 +88,7 @@ class GRUWeightBias():
return w_ih_list, w_hh_list, b_ih_list, b_hh_list
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -133,7 +133,7 @@ def test_sit_lstm_forward_input_3_32_32_is_32_hs_16():
assert np.allclose(cy.asnumpy(), cy_pynative.asnumpy(), 0.002, 0.002)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -34,7 +34,7 @@ class Net(nn.Cell):
return self.add3(input1, input2)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard

View File

@ -37,7 +37,7 @@ class Net(nn.Cell):
return self.square(data)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard

View File

@ -50,7 +50,7 @@ def test_basic():
assert np.allclose(expect, output, 1.e-4, 1.e-7)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_basic_gpu():
@ -58,7 +58,7 @@ def test_basic_gpu():
test_basic()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -53,14 +53,14 @@ def assign_add():
assert np.allclose(o1.asnumpy(), e1.asnumpy())
assert np.allclose(o2.asnumpy(), e2.asnumpy())
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_assign_add_gpu():
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
assign_add()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -124,7 +124,7 @@ def test_atomic_add_multi_output_gpu():
atomic_add_multi_output()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -133,7 +133,7 @@ def test_atomic_add_sum_output_ascend():
atomic_add_sum_output()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -142,7 +142,7 @@ def test_atomic_add_single_output_ascend():
atomic_add_single_output()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -50,7 +50,7 @@ def test_bias_add_grad(shape, dtype):
atol = 1.e-3
assert np.allclose(expect.asnumpy(), output.asnumpy(), rtol, atol, equal_nan=True)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -51,7 +51,7 @@ def test_basic():
output_np = output.asnumpy().copy()
assert np.allclose(expect_np, output_np, 5.e-3, 5.e-3)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -59,7 +59,7 @@ def test_clip_by_norm_no_div_sum(shape0, shape1, shape2, shape3, dtype):
assert np.allclose(expect_np, output_np, 0.0001, 0.0001)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -54,7 +54,7 @@ def test_basic_gpu():
test_basic()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -71,7 +71,7 @@ def test_basic_gpu():
test_basic()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -91,7 +91,7 @@ def test_gelu_gpu():
context.set_context(mode=context.GRAPH_MODE, enable_graph_kernel=True, device_target="GPU")
test_gelu()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -107,7 +107,7 @@ def test_gelu_grad_gpu():
context.set_context(mode=context.GRAPH_MODE, enable_graph_kernel=True, device_target="GPU")
test_gelu_grad()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -74,7 +74,7 @@ def lamb_apply_optimizer_assign():
assert np.allclose(o3, e3)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -53,7 +53,7 @@ def lamb_apply_weight_assign():
assert np.allclose(output, expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -129,7 +129,7 @@ def test_layernorm_gpu():
test_layernorm([4, 32, 32], np.float32, -1, -1)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -106,7 +106,7 @@ def test_logsoftmaxgrad_gpu():
context.set_context(mode=context.GRAPH_MODE, enable_graph_kernel=True, device_target="GPU")
test_logsoftmaxgrad()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -114,7 +114,7 @@ def test_logsoftmax_asend():
context.set_context(mode=context.GRAPH_MODE, enable_graph_kernel=True, device_target="Ascend")
test_logsoftmax()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -101,7 +101,7 @@ def test_gpu_case_2():
context.set_context(graph_kernel_flags="--enable_low_precision=true")
test_case_2()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -110,7 +110,7 @@ def test_ascend_case_1():
context.set_context(graph_kernel_flags="--enable_low_precision=true --disable_pass=highlevelopt2.atomic_clean")
test_case_1()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -69,7 +69,7 @@ def test_basic1():
output_np = output.asnumpy().copy()
assert np.allclose(expect_np, output_np, 6.e-4, 6.e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -77,7 +77,7 @@ def test_basic_ascend():
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
test_basic()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -60,7 +60,7 @@ def test_batchmatmul():
output_np = output.asnumpy().copy()
assert np.allclose(expect_np, output_np, 6.e-4, 6.e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -68,7 +68,7 @@ def test_matmul_ascend():
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
test_matmul()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -65,7 +65,7 @@ def test_maximum_grad_gpu():
test_maximum_grad()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -64,7 +64,7 @@ def test_minimum_grad_gpu():
test_minimum_grad()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -182,7 +182,7 @@ def test_gpu_5():
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
test_basic3(Net5)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -190,7 +190,7 @@ def test_ascend_1():
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
test_basic1(Net1)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -198,7 +198,7 @@ def test_ascend_2():
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
test_basic2(Net2)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -206,7 +206,7 @@ def test_ascend_3():
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
test_basic2(Net3)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@ -214,7 +214,7 @@ def test_ascend_4():
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
test_basic1(Net4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -48,7 +48,7 @@ def test_reduce_mean_gpu():
test_reduce_mean()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -56,7 +56,7 @@ def test_relu_gpu():
test_relu((12, 1), np.float16)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -89,7 +89,7 @@ def test_castup_gpu():
test_castup()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -49,7 +49,7 @@ def test_tanh_grad_gpu():
test_tanh_grad()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@ -48,7 +48,7 @@ def test_tile(shape, dtype, multiples):
assert np.allclose(expect_np, output_np, 0.0001, 0.0001)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

Some files were not shown because too many files have changed in this diff Show More