prune less effective Ascend testcases to level1

yanghaoran 2021-08-27 19:59:59 +08:00
parent 133d363fb2
commit a0ff742442
38 changed files with 113 additions and 113 deletions
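Every hunk below makes the same one-line change: the custom pytest marker @pytest.mark.level0 is replaced with @pytest.mark.level1, moving the affected Ascend cases out of the level0 tier (typically the most frequently run one) into level1. A minimal sketch of the resulting marker set on a test follows; the test name and body are illustrative only and do not come from any of the changed files:

    import pytest

    # After this commit the case carries level1 instead of level0, so a CI run
    # that selects only level0 cases (e.g. `pytest -m level0`) no longer picks it up.
    @pytest.mark.level1                        # was: @pytest.mark.level0
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_ascend_training
    @pytest.mark.env_onecard
    def test_example_ascend_case():            # hypothetical test, for illustration only
        assert 1 + 1 == 2

The platform and environment markers are left untouched; only the level marker changes, which is why the diff shows exactly one deletion and one addition per hunk.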

View File

@@ -320,7 +320,7 @@ def test_assign():
np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -341,7 +341,7 @@ def test_assign_implicit():
assert out.dtype == ms.float32
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -370,7 +370,7 @@ def test_assign_write_after_read():
np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -424,7 +424,7 @@ def test_assign_if():
np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -450,7 +450,7 @@ def test_if():
np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -471,7 +471,7 @@ def test_while():
np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -497,7 +497,7 @@ def test_assign_while():
np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -597,7 +597,7 @@ def test_print_assign_for():
check_output(cap.output, patterns)
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -684,7 +684,7 @@ def test_if_lambda():
np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -712,7 +712,7 @@ def test_multi_assign():
np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -771,7 +771,7 @@ def test_multi_assign_print():
net.para2.data.asnumpy(), expect_para2.asnumpy())
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -808,7 +808,7 @@ def test_matmul_assign_biasadd():
context.set_context(mode=context.GRAPH_MODE)
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -874,7 +874,7 @@ def test_isolate_call():
np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -1370,7 +1370,7 @@ def test_auto_mixed_precision_controlflow_auto(pynative_save_graphs):
# op_cast should be located in order_list after abstract_specialize.
# Besides Ascend, it can work on CPU.
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@@ -411,7 +411,7 @@ class SideEffectAssignAddnReluReturnParNet(Cell):
return grad_out
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@@ -312,7 +312,7 @@ class ScatterNdUpdateNet(nn.Cell):
return self.input_x
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -337,7 +337,7 @@ class ScatterNonAliasingAddNet(nn.Cell):
return out
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@@ -37,7 +37,7 @@ class AdamNet(nn.Cell):
return self.var, self.m, self.v
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -264,7 +264,7 @@ class ApplyFtrlNet(nn.Cell):
return self.var, self.accum, self.linear
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -321,7 +321,7 @@ class ApplyMomentumNet(nn.Cell):
return self.var, self.accum
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -351,7 +351,7 @@ class ApplyPowerSignNet(nn.Cell):
return self.var, self.m
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -439,7 +439,7 @@ class ApplyRMSPropNet(nn.Cell):
return self.var
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -508,7 +508,7 @@ class FusedSparseFtrlNet(nn.Cell):
return self.var, self.accum, self.linear
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -576,7 +576,7 @@ class FusedSparseProximalAdagradNet(nn.Cell):
return self.var, self.accum
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -667,7 +667,7 @@ class SparseApplyFtrlNet(nn.Cell):
return self.var, self.accum, self.linear
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -699,7 +699,7 @@ class SparseApplyFtrlV2Net(nn.Cell):
return self.var, self.accum, self.linear
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -729,7 +729,7 @@ class SparseApplyProximalAdagradNet(nn.Cell):
return self.var, self.accum
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@@ -42,7 +42,7 @@ class Sampling(nn.Cell):
return s1, s2, s3
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -68,7 +68,7 @@ class CompositeNormalNet(nn.Cell):
return s1, s2, s3
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -95,7 +95,7 @@ class CompositeLaplaceNet(nn.Cell):
return s1, s2, s3
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -122,7 +122,7 @@ class CompositeGammaNet(nn.Cell):
return s1, s2, s3
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -175,7 +175,7 @@ class CompositeUniformNet(nn.Cell):
return s1, s2, s3
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -231,7 +231,7 @@ class StandardLaplaceNet(nn.Cell):
return s1, s2, s3
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@@ -59,7 +59,7 @@ class NpuFloatNet(nn.Cell):
return out
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@@ -189,7 +189,7 @@ class NotOperation(nn.Cell):
return not x_sum
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -207,7 +207,7 @@ def test_simple_if():
assert np.allclose(expect, output.asnumpy(), 0.0001, 0.0001)
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -223,7 +223,7 @@ def test_simple_if_with_assign():
assert np.allclose(expect, output.asnumpy(), 0.0001, 0.0001)
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -237,7 +237,7 @@ def test_if_in_if():
assert np.allclose(expect, output.asnumpy(), 0.0001, 0.0001)
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -271,7 +271,7 @@ def test_simple_while():
assert np.allclose(expect, output.asnumpy(), 0.0001, 0.0001)
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@@ -148,7 +148,7 @@ def test_while_with_variable_grad():
assert np.allclose(graph_output[1].asnumpy(), expect_two, 0.0001, 0.0001)
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@@ -217,7 +217,7 @@ def test_while_endless_case():
assert np.allclose(graph_output.asnumpy(), pynative_output.asnumpy(), 0.0001, 0.0001)
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@@ -298,7 +298,7 @@ def test_while_with_param_forward_with_const_branch():
assert np.allclose(graph_output.asnumpy(), pynative_output.asnumpy(), 0.0001, 0.0001)
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@@ -626,7 +626,7 @@ def test_while_with_param_basic_grad():
assert np.allclose(graph_output[0].asnumpy(), pynative_output[0].asnumpy(), 0.0001, 0.0001)
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@@ -1034,7 +1034,7 @@ def test_with_param_if_by_if_grad_param_excute_null():
assert np.allclose(graph_output[0].asnumpy(), pynative_output[0].asnumpy(), 0.0001, 0.0001)
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard

View File

@@ -132,7 +132,7 @@ def while_in_while_in_while(x, y, z):
return out
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard
@@ -148,7 +148,7 @@ def test_if_by_if():
assert output == expect
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard
@@ -158,7 +158,7 @@ def test_if_in_if():
assert output == expect
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard
@@ -168,7 +168,7 @@ def test_simple_while():
assert output == expect
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard
@@ -178,7 +178,7 @@ def test_while_by_while():
assert output == expect
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard
@@ -188,7 +188,7 @@ def test_while_in_while():
assert output == expect
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard
@@ -198,7 +198,7 @@ def test_while_by_while_in_while():
assert output == expect
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard

View File

@@ -37,7 +37,7 @@ class CaseNet(nn.Cell):
return x
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training

View File

@@ -35,7 +35,7 @@ class Net(nn.Cell):
return out
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard

View File

@@ -37,7 +37,7 @@ class GradNet(nn.Cell):
return gradient_function(x, y)
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@@ -74,7 +74,7 @@ def test_lazy_adam_net():
np.allclose(net.weight2.asnumpy(), np.array([[[0.9, 0.9]], [[0.9, 0.9]], [[0.9, 0.9]]]))
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@@ -64,7 +64,7 @@ class TestLocalization:
self.saliency_gt = mock_gradient_call(self.explainer, self.data, self.target)
self.num_class = 2
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -81,7 +81,7 @@ class TestLocalization:
res = pg.evaluate(self.explainer, self.data, targets=self.target, mask=self.masks)
assert np.max(np.abs(np.array([res_gt]) - res)) < 1e-5
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -100,7 +100,7 @@ class TestLocalization:
assert np.allclose(np.array([res_gt]), res)
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -116,7 +116,7 @@ class TestLocalization:
res = pg.evaluate(self.explainer, self.data, targets=self.target, mask=self.masks, saliency=self.saliency_gt)
assert np.allclose(np.array([res_gt]), res)
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@@ -57,7 +57,7 @@ class Net(nn.Cell):
return x
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training

View File

@@ -59,7 +59,7 @@ class GradSec(nn.Cell):
return output
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training

View File

@@ -105,7 +105,7 @@ def test_sit_auto_mix_precision_train_o3():
assert np.allclose(out.asnumpy(), out_pynative.asnumpy(), 0.001, 0.001)
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@@ -1382,7 +1382,7 @@ def test_negative():
match_array(mnp_neg.asnumpy(), onp_neg, 1e-5)
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@@ -2132,7 +2132,7 @@ def test_digitize():
match_res(mnp.digitize, onp.digitize, x, bins, right=True)
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training

View File

@@ -25,7 +25,7 @@ from mindspore import Tensor
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard
@@ -36,7 +36,7 @@ def test_conv2d_depthwiseconv2d_str():
assert output.shape == (3, 128, 32, 28)
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard

View File

@@ -32,7 +32,7 @@ class DynamicGRUV2(nn.Cell):
return self.dynamic_gru(x, weight_i, weight_h, bias_i, bias_h, None, init_h)
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

View File

@@ -48,7 +48,7 @@ def test_net():
expect = np.array([1.0, 16.0, 81.0]).astype(np.float32)
assert (output.asnumpy() == expect).all()
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard

View File

@@ -147,7 +147,7 @@ def test_layernorm_grad_gpu():
test_layernorm_grad([4, 32, 32], np.float32, -1, -1)
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@@ -106,7 +106,7 @@ def test_castdown_gpu():
test_castdown()
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@@ -55,7 +55,7 @@ def test_softmax_gpu():
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
test_softmax([4, 32, 48], np.float32)
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@@ -16,7 +16,7 @@ import os
import pytest
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@@ -232,7 +232,7 @@ def test_loss_scale_fp16_opt_rmsprop_overflow():
assert output_1[0].asnumpy() == output_2[0].asnumpy()
assert output_1[1].asnumpy() == output_2[1].asnumpy() == True
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@@ -25,7 +25,7 @@ from mindspore.common.parameter import Parameter
def setup_module():
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@@ -25,7 +25,7 @@ from mindspore.ops import operations as P
def setup_module():
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@@ -72,7 +72,7 @@ class GradOfAllInputs(_Grad):
network=network, real_inputs_count=real_inputs_count)
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -120,7 +120,7 @@ def test_row_tensor_in_while():
assert dense_shape == out[2]
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@@ -161,7 +161,7 @@ class Ms_Cell_Change_Shape(nn.Cell):
return dout
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -185,7 +185,7 @@ def test_pynative_lenet_train_hook_function_print_and_save_grad():
assert success
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -198,7 +198,7 @@ def test_pynative_custom_bprop_and_Cell_MulAdd():
(Tensor(1.0, mstype.float32), Tensor(2.0, mstype.float32))
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -211,7 +211,7 @@ def test_pynative_custom_bprop_and_Cell_Ms_Cell_Change_Shape():
assert "Shapes of input and parameter are different, input index" in str(ex.value)
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@@ -406,7 +406,7 @@ def pynative_hook_child_cell_record_grad():
if ms_net.grad_output_list or ms_net.grad_input_list:
assert False
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -421,7 +421,7 @@ def test_pynative_hook_diff_hook_gpu():
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
pynative_hook_diff_hook()
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -466,7 +466,7 @@ def test_pynative_hook_mul_change_input_grad_gpu():
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
pynative_hook_mul_change_input_grad()
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -496,7 +496,7 @@ def test_pynative_hook_outermost_cell_change_grad_gpu():
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
pynative_hook_outermost_cell_change_grad()
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -511,7 +511,7 @@ def test_pynative_hook_outermost_cell_record_grad_gpu():
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
pynative_hook_outermost_cell_record_grad()
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -526,7 +526,7 @@ def test_pynative_hook_bprop_outermost_cell_record_grad_gpu():
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
pynative_hook_bprop_outermost_cell_record_grad()
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@@ -218,7 +218,7 @@ def argmaxwithvalue_input():
fact.forward_cmp()
fact.grad_cmp()
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@@ -128,7 +128,7 @@ class GradWrap(nn.Cell):
return grad_by_list(self.network, weights)(x, label)
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_cpu
@@ -166,7 +166,7 @@ def test_ascend_pynative_lenet():
assert loss_output.asnumpy() > 0.003
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_cpu

View File

@@ -161,7 +161,7 @@ def test_mixed_precision_multiples_cell_gpu_01():
context.set_context(device_target="GPU")
mixed_precision_multiples_cell_01()
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -176,7 +176,7 @@ def test_mixed_precision_multiples_cell_gpu_02():
context.set_context(device_target="GPU")
mixed_precision_multiples_cell_02()
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@@ -33,7 +33,7 @@ class GradofParams(nn.Cell):
out = self.grad(self.net, self.params)(*x)
return out
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training

View File

@@ -25,7 +25,7 @@ def setup_module():
# GPU: does not supported op "FloorMod"
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -73,7 +73,7 @@ def test_tesnsor_augassign_by_slice():
# GPU: does not supported op "FloorMod"
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@@ -134,7 +134,7 @@ def test_tesnsor_augassign_by_ellipsis():
# GPU: does not supported op "FloorMod"
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard

View File

@@ -79,7 +79,7 @@ class NetWorkSliceEllipsis(Cell):
return ret0, ret1, ret2, ret3
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@@ -111,7 +111,7 @@ class NetWorkReduceDimension(Cell):
return ret1, ret2, ret3, ret4
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@@ -171,7 +171,7 @@ class TensorGetItemByThreeTensors(Cell):
return ret0, ret1, ret2
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@@ -359,7 +359,7 @@ class TensorSetItemByMixedTensors_0(Cell):
return ret
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@@ -399,7 +399,7 @@ class TensorSetItemByMixedTensors_1(Cell):
return ret
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@@ -803,7 +803,7 @@ def test_setitem_by_tensor_with_tuple_of_tensor_error():
net(index_0_ms, index_1_ms, index_2_ms, value_0_ms, value_1_ms)
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@@ -903,7 +903,7 @@ def test_tensor_assign_slice_value_1():
assert np.all(z == out.asnumpy())
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@@ -1099,7 +1099,7 @@ class TensorAssignWithBoolTensorIndex2Error(Cell):
return a
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training

View File

@@ -57,7 +57,7 @@ class NumpySetItemByList():
return x
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@@ -73,7 +73,7 @@ def test_setitem_by_list():
setup_testcase(x, cases)
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@@ -90,7 +90,7 @@ def test_setitem_with_sequence():
setup_testcase(x, cases)
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@@ -107,7 +107,7 @@ def test_setitem_dtype():
setup_testcase(x, cases)
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@@ -124,7 +124,7 @@ def test_setitem_by_tuple_with_int():
setup_testcase(x, cases)
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@@ -142,7 +142,7 @@ def test_setitem_by_tuple_with_list():
setup_testcase(x, cases)
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@@ -177,7 +177,7 @@ def test_setitem_with_broadcast():
setup_testcase(x, cases)
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@@ -192,7 +192,7 @@ def test_setitem_mul_by_scalar():
setup_testcase(x, cases)
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@@ -212,7 +212,7 @@ def test_setitem_by_slice():
setup_testcase(x, cases)
-@pytest.mark.level0
+@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training