move useless testcases from level0 to level1
parent e02a7c15d8
commit 64d34104e5
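The change itself is purely a marker swap: every affected test keeps its platform and env_onecard markers and only has its scheduling marker moved from level0 to level1. A minimal sketch of the pattern (marker names are taken from this diff; the test body and the CI invocation below are illustrative assumptions, not part of the commit):

import numpy as np
import pytest

# Marked level1 after this commit, so a gate that selects only level0
# cases (e.g. `pytest -m level0`) no longer runs it.
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_example():
    # hypothetical placeholder body; the real tests build and run networks
    assert np.allclose(np.ones(3), np.ones(3))

Selecting by marker (`pytest -m level0` vs. `pytest -m level1`) is standard pytest behaviour; how the project's CI maps these levels to gating stages is not shown in this commit.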
@@ -1042,7 +1042,7 @@ def test_variable_from_outer_graph():
     np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard
@@ -1079,7 +1079,7 @@ def test_ctrl_while_by_while_and_if_in_first_while():
     net(input_me_a)
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard
@@ -226,7 +226,7 @@ class SideEffectTwoAssignTwoAddnDependencyNet(Cell):
         return grad_out
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
 def test_ctrl_while_by_while_and_if_in_first_while():
@@ -262,7 +262,7 @@ def test_ctrl_while_by_while_and_if_in_first_while():
     net(input_me_a)
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
 def test_ctrl_while_by_while_and_while_in_first_while():
@@ -507,7 +507,7 @@ class SideEffectControlFlowAssignDependTwoIfNet(Cell):
         return grad_out
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard
@@ -61,7 +61,7 @@ class MSELoss(Cell):
         return self.reduce_mean(self.square(diff), get_axis(diff))
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard
@@ -1484,7 +1484,7 @@ def test_if_by_if_forward_all_const_branch():
     assert np.allclose(graph_output.asnumpy(), pynative_output.asnumpy(), 0.0001, 0.0001)
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
@@ -1520,7 +1520,7 @@ def test_if_const_grad():
     net(a, b)
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
@@ -1560,7 +1560,7 @@ def test_if_by_if_const_grad():
     net(a, b)
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
@@ -1594,7 +1594,7 @@ def test_while_const_grad():
     net(a, b)
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
@@ -36,7 +36,7 @@ class Net(nn.Cell):
         return x
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard
@@ -42,7 +42,7 @@ class Net(nn.Cell):
         return x
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard
@@ -42,7 +42,7 @@ class Net(nn.Cell):
         return x
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard
@@ -78,7 +78,7 @@ def train(net, data, label):
     assert np.all(diff < 1.e-6)
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard
@@ -71,7 +71,7 @@ def train(net, data, label):
     assert res
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_lenet():
@@ -187,7 +187,7 @@ def create_dataset(data_path, batch_size=32, repeat_size=1,
     return mnist_ds
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
 def test_train_and_eval_lenet():
@@ -57,7 +57,7 @@ class Net2(nn.Cell):
         return self.bias_add1(self.bias_add(x, b), c)
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_bias_add2():
@@ -33,7 +33,7 @@ class Net(nn.Cell):
         return self.dropout(x)
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_net():
@@ -54,7 +54,7 @@ class Net1(nn.Cell):
         return self.dropout(x)
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_net1():
@@ -75,7 +75,7 @@ class Net2(nn.Cell):
         return self.dropout(x)
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_net2():
@@ -46,7 +46,7 @@ class NetGatherDGrad(nn.Cell):
         return self.grad(self.network)(inputx, index, output_grad)
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_gatherd_grad_fp32():
@@ -64,7 +64,7 @@ def test_gatherd_grad_fp32():
     print(output_grad.asnumpy())
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_gatherd_grad_fp16():
@@ -82,7 +82,7 @@ def test_gatherd_grad_fp16():
     print(output_grad.asnumpy())
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_gatherd_grad_int32():
@@ -254,7 +254,7 @@ class MultiLayerBiLstmNet(nn.Cell):
         return self.lstm(self.x, (self.h, self.c))
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_multi_layer_bilstm():
@@ -345,7 +345,7 @@ class Net(nn.Cell):
         return self.lstm(self.x, (self.h, self.c))[0]
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_grad():
@@ -63,7 +63,7 @@ def gen_data(inputA_np, inputB_np, grad_=None):
     return output
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_min_tensor_grad_4d():
@@ -42,7 +42,7 @@ class MomentumNet(nn.Cell):
         return output
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_momentum():
@@ -35,7 +35,7 @@ class Net(nn.Cell):
 arr_x = np.array([[0], [1], [2], [3]]).astype(np.int32)
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_net():
@@ -48,7 +48,7 @@ def test_net():
 arr_x = np.array([[0], [1], [2], [3]]).astype(np.float64)
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_net_float64():
@@ -61,7 +61,7 @@ def test_net_float64():
 arr_x = np.array([[0], [1], [2], [3]]).astype(np.bool_)
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_net_bool():
@@ -46,7 +46,7 @@ def test_error_on_dynamic_shape_input_is_dynamic():
         error_on_dynamic_shape_input.infer_shape([-1, -1, -1])
     assert "Input is dynamically shaped" in str(info.value)
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
 def test_error_on_dynamic_shape_input_not_dynamic():
@@ -42,7 +42,7 @@ class NetMomentum(nn.Cell):
         return output
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
 def test_momentum():
@@ -118,84 +118,84 @@ def test_print_multiple_types():
     net(x, y, z)
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
 def test_print_bool():
     print_testcase(np.bool)
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
 def test_print_int8():
     print_testcase(np.int8)
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
 def test_print_int16():
     print_testcase(np.int16)
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
 def test_print_int32():
     print_testcase(np.int32)
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
 def test_print_int64():
     print_testcase(np.int64)
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
 def test_print_uint8():
     print_testcase(np.uint8)
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
 def test_print_uint16():
     print_testcase(np.uint16)
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
 def test_print_uint32():
     print_testcase(np.uint32)
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
 def test_print_uint64():
     print_testcase(np.uint64)
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
 def test_print_float16():
     print_testcase(np.float16)
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
 def test_print_float32():
     print_testcase(np.float32)
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
 def test_print_string():
@@ -52,7 +52,7 @@ class CategoricalProb(nn.Cell):
 
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
 def test_probability_categorical_prob_cdf_probs_none():
@@ -36,7 +36,7 @@ class CauchyMean(nn.Cell):
 
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.env_onecard
 def test_probability_cauchy_mean_loc_scale_rand_2_ndarray():
@@ -61,7 +61,7 @@ class CauchyProb(nn.Cell):
         return out1, out2, out3, out4, out5, out6
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.env_onecard
 def test_probability_cauchy_prob_cdf_loc_scale_rand_4_ndarray():
@@ -193,7 +193,7 @@ def test_loss_scale_fp16_lr_overflow_set_sense_scale():
     assert output_1[0].asnumpy() == output_2[0].asnumpy()
     assert output_1[1].asnumpy() == output_2[1].asnumpy() == True
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard
@@ -179,7 +179,7 @@ def test_parser_switch_layer_inputs_tuple():
     assert np.allclose(goodout.asnumpy(), netout.asnumpy(), 0, 0)
 
 
-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
 @pytest.mark.env_onecard