Take down always-succeeding test cases

This commit is contained in:
liuruotao 2023-12-19 11:13:10 +08:00 committed by fengyixing
parent 370abc06b2
commit 12f2251ea3
366 changed files with 1096 additions and 1096 deletions

View File

@@ -201,7 +201,7 @@ class ForwardNet2(Cell):
return out
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@@ -364,7 +364,7 @@ def test_parameter_tuple_assign_addn_inner_net():
assert out == (12, 15)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@@ -454,7 +454,7 @@ def test_parameter_value_control_flow_ascend():
assert graph_out == (9, 4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training

View File

@@ -63,7 +63,7 @@ def compute_process(q, device_id, device_num, enable_comm_subgraph):
q.put(res.asnumpy())
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_single

View File

@@ -33,7 +33,7 @@ class TestGetitemMethodNet(nn.Cell):
return self.cell_dict['conv']
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@@ -69,7 +69,7 @@ class TestSetitemMethodNet(nn.Cell):
return self.cell_dict['conv']
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@@ -105,7 +105,7 @@ class TestSetitemMethodErrCaseNet(nn.Cell):
return self.cell_dict[key]
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@@ -165,7 +165,7 @@ class TestDelitemMethodNet(nn.Cell):
return len(self.cell_dict)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@@ -199,7 +199,7 @@ class TestContainsMethodNet(nn.Cell):
return ret1, ret2
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@@ -234,7 +234,7 @@ class TestClearMethodNet(nn.Cell):
return len(self.cell_dict)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@ -268,7 +268,7 @@ class TestPopMethodNet(nn.Cell):
return op, cell_dict_len
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@ -305,7 +305,7 @@ class TestKeysMethodNet(nn.Cell):
return self.cell_dict.keys()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@ -338,7 +338,7 @@ class TestValuesMethodNet(nn.Cell):
return self.cell_dict.values()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@ -380,7 +380,7 @@ class TestItemsMethodNet(nn.Cell):
return self.cell_dict.items()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@ -457,7 +457,7 @@ class TestUpdateMethodNet(nn.Cell):
return output1, output2, output3
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@ -505,7 +505,7 @@ class TestUpdateMethodEmbeddedNet(nn.Cell):
def construct(self, object_list):
self.cell_dict.update(object_list)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@ -567,7 +567,7 @@ class DupParaNameNet2(nn.Cell):
return a + b
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -33,7 +33,7 @@ class TestCellListInsertNet(nn.Cell):
return len(self.cell_list)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@ -69,7 +69,7 @@ class EmbeddedCellDictNet(nn.Cell):
return x
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -72,7 +72,7 @@ def abs_infervalue_func2():
return ops.auto_generate.abs(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -132,7 +132,7 @@ def test_abs_vmap(mode):
eltwise_case_vmap(abs_func, mode)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training

View File

@ -38,7 +38,7 @@ def add_infervalue_func2():
y = ms.Tensor(np.array([3, 5, 1]).astype(np.float32))
return ops.auto_generate.add(x, y)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@ -64,7 +64,7 @@ def test_add(mode):
compare(output_grads, expect_grads)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@ -88,7 +88,7 @@ def test_add_vmap(mode):
assert np.allclose(output_vmap.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@ -121,7 +121,7 @@ def test_add_dyn(mode):
compare(output_grads, expect_grads)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -20,7 +20,7 @@ from mindspore.ops import auto_generate as P
from test_argmin import argmin_argmax_case, argmin_argmax_case_dyn, argmin_argmax_case_vmap
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@ -36,7 +36,7 @@ def test_argmax(mode):
argmin_argmax_case(P.argmax_, np.argmax)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@ -52,7 +52,7 @@ def test_argmax_vmap(mode):
argmin_argmax_case_vmap(P.argmax_)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training

View File

@ -69,7 +69,7 @@ def argmin_argmax_case_dyn(op_func, np_func):
assert np.allclose(output2.asnumpy(), expect2)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@ -85,7 +85,7 @@ def test_argmin(mode):
argmin_argmax_case(P.argmin_, np.argmin)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@ -101,7 +101,7 @@ def test_argmin_vmap(mode):
argmin_argmax_case_vmap(P.argmin_)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training

View File

@ -66,7 +66,7 @@ def test_assign_forward_ascend(mode):
output = assign_forward_func(variable, value)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -35,7 +35,7 @@ def avg_pool_dyn_shape_func(x):
return ops.auto_generate.avg_pool(x, kernel_size=2, strides=2, pad_mode="VALID", data_format="NCHW")
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -56,7 +56,7 @@ def test_avg_pool_forward(mode):
assert np.allclose(out.asnumpy(), expect, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -75,7 +75,7 @@ def test_avg_pool_backward(mode):
grads = avg_pool_backward_func(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -100,7 +100,7 @@ def test_avg_pool_vmap(mode):
assert np.allclose(out.asnumpy(), expect, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -61,7 +61,7 @@ def forward_datas_prepare(shape, num=2, axis=0, diff_shapes=False, need_expect=T
return tuple(tensor_inputs), np_expect
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -79,7 +79,7 @@ def test_concat_forward(mode):
assert np.allclose(out.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -128,7 +128,7 @@ def test_concat_vmap(mode):
assert np.allclose(out.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -164,7 +164,7 @@ def test_concat_forward_dynamic(mode, dyn_mode):
assert np.allclose(out3.asnumpy(), expect3)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -199,7 +199,7 @@ def test_concat_backward_dynamic(mode, dyn_mode):
assert np.allclose(out.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -33,7 +33,7 @@ def cosh_backward_func(x):
return ops.grad(cosh_forward_func, (0,))(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -54,7 +54,7 @@ def test_cosh_forward(mode):
assert np.allclose(out.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -75,7 +75,7 @@ def test_cosh_backward(mode):
assert np.allclose(grads.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -58,7 +58,7 @@ def test_cummax_forward(context_mode, dtype):
assert (indices.asnumpy() == expect_indices).all()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -84,7 +84,7 @@ def test_cummax_vmap(context_mode, dtype):
assert (indices.asnumpy() == expect_indices).all()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -118,7 +118,7 @@ def test_cummax_dynamic(context_mode):
assert np.allclose(indices2.asnumpy(), expect_indices2)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -37,7 +37,7 @@ def cumprod_dyn_shape_func(x, axis, exclusive, reverse):
return ops.auto_generate.cum_prod(x, axis, exclusive, reverse)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -63,7 +63,7 @@ def test_cumprod_forward(context_mode, dtype):
assert np.allclose(out.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -88,7 +88,7 @@ def test_cumprod_backward(context_mode, dtype):
assert np.allclose(out.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -120,7 +120,7 @@ def test_cumprod_dynamic(context_mode):
assert np.allclose(out2.asnumpy(), expect2, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -152,7 +152,7 @@ def test_cumprod_dynamic_rank(context_mode):
assert np.allclose(out2.asnumpy(), expect2, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -184,7 +184,7 @@ def test_cumprod_backward_dynamic(context_mode):
assert np.allclose(out2.asnumpy(), expect2, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -37,7 +37,7 @@ def cumsum_dyn_shape_func(x, axis, exclusive, reverse):
return ops.auto_generate.cum_sum(x, axis, exclusive, reverse)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -62,7 +62,7 @@ def test_cumsum_forward(context_mode, dtype):
assert np.allclose(out.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -88,7 +88,7 @@ def test_cumsum_backward(context_mode, dtype):
assert np.allclose(out.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -120,7 +120,7 @@ def test_cumsum_dynamic(context_mode):
assert np.allclose(out2.asnumpy(), expect, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -152,7 +152,7 @@ def test_cumsum_dynamic_rank(context_mode):
assert np.allclose(out2.asnumpy(), expect, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -184,7 +184,7 @@ def test_cumsum_backward_dynamic(context_mode):
assert np.allclose(out2.asnumpy(), expect, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -51,7 +51,7 @@ def div_infer_value1():
return div_forward_func(x, y)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -41,7 +41,7 @@ class AvgPoolCreateInstanceNet(nn.Cell):
return op(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -59,7 +59,7 @@ def test_avg_pool():
out = net(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -77,7 +77,7 @@ def test_avg_pool_create_instance_const_args():
out = net(x, 1, 1, "VALID", "NCHW")
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -31,7 +31,7 @@ def gcd_backward_func(x1, x2):
return ops.grad(gcd_forward_func, (0,))(x1, x2)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -52,7 +52,7 @@ def test_gcd_forward(mode):
assert np.allclose(out.asnumpy(), expect_out)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -72,7 +72,7 @@ def test_gcd_backward(mode):
gcd_backward_func(x1, x2)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -38,7 +38,7 @@ def gelu_dyn_shape_func(x):
return ops.auto_generate.gelu_(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -59,7 +59,7 @@ def test_gelu_forward(mode):
assert np.allclose(out.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -80,7 +80,7 @@ def test_gelu_backward(mode):
assert np.allclose(grads.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -103,7 +103,7 @@ def test_gelu_vmap(mode):
assert np.allclose(out.asnumpy(), expect, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -133,7 +133,7 @@ def test_gelu_dynamic(mode):
assert np.allclose(output1.asnumpy(), expect1, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -37,7 +37,7 @@ def gelu_grad_dyn_shape_func(dy, x, y):
return ops.auto_generate.gelu_grad(dy, x, y)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -59,7 +59,7 @@ def test_gelu_grad_forward(mode):
assert np.allclose(out.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -93,7 +93,7 @@ def test_gelu_grad_dynamic(mode):
assert np.allclose(output1.asnumpy(), expect1, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -31,7 +31,7 @@ def greater_equal_backward_func(x, y):
return ops.grad(greater_equal_forward_func, (0,))(x, y)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -52,7 +52,7 @@ def test_greater_equal_forward(mode):
assert np.allclose(out.asnumpy(), expect_out)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -73,7 +73,7 @@ def test_greater_equal_backward(mode):
assert np.allclose(grads.asnumpy(), expect_out)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -31,7 +31,7 @@ def greater_backward_func(x, y):
return ops.grad(greater_forward_func, (0,))(x, y)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -52,7 +52,7 @@ def test_greater_forward(mode):
assert np.allclose(out.asnumpy(), expect_out)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -73,7 +73,7 @@ def test_greater_backward(mode):
assert np.allclose(grads.asnumpy(), expect_out)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -32,7 +32,7 @@ def grid_sampler_2d_backward_func(input_x, grid):
return ops.grad(grid_sampler_2d_forward_func, (0, 1))(input_x, grid)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -90,7 +90,7 @@ def test_grid_sampler_2d_forward(mode):
assert np.allclose(out.asnumpy(), expect_out, 1e-04, 1e-04)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -27,7 +27,7 @@ def grid_sampler_2d_grad_forward_func(grad, input_x, grid):
return grid_sampler_2d_grad
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -33,7 +33,7 @@ def grid_sampler_3d_backward_func(input_x, grid):
return ops.grad(grid_sampler_3d_forward_func, (0, 1))(input_x, grid)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -80,7 +80,7 @@ def test_grid_sampler_3d_forward(mode):
assert np.allclose(out.asnumpy(), except_out, 1e-04, 1e-04)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -139,7 +139,7 @@ def test_grid_sampler_3d_backward(mode):
assert np.allclose(grads[1].asnumpy(), expect_out2, 1e-04, 1e-04)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -28,7 +28,7 @@ def grid_sampler_3d_grad_forward_func(grad, input_x, grid):
return grid_sampler_3d_grad
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -96,7 +96,7 @@ def test_grid_sampler_3d_grad_forward(mode):
assert np.allclose(out[1].asnumpy(), expect_out2, 1e-04, 1e-04)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -37,7 +37,7 @@ def hshrink_dyn_shape_func(input_x, lambd):
return ops.auto_generate.hshrink(input_x, lambd)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -59,7 +59,7 @@ def test_hshrink_forward(context_mode):
assert np.allclose(out.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -81,7 +81,7 @@ def test_hshrink_backward(context_mode):
assert np.allclose(grads.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -106,7 +106,7 @@ def test_hshrink_vmap(context_mode):
assert np.allclose(out.asnumpy(), expect, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -140,7 +140,7 @@ def test_hshrink_dynamic(mode):
assert np.allclose(out1.asnumpy(), expect1, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -32,7 +32,7 @@ def hshrink_grad_dyn_shape_func(gradients, features, lambd):
return ops.auto_generate.hshrink_grad(gradients, features, lambd)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -56,7 +56,7 @@ def test_hshrink_grad_forward(context_mode):
assert np.allclose(out.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -38,7 +38,7 @@ def hsigmoid_dyn_shape_func(x):
return ops.auto_generate.hsigmoid(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -59,7 +59,7 @@ def test_hsigmoid_forward(mode):
assert np.allclose(out.asnumpy(), expect, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -80,7 +80,7 @@ def test_hsigmoid_backward(mode):
assert np.allclose(grads.asnumpy(), expect, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -103,7 +103,7 @@ def test_hsigmoid_vmap(mode):
assert np.allclose(out.asnumpy(), expect, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -130,7 +130,7 @@ def test_hsigmoid_dynamic(mode):
assert np.allclose(out1.asnumpy(), expect1, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -32,7 +32,7 @@ def hsigmoid_grad_dyn_shape_func(grads, x):
return ops.auto_generate.hsigmoid_grad(grads, x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -79,7 +79,7 @@ def test_hsigmoid_grad_vmap(mode):
assert np.allclose(out.asnumpy(), expect, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -111,7 +111,7 @@ def test_hsigmoid_grad_dynamic(mode):
assert np.allclose(out1.asnumpy(), expect1, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -36,7 +36,7 @@ def hswish_dyn_shape_func(x):
return ops.auto_generate.hardswish_(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -55,7 +55,7 @@ def test_hswish_forward(mode):
assert np.allclose(out.asnumpy(), expect_out, 1e-04, 1e-04)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -74,7 +74,7 @@ def test_hswish_backward(mode):
assert np.allclose(grads.asnumpy(), expect_out, 1e-04, 1e-04)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -97,7 +97,7 @@ def test_hswish_vmap(mode):
assert np.allclose(out.asnumpy(), expect_out, 1e-04, 1e-04)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -124,7 +124,7 @@ def test_hswish_dynamic(mode):
assert np.allclose(out1.asnumpy(), expect1, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -31,7 +31,7 @@ def hswish_grad_dyn_shape_func(y_grad, x):
return ops.auto_generate.hswish_grad(y_grad, x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -52,7 +52,7 @@ def test_hswish_grad_forward(mode):
assert np.allclose(out.asnumpy(), expect_out, 1e-04, 1e-04)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -77,7 +77,7 @@ def test_hswish_grad_vmap(mode):
assert np.allclose(out.asnumpy(), expect_out, 1e-04, 1e-04)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -109,7 +109,7 @@ def test_hswish_grad_dynamic(mode):
assert np.allclose(out1.asnumpy(), expect1, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -45,7 +45,7 @@ def layer_norm_dyn_shape_func(input_x, gamma, beta):
epsilon=1e-7)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -74,7 +74,7 @@ def test_layer_norm_forward(mode):
atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -147,7 +147,7 @@ def test_layer_norm_vmap(mode):
atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -204,7 +204,7 @@ def test_layer_norm_dynamic_shape(mode):
atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -42,7 +42,7 @@ def layer_norm_grad_dyn_shape_func(x, dy, variance, mean, gamma):
begin_params_axis=1)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -125,7 +125,7 @@ def test_layer_norm_grad_vmap(mode):
atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -49,7 +49,7 @@ def less_infervalue_func2():
return ops.auto_generate.less(x, y)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -70,7 +70,7 @@ def test_less_forward(mode):
assert np.allclose(out.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -93,7 +93,7 @@ def test_less_backward(mode):
assert np.allclose(grad_y.asnumpy(), expect_grad_y)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -116,7 +116,7 @@ def test_less_vmap(mode):
assert np.allclose(out.asnumpy(), expect, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -150,7 +150,7 @@ def test_less_dynamic_shape(mode):
assert np.allclose(out_2.asnumpy(), expect_2, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -184,7 +184,7 @@ def test_less_dynamic_rank(mode):
assert np.allclose(out_2.asnumpy(), expect_2, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -49,7 +49,7 @@ def less_equal_infervalue_func2():
return ops.auto_generate.less_equal(x, y)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -70,7 +70,7 @@ def test_less_equal_forward(mode):
assert np.allclose(out.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -93,7 +93,7 @@ def test_less_equal_backward(mode):
assert np.allclose(grad_y.asnumpy(), expect_grad_y)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -116,7 +116,7 @@ def test_less_equal_vmap(mode):
assert np.allclose(out.asnumpy(), expect, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -150,7 +150,7 @@ def test_less_equal_dynamic_shape(mode):
assert np.allclose(out_2.asnumpy(), expect_2, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -184,7 +184,7 @@ def test_less_equal_dynamic_rank(mode):
assert np.allclose(out_2.asnumpy(), expect_2, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -30,7 +30,7 @@ def masked_fill_backward_func(input_x, mask, value):
return ops.grad(masked_fill_forward_func, (0, 1))(input_x, mask, value)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -51,7 +51,7 @@ def test_masked_fill_forward(mode):
np.testing.assert_array_almost_equal(output.asnumpy(), expect_output, decimal=4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -74,7 +74,7 @@ def test_masked_fill_backward(mode):
np.testing.assert_array_almost_equal(mask_output.asnumpy(), expect_mask_output, decimal=4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -36,7 +36,7 @@ def maximum_vmap_func(x, y):
return ops.vmap(maximum_forward_func, in_axes=0, out_axes=0)(x, y)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -57,7 +57,7 @@ def test_maximum_op_forward(context_mode, data_type):
np.testing.assert_allclose(out.asnumpy(), expect_out, rtol=1e-3)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -79,7 +79,7 @@ def test_maximum_op_backward(context_mode, data_type):
np.testing.assert_allclose(grads[1].asnumpy(), expect_out[1], rtol=1e-3)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -100,7 +100,7 @@ def test_maximum_op_vmap(context_mode, data_type):
np.testing.assert_allclose(out.asnumpy(), expect_out, rtol=1e-3)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -31,7 +31,7 @@ def maximum_grad_vmap_func(x1, x2, grads):
return ops.vmap(maximum_grad_forward_func, in_axes=0, out_axes=0)(x1, x2, grads)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -55,7 +55,7 @@ def test_maximum_grad_op_forward(context_mode, data_type):
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -78,7 +78,7 @@ def test_maximum_grad_op_vmap(context_mode, data_type):
np.testing.assert_allclose(out[1].asnumpy(), expect_out[1], rtol=1e-3)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -50,7 +50,7 @@ def minimum_infervalue_func2():
return ops.auto_generate.minimum(x, y)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -72,7 +72,7 @@ def test_minimum_op_forward(context_mode, data_type):
np.testing.assert_allclose(out.asnumpy(), expect_out, rtol=1e-3)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -95,7 +95,7 @@ def test_minimum_op_backward(context_mode, data_type):
np.testing.assert_allclose(grads[1].asnumpy(), expect_out[1], rtol=1e-3)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -117,7 +117,7 @@ def test_minimum_op_vmap(context_mode, data_type):
np.testing.assert_allclose(out.asnumpy(), expect_out, rtol=1e-3)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -139,7 +139,7 @@ def test_minimum_op_infervalue(context_mode):
np.testing.assert_array_equal(out_2.asnumpy(), expect_out_2)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -31,7 +31,7 @@ def minimum_grad_vmap_func(x1, x2, grads):
return ops.vmap(minimum_grad_forward_func, in_axes=0, out_axes=0)(x1, x2, grads)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -56,7 +56,7 @@ def test_minimum_grad_op_forward(context_mode, data_type):
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -80,7 +80,7 @@ def test_minimum_grad_op_vmap(context_mode, data_type):
np.testing.assert_allclose(out[1].asnumpy(), expect_out[1], rtol=1e-3)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -50,7 +50,7 @@ def mul_infervalue_func2():
return ops.auto_generate.mul(x, y)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -72,7 +72,7 @@ def test_mul_op_forward(context_mode, data_type):
np.testing.assert_allclose(out.asnumpy(), expect_out, rtol=1e-3)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -95,7 +95,7 @@ def test_mul_op_backward(context_mode, data_type):
np.testing.assert_allclose(grads[1].asnumpy(), expect_out[1], rtol=1e-3)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -117,7 +117,7 @@ def test_mul_op_vmap(context_mode, data_type):
np.testing.assert_allclose(out.asnumpy(), expect_out, rtol=1e-3)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -139,7 +139,7 @@ def test_mul_op_infervalue(context_mode):
np.testing.assert_array_equal(out_2.asnumpy(), expect_out_2)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -36,7 +36,7 @@ def neg_vmap_func(x):
return ops.vmap(neg_forward_func, in_axes=0, out_axes=0)(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -57,7 +57,7 @@ def test_neg_op_forward(context_mode, data_type):
np.testing.assert_allclose(out.asnumpy(), expect_out, rtol=1e-3)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -78,7 +78,7 @@ def test_neg_op_backward(context_mode, data_type):
np.testing.assert_allclose(grads.asnumpy(), expect_out, rtol=1e-3)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -31,7 +31,7 @@ def next_after_backward_func(x, other):
return ops.grad(next_after_forward_func, 0)(x, other)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@ -53,7 +53,7 @@ def test_next_after_op_forward(context_mode, data_type):
@pytest.mark.skip(reason="dynamic shape not support now")
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.parametrize("context_mode", [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
@ -72,7 +72,7 @@ def test_next_after_op_forward_cpu(context_mode, data_type):
np.testing.assert_allclose(out.asnumpy(), expect_out, rtol=1e-3)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -80,7 +80,7 @@ def test_nllloss_forward_ascend(mode, data_type):
assert np.allclose(actual_output[1].asnumpy(), expect_total_weight)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -30,7 +30,7 @@ def nllloss_grad_vmap_func(logits, loss_grad, labels, weight, total_weight):
weight, total_weight)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -87,7 +87,7 @@ def get_grad_inputs_and_output(nptype_input, nptype_weight, reduction, input_typ
return inputs, outputs
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -31,7 +31,7 @@ def nonzero_backward_func(x):
return ops.grad(nonzero_forward_func, 0)(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -52,7 +52,7 @@ def test_nonzero_op_forward(context_mode, data_type):
np.testing.assert_array_equal(out.asnumpy(), expect_out)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -50,7 +50,7 @@ def notequal_vmap_func(x, y):
return ops.vmap(notequal_forward_func, in_axes=0, out_axes=0)(x, y)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -72,7 +72,7 @@ def test_notequal_op_forward(context_mode, data_type):
np.testing.assert_array_equal(out.asnumpy(), expect_out)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -94,7 +94,7 @@ def test_notequal_op_infervalue(context_mode):
np.testing.assert_array_equal(out.asnumpy(), expect_out)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -116,7 +116,7 @@ def test_notequal_op_backward(context_mode, data_type):
np.testing.assert_allclose(grads[0].asnumpy(), expect_out, rtol=1e-3)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -37,7 +37,7 @@ def onehot_vmap_func(indices, depth, on_value, off_value, axis):
return ops.vmap(onehot_forward_func, in_axes=in_axis, out_axes=0)(indices, depth, on_value, off_value, axis)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -61,7 +61,7 @@ def test_onehot_op_forward(context_mode, data_type):
np.testing.assert_allclose(out.asnumpy(), expect_out, rtol=1e-3)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -84,7 +84,7 @@ def test_onehot_op_forward_depth_tensor(context_mode):
np.testing.assert_allclose(out.asnumpy(), expect_out, rtol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -108,7 +108,7 @@ def test_onehot_op_backward(context_mode, data_type):
np.testing.assert_allclose(grads[0].asnumpy(), expect_out, rtol=1e-3)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -36,7 +36,7 @@ def oneslike_vmap_func(x):
return ops.vmap(oneslike_forward_func, in_axes=0, out_axes=0)(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -57,7 +57,7 @@ def test_oneslike_op_forward(context_mode, data_type):
np.testing.assert_array_equal(out.asnumpy(), expect_out)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -78,7 +78,7 @@ def test_oneslike_op_backward(context_mode, data_type):
np.testing.assert_allclose(grads.asnumpy(), expect_out, rtol=1e-3)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -49,7 +49,7 @@ def test_bias_add_4d(mode):
assert np.all(output.asnumpy() == expect_output), "bias_add execute failed, please check current code commit"
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@ -77,7 +77,7 @@ def test_bias_add_2d(mode):
assert np.all(output.asnumpy() == expect_output), "bias_add execute failed, please check current code commit"
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@ -105,7 +105,7 @@ def test_bias_add_3d(mode):
assert np.all(output.asnumpy() == expect_output), "bias_add execute failed, please check current code commit"
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@ -133,7 +133,7 @@ def test_bias_add_5d(mode):
assert np.all(output.asnumpy() == expect_output), "bias_add execute failed, please check current code commit"
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@ -157,7 +157,7 @@ def test_bias_add_backward(mode):
assert np.all(output.asnumpy() == expect_output), "bias_add_grad execute failed, please check current code commit"
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training

View File

@ -20,7 +20,7 @@ from mindspore import ops
import test_utils
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@ -45,7 +45,7 @@ def test_bias_add_grad_2d(data_type, mode):
assert np.all(output.asnumpy() == expect_output), "bias_add_grad execute failed, please check current code commit"
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@ -71,7 +71,7 @@ def test_bias_add_grad_4d(data_type, mode):
assert np.all(output.asnumpy() == expect_output), "bias_add_grad execute failed, please check current code commit"
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training

View File

@ -31,7 +31,7 @@ class Net(ms.nn.Cell):
return self.Cast(x, self.dtype)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -53,7 +53,7 @@ def test_cast_forward(mode):
assert output.asnumpy().dtype == 'float64'
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -74,7 +74,7 @@ def test_cast_backward(mode):
assert np.allclose(grads.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -35,7 +35,7 @@ def ceil_dyn_shape_func(x):
return ops.auto_generate.ceil(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -56,7 +56,7 @@ def test_ceil_forward(mode):
assert np.allclose(output.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -77,7 +77,7 @@ def test_ceil_backward(mode):
assert np.allclose(grads.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -103,7 +103,7 @@ def test_ceil_vmap(mode):
assert np.allclose(vmap_out.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -132,7 +132,7 @@ def test_ceil_dynamic(mode):
assert np.allclose(output1.asnumpy(), expect1)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -35,7 +35,7 @@ def celu_dyn_shape_func(x, alpha=1.0):
return ops.auto_generate.celu_(x, alpha)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -57,7 +57,7 @@ def test_celu_forward(mode):
np.testing.assert_allclose(output.asnumpy(), expect, rtol=error)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -79,7 +79,7 @@ def test_celu_backward(mode):
np.testing.assert_allclose(grads.asnumpy(), expect, rtol=error)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -102,7 +102,7 @@ def test_celu_vmap(mode):
np.testing.assert_allclose(vmap_out.asnumpy(), expect, rtol=error)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -131,7 +131,7 @@ def test_celu_dynamic(mode):
np.testing.assert_allclose(output1.asnumpy(), expect1, rtol=error)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -26,7 +26,7 @@ def cholesky_forward_func(x, upper):
return P.Cholesky(upper)(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.platform_x86_gpu_training

View File

@ -30,7 +30,7 @@ class CholeskyInverseNet(nn.Cell):
return self.cholesky_inverse(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -31,7 +31,7 @@ def conj_backward_func(x):
return ops.grad(conj_forward_func, (0,))(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -36,7 +36,7 @@ def cos_dyn_shape_func(x):
return ops.auto_generate.cos(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -57,7 +57,7 @@ def test_cos_forward(mode):
assert np.allclose(output.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -78,7 +78,7 @@ def test_cos_backward(mode):
assert np.allclose(grads.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -104,7 +104,7 @@ def test_cos_vmap(mode):
assert np.allclose(vmap_out.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -133,7 +133,7 @@ def test_cos_dynamic(mode):
assert np.allclose(output1.asnumpy(), expect1)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -30,7 +30,7 @@ def elu_backward_func(x):
return ops.grad(elu_forward_func, (0,))(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -50,7 +50,7 @@ def test_elu_forward(mode):
assert np.allclose(output.asnumpy(), expect, rtol=1e-3, atol=1e-3)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -70,7 +70,7 @@ def test_elu_backward(mode):
assert np.allclose(output.asnumpy(), expect, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -44,7 +44,7 @@ def equal_infervalue_func2():
return ops.auto_generate.equal(x, y)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -65,7 +65,7 @@ def test_equal_forward(mode):
assert np.allclose(output.asnumpy(), expect, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -86,7 +86,7 @@ def test_equal_backward(mode):
assert np.allclose(output.asnumpy(), expect, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -108,7 +108,7 @@ def test_equal_vmap(mode):
assert np.allclose(output.asnumpy(), expect, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -30,7 +30,7 @@ def erf_backward_func(x):
return ops.grad(erf_forward_func, (0,))(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -50,7 +50,7 @@ def test_erf_forward(mode):
assert np.allclose(output.asnumpy(), expect, rtol=0.001)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -70,7 +70,7 @@ def test_erf_backward(mode):
assert np.allclose(output.asnumpy(), expect, rtol=0.001)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -30,7 +30,7 @@ def erfc_backward_func(x):
return ops.grad(erfc_forward_func, (0,))(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -50,7 +50,7 @@ def test_erfc_forward(mode):
assert np.allclose(output.asnumpy(), expect, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -70,7 +70,7 @@ def test_erfc_backward(mode):
assert np.allclose(output.asnumpy(), expect, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -30,7 +30,7 @@ def erfinv_backward_func(x):
return ops.grad(erfinv_forward_func, (0,))(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -50,7 +50,7 @@ def test_erfinv_forward(mode):
assert np.allclose(output.asnumpy(), expect, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -70,7 +70,7 @@ def test_erfinv_backward(mode):
assert np.allclose(output.asnumpy(), expect, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -30,7 +30,7 @@ def exp_backward_func(x):
return ops.grad(exp_forward_func, (0,))(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -50,7 +50,7 @@ def test_exp_forward(mode):
assert np.allclose(output.asnumpy(), expect, rtol=0.001)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -70,7 +70,7 @@ def test_exp_backward(mode):
assert np.allclose(output.asnumpy(), expect, rtol=0.001)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -30,7 +30,7 @@ def expand_dims_backward_func(x, axis):
return ops.grad(expand_dims_forward_func, (0,))(x, axis)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -51,7 +51,7 @@ def test_expand_dims_forward(mode):
assert np.allclose(output.asnumpy(), expect, rtol=0.001)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -72,7 +72,7 @@ def test_expand_dims_backward(mode):
assert np.allclose(output.asnumpy(), expect, rtol=0.001)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -30,7 +30,7 @@ def expm1_backward_func(x):
return ops.grad(expm1_forward_func, (0,))(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -50,7 +50,7 @@ def test_expm1_forward(mode):
assert np.allclose(output.asnumpy(), expect, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -70,7 +70,7 @@ def test_expm1_backward(mode):
assert np.allclose(output.asnumpy(), expect, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -30,7 +30,7 @@ def flatten_backward_func(x):
return ops.grad(flatten_forward_func, (0,))(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -50,7 +50,7 @@ def test_flatten_forward(mode):
assert np.allclose(output.asnumpy(), expect, rtol=0.001)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -70,7 +70,7 @@ def test_flatten_backward(mode):
assert np.allclose(output.asnumpy(), expect, rtol=0.001)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -34,7 +34,7 @@ def floor_dyn_shape_func(x):
return ops.auto_generate.floor(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -54,7 +54,7 @@ def test_floor_forward(mode):
assert np.allclose(output.asnumpy(), expect, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -74,7 +74,7 @@ def test_floor_backward(mode):
assert np.allclose(output.asnumpy(), expect, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -95,7 +95,7 @@ def test_floor_vmap(mode):
assert np.allclose(output.asnumpy(), expect, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -124,7 +124,7 @@ def test_floor_dynamic(mode):
assert np.allclose(output1.asnumpy(), expect1)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -30,7 +30,7 @@ def floor_div_backward_func(x, y):
return ops.grad(floor_div_forward_func, (0,))(x, y)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -51,7 +51,7 @@ def test_floor_div_forward(mode):
assert np.allclose(output.asnumpy(), expect, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -72,7 +72,7 @@ def test_floor_div_backward(mode):
assert np.allclose(output.asnumpy(), expect, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -30,7 +30,7 @@ def floor_mod_backward_func(x, y):
return ops.grad(floor_mod_forward_func, (0,))(x, y)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -51,7 +51,7 @@ def test_floor_mod_forward(mode):
assert np.allclose(output.asnumpy(), expect, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -72,7 +72,7 @@ def test_floor_mod_backward(mode):
assert np.allclose(output.asnumpy(), expect, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -30,7 +30,7 @@ def gather_backward_func(input_params, input_indices, axis, batch_dims=0):
return ops.grad(gather_forward_func, (0,))(input_params, input_indices, axis, batch_dims)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -31,7 +31,7 @@ def gather_d_backward_func(x, dim, index):
return ops.grad(gather_d_forward_func, (0,))(x, dim, index)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -53,7 +53,7 @@ def test_gather_d_forward(mode):
assert np.allclose(out.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -74,7 +74,7 @@ def test_gather_d_backward(mode):
assert np.allclose(grads.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -35,7 +35,7 @@ def identity_dyn_shape_func(x):
return ops.auto_generate.deepcopy(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -55,7 +55,7 @@ def test_identity_forward(mode):
assert np.allclose(output.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -75,7 +75,7 @@ def test_identity_backward(mode):
assert np.allclose(grads.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -100,7 +100,7 @@ def test_identity_vmap(mode):
assert np.allclose(vmap_out.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -128,7 +128,7 @@ def test_identity_dynamic(mode):
assert np.allclose(output1.asnumpy(), expect1)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -35,7 +35,7 @@ def lin_sapce_dyn_shape_func(start, stop, num=5):
return ops.auto_generate.lin_space_(start, stop, num)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -55,7 +55,7 @@ def test_lin_space_forward(mode):
assert np.allclose(output.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -21,7 +21,7 @@ import mindspore.ops.operations.manually_defined as F
import test_utils
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@ -61,7 +61,7 @@ def test_scalar_add(mode):
assert np.allclose(mutable_grad_output, expect_grad_out)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@ -98,7 +98,7 @@ def test_scalar_sub(mode):
assert np.allclose(mutable_grad_output, expect_grad_out)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@ -134,7 +134,7 @@ def test_scalar_mul(mode):
assert np.allclose(mutable_grad_output, expect_grad_out)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@ -170,7 +170,7 @@ def test_scalar_div(mode):
assert np.allclose(mutable_grad_output, expect_grad_out)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@ -206,7 +206,7 @@ def test_scalar_mod(mode):
assert np.allclose(mutable_grad_output, expect_grad_out)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@ -242,7 +242,7 @@ def test_scalar_floordiv(mode):
assert np.allclose(mutable_grad_output, expect_grad_out)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@ -278,7 +278,7 @@ def test_scalar_eq(mode):
assert np.allclose(mutable_grad_output, expect_grad_out)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@ -314,7 +314,7 @@ def test_scalar_ge(mode):
assert np.allclose(mutable_grad_output, expect_grad_out)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@ -350,7 +350,7 @@ def test_scalar_gt(mode):
assert np.allclose(mutable_grad_output, expect_grad_out)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@ -386,7 +386,7 @@ def test_scalar_le(mode):
assert np.allclose(mutable_grad_output, expect_grad_out)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@ -421,7 +421,7 @@ def test_scalar_lt(mode):
mutable_grad_output = scalar_lt_backward_func(ms.mutable(input_x), ms.mutable(input_y))
assert np.allclose(mutable_grad_output, expect_grad_out)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training

View File

@ -20,7 +20,7 @@ from mindspore import ops
import mindspore.ops.operations.manually_defined as F
import test_utils
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@ -54,7 +54,7 @@ def test_scalar_bool_mutable(mode):
assert np.allclose(mutable_grad_output, expect_grad_out)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@ -88,7 +88,7 @@ def test_scalar_uadd_mutable(mode):
assert np.allclose(mutable_grad_output, expect_grad_out)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@ -122,7 +122,7 @@ def test_scalar_usub_mutable(mode):
assert np.allclose(mutable_grad_output, expect_grad_out)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training

View File

@ -35,7 +35,7 @@ def split_dyn_shape_func(x, axis=0, output_num=2):
return ops.auto_generate.split_(x, axis, output_num)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -57,7 +57,7 @@ def test_split_forward(mode):
assert np.allclose(res.asnumpy(), exp)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -77,7 +77,7 @@ def test_split_backward(mode):
assert np.allclose(grads.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -100,7 +100,7 @@ def test_split_vmap(mode):
assert np.allclose(res.asnumpy(), exp)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -130,7 +130,7 @@ def test_split_dynamic(mode):
assert np.allclose(res.asnumpy(), exp)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -36,7 +36,7 @@ def pow_vmap_func(x, y):
return ops.vmap(pow_forward_func, in_axes=0, out_axes=0)(x, y)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -58,7 +58,7 @@ def test_pow_op_forward(context_mode, data_type):
np.testing.assert_allclose(out.asnumpy(), expect_out, rtol=1e-3)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -81,7 +81,7 @@ def test_pow_op_backward(context_mode, data_type):
np.testing.assert_allclose(grads[1].asnumpy(), expect_out[1], rtol=1e-3)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -31,7 +31,7 @@ def prelu_backward_func(x, weight):
return ops.grad(prelu_forward_func, (0, 1))(x, weight)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -57,7 +57,7 @@ def test_prelu_forward(mode):
np.testing.assert_array_almost_equal(output.asnumpy(), expect_output, decimal=4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -26,7 +26,7 @@ def prelu_grad_func(y, x, weight):
return ops.auto_generate.prelu_grad(y, x, weight)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -30,7 +30,7 @@ def qr_backward_func(x, full_matrices):
return ops.grad(qr_forward_func, (0, 1))(x, full_matrices)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -31,7 +31,7 @@ def range_backward_func(start, limit, delta):
return ops.grad(range_forward_func, (0, 1, 2))(start, limit, delta)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
@ -52,7 +52,7 @@ def test_range_forward_tensor_input(mode):
np.testing.assert_array_equal(output.asnumpy(), expect_output)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -75,7 +75,7 @@ def test_range_forward(mode):
np.testing.assert_array_equal(output.asnumpy(), expect_output)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -30,7 +30,7 @@ def rank_backward_func(x):
return ops.grad(rank_forward_func, (0))(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -50,7 +50,7 @@ def test_rank_forward(mode):
np.testing.assert_equal(output, expect_output)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -30,7 +30,7 @@ def real_backward_func(x):
return ops.grad(real_forward_func, (0))(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -31,7 +31,7 @@ def real_div_backward_func(x, y):
return ops.grad(real_div_forward_func, (0, 1))(x, y)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -52,7 +52,7 @@ def test_real_div_forward(mode):
np.testing.assert_allclose(output.asnumpy(), expect_output)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -75,7 +75,7 @@ def test_real_div_backward(mode):
np.testing.assert_array_almost_equal(dy.asnumpy(), except_dy, decimal=4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -30,7 +30,7 @@ def reciprocal_backward_func(x):
return ops.grad(reciprocal_forward_func, (0))(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -50,7 +50,7 @@ def test_reciprocal_forward(mode):
np.testing.assert_array_almost_equal(output.asnumpy(), expect_output, decimal=4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -70,7 +70,7 @@ def test_reciprocal_backward(mode):
np.testing.assert_array_almost_equal(output.asnumpy(), expect_output, decimal=4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -25,7 +25,7 @@ def reciprocal_grad_func(y, dy):
return ops.auto_generate.reciprocal_grad(y, dy)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -46,7 +46,7 @@ def test_reciprocal_grad(mode):
np.testing.assert_array_almost_equal(output.asnumpy(), expect_output, decimal=4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -68,7 +68,7 @@ def test_reciprocal_grad_vmap(mode):
np.testing.assert_equal(output.asnumpy(), expect_out.asnumpy())
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -31,7 +31,7 @@ def reduce_all_backward_func(x):
return ops.grad(reduce_all_forward_func, (0,))(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -56,7 +56,7 @@ def test_reduce_all(mode):
assert (grad.asnumpy() == expect_grad).all()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -31,7 +31,7 @@ def reduce_any_backward_func(x):
return ops.grad(reduce_any_forward_func, (0,))(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -56,7 +56,7 @@ def test_reduce_any(mode):
assert (grad.asnumpy() == expect_grad).all()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -32,7 +32,7 @@ def reduce_max_backward_func(x):
return ops.grad(reduce_max_forward_func, (0,))(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -58,7 +58,7 @@ def test_reduce_max(mode):
assert (grad.asnumpy() == expect_grad).all()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -32,7 +32,7 @@ def reduce_mean_backward_func(x):
return ops.grad(reduce_mean_forward_func, (0,))(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -58,7 +58,7 @@ def test_reduce_mean(mode):
assert np.allclose(grad.asnumpy(), expect_grad, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -32,7 +32,7 @@ def reduce_min_backward_func(x):
return ops.grad(reduce_min_forward_func, (0,))(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -58,7 +58,7 @@ def test_reduce_min(mode):
assert (grad.asnumpy() == expect_grad).all()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -31,7 +31,7 @@ def reduce_prod_backward_func(x):
return ops.grad(reduce_prod_forward_func, (0,))(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -57,7 +57,7 @@ def test_reduce_prod(mode):
assert np.allclose(grad.asnumpy(), expect_grad, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -32,7 +32,7 @@ def reduce_sum_backward_func(x):
return ops.grad(reduce_sum_forward_func, (0,))(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -58,7 +58,7 @@ def test_reduce_sum(mode):
assert np.allclose(grad.asnumpy(), expect_grad, rtol=1e-4, atol=1e-4)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -31,7 +31,7 @@ def relu_backward_func(x):
return ops.grad(relu_forward_func, (0,))(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -61,7 +61,7 @@ def test_relu(mode):
assert (grad.asnumpy() == expect_grad).all()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -31,7 +31,7 @@ def relu6_backward_func(x):
return ops.grad(relu6_forward_func, (0,))(x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -61,7 +61,7 @@ def test_relu6(mode):
assert (grad.asnumpy() == expect_grad).all()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -27,7 +27,7 @@ def relu6_grad_func(dy, x):
return _relu6_grad(dy, x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -54,7 +54,7 @@ def test_relu6_grad(mode):
assert (out.asnumpy() == expect_out).all()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -27,7 +27,7 @@ def relu_grad_func(dy, x):
return _relu_grad(dy, x)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -54,7 +54,7 @@ def test_relu_grad(mode):
assert (out.asnumpy() == expect_out).all()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

View File

@ -30,7 +30,7 @@ def reshape_backward_func(x, shape):
return ops.grad(reshape_forward_func, (0,))(x, shape)
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@ -56,7 +56,7 @@ def test_reshape_op(mode):
assert (grad.asnumpy() == expect_grad).all()
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training

Some files were not shown because too many files have changed in this diff Show More