forked from mindspore-Ecosystem/mindspore

commit 8f97ce2e62 (parent 716d598b37)

fix st case
@@ -37,6 +37,7 @@ class Net(nn.Cell):
         return self.concat((out1_shape, out2_shape))


+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_dynamic_concat_cpu():
     x1 = Tensor(np.array([1, 2, 3, 1, 4, 2]), mstype.int32)
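For context, a runnable sketch of the pattern test_dynamic_concat_cpu exercises. The real Net concatenates the shape tensors of two data-dependent outputs; the class name and exact structure below are assumed, not the source:

import numpy as np
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import Tensor, context, dtype as mstype

class DynConcatNet(nn.Cell):
    """Sketch (assumed structure): concat the runtime shapes of two
    ops.unique outputs, whose lengths are data-dependent."""
    def __init__(self):
        super().__init__()
        self.concat = ops.Concat(axis=0)

    def construct(self, x1, x2):
        out1, _ = ops.unique(x1)              # length known only at runtime
        out2, _ = ops.unique(x2)
        out1_shape = ops.TensorShape()(out1)  # 1-D shape tensor
        out2_shape = ops.TensorShape()(out2)
        return self.concat((out1_shape, out2_shape))

context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
x1 = Tensor(np.array([1, 2, 3, 1, 4, 2]), mstype.int32)
x2 = Tensor(np.array([5, 1, 5]), mstype.int32)
print(DynConcatNet()(x1, x2))  # [4 2]: x1 has 4 unique values, x2 has 2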
@@ -104,21 +104,3 @@ class DynShapeJointNet(nn.Cell):
         else:
             out = y
         return out
-
-
-@pytest.mark.level0
-@pytest.mark.platform_x86_gpu_training
-@pytest.mark.env_onecard
-def test_diff_size_join_as_dyn_rank():
-    """
-    Feature: Dynamic shape join for control flow.
-    Description: Two different size shapes joined as dynamic rank shape.
-    Expectation: No exception.
-    """
-    x = np.arange(2 * 3 * 2).reshape((2, 3, 2))
-    y = np.arange(88, 2 * 3 * 2 * 2 + 88).reshape((2, 3, 2, 2))
-    input_x_dyn = Tensor(shape=[2, None, 2], dtype=mstype.float32)
-    input_y_dyn = Tensor(shape=[2, 3, None, None], dtype=mstype.float32)
-    dyn_net = DynShapeJointNet()
-    dyn_net.set_inputs(input_x_dyn, input_y_dyn)
-    dyn_net(Tensor(x.astype(np.float32)), Tensor(y.astype(np.float32)))
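The deleted case drove DynShapeJointNet, only the tail of which is visible above. A minimal sketch of such a control-flow join net (structure and condition assumed, not the exact source):

import mindspore.nn as nn

class DynShapeJointNet(nn.Cell):
    """Sketch (assumed): return one of two inputs chosen at runtime, so
    the compiler must join a [2, None, 2] shape with a [2, 3, None, None]
    shape at the if/else merge; only a dynamic-rank shape covers both."""
    def construct(self, x, y):
        if x.shape[0] == y.shape[0]:  # illustrative condition (assumed)
            out = x
        else:
            out = y
        return out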
@@ -40,7 +40,7 @@ class Net(Cell):
         return self.cast(x, self.dtype)


-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_cast_bool():
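For context, the Net these cast cases drive wraps ops.Cast; a minimal runnable sketch (input shape and target dtype assumed from the asserts below):

import numpy as np
import mindspore.ops as ops
from mindspore import Tensor, dtype as mstype
from mindspore.nn import Cell

class Net(Cell):
    """Sketch: cast the input tensor to a fixed target dtype."""
    def __init__(self, dtype):
        super().__init__()
        self.cast = ops.Cast()
        self.dtype = dtype

    def construct(self, x):
        return self.cast(x, self.dtype)

x = Tensor(np.arange(6).reshape(3, 2).astype(np.float32))
output = Net(mstype.bool_)(x)
assert output.asnumpy().shape == (3, 2)  # mirrors the asserts in these cases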
@@ -70,7 +70,7 @@ def test_cast_bool():
     assert output.asnumpy().shape == (3, 2)


-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_cast_float16():
@@ -132,7 +132,7 @@ def test_cast_float32():
     assert output.asnumpy().shape == (3, 2)


-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_cast_float64():
@@ -163,7 +163,7 @@ def test_cast_float64():
     assert output.asnumpy().shape == (3, 2)


-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_cast_int8():
@@ -194,7 +194,7 @@ def test_cast_int8():
     assert output.asnumpy().shape == (3, 2)


-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_cast_int16():
@@ -225,7 +225,7 @@ def test_cast_int16():
     assert output.asnumpy().shape == (3, 2)


-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_cast_int32():
@@ -256,7 +256,7 @@ def test_cast_int32():
     assert output.asnumpy().shape == (3, 2)


-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_cast_int64():
@@ -287,7 +287,7 @@ def test_cast_int64():
     assert output.asnumpy().shape == (3, 2)


-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_cast_uint8():
@@ -318,7 +318,7 @@ def test_cast_uint8():
     assert output.asnumpy().shape == (3, 2)


-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_cast_uint16():
@@ -349,7 +349,7 @@ def test_cast_uint16():
     assert output.asnumpy().shape == (3, 2)


-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_cast_uint32():
@@ -380,7 +380,7 @@ def test_cast_uint32():
     assert output.asnumpy().shape == (3, 2)


-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_cast_uint64():
@@ -154,7 +154,7 @@ def test_dynamic_getitem_bool():
     fact.grad_impl()


-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
@@ -192,7 +192,7 @@ def test_dynamic_getitem_none():
     fact.grad_impl()


-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
@@ -280,7 +280,7 @@ def test_dynamic_getitem_tensor_001():
     fact.grad_impl()


-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
@@ -356,7 +356,7 @@ def test_dynamic_getitem_slice_001():
     fact.grad_impl()


-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
@@ -394,7 +394,7 @@ def test_dynamic_getitem_int():
     fact.grad_impl()


-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
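The getitem/setitem cases above and below index tensors registered with dynamic shapes; the fact fixture they call is defined elsewhere in the suite. A minimal sketch of the underlying pattern (structure assumed, not the exact source):

import numpy as np
import mindspore.nn as nn
from mindspore import Tensor, dtype as mstype

class GetitemNet(nn.Cell):
    """Sketch: slice the input; with set_inputs the index kernel must
    handle a leading dimension that is unknown (None) at compile time."""
    def construct(self, x):
        return x[0:2]

net = GetitemNet()
net.set_inputs(Tensor(shape=[None, 3], dtype=mstype.float32))
out = net(Tensor(np.ones((4, 3)).astype(np.float32)))
print(out.shape)  # (2, 3)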
@@ -140,7 +140,7 @@ def test_dynamic_setitem_int_number():
     fact.grad_impl()


-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
@@ -167,7 +167,7 @@ def test_dynamic_setitem_int_tensor():
     fact.grad_impl()


-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
@@ -220,7 +220,7 @@ def test_dynamic_setitem_tensor_number():
     fact.grad_impl()


-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
@@ -248,7 +248,7 @@ def test_dynamic_setitem_tensor_tensor():
     fact.grad_impl()


-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
@@ -301,7 +301,7 @@ def test_dynamic_setitem_none_number():
     fact.grad_impl()


-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
@@ -328,7 +328,7 @@ def test_dynamic_setitem_none_tensor():
     fact.grad_impl()


-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
@@ -354,7 +354,7 @@ def test_dynamic_setitem_none_sequence():
     fact.grad_impl()


-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
@@ -380,7 +380,7 @@ def test_dynamic_setitem_ellipsis_number():
     fact.grad_impl()


-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
@@ -407,7 +407,7 @@ def test_dynamic_setitem_ellipsis_tensor():
     fact.grad_impl()


-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
@@ -459,7 +459,7 @@ def test_dynamic_setitem_bool_number():
     fact.grad_impl()


-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
@@ -486,7 +486,7 @@ def test_dynamic_setitem_bool_tensor():
     fact.grad_impl()


-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
@@ -538,7 +538,7 @@ def test_dynamic_setitem_list_number():
     fact.grad_impl()


-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
@@ -565,7 +565,7 @@ def test_dynamic_setitem_list_tensor():
     fact.grad_impl()


-@pytest.mark.level0
+@pytest.mark.level1
 @pytest.mark.platform_x86_cpu
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.platform_arm_ascend_training
@@ -145,46 +145,3 @@ def test_resize_bilinear_cpu_fp16():
     """
     case_input_dyn(context.GRAPH_MODE, "CPU", "float16")
    case_input_dyn(context.PYNATIVE_MODE, "CPU", "float16")
-
-
-class NetResizeBilinearSizeDyn(nn.Cell):
-    def construct(self, x, y, indices_x, indices_y, axis_x, axis_y):
-        unique_x_index, _ = ops.unique(indices_x)
-        x_dyn = ops.gather(x, unique_x_index, axis_x)
-        unique_y_index, _ = ops.unique(indices_y)
-        y_dyn = ops.gather(y, unique_y_index, axis_y)
-        size_dyn = ops.TensorShape()(y_dyn)
-        return ops.interpolate(x_dyn, None, None, size_dyn, "asymmetric", "bilinear")
-
-
-def case_input_size_dyn(mode, device_target):
-    context.set_context(mode=mode, device_target=device_target)
-    x_data, size, expected = get_data("float32")
-    y = np.random.rand(*size).astype(np.float32)
-    resize_nn = NetResizeBilinearSizeDyn()
-    axis_x = 3
-    indices_x = np.array([i for i in range(x_data.shape[axis_x])], dtype=np.int32)
-    axis_y = 1
-    indices_y = np.array([i for i in range(y.shape[axis_y])], dtype=np.int32)
-    output = resize_nn(Tensor(x_data), Tensor(y), Tensor(indices_x), Tensor(indices_y), axis_x, axis_y)
-    assert np.allclose(output.asnumpy(), expected, 1e-3, 1e-3)
-
-
-def test_resize_bilinear_size_dyn_ascend():
-    """
-    Feature: Test resize_bilinear on Ascend.
-    Description: The shape of input and size is dynamic.
-    Expectation: Assert that results are consistent with expect.
-    """
-    case_input_size_dyn(context.GRAPH_MODE, "Ascend")
-    case_input_size_dyn(context.PYNATIVE_MODE, "Ascend")
-
-
-def test_resize_bilinear_size_dyn_gpu():
-    """
-    Feature: Test resize_bilinear on GPU.
-    Description: The shape of input and size is dynamic.
-    Expectation: Assert that results are consistent with expect.
-    """
-    case_input_size_dyn(context.GRAPH_MODE, "GPU")
-    case_input_size_dyn(context.PYNATIVE_MODE, "GPU")
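The deleted dynamic-size case derives size from another tensor's shape at runtime via ops.TensorShape. A static-size sketch of the same ops.interpolate call, mirroring the positional arguments used above (input values and expected output shape assumed):

import numpy as np
import mindspore.ops as ops
from mindspore import Tensor

# roi and scales stay None; size is given directly instead of being
# computed from ops.TensorShape() as in the deleted dynamic case.
x = Tensor(np.random.rand(1, 1, 4, 4).astype(np.float32))
out = ops.interpolate(x, None, None, (8, 8), "asymmetric", "bilinear")
print(out.shape)  # (1, 1, 8, 8)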