Fix the MsRunGraph core dump related to auto-monad; modify the auto-monad test cases so that the CI test cases no longer inspect or assert on the generated IR files.
parent d58bd65da7 · commit 1086132dc3
@@ -135,6 +135,9 @@ void PushInputTensor(const BaseRef &arg, std::vector<tensor::TensorPtr> *inputs)
   } else if (value->isa<Scalar>()) {
     tensor::TensorPtr scalar_tensor = ScalarToTensor(value->cast<ScalarPtr>());
     inputs->push_back(scalar_tensor);
+  } else if (value->isa<Monad>()) {
+    // If value is a monad, replace it with an unused tensor.
+    inputs->push_back(std::make_shared<tensor::Tensor>(int64_t(0), kBool));
   } else {
     inputs->push_back(value->cast<tensor::TensorPtr>());
   }
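The crash fixed above comes from the final else branch casting an auto-monad state value to a tensor pointer, which likely yields a null tensor that MsRunGraph later dereferences; the added branch pushes a throwaway bool tensor instead, so the input list stays aligned with the graph parameters. A minimal sketch of that guard in Python, using made-up stand-in types rather than MindSpore APIs:

class Monad:
    """Stand-in for the auto-monad state value carried through the graph."""

class FakeTensor:
    def __init__(self, data):
        self.data = data

def push_input(value, inputs):
    # Mirror of the fix: never treat a Monad as a tensor; push an unused
    # placeholder so input positions still match the graph parameters.
    if isinstance(value, Monad):
        inputs.append(FakeTensor(False))  # unused bool placeholder
    else:
        inputs.append(value)              # assumed to already be a tensor

inputs = []
for arg in [FakeTensor([1, 2]), Monad(), FakeTensor([3])]:
    push_input(arg, inputs)
assert len(inputs) == 3  # the monad slot is kept, not dropped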
@@ -48,7 +48,6 @@ def _with_save_graphs():
    clean_all_ir_files('./')


@pytest.mark.skip(reason="Ignore print detection")
def test_print():
    class Print(Cell):
        def __init__(self):
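Throughout this test file, the print-detection tests are kept off the CI gate with pytest's standard skip marker instead of asserting on dumped IR. A self-contained sketch of the mechanism (test name and body are illustrative only):

import pytest

@pytest.mark.skip(reason="Ignore print detection")
def test_print_sketch():
    # Collected but never executed: pytest reports it as 'skipped', so CI
    # no longer depends on capturing and matching the printed output.
    raise AssertionError("unreachable")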
@@ -72,7 +71,6 @@ def test_print():
    check_output(cap.output, patterns)


@pytest.mark.skip(reason="Ignore print detection")
def test_print_add():
    class Print_Add(Cell):
        def __init__(self):

@@ -100,7 +98,6 @@ def test_print_add():
    check_output(cap.output, patterns)


@pytest.mark.skip(reason="Ignore print detection")
def test_print_assign():
    class Print_Assign(Cell):
        def __init__(self):

@@ -128,7 +125,6 @@ def test_print_assign():
    check_output(cap.output, patterns)


@pytest.mark.skip(reason="Ignore print detection")
def test_print_assign_add():
    class Print_Assign_Add(Cell):
        def __init__(self):

@@ -159,7 +155,6 @@ def test_print_assign_add():
    check_output(cap.output, patterns)


@pytest.mark.skip(reason="Ignore print detection")
def test_print_while():
    class Print_While(Cell):
        def __init__(self):

@@ -194,7 +189,6 @@ def test_print_while():
    check_output(cap.output, patterns)


@pytest.mark.skip(reason="Ignore print detection")
def test_print_if():
    class Print_If(Cell):
        def __init__(self):
@@ -225,7 +219,6 @@ def test_print_if():
    check_output(cap.output, patterns)


@pytest.mark.skip(reason="Ignore print detection")
def test_print_assign_while():
    class Print_Assign_While(Cell):
        def __init__(self):

@@ -269,7 +262,6 @@ def test_print_assign_while():
    check_output(cap.output, patterns)


@pytest.mark.skip(reason="Ignore print detection")
def test_print_assign_if():
    class Print_Assign_If(Cell):
        def __init__(self):

@@ -525,7 +517,6 @@ def test_for():
    np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())


@pytest.mark.skip(reason="Ignore print detection")
def test_print_for():
    class Print_For(Cell):
        def __init__(self):

@@ -562,7 +553,6 @@ def test_print_for():
    check_output(cap.output, patterns)


@pytest.mark.skip(reason="Ignore print detection")
def test_print_assign_for():
    class Print_Assign_For(Cell):
        def __init__(self):

@@ -749,7 +739,6 @@ def test_multi_assign_addn():
    np.testing.assert_almost_equal(out.asnumpy(), expect.asnumpy())


@pytest.mark.skip(reason="Ignore print detection")
def test_multi_assign_print():
    class Multi_Assign_Print(Cell):
        def __init__(self):
@@ -1285,10 +1274,6 @@ def use_build_train_network_check_cast_num(network, level, inputs, label, cast_n
    return out_me


@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_auto_mixed_precision_train_prelunet(with_save_graphs):
    net2 = NetRrelu(3, 12)
    input32 = Tensor(np.ones([1, 3, 2, 2]).astype(np.float32))

@@ -1313,22 +1298,14 @@ class AssignNet(Cell):
        return x


@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
-def test_auto_mixed_precision_train_021(pynative_save_graphs):
+def test_auto_mixed_precision_train_1(pynative_save_graphs):
    net = AssignNet()
    input32 = Tensor(np.ones([1, 3, 2, 2]).astype(np.float32))
    label32 = Tensor(np.zeros([1, 3]).astype(np.float32))
    use_build_train_network_check_cast_num(net, "O0", input32, label32, 0)


@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
-def test_auto_mixed_precision_train_022(pynative_save_graphs):
+def test_auto_mixed_precision_train_2(pynative_save_graphs):
    net = AssignNet()
    input32 = Tensor(np.ones([1, 3, 2, 2]).astype(np.float32))
    label32 = Tensor(np.zeros([1, 3]).astype(np.float32))
@@ -1406,11 +1383,7 @@ def use_build_train_network_controlflow_check_cast_num(network, level, input_x,
    return out_me


@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
-def test_auto_mixed_precision_controlflow_auto_1(pynative_save_graphs):
+def test_auto_mixed_precision_controlflow_auto(pynative_save_graphs):
    net = MixControlNet(3, 5)
    input_x = Tensor(
        np.random.randint(2, size=(1, 3, 2, 2)).astype((np.float32)))
@@ -129,10 +129,6 @@ class SideEffectCastAll(Cell):
        return out_a, out_b


# @pytest.mark.level0
# @pytest.mark.platform_x86_gpu_training
# @pytest.mark.env_onecard
@pytest.mark.skip(reason="not stable")
def test_side_effect_castall():
    clear_files()
    context.set_context(mode=context.GRAPH_MODE, save_graphs=True)
@@ -230,8 +226,9 @@ class SideEffectTwoAssignTwoAddnDependencyNet(Cell):
        return grad_out


# an infinite loop exists.
@pytest.mark.skip(reason="not supported yet")
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_ctrl_while_by_while_and_if_in_first_while():
    class Net(Cell):
        def __init__(self):

@@ -265,8 +262,9 @@ def test_ctrl_while_by_while_and_if_in_first_while():
    net(input_me_a)


# an infinite loop exists.
@pytest.mark.skip(reason="not supported yet")
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_ctrl_while_by_while_and_while_in_first_while():
    class Net(Cell):
        def __init__(self):
@@ -336,10 +334,6 @@ class InplaceNet(Cell):
        return output


# @pytest.mark.level0
# @pytest.mark.platform_x86_gpu_training
# @pytest.mark.env_onecard
@pytest.mark.skip(reason="not stable")
def test_ir_fusion_inplace_bn_conv_conv():
    clear_files()
    context.set_context(mode=context.GRAPH_MODE, save_graphs=True)

@@ -467,11 +461,7 @@ def use_build_train_network_controlflow_check_cast_num(network, level, input_x,
    return out_me


# @pytest.mark.level0
# @pytest.mark.platform_x86_gpu_training
# @pytest.mark.env_onecard
@pytest.mark.skip(reason="not stable")
-def test_auto_mixed_precision_controlflow_auto_1():
+def test_auto_mixed_precision_controlflow_auto():
    context.set_context(mode=context.PYNATIVE_MODE, save_graphs=True)
    net = MixControlNet(3, 5)
    input_x = Tensor(
@@ -485,10 +475,6 @@ def test_auto_mixed_precision_controlflow_auto_1():
                                                       label, cast_num)


# @pytest.mark.level0
# @pytest.mark.platform_x86_gpu_training
# @pytest.mark.env_onecard
@pytest.mark.skip(reason="not stable")
def test_updatestate_between_assigns():
    class UpdateState_Assigns(Cell):
        def __init__(self):

@@ -514,10 +500,6 @@ def test_updatestate_between_assigns():
    assert len(updatestate_num) == 1


# @pytest.mark.level0
# @pytest.mark.platform_x86_gpu_training
# @pytest.mark.env_onecard
@pytest.mark.skip(reason="not stable")
def test_updatestate_between_maketuple_assign():
    class UpdateState_MakeTuple_Assign(Cell):
        def __init__(self):
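The assert len(updatestate_num) == 1 lines above count UpdateState nodes in the graphs dumped by save_graphs; the helper that collects them is project code not shown in this diff. A stand-alone sketch of such a counter, assuming the dumped .ir text represents each node as a textual UpdateState(...) call:

import glob
import re

def count_updatestate(ir_dir="./"):
    # Hypothetical re-implementation of the check these tests perform:
    # scan every dumped .ir file and count textual UpdateState calls.
    pattern = re.compile(r"=\s*UpdateState\(")
    total = 0
    for path in glob.glob(f"{ir_dir}/*.ir"):
        with open(path, encoding="utf-8") as f:
            total += len(pattern.findall(f.read()))
    return total

# A test body would then mirror the original assertion:
# assert count_updatestate("./") == 1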
@@ -545,10 +527,6 @@ def test_updatestate_between_maketuple_assign():
    assert len(updatestate_num) == 1


# @pytest.mark.level0
# @pytest.mark.platform_x86_gpu_training
# @pytest.mark.env_onecard
@pytest.mark.skip(reason="not stable")
def test_updatestate_between_assign_maketuple():
    class UpdateState_Assign_MakeTuple(Cell):
        def __init__(self):