add side effect cases

chenfei 2022-02-18 11:37:35 +08:00
parent 66f15f1b1f
commit 1652592529
14 changed files with 50 additions and 47 deletions


@@ -601,6 +601,10 @@ bool AnfRuntimeAlgorithm::HasNodeAttr(const std::string &key, const CNodePtr &no
MS_LOG(WARNING) << "Only cnode has attr, but this anf is " << node->DebugString();
return false;
}
// call node's input0 is not a primitive.
if (!IsValueNode<Primitive>(node->cast<CNodePtr>()->input(0))) {
return false;
}
// single op cnode.
auto primitive = AnfAlgo::GetCNodePrimitive(node);
if (primitive != nullptr) {


@@ -61,6 +61,8 @@
namespace mindspore {
namespace pipeline {
namespace {
bool ExistControlFlow(const FuncGraphPtr &func_graph) { return !func_graph->func_graphs_used_total().empty(); }
void UpdateFuncGraphParameter(const FuncGraphPtr &func_graph) {
MS_EXCEPTION_IF_NULL(func_graph);
std::vector<AnfNodePtr> new_paras;
@@ -101,9 +103,15 @@ void DisableMindRT(const ResourcePtr &res) {
}
auto func_graph = res->func_graph();
MS_EXCEPTION_IF_NULL(func_graph);
auto parallel_context = parallel::ParallelContext::GetInstance();
MS_EXCEPTION_IF_NULL(parallel_context);
auto parallel_mode = parallel_context->parallel_mode();
bool is_parallel_mode = parallel_mode == parallel::SEMI_AUTO_PARALLEL || parallel_mode == parallel::AUTO_PARALLEL;
bool enable_old_runtime = (common::GetEnv("MS_DEV_ENABLE_CLOSURE") == "0");
if (enable_old_runtime ||
(func_graph != nullptr && func_graph->exist_multi_target() && IsDynamicShapeGraph(func_graph))) {
bool use_old_vm_for_dynamic_shape = func_graph->exist_multi_target() && IsDynamicShapeGraph(func_graph);
bool use_old_vm_for_control_parallel =
func_graph->exist_multi_target() && ExistControlFlow(func_graph) && is_parallel_mode;
if (enable_old_runtime || use_old_vm_for_dynamic_shape || use_old_vm_for_control_parallel) {
// Heterogeneous scenario + dynamic_shape runs in MsBackend.
MS_LOG(INFO) << "Disable mindRT in the heterogeneous + dynamic shape scenario.";
context_ptr->set_param<bool>(MS_CTX_ENABLE_MINDRT, false);
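Note: the enable_old_runtime condition above keys off the MS_DEV_ENABLE_CLOSURE environment variable read a few lines earlier. A minimal sketch (not part of this commit) of how that fallback would typically be triggered from a Python script; only the variable name and value come from the hunk, everything else is illustrative:

# Illustrative only: force the legacy (non-MindRT) runtime by setting the
# variable that DisableMindRT checks, before any graph is compiled.
import os
os.environ["MS_DEV_ENABLE_CLOSURE"] = "0"

from mindspore import context
context.set_context(mode=context.GRAPH_MODE)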
@@ -778,18 +786,6 @@ bool ExistTarget(const std::vector<AnfNodePtr> &all_nodes, const std::string &ta
return false;
}
bool ExistControlNode(const std::vector<AnfNodePtr> &all_nodes) {
std::vector<PrimitivePtr> control_ops = {prim::kPrimSwitch, prim::kPrimCall, prim::kPrimSwitchLayer};
for (auto &node : all_nodes) {
auto contain_control_node = std::any_of(control_ops.begin(), control_ops.end(),
[&](const PrimitivePtr &prim) { return IsPrimitiveCNode(node, prim); });
if (contain_control_node) {
return true;
}
}
return false;
}
void SetRunMode(const ResourcePtr &res) {
MS_EXCEPTION_IF_NULL(res);
auto context_ptr = MsContext::GetInstance();
@@ -829,7 +825,7 @@ void SetRunMode(const ResourcePtr &res) {
// GRAPH | Closure\ENV\While scenario : KernelByKernel path in MindRT.
auto graphs = func_graph->func_graphs_used_total();
graphs.insert(func_graph);
bool exist_func = HasIncorporateCall(all_nodes);
bool exist_func = ExistControlFlow(func_graph) ? HasIncorporateCall(all_nodes) : false;
bool exist_while =
std::any_of(graphs.cbegin(), graphs.cend(), [](const FuncGraphPtr &fg) { return fg->recursive(); });
MS_LOG(INFO) << func_graph->ToString() << " exist_func: " << exist_func << " exist_while: " << exist_while;
@@ -842,7 +838,7 @@ void SetRunMode(const ResourcePtr &res) {
// Multiple device targets scenario.
if (func_graph->exist_multi_target()) {
// Heterogeneous scenario + ControlFlow : KernelByKernel path in MindRT.
if (ExistControlNode(all_nodes)) {
if (ExistControlFlow(func_graph)) {
MS_LOG(INFO) << "Run graph mode with kernelbykernel.";
set_ctx(false, false, false);
return;


@@ -1115,7 +1115,6 @@ EvalResultPtr GetEvaluatedValueForClassAttrOrMethod(const AnalysisEnginePtr &eng
std::string item_name = item_value->cast<StringImmPtr>()->value();
MS_LOG(DEBUG) << "Resolve name: " << cls->tag().name();
MS_LOG(DEBUG) << "Resolve item: " << item_name;
MS_EXCEPTION_IF_NULL(cls);
AbstractBasePtr attr = cls->GetAttribute(item_name);
if (attr != nullptr) {
return std::make_shared<EvalResult>(attr, nullptr);


@@ -127,6 +127,13 @@ class IfInIfNet4(nn.Cell):
out += self.param_b
return out
def func(self, x):
x += 10
if x > self.param_a:
self.param_b += 1
x += self.param_a
return x
class GradNet(nn.Cell):
def __init__(self, net):
@@ -175,7 +182,6 @@ def test_if_in_if_01():
control_flow_if_in_if(IfInIfNet1, x, expect1, expect2)
@pytest.mark.skip(reason="Ascend compile error in multigraph sink.")
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@@ -183,8 +189,8 @@ def test_if_in_if_01():
@pytest.mark.env_onecard
def test_if_in_if_02():
x = Tensor(2, mstype.int32)
expect1 = 0
expect2 = 0
expect1 = Tensor(5, mstype.int32)
expect2 = (Tensor(1, mstype.int32),)
control_flow_if_in_if(IfInIfNet2, x, expect1, expect2)
@@ -200,13 +206,12 @@ def test_if_in_if_03():
control_flow_if_in_if(IfInIfNet3, x, expect1, expect2)
@pytest.mark.skip(reason="Result not correct in ascend vm")
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_if_in_if_04():
x = Tensor(2, mstype.int32)
expect1 = 0
expect2 = 0
expect1 = Tensor(22, mstype.int32)
expect2 = (Tensor(1, mstype.int32),)
control_flow_if_in_if(IfInIfNet4, x, expect1, expect2)
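For context on the new expectations, a minimal self-contained sketch of the side-effect pattern these cases exercise: a Parameter is updated inside a conditional branch, and the backward result is a tuple because GradOperation(get_all=True) returns one gradient per network input. Class and parameter names below are illustrative, not taken from the test file.

import mindspore.nn as nn
from mindspore import Tensor, Parameter, context
from mindspore.common import dtype as mstype
from mindspore.ops import composite as C

context.set_context(mode=context.GRAPH_MODE)

class SideEffectIfNet(nn.Cell):
    def __init__(self):
        super().__init__()
        self.param_a = Parameter(Tensor(5, mstype.int32), name='a')
        self.param_b = Parameter(Tensor(4, mstype.int32), name='b')

    def construct(self, x):
        if x > self.param_a:
            # updating a Parameter inside a branch is a graph-mode side effect
            self.param_b += 1
        return x + self.param_b

class GradNet(nn.Cell):
    def __init__(self, net):
        super().__init__()
        self.net = net
        self.grad_all = C.GradOperation(get_all=True)

    def construct(self, *inputs):
        return self.grad_all(self.net)(*inputs)

x = Tensor(2, mstype.int32)
forward_out = SideEffectIfNet()(x)            # 2 + 4 = 6 (branch not taken)
backward_out = GradNet(SideEffectIfNet())(x)  # (Tensor(1, mstype.int32),)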


@@ -57,7 +57,6 @@ class BackwardNet(nn.Cell):
return grads
@pytest.mark.skip(reason="GPU backward result is error!")
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard


@@ -174,7 +174,6 @@ def test_for_in_if_03():
assert graph_backward_res == (Tensor([3], mstype.float32),)
@pytest.mark.skip(reason="Ascend control multi sink result error")
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@@ -221,10 +220,9 @@ def test_for_in_if_04():
graph_backward_res = net(x)
assert graph_forward_res == Tensor([45], mstype.int32)
assert graph_backward_res == Tensor([9], mstype.int32)
assert graph_backward_res == (Tensor([9], mstype.int32),)
@pytest.mark.skip(reason="Ascend control multi sink result error")
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@@ -273,4 +271,4 @@ def test_for_in_if_05():
graph_backward_res = net(x)
assert graph_forward_res == Tensor([-91], mstype.int32)
assert graph_backward_res == Tensor([13], mstype.int32)
assert graph_backward_res == (Tensor([13], mstype.int32),)


@@ -190,7 +190,6 @@ def test_if_after_if_03():
control_flow_if_after_if(IfAfterIfNet3, x, y, expect1, expect2)
# @pytest.mark.skip(reason="Result is not correct in vm ascend.")
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training


@@ -150,6 +150,7 @@ def test_if_after_if_in_if():
x = Tensor(2, mstype.int32)
expect1 = Tensor(14, mstype.int32)
expect2 = (Tensor(1, mstype.int32),)
control_flow_if_after_if_in_if(IfAfterIfInIfNet, x, expect1, expect2)
@@ -174,6 +175,7 @@ def test_if_after_if_in_if_02():
expect2 = (Tensor(1, mstype.int32),)
control_flow_if_after_if_in_if(IfAfterIfInIfNet2, x, expect1, expect2)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@@ -184,6 +186,7 @@ def test_if_after_if_in_if_02_ascend():
expect2 = (Tensor(1, mstype.int32),)
control_flow_if_after_if_in_if(IfAfterIfInIfNet2, x, expect1, expect2)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training


@@ -133,7 +133,6 @@ def control_flow_if_after_if_in_for(input_net, x, expect1, expect2):
assert graph_backward_res == expect2
# @pytest.mark.skip(reason="ME EvalCNode error")
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training


@@ -68,7 +68,6 @@ def test_forward():
assert graph_out == Tensor(np.array(18), mstype.int32)
@pytest.mark.skip(reason="Ascend kernel compiler error!")
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training


@@ -70,7 +70,6 @@ def test_forward():
assert graph_out == Tensor(np.array(36), mstype.int32)
@pytest.mark.skip(reason="Ascend kernel compiler error!")
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training


@@ -22,8 +22,6 @@ from mindspore.common.parameter import Parameter
grad_all = C.GradOperation(get_all=True)
@pytest.mark.skip(
reason="GPU backward result error!Maybe redudunt graph cause.") # Ascend is passed because run with multi graph sink.
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@@ -72,7 +70,6 @@ def test_for_after_if_in_if():
assert graph_backward_res == (Tensor(55, mstype.int32),)
@pytest.mark.skip(reason="Ascend vm backward result error!Maybe redudunt graph cause.")
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training


@@ -22,7 +22,6 @@ from mindspore.common.parameter import Parameter
grad_all = C.GradOperation(get_all=True)
@pytest.mark.skip(reason="not supported side effect")
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training


@@ -244,8 +244,7 @@ def test_if_after_for_in_if_break():
assert graph_backward_res == (Tensor(1, mstype.int32),)
@pytest.mark.skip(reason="ME EvalCNode error.")
@pytest.mark.level0
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@@ -285,10 +284,8 @@ def test_if_after_for_in_for_break():
net = Grad(if_after_for_in_for_net)
graph_backward_res = net(x)
print("test_if_after_for_in_for_break graph_forward_res:", graph_forward_res)
print("test_if_after_for_in_for_break graph_backward_res:", graph_backward_res)
# assert graph_forward_res == Tensor(12285, mstype.int32)
# assert graph_backward_res == (Tensor(1025, mstype.int32),)
assert graph_forward_res == Tensor(106, mstype.int32)
assert graph_backward_res == (Tensor(10, mstype.int32),)
class WhileAfterWhileInWhileBreakForwardNet(nn.Cell):
@@ -398,7 +395,11 @@ class ForInFor2BreakForwardNet(nn.Cell):
return out
@pytest.mark.skip(reason="Get wrong parent graph")
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_for_in_for_break():
x = Tensor(np.array(7), mstype.float32)
y = Tensor(np.array(20), mstype.float32)
@@ -408,8 +409,11 @@ def test_for_in_for_break():
print("test_for_in_for_break graph out:", graph_out)
# raise a endless loop exception.
@pytest.mark.skip(reason="Infer raise a endless loop exception")
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_while_true_break():
class WhileTrueBreakNet(nn.Cell):
def __init__(self, t):
@@ -438,8 +442,11 @@ def test_while_true_break():
print(grad_out)
# stuck in vm backend
@pytest.mark.skip(reason="Stuck in vm backend")
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_continue_stuck_in_vm():
class NetWork(nn.Cell):
def __init__(self, t):