diff --git a/mindspore/ccsrc/frontend/optimizer/irpass/special_op_eliminate.h b/mindspore/ccsrc/frontend/optimizer/irpass/special_op_eliminate.h
index 976948864b2..13bde1e30a1 100644
--- a/mindspore/ccsrc/frontend/optimizer/irpass/special_op_eliminate.h
+++ b/mindspore/ccsrc/frontend/optimizer/irpass/special_op_eliminate.h
@@ -255,7 +255,7 @@ class PynativeEliminater : public OptimizerCaller {
     MS_LOG(DEBUG) << "Start FillZero";
     ValuePtr out = nullptr;
     if (value->isa<Int32Imm>()) {
-      return MakeValue(0);
+      return value;
     }
 
     if (value->isa<tensor::Tensor>()) {
diff --git a/mindspore/ccsrc/pipeline/pynative/pynative_execute.cc b/mindspore/ccsrc/pipeline/pynative/pynative_execute.cc
index 93dd17c8db1..a998993574f 100644
--- a/mindspore/ccsrc/pipeline/pynative/pynative_execute.cc
+++ b/mindspore/ccsrc/pipeline/pynative/pynative_execute.cc
@@ -70,7 +70,6 @@ const std::set<std::string> vm_operators = {"make_ref", "HookBackward", "InsertG
 namespace mindspore {
 namespace pynative {
-
 static std::shared_ptr<session::SessionBasic> session = nullptr;
 PynativeExecutorPtr PynativeExecutor::executor_ = nullptr;
 std::mutex PynativeExecutor::instance_lock_;
@@ -1213,7 +1212,6 @@ AnfNodePtr PynativeExecutor::GetInput(const py::object &obj, bool op_mask) {
     }
     return graph_info_map_[df_builder_].param_map[obj_id].first;
   }
-
   // if input is graph output
   if (graph_info_map_[curr_g_].param_map.count(obj_id) != 0) {
     // op(x, y)
@@ -1227,20 +1225,16 @@
     // out = op((x, y))
     // out = cell((x, y))
     auto tuple = obj.cast<py::tuple>();
-
     // cell((1,2)): support not mix (scalar, tensor)
     if (!tuple.empty() && !py::isinstance<tensor::Tensor>(tuple[0])) {
      return MakeValueNode(obj, obj_id);
    }
-
    std::vector<AnfNodePtr> args;
    args.push_back(NewValueNode(prim::kPrimMakeTuple));
-
    auto tuple_size = static_cast<int>(tuple.size());
    for (int i = 0; i < tuple_size; i++) {
      args.push_back(GetInput(tuple[i], false));
    }
-
    auto cnode = curr_g_->NewCNode(args);
    set_obj_node_map(curr_g_, GetId(obj), cnode);
    node = cnode;