From a137fa1d0b655a09f45c8f7a890452ad1db3c630 Mon Sep 17 00:00:00 2001
From: Zhang Qinghua
Date: Fri, 27 Aug 2021 10:33:35 +0800
Subject: [PATCH] Optimize the executor routines.

- Fix the key generation.
- Distinguish the executors.
---
 mindspore/_extends/parse/parser.py | 6 +-
 mindspore/ccsrc/debug/debugger/debugger.cc | 8 +-
 mindspore/ccsrc/frontend/optimizer/py_pass.cc | 2 +-
 .../parallel/graph_util/graph_info.cc | 2 +-
 .../ccsrc/frontend/parallel/step_parallel.cc | 2 +-
 mindspore/ccsrc/pipeline/jit/action.cc | 8 +-
 mindspore/ccsrc/pipeline/jit/init.cc | 45 ++--
 mindspore/ccsrc/pipeline/jit/pipeline.cc | 249 +++++++++---------
 mindspore/ccsrc/pipeline/jit/pipeline.h | 30 +--
 mindspore/ccsrc/pipeline/jit/pipeline_ge.cc | 2 +-
 mindspore/ccsrc/pipeline/jit/resource.cc | 4 +-
 mindspore/ccsrc/pipeline/jit/resource.h | 7 +-
 .../pipeline/pynative/pynative_execute.cc | 2 +-
 mindspore/common/api.py | 125 +++++----
 mindspore/compression/export/quant_export.py | 2 +-
 mindspore/nn/cell.py | 35 +--
 mindspore/ops/composite/base.py | 20 +-
 mindspore/train/_utils.py | 16 +-
 mindspore/train/model.py | 4 +-
 mindspore/train/serialization.py | 2 +-
 .../research/3d/DeepLM/lm_solver/solver.py | 4 +-
 .../utils/block_util.py | 4 +-
 tests/ops_common.py | 10 +-
 tests/perf_test/test_lenet.py | 6 +-
 tests/perf_test/test_resnet_infer.py | 4 +-
 tests/perf_test/test_resnet_train.py | 4 +-
 .../data_transfer/test_tdt_data_transfer.py | 4 +-
 tests/st/gnn/test_gat_model.py | 4 +-
 tests/st/gnn/test_gnn_aggregator.py | 10 +-
 tests/st/ops/ascend/test_tdt_data_ms.py | 6 +-
 .../gtest_input/pipeline/parse/parse_class.py | 4 +-
 .../pipeline/parse/parser_integrate.py | 4 +-
 tests/ut/cpp/utils/callback_test.cc | 4 +-
 tests/ut/python/communication/test_comm.py | 14 +-
 .../communication/test_data_parallel_dense.py | 4 +-
 tests/ut/python/exec/test_eval.py | 4 +-
 tests/ut/python/ir/test_tensor.py | 2 +-
 tests/ut/python/ir/test_tensor_py.py | 8 +-
 tests/ut/python/model/res18_example.py | 8 +-
 tests/ut/python/model/test_lenet.py | 8 +-
 .../model/test_lenet_core_after_exception.py | 4 +-
 tests/ut/python/model/test_mix_precision.py | 6 +-
 tests/ut/python/nn/optim/test_ada_grad.py | 4 +-
 tests/ut/python/nn/optim/test_adafactor.py | 26 +-
 tests/ut/python/nn/optim/test_adam.py | 18 +-
 tests/ut/python/nn/optim/test_ftrl.py | 8 +-
 tests/ut/python/nn/optim/test_lamb.py | 8 +-
 tests/ut/python/nn/optim/test_lars.py | 6 +-
 tests/ut/python/nn/optim/test_lazyadam.py | 8 +-
 tests/ut/python/nn/optim/test_momentum.py | 4 +-
 .../python/nn/optim/test_proximal_ada_grad.py | 8 +-
 tests/ut/python/nn/optim/test_rmsprop.py | 4 +-
 .../ut/python/nn/probability/dpn/test_vae.py | 4 +-
 tests/ut/python/nn/test_activation.py | 8 +-
 tests/ut/python/nn/test_batchnorm.py | 6 +-
 tests/ut/python/nn/test_cell.py | 4 +-
 tests/ut/python/nn/test_cell_wrapper.py | 12 +-
 tests/ut/python/nn/test_central_crop.py | 8 +-
 tests/ut/python/nn/test_dense.py | 18 +-
 tests/ut/python/nn/test_flatten.py | 4 +-
 tests/ut/python/nn/test_image_gradients.py | 8 +-
 .../python/nn/test_learning_rate_schedule.py | 16 +-
 tests/ut/python/nn/test_msssim.py | 16 +-
 tests/ut/python/nn/test_nn_embedding.py | 10 +-
 tests/ut/python/nn/test_norm.py | 4 +-
 tests/ut/python/nn/test_pooling.py | 8 +-
 tests/ut/python/nn/test_psnr.py | 16 +-
 tests/ut/python/nn/test_ssim.py | 14 +-
 tests/ut/python/nn/test_transformer.py | 60 ++---
 tests/ut/python/ops/test_ops_check.py | 6 +-
 .../test_optimizer_with_parameter_groups.py | 10 +-
 tests/ut/python/optimizer/test_python_pass.py | 13 +-
 tests/ut/python/parallel/test_adafactor.py | 4 +-
 .../parallel/test_add_relu_redistribution.py | 4 +-
 .../python/parallel/test_allreduce_fusion.py | 4 +-
 tests/ut/python/parallel/test_alltoall.py | 22 +-
 tests/ut/python/parallel/test_arithmetic.py | 10 +-
 .../parallel/test_auto_parallel_BN_PReLU.py | 4 +-
 .../parallel/test_auto_parallel_activation.py | 4 +-
 .../parallel/test_auto_parallel_arithmetic.py | 12 +-
 ...t_auto_parallel_assign_sub_with_ref_key.py | 6 +-
 .../parallel/test_auto_parallel_cast.py | 6 +-
 .../test_auto_parallel_common_parameter.py | 4 +-
 .../test_auto_parallel_double_sources.py | 6 +-
 .../test_auto_parallel_double_star.py | 4 +-
 .../test_auto_parallel_double_subgraphs.py | 8 +-
 .../parallel/test_auto_parallel_fc_nobias.py | 4 +-
 .../parallel/test_auto_parallel_for_loop.py | 6 +-
 ...t_auto_parallel_for_loop_multi_subgraph.py | 6 +-
 .../test_auto_parallel_for_loop_reshape.py | 6 +-
 .../test_auto_parallel_four_matmul.py | 4 +-
 .../test_auto_parallel_l2normalize.py | 4 +-
 .../test_auto_parallel_matmul_drop.py | 4 +-
 .../test_auto_parallel_matmul_prelu.py | 6 +-
 .../test_auto_parallel_multi_graph.py | 4 +-
 .../parallel/test_auto_parallel_onehot.py | 4 +-
 .../test_auto_parallel_parameter_cast.py | 6 +-
 .../test_auto_parallel_partial_strategy.py | 4 +-
 .../test_auto_parallel_reduce_method.py | 4 +-
 .../parallel/test_auto_parallel_reshape.py | 24 +-
 .../parallel/test_auto_parallel_resnet.py | 14 +-
 ...to_parallel_resnet_sharding_propagation.py | 4 +-
 ...o_parallel_resnet_sharding_propagation2.py | 4 +-
 .../parallel/test_auto_parallel_rhombus.py | 4 +-
 .../test_auto_parallel_segment_min.py | 4 +-
 .../test_auto_parallel_segment_sum.py | 4 +-
 .../test_auto_parallel_shard_propagation.py | 4 +-
 .../test_auto_parallel_shard_propagation2.py | 4 +-
 .../test_auto_parallel_shard_propagation3.py | 4 +-
 .../test_auto_parallel_softmax_loss.py | 4 +-
 ...est_auto_parallel_star_partial_strategy.py | 10 +-
 .../test_auto_parallel_transformer.py | 4 +-
 .../parallel/test_auto_parallel_transpose.py | 6 +-
 .../test_auto_parallel_triangle_overwrite.py | 4 +-
 .../test_auto_parallel_tuple_depend.py | 4 +-
 .../parallel/test_auto_parallel_two_bn.py | 6 +-
 .../parallel/test_auto_parallel_two_matmul.py | 6 +-
 .../test_auto_parallel_two_partial_matmul.py | 4 +-
 .../parallel/test_auto_parallel_zig_zag.py | 4 +-
 .../parallel/test_auto_star_elimination.py | 4 +-
 tests/ut/python/parallel/test_batch_matmul.py | 4 +-
 .../ut/python/parallel/test_batch_parallel.py | 4 +-
 .../parallel/test_batch_parallel_dropout.py | 4 +-
 .../parallel/test_batch_parallel_tensoradd.py | 4 +-
 tests/ut/python/parallel/test_batchmm.py | 4 +-
 tests/ut/python/parallel/test_batchnorm.py | 6 +-
 tests/ut/python/parallel/test_broadcast_to.py | 6 +-
 .../parallel/test_comm_not_recompute.py | 4 +-
 .../parallel/test_comparison_function_info.py | 4 +-
 tests/ut/python/parallel/test_concat.py | 4 +-
 tests/ut/python/parallel/test_conv2d.py | 4 +-
 .../python/parallel/test_conv2d_transpose.py | 4 +-
 tests/ut/python/parallel/test_dataset.py | 4 +-
 tests/ut/python/parallel/test_dense_matmul.py | 4 +-
 .../test_different_type_for_div_op.py | 4 +-
 tests/ut/python/parallel/test_dropout.py | 4 +-
 .../python/parallel/test_dropout_do_mask.py | 4 +-
 .../ut/python/parallel/test_dynamic_shape.py | 6 +-
 .../parallel/test_element_wise_function.py | 4 +-
 .../python/parallel/test_embeddinglookup.py | 14 +-
 tests/ut/python/parallel/test_eval.py | 10 +-
 tests/ut/python/parallel/test_expand_dims.py | 4 +-
 .../ut/python/parallel/test_forward_graph.py | 4 +-
 tests/ut/python/parallel/test_gather_v2.py | 30 +--
 tests/ut/python/parallel/test_gatherd.py | 4 +-
 tests/ut/python/parallel/test_get_next.py | 6 +-
 .../parallel/test_get_parameter_layout.py | 2 +-
 tests/ut/python/parallel/test_gpu_dropout.py | 10 +-
 .../test_hybird_parallel_activation.py | 4 +-
 .../parallel/test_initializer_weight_slice.py | 10 +-
 tests/ut/python/parallel/test_l2normalize.py | 4 +-
 tests/ut/python/parallel/test_layer_norm.py | 4 +-
 .../parallel/test_layer_norm_further.py | 4 +-
 tests/ut/python/parallel/test_linear.py | 4 +-
 .../python/parallel/test_loop_two_matmul.py | 4 +-
 .../parallel/test_loss_and_optimizer.py | 4 +-
 .../parallel/test_manual_embedding_lookup.py | 4 +-
 .../python/parallel/test_manual_gatherv2.py | 4 +-
 .../ut/python/parallel/test_matmul_dropout.py | 4 +-
 .../ut/python/parallel/test_matmul_tensor.py | 4 +-
 .../python/parallel/test_maxpool_avgpool.py | 4 +-
 .../test_mix_precision_hybrid_parallel.py | 4 +-
 tests/ut/python/parallel/test_mul_div_bn.py | 4 +-
 .../parallel/test_multi_field_embedding.py | 4 +-
 tests/ut/python/parallel/test_neg.py | 4 +-
 .../python/parallel/test_neighborexchange.py | 24 +-
 tests/ut/python/parallel/test_one_dev.py | 4 +-
 tests/ut/python/parallel/test_one_hot_net.py | 6 +-
 .../parallel/test_one_weight_parameter.py | 4 +-
 tests/ut/python/parallel/test_onehot.py | 4 +-
 tests/ut/python/parallel/test_onehot_2dim.py | 4 +-
 tests/ut/python/parallel/test_optimizer.py | 4 +-
 .../parallel/test_optimizer_clone_weight.py | 4 +-
 tests/ut/python/parallel/test_pack.py | 10 +-
 .../parallel/test_parallel_optimizer.py | 10 +-
 .../parallel/test_parameter_multi_users.py | 4 +-
 tests/ut/python/parallel/test_prelu.py | 6 +-
 tests/ut/python/parallel/test_print.py | 6 +-
 .../parallel/test_reduce_method_info.py | 6 +-
 tests/ut/python/parallel/test_reluv2.py | 4 +-
 .../ut/python/parallel/test_repeated_calc.py | 4 +-
 tests/ut/python/parallel/test_reshape.py | 4 +-
 .../python/parallel/test_reshape_optimized.py | 4 +-
 .../python/parallel/test_reshape_parameter.py | 4 +-
 .../test_reshape_skip_redistribution.py | 4 +-
 .../python/parallel/test_reshape_unexpand.py | 20 +-
 tests/ut/python/parallel/test_scalar_loss.py | 4 +-
 .../ut/python/parallel/test_self_attention.py | 4 +-
 .../parallel/test_semi_auto_two_subgraphs.py | 4 +-
 .../test_shared_param_and_mix_precision.py | 4 +-
 .../test_sigmoid_cross_entropy_with_logits.py | 4 +-
 tests/ut/python/parallel/test_slice.py | 4 +-
 .../test_softmax_cross_entropy_loss.py | 4 +-
 .../parallel/test_sparse_feature_bprop.py | 8 +-
 .../python/parallel/test_sparse_gather_v2.py | 28 +-
 tests/ut/python/parallel/test_split.py | 6 +-
 .../python/parallel/test_split_grad_sens.py | 8 +-
 tests/ut/python/parallel/test_square.py | 4 +-
 tests/ut/python/parallel/test_squeeze_info.py | 4 +-
 .../ut/python/parallel/test_step_parallel.py | 4 +-
 .../parallel/test_strategy_checkpoint.py | 10 +-
 tests/ut/python/parallel/test_stridedslice.py | 4 +-
 tests/ut/python/parallel/test_sum_as_loss.py | 4 +-
 tests/ut/python/parallel/test_tile.py | 4 +-
 .../ut/python/parallel/test_train_and_eval.py | 6 +-
 tests/ut/python/parallel/test_two_matmul.py | 4 +-
 .../parallel/test_two_weights_parameter.py | 4 +-
 .../test_uniform_candidate_sampler.py | 4 +-
 .../parallel/test_unsortedsegmentmax.py | 4 +-
 .../parallel/test_unsortedsegmentmin.py | 4 +-
 .../parallel/test_unsortedsegmentsum.py | 4 +-
 .../parallel/test_virtual_dataset_3_input.py | 4 +-
 .../test_virtual_dataset_with_strategy.py | 4 +-
 .../ut/python/parallel/test_virtual_output.py | 10 +-
.../ut/python/pipeline/parse/test_fix_bug.py | 6 +- tests/ut/python/pipeline/parse/test_list.py | 6 +- tests/ut/python/pipeline/parse/test_parse.py | 4 +- 217 files changed, 938 insertions(+), 940 deletions(-) diff --git a/mindspore/_extends/parse/parser.py b/mindspore/_extends/parse/parser.py index e3b0afee226..7f7ca3ea297 100644 --- a/mindspore/_extends/parse/parser.py +++ b/mindspore/_extends/parse/parser.py @@ -30,7 +30,7 @@ from mindspore import context from mindspore import log as logger from mindspore import nn from mindspore import ops -from mindspore.common.api import _MindSporeFunction +from mindspore.common.api import _MindsporeFunctionExecutor from mindspore.common.dtype import pytype_to_dtype from .namespace import CellNamespace, ClosureNamespace, ClassMemberNamespace from .resources import parse_object_map, convert_object_map, trope_ns, SYMBOL_UNDEFINE, NO_IMPLEMENT @@ -177,8 +177,8 @@ def resolve_symbol(namespace, symbol): logger.debug("resolve exception occurred, value = %r", e) logger.debug("resolve type is invalid, namespace = %s, symbol = %s", namespace.__str__(), symbol) - if isinstance(resolve_, _MindSporeFunction): - logger.debug("resolve class _MindSporeFunction, resolve fn instead.") + if isinstance(resolve_, _MindsporeFunctionExecutor): + logger.debug("resolve class _MindsporeFunctionExecutor, resolve fn instead.") resolve_ = resolve_.fn return resolve_ diff --git a/mindspore/ccsrc/debug/debugger/debugger.cc b/mindspore/ccsrc/debug/debugger/debugger.cc index 25a921f15b0..138238718e4 100644 --- a/mindspore/ccsrc/debug/debugger/debugger.cc +++ b/mindspore/ccsrc/debug/debugger/debugger.cc @@ -342,7 +342,7 @@ void Debugger::PreExecute(const KernelGraphPtr &graph_ptr) { // Multiple graph, and not the initial step, // stop only when receive the first sub run graph for each step // if we have stopped for the last kernel before, no need to stop again - if (pipeline::ExecutorPy::GetDebugTerminate()) { + if (pipeline::GraphExecutorPy::GetDebugTerminate()) { return; } if (!(run_level_ == "node" && suspended_at_last_kernel_)) { @@ -449,7 +449,7 @@ void Debugger::PostExecuteGraphDebugger() { void Debugger::PostExecute() { // access lock for public method std::lock_guard a_lock(access_lock_); - if (pipeline::ExecutorPy::GetDebugTerminate()) { + if (pipeline::GraphExecutorPy::GetDebugTerminate()) { return; } if (debugger_->DebuggerBackendEnabled()) { @@ -486,7 +486,7 @@ bool Debugger::ReadNodeDataRequired(const CNodePtr &kernel) const { void Debugger::PostExecuteNode(const CNodePtr &kernel, bool last_kernel) { // access lock for public method std::lock_guard a_lock(access_lock_); - if (pipeline::ExecutorPy::GetDebugTerminate()) { + if (pipeline::GraphExecutorPy::GetDebugTerminate()) { return; } if (debugger_enabled_ && !is_dataset_graph_) { @@ -1074,7 +1074,7 @@ void Debugger::Exit() { // debugger will notify main thread to exit because main thread can only exit at step boundary. MS_LOG(INFO) << "Exit Debugger"; SetEnableHeartbeat(false); - pipeline::ExecutorPy::DebugTerminate(true); + pipeline::GraphExecutorPy::DebugTerminate(true); } std::list Debugger::CheckWatchpoints(const std::string &watchnode, const CNodePtr &kernel, diff --git a/mindspore/ccsrc/frontend/optimizer/py_pass.cc b/mindspore/ccsrc/frontend/optimizer/py_pass.cc index e0037601a12..7904df5c039 100644 --- a/mindspore/ccsrc/frontend/optimizer/py_pass.cc +++ b/mindspore/ccsrc/frontend/optimizer/py_pass.cc @@ -201,7 +201,7 @@ void ReflectParamBackToPython(const AnfNodePtr ¶m, const string ¶m_name, // 1. 
Get current cell object auto ppm = opt::python_pass::PyPassManager::GetInstance(); auto resource = ppm->GetResource(); - py::object top_cell = resource->input(); + py::object top_cell = resource->source_input(); if (py::isinstance(top_cell)) { MS_LOG(EXCEPTION) << "Failed to get top cell from resource."; } diff --git a/mindspore/ccsrc/frontend/parallel/graph_util/graph_info.cc b/mindspore/ccsrc/frontend/parallel/graph_util/graph_info.cc index 86cab87673f..6d8e10f710e 100644 --- a/mindspore/ccsrc/frontend/parallel/graph_util/graph_info.cc +++ b/mindspore/ccsrc/frontend/parallel/graph_util/graph_info.cc @@ -70,7 +70,7 @@ bool GetLoopIndexFromCNode(const CNodePtr &cnode, size_t *loop_index) { } void SetOpsNumToExecutor(size_t num_ops) { - auto executor = pipeline::ExecutorPy::GetInstance(); + auto executor = pipeline::GraphExecutorPy::GetInstance(); executor->SetNumOpsInfo(num_ops); } } // namespace parallel diff --git a/mindspore/ccsrc/frontend/parallel/step_parallel.cc b/mindspore/ccsrc/frontend/parallel/step_parallel.cc index a20615d4384..5d965424f0f 100644 --- a/mindspore/ccsrc/frontend/parallel/step_parallel.cc +++ b/mindspore/ccsrc/frontend/parallel/step_parallel.cc @@ -2952,7 +2952,7 @@ static AnfNodePtr FindGrad(const CNodePtr &cnode, size_t curr_depth) { void HandleRootReshapeAndSaveStrategy(const std::vector &all_nodes) { // If root graph has reshape op. Find the corresponding parameter. // Reshape's shape is the shape of the parameter. - auto executor = pipeline::ExecutorPy::GetInstance(); + auto executor = pipeline::GraphExecutorPy::GetInstance(); for (auto &node : all_nodes) { if (!node->isa()) { continue; diff --git a/mindspore/ccsrc/pipeline/jit/action.cc b/mindspore/ccsrc/pipeline/jit/action.cc index 3224e11cb76..f3f753e1695 100644 --- a/mindspore/ccsrc/pipeline/jit/action.cc +++ b/mindspore/ccsrc/pipeline/jit/action.cc @@ -319,11 +319,11 @@ void CheckRootInputShapeAndType(const ResourcePtr &res, const FuncGraphPtr &load bool ParseAction(const ResourcePtr &res) { MS_EXCEPTION_IF_NULL(res); - if (!res->input()) { + if (!res->source_input()) { MS_LOG(EXCEPTION) << "Parse error"; } - py::object input = res->input(); + py::object input = res->source_input(); parse::Parser::InitParserEnvironment(input); py::module path = py::module::import("os.path"); std::string dir = path.attr("dirname")(py::globals()["__file__"]).cast(); @@ -642,7 +642,7 @@ bool EliminateForwardCNode(const ResourcePtr &res) { return true; } - auto graph_executor = pipeline::ExecutorPy::GetInstance(); + auto graph_executor = pipeline::GraphExecutorPy::GetInstance(); MS_EXCEPTION_IF_NULL(graph_executor); auto phase = graph_executor->phase(); MS_LOG(DEBUG) << "The phase of current pipeline graph is: " << phase; @@ -913,7 +913,7 @@ bool ValidateAction(const ResourcePtr &res) { return ValidatePass(res); } bool SetMindIRGraphAction(const ResourcePtr &res) { MS_EXCEPTION_IF_NULL(res); res->set_is_load(true); - auto cell = py::cast(res->input()); + auto cell = py::cast(res->source_input()); if (cell == nullptr) { MS_LOG(EXCEPTION) << "The graph loaded from mindir is null."; } diff --git a/mindspore/ccsrc/pipeline/jit/init.cc b/mindspore/ccsrc/pipeline/jit/init.cc index 5f8af3bfefe..3c12d2bddb2 100644 --- a/mindspore/ccsrc/pipeline/jit/init.cc +++ b/mindspore/ccsrc/pipeline/jit/init.cc @@ -44,7 +44,7 @@ namespace py = pybind11; using EnvInstance = mindspore::EnvInstance; -using ExecutorPy = mindspore::pipeline::ExecutorPy; +using GraphExecutorPy = mindspore::pipeline::GraphExecutorPy; using Pipeline = 
mindspore::pipeline::Pipeline; using PrimitivePy = mindspore::PrimitivePy; using MetaFuncGraph = mindspore::MetaFuncGraph; @@ -70,40 +70,41 @@ PYBIND11_MODULE(_c_expression, m) { mindspore::ScopedLongRunning::SetHook(std::make_unique()); // Class Pipeline interface - (void)py::class_>(m, "Executor_") - .def_static("get_instance", &ExecutorPy::GetInstance, "Executor get_instance.") - .def("__call__", &ExecutorPy::Run, py::arg("args"), py::arg("phase") = py::str(""), "Executor run function.") - .def("del_net_res", &ExecutorPy::DelNetRes, py::arg("network_id") = py::str(""), "Delete network resource.") - .def("get_func_graph", &ExecutorPy::GetFuncGraph, py::arg("phase") = py::str(""), "Get graph pointer.") - .def("get_func_graph_proto", &ExecutorPy::GetFuncGraphProto, py::arg("phase") = py::str(""), + (void)py::class_>(m, "GraphExecutor_") + .def_static("get_instance", &GraphExecutorPy::GetInstance, "Executor get_instance.") + .def("__call__", &GraphExecutorPy::Run, py::arg("args"), py::arg("phase") = py::str(""), "Executor run function.") + .def("del_net_res", &GraphExecutorPy::DelNetRes, py::arg("network_id") = py::str(""), "Delete network resource.") + .def("get_func_graph", &GraphExecutorPy::GetFuncGraph, py::arg("phase") = py::str(""), "Get graph pointer.") + .def("get_func_graph_proto", &GraphExecutorPy::GetFuncGraphProto, py::arg("phase") = py::str(""), py::arg("type") = py::str("onnx_ir"), "Get graph proto string by specifying ir type.") - .def("compile", &ExecutorPy::Compile, py::arg("obj"), py::arg("args"), py::arg("phase") = py::str(""), + .def("compile", &GraphExecutorPy::Compile, py::arg("obj"), py::arg("args"), py::arg("phase") = py::str(""), py::arg("use_vm") = py::bool_(false), py::arg("queue_name"), "Compile obj by executor.") - .def("updata_param_node_default_input", &ExecutorPy::UpdataParamNodeDefaultInput, py::arg("phase"), + .def("updata_param_node_default_input", &GraphExecutorPy::UpdataParamNodeDefaultInput, py::arg("phase"), py::arg("params"), "Fetch the inputs of Conv or Matmul for quant export.") - .def("get_parameter_layout", &ExecutorPy::GetParameterLayout, py::arg("phase") = py::str("train"), + .def("get_parameter_layout", &GraphExecutorPy::GetParameterLayout, py::arg("phase") = py::str("train"), "Get Parameter Tensor Layout Dictionary.") - .def("get_parallel_parameter_name_list", &ExecutorPy::GetParallelParameterNameList, + .def("get_parallel_parameter_name_list", &GraphExecutorPy::GetParallelParameterNameList, py::arg("phase") = py::str("train"), "Get Parallel Parameter Name List.") - .def("get_strategy", &ExecutorPy::GetCNodeStrategy, py::arg("phase") = py::str("train"), + .def("get_strategy", &GraphExecutorPy::GetCNodeStrategy, py::arg("phase") = py::str("train"), "Get CNode Strategy Dictionary.") - .def("get_num_parallel_ops", &ExecutorPy::GetNumOpsInfo, py::arg("phase") = py::str("train"), + .def("get_num_parallel_ops", &GraphExecutorPy::GetNumOpsInfo, py::arg("phase") = py::str("train"), "Get the number of parallel operators.") - .def("get_allreduce_fusion", &ExecutorPy::GetAllreduceFusion, py::arg("phase") = py::str("train"), + .def("get_allreduce_fusion", &GraphExecutorPy::GetAllreduceFusion, py::arg("phase") = py::str("train"), "Get Allreduce Fusion Dictionary.") - .def("fetch_info_for_quant_export", &ExecutorPy::FetchInfoForQuantExport, py::arg("phase") = py::str("train"), + .def("fetch_info_for_quant_export", &GraphExecutorPy::FetchInfoForQuantExport, py::arg("phase") = py::str("train"), "Fetch the inputs of Conv or Matmul for quant export.") - 
.def("build_data_graph", &ExecutorPy::BuildGraph, py::arg("build_params"), py::arg("phase") = py::str("train"), + .def("build_data_graph", &GraphExecutorPy::BuildGraph, py::arg("build_params"), py::arg("phase") = py::str("train"), py::arg("broadcast_params") = py::dict(), "Build data graph.") - .def("has_compiled", &ExecutorPy::HasCompiled, py::arg("phase") = py::str(""), "get if cell compiled.") - .def("run_init_graph", &ExecutorPy::RunInitGraph, "Run init Graph.") - .def("set_py_exe_path", &ExecutorPy::PyExePath, py::arg("py_exe_path") = py::str(""), "set python executable path.") - .def("set_kernel_build_server_dir", &ExecutorPy::KernelBuildServerDir, - py::arg("kernel_build_server_dir") = py::str(""), "set kernel build server directory path."); + .def("has_compiled", &GraphExecutorPy::HasCompiled, py::arg("phase") = py::str(""), "Get if cell compiled.") + .def("run_init_graph", &GraphExecutorPy::RunInitGraph, "Run init Graph.") + .def("set_py_exe_path", &GraphExecutorPy::PyExePath, py::arg("py_exe_path") = py::str(""), + "Set python executable path.") + .def("set_kernel_build_server_dir", &GraphExecutorPy::KernelBuildServerDir, + py::arg("kernel_build_server_dir") = py::str(""), "Set kernel build server directory path."); (void)py::class_>(m, "EnvInstance_").def(py::init()); - (void)m.def("generate_key", &mindspore::pipeline::GenerateKey, "Generate the function graph key."); + (void)m.def("generate_arguments_key", &mindspore::pipeline::GenerateArgumentsKey, "Generate unique key of argument."); (void)m.def("real_run_op", &mindspore::pynative::RealRunOp, "Run op pynatively."); (void)m.def("reset_op_id", &mindspore::pipeline::ResetOpId, "Reset Operator Id"); (void)m.def("init_hccl", &mindspore::pipeline::InitHccl, "Init Hccl"); diff --git a/mindspore/ccsrc/pipeline/jit/pipeline.cc b/mindspore/ccsrc/pipeline/jit/pipeline.cc index 7c051bc86f4..3db37050b78 100644 --- a/mindspore/ccsrc/pipeline/jit/pipeline.cc +++ b/mindspore/ccsrc/pipeline/jit/pipeline.cc @@ -99,11 +99,11 @@ const char IR_TYPE_ANF[] = "anf_ir"; const char IR_TYPE_ONNX[] = "onnx_ir"; const char IR_TYPE_MINDIR[] = "mind_ir"; -ExecutorPyPtr ExecutorPy::executor_ = nullptr; -std::mutex ExecutorPy::instance_lock_; -bool ExecutorPy::debugger_terminate_ = false; +GraphExecutorPyPtr GraphExecutorPy::executor_ = nullptr; +std::mutex GraphExecutorPy::instance_lock_; +bool GraphExecutorPy::debugger_terminate_ = false; -std::unordered_map g_args_cache; @@ -236,30 +236,33 @@ void CheckArgsValid(const py::tuple &args) { } } -py::tuple GenerateKey(const std::string &name, const std::unordered_map &defaults) { - MS_LOG(DEBUG) << "GenerateKey args size:" << defaults.size(); +py::object GenerateArgumentsKey(const std::unordered_map &args) { + MS_LOG(DEBUG) << "GenerateArgumentsKey args size:" << args.size(); abstract::AbstractBasePtrList args_spec; - for (const auto &arg : defaults) { + for (const auto &arg : args) { if (py::isinstance(arg.second)) { - MS_LOG(EXCEPTION) << "GenerateKey failed, argument input should not be py::module"; + MS_LOG(EXCEPTION) << "GenerateArgumentsKey failed, argument input should not be py::module"; } ValuePtr converted = nullptr; if (!parse::ConvertData(arg.second, &converted)) { - MS_LOG(EXCEPTION) << "GenerateKey convert arg failed"; + MS_LOG(EXCEPTION) << "GenerateArgumentsKey convert arg failed"; } args_spec.push_back(ArgsToAbstract(converted)); } - if (g_args_cache.count(args_spec) == 0) { - static int64_t key = 0; - MS_LOG(INFO) << "Start new args and compile key:" << key; - g_args_cache[args_spec] = key++; 
+ + uint64_t key; + auto iter = g_args_cache.find(args_spec); + if (iter == g_args_cache.end()) { + static uint64_t key_counter = 0; + key = key_counter; + ++key_counter; + g_args_cache[args_spec] = key; + MS_LOG(INFO) << "Generate a new compile key for new args, key: " << key; + } else { + key = iter->second; } - constexpr size_t arg_size = 2; - auto argSpec = py::tuple(arg_size); - argSpec[0] = name; - argSpec[1] = g_args_cache[args_spec]; - return argSpec; + return py::int_(key); } py::bool_ VerifyInputSignature(const py::list &input_signature, const py::tuple &inputs) { @@ -300,9 +303,9 @@ py::bool_ VerifyInputSignature(const py::list &input_signature, const py::tuple return true; } -ExecutorPy::ExecutorPy() {} +GraphExecutorPy::GraphExecutorPy() {} -ResourcePtr ExecutorPy::GetResource(const std::string &phase) { +ResourcePtr GraphExecutorPy::GetResource(const std::string &phase) { MS_LOG(DEBUG) << "Phase size:" << info_.size(); if (info_.count(phase) == 0) { return nullptr; @@ -310,14 +313,14 @@ ResourcePtr ExecutorPy::GetResource(const std::string &phase) { return info_[phase]->resource; } -FuncGraphPtr ExecutorPy::GetFuncGraph(const std::string &phase) { +FuncGraphPtr GraphExecutorPy::GetFuncGraph(const std::string &phase) { if (info_.count(phase) == 0) { - MS_LOG(EXCEPTION) << "No phase in executor:" << GetPhasePrefix(phase); + MS_LOG(EXCEPTION) << "No executor info. found for phase: " << phase; } return info_[phase]->func_graph; } -FuncGraphPtr ExecutorPy::GetGradGraph(const std::string &phase) { +FuncGraphPtr GraphExecutorPy::GetGradGraph(const std::string &phase) { if (phase.empty()) { MS_LOG(EXCEPTION) << "The input phase is empty."; } @@ -332,7 +335,7 @@ FuncGraphPtr ExecutorPy::GetGradGraph(const std::string &phase) { return grad_graph; } -void ExecutorPy::SetGradGraph(const FuncGraphPtr &grad_graph, const std::string &phase) { +void GraphExecutorPy::SetGradGraph(const FuncGraphPtr &grad_graph, const std::string &phase) { if (phase.empty()) { MS_LOG(EXCEPTION) << "The input phase is empty."; } @@ -349,7 +352,7 @@ void ExecutorPy::SetGradGraph(const FuncGraphPtr &grad_graph, const std::string execute_info->grad_graph = grad_graph; } -compile::VmEvalFuncPtr ExecutorPy::GetVmEvalFunc(const std::string &phase) { +compile::VmEvalFuncPtr GraphExecutorPy::GetVmEvalFunc(const std::string &phase) { ResourcePtr res = GetResource(phase); MS_EXCEPTION_IF_NULL(res); if (res->results().find(kOutput) != res->results().end() && res->results()[kOutput].is()) { @@ -359,14 +362,14 @@ compile::VmEvalFuncPtr ExecutorPy::GetVmEvalFunc(const std::string &phase) { return nullptr; } -bool ExecutorPy::HasCompiled(const std::string &phase) const { +bool GraphExecutorPy::HasCompiled(const std::string &phase) const { if (info_.count(phase) == 0) { return false; } return true; } -py::bytes ExecutorPy::GetFuncGraphProto(const std::string &phase, const std::string &ir_type) { +py::bytes GraphExecutorPy::GetFuncGraphProto(const std::string &phase, const std::string &ir_type) { FuncGraphPtr fg_ptr = GetFuncGraph(phase); if (fg_ptr == nullptr) { for (auto &item : info_) { @@ -402,40 +405,40 @@ py::bytes ExecutorPy::GetFuncGraphProto(const std::string &phase, const std::str MS_LOG(EXCEPTION) << "Unknown ir type: " << ir_type; } -py::dict ExecutorPy::GetParameterLayout(const std::string &phase) { +py::dict GraphExecutorPy::GetParameterLayout(const std::string &phase) { MS_LOG(DEBUG) << "GetParameterLayout!"; std::string layout_graph = phase + kStepParallelGraph; auto graph = GetFuncGraph(layout_graph); return 
mindspore::parallel::GetParameterLayout(graph); } -py::dict ExecutorPy::GetCNodeStrategy(const std::string &phase) { +py::dict GraphExecutorPy::GetCNodeStrategy(const std::string &phase) { MS_LOG(DEBUG) << "GetCNodeStrategy!"; return stra_dict_[phase]; } -py::list ExecutorPy::GetParallelParameterNameList(const std::string &phase) { +py::list GraphExecutorPy::GetParallelParameterNameList(const std::string &phase) { std::string param_graph = phase + kStepParallelGraph; auto graph = GetFuncGraph(param_graph); return mindspore::parallel::GetParallelParameterNameList(graph); } -void ExecutorPy::SetCNodeStrategy(const std::string &name, const parallel::Strategys &strategy) { +void GraphExecutorPy::SetCNodeStrategy(const std::string &name, const parallel::Strategys &strategy) { MS_LOG(DEBUG) << "SetCNodeStrategy!"; stra_dict_[phase_][py::str(name)] = strategy; } -size_t ExecutorPy::GetNumOpsInfo(const std::string &phase) { +size_t GraphExecutorPy::GetNumOpsInfo(const std::string &phase) { MS_LOG(DEBUG) << "GetNumOpsInfo!"; return phase_to_num_op_info_[phase]; } -void ExecutorPy::SetNumOpsInfo(size_t num_ops) { +void GraphExecutorPy::SetNumOpsInfo(size_t num_ops) { MS_LOG(DEBUG) << "SetNumOpsInfo!"; phase_to_num_op_info_[phase_] = num_ops; } -py::dict ExecutorPy::GetAllreduceFusion(const std::string &phase) { +py::dict GraphExecutorPy::GetAllreduceFusion(const std::string &phase) { MS_LOG(INFO) << "GetAllreduceFusion!"; auto graph = GetFuncGraph(phase); return mindspore::parallel::GetAllreduceFusion(graph); @@ -443,7 +446,7 @@ py::dict ExecutorPy::GetAllreduceFusion(const std::string &phase) { // Not support multi thread, not support nested call too. // Here using nested_called flg to avoid nested call. -void ExecutorPy::DelNetRes(const std::string &id) { +void GraphExecutorPy::DelNetRes(const std::string &id) { static bool nested_called = false; if (nested_called) { return; @@ -479,18 +482,19 @@ void ExecutorPy::DelNetRes(const std::string &id) { nested_called = false; } -void ExecutorPy::ClearRes() { +void GraphExecutorPy::ClearRes() { MS_LOG(INFO) << "Clean executor resource!"; executor_ = nullptr; } -ExecutorPy::~ExecutorPy() { +GraphExecutorPy::~GraphExecutorPy() { MS_LOG(INFO) << "Release Executor!"; ConfigManager::GetInstance().ResetConfig(); } -void ExecutorPy::GetWeightInfo(const CNodePtr &root_node, const AnfNodePtr &weight_node, - std::map> *fake_quant_table) { +void GraphExecutorPy::GetWeightInfo( + const CNodePtr &root_node, const AnfNodePtr &weight_node, + std::map> *fake_quant_table) { MS_EXCEPTION_IF_NULL(root_node); MS_EXCEPTION_IF_NULL(fake_quant_table); std::string weight_name; @@ -557,11 +561,11 @@ void ExecutorPy::GetWeightInfo(const CNodePtr &root_node, const AnfNodePtr &weig (*fake_quant_table)[weight_name] = std::make_pair(quant_op->adapter(), fakequant_min_node_name); } -std::map> ExecutorPy::FetchInfoForQuantExport( - const std::string &phase_s) { - FuncGraphPtr func_graph = info_[phase_s]->resource->func_graph(); +std::map> GraphExecutorPy::FetchInfoForQuantExport( + const std::string &phase) { + FuncGraphPtr func_graph = info_[phase]->resource->func_graph(); MS_EXCEPTION_IF_NULL(func_graph); - MS_LOG(DEBUG) << "FetchInfoForQuantExport func graph(" << func_graph->ToString() << ") phase(" << phase_s << ")!"; + MS_LOG(DEBUG) << "FetchInfoForQuantExport func graph(" << func_graph->ToString() << ") phase(" << phase << ")!"; std::map> fake_quant_table; auto filter = [](const AnfNodePtr &node) { return !(IsPrimitiveCNode(node, prim::kPrimConv2D) || IsPrimitiveCNode(node, 
prim::kPrimMatMul) || @@ -605,21 +609,21 @@ std::map> ExecutorPy: return fake_quant_table; } -void ExecutorPy::SaveCompiledGraph(const std::string &phase_s) { - // save the graph to ExecutorPy - FuncGraphPtr func_graph = info_[phase_s]->resource->func_graph(); +void GraphExecutorPy::SaveCompiledGraph(const std::string &phase) { + // save the graph to GraphExecutorPy + FuncGraphPtr func_graph = info_[phase]->resource->func_graph(); MS_EXCEPTION_IF_NULL(func_graph); MS_EXCEPTION_IF_NULL(parallel::ParallelContext::GetInstance()); std::string parallel_mode = parallel::ParallelContext::GetInstance()->parallel_mode(); - MS_LOG(INFO) << "Save compiled func graph(" << func_graph->ToString() << ") phase(" << phase_s << ")!"; - info_[phase_s]->func_graph = func_graph; + MS_LOG(INFO) << "Save compiled func graph(" << func_graph->ToString() << ") phase(" << phase << ")!"; + info_[phase]->func_graph = func_graph; if ((func_graph != nullptr) && func_graph->has_flag(parallel::AUTO_PARALLEL) && ((parallel_mode == parallel::AUTO_PARALLEL) || (parallel_mode == parallel::SEMI_AUTO_PARALLEL))) { MS_LOG(DEBUG) << "Save model parallel parameter layout graph!"; - func_graph = info_[phase_s]->resource->results()[kStepParallelGraph].cast(); + func_graph = info_[phase]->resource->results()[kStepParallelGraph].cast(); ExecutorInfoPtr executor_info = std::make_shared(); - std::string layout_graph = phase_s + kStepParallelGraph; + std::string layout_graph = phase + kStepParallelGraph; executor_info->func_graph = func_graph; info_[layout_graph] = executor_info; } else { @@ -628,7 +632,7 @@ void ExecutorPy::SaveCompiledGraph(const std::string &phase_s) { MS_LOG(INFO) << "End save compiled func graph!"; } -void ExecutorPy::GetGeBackendPolicy() const { +void GraphExecutorPy::GetGeBackendPolicy() const { auto ms_context = MsContext::GetInstance(); MS_EXCEPTION_IF_NULL(ms_context); std::string backend = ms_context->backend_policy(); @@ -637,24 +641,24 @@ void ExecutorPy::GetGeBackendPolicy() const { } } -bool IsPhaseExportAir(const std::string &phase_s) { +bool IsPhaseExportAir(const std::string &phase) { auto phase_to_export = "export.air"; - return phase_s.rfind(phase_to_export) != std::string::npos; + return phase.rfind(phase_to_export) != std::string::npos; } -bool IsPhaseTrain(const std::string &phase_s) { +bool IsPhaseTrain(const std::string &phase) { const std::string phase_to_train = "train"; - return phase_s.rfind(phase_to_train) != std::string::npos; + return phase.rfind(phase_to_train) != std::string::npos; } -bool IsPhaseLoadFromMindIR(const std::string &phase_s) { +bool IsPhaseLoadFromMindIR(const std::string &phase) { const std::string mindir_graph = "graph_load_from_mindir"; - return phase_s.rfind(mindir_graph) != std::string::npos; + return phase.rfind(mindir_graph) != std::string::npos; } -std::vector GetPipeline(const ResourcePtr &resource, const std::string &phase_s, bool use_vm) { +std::vector GetPipeline(const ResourcePtr &resource, const std::string &phase, bool use_vm) { MS_EXCEPTION_IF_NULL(resource); - bool is_air = IsPhaseExportAir(phase_s); + bool is_air = IsPhaseExportAir(phase); std::string backend = MsContext::GetInstance()->backend_policy(); @@ -682,11 +686,11 @@ std::vector GetPipeline(const ResourcePtr &resource, const std::stri resource->results()[kBackend] = backend_ptr; // If the 'use_frontend_compile_cache' context has been set true and the cache is read successfully, // do the backend actions only. 
- if (IsPhaseTrain(phase_s) && MsContext::GetInstance()->get_param(MS_CTX_LOAD_COMPILE_CACHE) && + if (IsPhaseTrain(phase) && MsContext::GetInstance()->get_param(MS_CTX_LOAD_COMPILE_CACHE) && resource->func_graph() != nullptr) { return BackendPipeline(); } - if (IsPhaseLoadFromMindIR(phase_s)) { + if (IsPhaseLoadFromMindIR(phase)) { return MindIRPipeline(); } return VmPipeline(); @@ -694,28 +698,30 @@ std::vector GetPipeline(const ResourcePtr &resource, const std::stri return GePipeline(); } -bool ExecutorPy::CompileInner(const py::object &obj, const py::tuple &args, const py::object &phase, bool use_vm, - const std::string &queue_name) { - MS_LOG(DEBUG) << "Start ExecutorPy compile!"; - if ((!py::isinstance(phase))) { - MS_LOG(ERROR) << "Arg phase must be string."; +bool GraphExecutorPy::CompileInner(const py::object &source_obj, const py::tuple &args, const py::object &phase_obj, + bool use_vm, const std::string &queue_name) { + // Check if the phase is valid. + if ((!py::isinstance(phase_obj))) { + MS_LOG(ERROR) << "The `phase` must be string."; return false; } - // check the function or net is valid - if (py::isinstance(obj)) { - MS_LOG(ERROR) << "Find error: parse obj is None."; + // Check if the function or net is valid. + if (py::isinstance(source_obj)) { + MS_LOG(ERROR) << "The source object to compile should not be None."; return false; } - // check the args of function or net is valid + // Check if the args of function or net is valid. CheckArgsValid(args); + + auto phase = py::cast(phase_obj); + MS_LOG(INFO) << "Start compiling, phase: " << phase << ", source: {" << py::str(source_obj) << "}"; + MS_LOG(DEBUG) << "args: " << py::str(const_cast(args)); + #ifdef ENABLE_GE GetGeBackendPolicy(); #endif ExecutorInfoPtr executor_info = std::make_shared(); - auto phase_s = py::cast(phase); - phase_ = phase_s; - MS_LOG(INFO) << "ExecutorPy compile phase:" << phase_s << "!"; - ResourcePtr resource = std::make_shared(obj); + ResourcePtr resource = std::make_shared(source_obj); if (MsContext::GetInstance()->get_param(MS_CTX_LOAD_COMPILE_CACHE)) { #ifdef ENABLE_PROFILE @@ -728,41 +734,42 @@ bool ExecutorPy::CompileInner(const py::object &obj, const py::tuple &args, cons #endif } - auto p_actions = GetPipeline(resource, phase_s, use_vm); - std::shared_ptr pip = std::make_shared(resource, FilterActions(p_actions, phase_s)); + phase_ = phase; + auto actions = GetPipeline(resource, phase, use_vm); + std::shared_ptr pip = std::make_shared(resource, FilterActions(actions, phase)); - // get the parameters items and add the value to args_spec + // Get the parameters items and add the value to args_spec. abstract::AbstractBasePtrList args_spec; std::size_t size = args.size(); for (std::size_t i = 0; i < size; i++) { ValuePtr converted = nullptr; bool succ = parse::ConvertData(args[i], &converted); if (!succ) { - MS_LOG(EXCEPTION) << "Args convert error"; + MS_LOG(EXCEPTION) << "Fail to convert the " << i << "th argument, args[" << i << "]: " << py::str(args[i]); } args_spec.push_back(ArgsToAbstract(converted)); } - resource->set_args_spec(args_spec); executor_info->arg_list_size = size; executor_info->resource = resource; - info_[phase_s] = executor_info; - pip->Run(phase_s); + info_[phase] = executor_info; + pip->Run(phase); - // save the run graph func to MsPipeLine - SaveCompiledGraph(phase_s); + // Save the compiled graph to MsPipeLine. 
+ SaveCompiledGraph(phase); opt::python_pass::PyPassManager::GetInstance()->ClearPipelineRes(); abstract::AnalysisContext::ClearContext(); - // Reclaim all resource used by optimizer; + // Reclaim all resource used by optimizer. ReclaimOptimizer(); resource->Clean(); - MS_LOG(INFO) << "End ExecutorPy compile!"; + MS_LOG(INFO) << "Finish compiling."; return true; } -std::vector ExecutorPy::FilterActions(const std::vector &actions, const std::string &phase) { +std::vector GraphExecutorPy::FilterActions(const std::vector &actions, + const std::string &phase) { // filter action after validate when 'export'. if (GetPhasePrefix(phase).rfind("export", 0) == std::string::npos) { return actions; @@ -778,7 +785,7 @@ std::vector ExecutorPy::FilterActions(const std::vector return filtered_actions; } -void ExecutorPy::ReleaseResource(const py::object &phase) { +void GraphExecutorPy::ReleaseResource(const py::object &phase) { ResourcePtr res = GetResource(py::cast(phase)); if (res != nullptr) { res->Clean(); @@ -787,17 +794,11 @@ void ExecutorPy::ReleaseResource(const py::object &phase) { ReclaimOptimizer(); } -static std::string PrintArgs(const py::tuple &args) { - py::print(args); - return ""; -} - -bool ExecutorPy::Compile(const py::object &obj, const py::tuple &args, const py::object &phase, bool use_vm, - const std::string &queue_name) { +bool GraphExecutorPy::Compile(const py::object &source_obj, const py::tuple &args, const py::object &phase, bool use_vm, + const std::string &queue_name) { bool ret_value = false; try { - MS_LOG(DEBUG) << PrintArgs(args); - ret_value = CompileInner(obj, args, phase, use_vm, queue_name); + ret_value = CompileInner(source_obj, args, phase, use_vm, queue_name); } catch (const py::error_already_set &ex) { if (!StaticAnalysisException::Instance().HasException()) { // print function call stack info before release @@ -840,8 +841,8 @@ bool ExecutorPy::Compile(const py::object &obj, const py::tuple &args, const py: return ret_value; } -void CacheValidateFuncGraph(const std::string &phase_s, const ResourcePtr &resource) { - if (IsPhaseTrain(phase_s) && MsContext::GetInstance()->get_param(MS_CTX_SAVE_COMPILE_CACHE)) { +void CacheValidateFuncGraph(const std::string &phase, const ResourcePtr &resource) { + if (IsPhaseTrain(phase) && MsContext::GetInstance()->get_param(MS_CTX_SAVE_COMPILE_CACHE)) { #ifdef ENABLE_PROFILE double t1 = GetTime(); #endif @@ -853,12 +854,12 @@ void CacheValidateFuncGraph(const std::string &phase_s, const ResourcePtr &resou } } -void Pipeline::Run(const std::string &phase_s) { +void Pipeline::Run(const std::string &phase) { MS_LOG(INFO) << "Pipeline run"; MS_EXCEPTION_IF_NULL(resource_); FuncGraphPtr user_graph = nullptr; - WITH(MsProfile::GetProfile())[&user_graph, &phase_s, this]() { + WITH(MsProfile::GetProfile())[&user_graph, &phase, this]() { size_t i = 0; for (auto &action : actions_) { #ifdef ENABLE_TIMELINE @@ -874,7 +875,7 @@ void Pipeline::Run(const std::string &phase_s) { if (action.first == "task_emit") { SetLoopCount(resource_); } else if (action.first == "validate") { - CacheValidateFuncGraph(phase_s, resource_); + CacheValidateFuncGraph(phase, resource_); } if (!result) { MS_LOG(EXCEPTION) << "Pipeline running to end, failed in step:" << action.first; @@ -983,11 +984,11 @@ void ProcessVmArgInner(const py::tuple &args, const ResourcePtr &res, VectorRef } } -void ExecutorPy::ProcessVmArg(const py::tuple &args, const std::string &phase, VectorRef *const arg_list) { +void GraphExecutorPy::ProcessVmArg(const py::tuple &args, const 
std::string &phase, VectorRef *const arg_list) { ProcessVmArgInner(args, GetResource(phase), arg_list); } -void ExecutorPy::TerminateDebugger() { +void GraphExecutorPy::TerminateDebugger() { if (debugger_terminate_) { MS_LOG(INFO) << "Terminate debugger and clear resources!"; ClearResAtexit(); @@ -995,23 +996,23 @@ void ExecutorPy::TerminateDebugger() { } } -py::object ExecutorPy::Run(const py::tuple &args, const py::object &phase) { +py::object GraphExecutorPy::Run(const py::tuple &args, const py::object &phase_obj) { // Mindspore debugger notify main thread to exit after one step, and will not run next step TerminateDebugger(); std::size_t size = args.size(); - if (!py::isinstance(phase)) { + if (!py::isinstance(phase_obj)) { MS_LOG(EXCEPTION) << "Run failed, phase input is not a str"; } - auto phase_s = py::cast(phase); + auto phase = py::cast(phase_obj); std::string backend = MsContext::GetInstance()->backend_policy(); #ifdef ENABLE_GE if (backend == "ge") { - return ExecDFGraph(info_, args, phase_s); + return ExecDFGraph(info_, args, phase); } #else auto ret_val = std::make_shared(); - if (info_.count(phase_s) != 0 && info_[phase_s]->func_graph != nullptr) { - if (IsGraphOutputValueNodeOrParameter(info_[phase_s]->func_graph->output(), args, ret_val)) { + if (info_.count(phase) != 0 && info_[phase]->func_graph != nullptr) { + if (IsGraphOutputValueNodeOrParameter(info_[phase]->func_graph->output(), args, ret_val)) { // Check the input arg must be Tensor when backend is "ms". if (MsContext::GetInstance()->backend_policy() == kMsConvert) { for (std::size_t i = 0; i < size; i++) { @@ -1032,24 +1033,24 @@ py::object ExecutorPy::Run(const py::tuple &args, const py::object &phase) { return args; } #endif - auto iter = info_.find(phase_s); + auto iter = info_.find(phase); if (iter == info_.end()) { - MS_LOG(EXCEPTION) << "No phase in executor:" << GetPhasePrefix(phase_s); + MS_LOG(EXCEPTION) << "No executor info. found for phase: " << phase; } auto &execute_info = iter->second; MS_EXCEPTION_IF_NULL(execute_info); if (size > execute_info->arg_list_size) { MS_LOG(WARNING) << "The arg num : size = " << size << ". full_arg_size = " << execute_info->arg_list_size; } - ProcessVmArg(args, phase_s, &execute_info->arg_list); + ProcessVmArg(args, phase, &execute_info->arg_list); // Start to run phase. - compile::VmEvalFuncPtr run = GetVmEvalFunc(phase_s); + compile::VmEvalFuncPtr run = GetVmEvalFunc(phase); if (run == nullptr) { - MS_LOG(EXCEPTION) << "Can't find run graph func for " << phase_s; + MS_LOG(EXCEPTION) << "Can't find run graph func for " << phase; } // Set loopsink size for each phase. 
- bool vm_loop_flag = info_[phase_s]->resource->vm_loop_flag(); - int64_t loop_size = info_[phase_s]->resource->loop_size(); + bool vm_loop_flag = info_[phase]->resource->vm_loop_flag(); + int64_t loop_size = info_[phase]->resource->loop_size(); int64_t vm_loop = 1; if (vm_loop_flag) { vm_loop = loop_size; @@ -1069,8 +1070,8 @@ py::object ExecutorPy::Run(const py::tuple &args, const py::object &phase) { return ret; } -FuncGraphPtr ExecutorPy::BuildGraph(const py::dict &init_params, const std::string &phase, - const py::object &broadcast_params) { +FuncGraphPtr GraphExecutorPy::BuildGraph(const py::dict &init_params, const std::string &phase, + const py::object &broadcast_params) { #if ((defined ENABLE_GE) || (defined ENABLE_D)) return BuildDFGraph(info_, init_params, phase, broadcast_params); #else @@ -1078,8 +1079,8 @@ FuncGraphPtr ExecutorPy::BuildGraph(const py::dict &init_params, const std::stri #endif } -void ExecutorPy::UpdataParamNodeDefaultInput(const std::string &phase, - const std::unordered_map ¶ms_value) { +void GraphExecutorPy::UpdataParamNodeDefaultInput( + const std::string &phase, const std::unordered_map ¶ms_value) { FuncGraphPtr func_graph = info_[phase]->resource->func_graph(); MS_EXCEPTION_IF_NULL(func_graph); MS_LOG(DEBUG) << "UpdataParamNodeDefaultInput for func graph(" << func_graph->ToString() << ") phase(" << phase @@ -1096,13 +1097,13 @@ void ExecutorPy::UpdataParamNodeDefaultInput(const std::string &phase, } } -void ExecutorPy::RunInitGraph(const py::dict &init_params, const std::string &phase) const { +void GraphExecutorPy::RunInitGraph(const py::dict &init_params, const std::string &phase) const { #ifdef ENABLE_GE RunGEInitGraph(init_params, phase); #endif } -void ExecutorPy::PyExePath(const py::object &py_exe_path) { +void GraphExecutorPy::PyExePath(const py::object &py_exe_path) { if (!py::isinstance(py_exe_path)) { MS_LOG(EXCEPTION) << "Failed, py_exe_path input is not a str"; } @@ -1111,7 +1112,7 @@ void ExecutorPy::PyExePath(const py::object &py_exe_path) { ms_context->set_param(MS_CTX_PYTHON_EXE_PATH, py_exe_path_s); } -void ExecutorPy::KernelBuildServerDir(const py::object &kernel_build_server_dir) { +void GraphExecutorPy::KernelBuildServerDir(const py::object &kernel_build_server_dir) { if (!py::isinstance(kernel_build_server_dir)) { MS_LOG(EXCEPTION) << "Failed, kernel_build_server_dir input is not a str"; } @@ -1423,7 +1424,7 @@ void ClearResAtexit() { abstract::ClearPrimEvaluatorMap(); pipeline::GetMethodMap().clear(); pipeline::GetAttrMap().clear(); - pipeline::ExecutorPy::ClearRes(); + pipeline::GraphExecutorPy::ClearRes(); pipeline::ReclaimOptimizer(); pynative::PynativeExecutor::GetInstance()->ClearRes(); opt::python_pass::PyPassManager::GetInstance()->ClearRes(); diff --git a/mindspore/ccsrc/pipeline/jit/pipeline.h b/mindspore/ccsrc/pipeline/jit/pipeline.h index bf30b002ddf..83094341819 100644 --- a/mindspore/ccsrc/pipeline/jit/pipeline.h +++ b/mindspore/ccsrc/pipeline/jit/pipeline.h @@ -49,7 +49,7 @@ class Pipeline { ~Pipeline() = default; - void Run(const std::string &phase_s); + void Run(const std::string &phase); ResourcePtr resource() { return resource_; } @@ -59,29 +59,29 @@ class Pipeline { }; // A function pipeline. 
-class ExecutorPy : public std::enable_shared_from_this { +class GraphExecutorPy : public std::enable_shared_from_this { public: - static std::shared_ptr GetInstance() { + static std::shared_ptr GetInstance() { std::lock_guard i_lock(instance_lock_); if (executor_ == nullptr) { - executor_ = std::shared_ptr(new (std::nothrow) ExecutorPy()); + executor_ = std::shared_ptr(new (std::nothrow) GraphExecutorPy()); } return executor_; } - ~ExecutorPy(); + ~GraphExecutorPy(); const std::string &phase() const { return phase_; } - void SaveCompiledGraph(const std::string &phase_s); - bool CompileInner(const py::object &obj, const py::tuple &args, const py::object &phase, bool use_vm, + void SaveCompiledGraph(const std::string &phase); + bool CompileInner(const py::object &source_obj, const py::tuple &args, const py::object &phase_obj, bool use_vm, const std::string &queue_name); - bool Compile(const py::object &obj, const py::tuple &args, const py::object &phase, bool use_vm, + bool Compile(const py::object &source_obj, const py::tuple &args, const py::object &phase_obj, bool use_vm, const std::string &queue_name); void ProcessVmArg(const py::tuple &args, const std::string &phase, VectorRef *arg_list); // for pynative mode when use_vm is on - py::object Run(const py::tuple &args, const py::object &phase); + py::object Run(const py::tuple &args, const py::object &phase_obj); ResourcePtr GetResource(const std::string &phase); FuncGraphPtr GetFuncGraph(const std::string &phase); FuncGraphPtr GetGradGraph(const std::string &phase); @@ -105,17 +105,17 @@ class ExecutorPy : public std::enable_shared_from_this { void SetNumOpsInfo(size_t); py::dict GetAllreduceFusion(const std::string &phase); void DelNetRes(const std::string &id); - void ReleaseResource(const py::object &phase); + void ReleaseResource(const py::object &phase_obj); static void ClearRes(); static bool GetDebugTerminate() { return debugger_terminate_; } static void DebugTerminate(bool val) { debugger_terminate_ = val; } void TerminateDebugger(); std::map> FetchInfoForQuantExport( - const std::string &phase_s); + const std::string &phase); private: - ExecutorPy(); + GraphExecutorPy(); void ConvertObjectToTensors(const py::dict &dict, std::map *tensors); void GetWeightInfo(const CNodePtr &root_node, const AnfNodePtr &weight_node, std::map> *fake_quant_table); @@ -125,18 +125,18 @@ class ExecutorPy : public std::enable_shared_from_this { static std::vector FilterActions(const std::vector &actions, const std::string &phase); std::map info_; - static std::shared_ptr executor_; + static std::shared_ptr executor_; static std::mutex instance_lock_; static bool debugger_terminate_; std::map stra_dict_; std::string phase_ = ""; std::map phase_to_num_op_info_; }; -using ExecutorPyPtr = std::shared_ptr; +using GraphExecutorPyPtr = std::shared_ptr; void CheckArgsValid(const py::tuple &args); // Generate a key for mapping function graph -py::tuple GenerateKey(const std::string &name, const std::unordered_map &defaults); +py::object GenerateArgumentsKey(const std::unordered_map &args); py::bool_ VerifyInputSignature(const py::list &input_signature, const py::tuple &inputs); bool InitDistribute(const std::map &options); diff --git a/mindspore/ccsrc/pipeline/jit/pipeline_ge.cc b/mindspore/ccsrc/pipeline/jit/pipeline_ge.cc index 82e517d144a..2115b8df5a2 100644 --- a/mindspore/ccsrc/pipeline/jit/pipeline_ge.cc +++ b/mindspore/ccsrc/pipeline/jit/pipeline_ge.cc @@ -442,7 +442,7 @@ std::shared_ptr DoExecGraph(const FuncGraphPtr &graph, const std::ve void 
ProcessGeArg(const std::map &info, const py::tuple &args, const std::string &phase, std::vector *inputs) { - // check the arg and use the ExecutorPy args + // check the arg and use the GraphExecutorPy args std::size_t size = args.size(); if (info.count(phase) == 0) { diff --git a/mindspore/ccsrc/pipeline/jit/resource.cc b/mindspore/ccsrc/pipeline/jit/resource.cc index 65a6597ded0..74a40a7976d 100644 --- a/mindspore/ccsrc/pipeline/jit/resource.cc +++ b/mindspore/ccsrc/pipeline/jit/resource.cc @@ -246,7 +246,7 @@ BuiltInTypeMap &GetAttrMap() { Resource::Resource(const py::object &obj) : engine_(std::make_shared(abstract::GetPrimEvaluatorConstructors(), manager_)), - input_(obj), + source_input_(obj), is_cleaned_(false) {} Resource::~Resource() { @@ -313,7 +313,7 @@ Any Resource::GetAttrPtr(const TypeId &type, const std::string &name) { void Resource::Clean() { // AbstractTensor->elements() will be saved in AbstractBasePtrList args_spec_.clear(); - input_ = py::none(); + source_input_ = py::none(); // Context with AbstractBasePtrList may be saved in GraphEvaluator // some Evaluator like ResolveEvaluator may save Python object in cache, // it should be cleaned before Python Interpreter destructed. diff --git a/mindspore/ccsrc/pipeline/jit/resource.h b/mindspore/ccsrc/pipeline/jit/resource.h index 9a859efd3f3..6851ad0e025 100644 --- a/mindspore/ccsrc/pipeline/jit/resource.h +++ b/mindspore/ccsrc/pipeline/jit/resource.h @@ -67,7 +67,7 @@ class Resource : public ResourceBase { static Any GetAttrPtr(const TypeId &type, const std::string &name); - const py::object &input() const { return input_; } + const py::object &source_input() const { return source_input_; } FuncGraphPtr func_graph() const { return func_graph_; } void set_func_graph(const FuncGraphPtr &func_graph) { func_graph_ = func_graph; } @@ -84,7 +84,7 @@ class Resource : public ResourceBase { bool vm_loop_flag() { return vm_loop_flag_; } int64_t loop_size() { return loop_size_; } // Reclaim resource and clear the cache. - // ExecutorPy::Compile() can be called multiple times, so cache + // GraphExecutorPy::Compile() can be called multiple times, so cache // should be cleared. void Clean(); @@ -92,7 +92,8 @@ class Resource : public ResourceBase { abstract::AnalysisEnginePtr engine_; FuncGraphPtr func_graph_; abstract::AbstractBasePtrList args_spec_; - py::object input_; + // The source obj to compile, usually a `Cell` or `ms_function` decorated function. + py::object source_input_; bool is_cleaned_; // The func_graph_ is loaded from mindir bool is_load_{false}; diff --git a/mindspore/ccsrc/pipeline/pynative/pynative_execute.cc b/mindspore/ccsrc/pipeline/pynative/pynative_execute.cc index 6fae09ab44f..c7a78263784 100644 --- a/mindspore/ccsrc/pipeline/pynative/pynative_execute.cc +++ b/mindspore/ccsrc/pipeline/pynative/pynative_execute.cc @@ -2942,7 +2942,7 @@ void GradExecutor::GradMsFunction(const py::object &out, const py::args &args) { // Get ms_function func graph and grad graph. 
const auto &phase = graph_phase(); MS_LOG(DEBUG) << "ms_function func graph phase: " << phase; - auto executor = pipeline::ExecutorPy::GetInstance(); + auto executor = pipeline::GraphExecutorPy::GetInstance(); FuncGraphPtr ms_func_graph = executor->GetFuncGraph(phase); MS_EXCEPTION_IF_NULL(ms_func_graph); FuncGraphPtr grad_graph = executor->GetGradGraph(phase); diff --git a/mindspore/common/api.py b/mindspore/common/api.py index 02fef40b2f6..c2e48b85305 100644 --- a/mindspore/common/api.py +++ b/mindspore/common/api.py @@ -25,7 +25,7 @@ from mindspore import context from mindspore import log as logger from mindspore._extends.remote import kernel_build_server from .tensor import Tensor as MsTensor -from .._c_expression import generate_key, Executor_, Tensor, MetaTensor, PynativeExecutor_ +from .._c_expression import generate_arguments_key, GraphExecutor_, Tensor, MetaTensor, PynativeExecutor_ from .._c_expression import verify_inputs_signature, init_exec_dataset, _set_dataset_mode_config, init_pipeline from ..parallel._ps_context import _is_role_pserver from ..parallel._utils import _get_device_num, _get_global_rank, _need_to_full, _check_full_batch, _to_full_tensor, \ @@ -92,7 +92,7 @@ def _wrap_func(fn): def _exec_init_graph(obj, init_phase): """Execute the parameter initializer graph.""" - inst_executor = Executor_.get_instance() + inst_executor = GraphExecutor_.get_instance() param_dict = OrderedDict() for name, param in obj.parameters_dict().items(): if not param.is_init: @@ -104,11 +104,11 @@ def _exec_init_graph(obj, init_phase): inst_executor.run_init_graph(param_dict, init_phase) -class _MindSporeFunction: +class _MindsporeFunctionExecutor: """ - Represents a function compiled by mind expression. + Represents a function compiled by graph compiler. - _MindSporeFunction will compile the original function for every combination + _MindsporeFunctionExecutor will compile the original function for every combination of argument types and shapes it is given (as well as their values, optionally). Args: @@ -127,22 +127,23 @@ class _MindSporeFunction: self.obj = None if hasattr(obj, fn.__name__): self.obj = obj - self._executor = Executor_.get_instance() + self._graph_executor = GraphExecutor_.get_instance() def build_data_init_graph(self, graph_name): """Build GE data graph and init graph for the given graph name.""" if self.obj is None: logger.warning("Make sure parameter should not be used in function") para_dict = OrderedDict() - self._executor.build_data_graph(para_dict, graph_name) + self._graph_executor.build_data_graph(para_dict, graph_name) return - self._executor.build_data_graph(self.obj.parameters_dict(), graph_name, self.obj.parameters_broadcast_dict()) + self._graph_executor.build_data_graph(self.obj.parameters_dict(), graph_name, + self.obj.parameters_broadcast_dict()) init_phase = "init_subgraph" + graph_name[graph_name.find("."):] _exec_init_graph(self.obj, init_phase) def compile(self, args_list, arg_names, method_name): """Returns pipeline for the given args.""" - # verify the signature for both function and method + # Verify the signature for both function and method if self.input_signature is not None: signatures = [] for sig_spec in self.input_signature: @@ -155,35 +156,32 @@ class _MindSporeFunction: dic = dict(zip(arg_names, args_list)) generate_name = self.fn.__module__ + "." + self.fn.__name__ + "." + self.fn.__code__.co_filename + "." + \ - str(self.fn.__code__.co_firstlineno) + str(self.fn.__code__.co_firstlineno) + '.' 
+ str(id(self.fn)) self.fn.__parse_method__ = method_name - # add key with obj - identify = "" - if self.obj is None: - identify = str(id(self.fn)) - else: + # Add key with obj + if self.obj is not None: + if self.obj.__module__ != self.fn.__module__: + logger.error(f'`obj` module not equal to `fn` module: {self.obj.__module__}, {self.fn.__module__}') self.obj.__parse_method__ = method_name - generate_name = self.obj.__module__ + "." + generate_name - identify = str(self.obj.create_time) + "_" + str(id(self.obj)) + '_' + str(id(self.fn)) + generate_name = generate_name + '.' + str(self.obj.create_time) + '.' + str(id(self.obj)) - generate_name = generate_name + "." + identify - key = generate_key(generate_name, dic) - phase = str(key[1]) + generate_name - if key not in ms_compile_cache.keys(): + key = generate_arguments_key(dic) + phase = generate_name + '.' + str(key) + if phase not in ms_compile_cache.keys(): is_compile = False if self.obj is None: - is_compile = self._executor.compile(self.fn, args_list, phase, True, "") + is_compile = self._graph_executor.compile(self.fn, args_list, phase, True, "") else: - is_compile = self._executor.compile(self.obj, args_list, phase, True, "") + is_compile = self._graph_executor.compile(self.obj, args_list, phase, True, "") if not is_compile: raise RuntimeError("Executor compile failed.") if context.get_context("enable_ge"): self.build_data_init_graph(phase) - ms_compile_cache[key] = phase + ms_compile_cache[phase] = phase return phase - return ms_compile_cache[key] + return phase @_wrap_func def __call__(self, *args): @@ -208,12 +206,13 @@ class _MindSporeFunction: new_inputs.append(i) elif context.get_context("grad_for_scalar") and isinstance(i, (int, float)): new_inputs.append(i) - output = self._executor(tuple(new_inputs), phase) + output = self._graph_executor(tuple(new_inputs), phase) if context.get_context("mode") == context.PYNATIVE_MODE: - _pynative_exec.set_graph_phase(phase) - _pynative_exec.grad_ms_function(output, *new_inputs) + _pynative_executor.set_graph_phase(phase) + _pynative_executor.grad_ms_function(output, *new_inputs) output = output[0] + return output @@ -283,7 +282,7 @@ def ms_function(fn=None, obj=None, input_signature=None): process_obj = None if args and not isinstance(args[0], MsTensor) and hasattr(args[0], func.__name__): process_obj = args[0] - out = _MindSporeFunction(func, input_signature, process_obj)(*args) + out = _MindsporeFunctionExecutor(func, input_signature, process_obj)(*args) return out return staging_specialize @@ -420,9 +419,9 @@ class _PynativeExecutor: return self._executor(obj, args) -class _Executor: +class _CellGraphExecutor: """ - An executor used to compile/manage/run graph. + An executor used to compile/manage/run graph for a Cell. Including data_graph, train_graph, eval_graph and predict graph. 
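With this change the phase string alone identifies one (function, object, arguments) compilation, so it can serve directly as the ms_compile_cache key; the old (name, args) tuple returned by generate_key is gone. A minimal self-contained sketch of how the key is composed, with stand-ins for everything the patch does not define (make_phase, demo and the literal arguments key are illustrative only; the real key comes from the C++ generate_arguments_key binding):

    ms_compile_cache = {}

    def make_phase(fn, obj, arguments_key):
        # Mirrors generate_name above: module, function name, file, first line, id(fn).
        name = (fn.__module__ + "." + fn.__name__ + "." + fn.__code__.co_filename
                + "." + str(fn.__code__.co_firstlineno) + "." + str(id(fn)))
        if obj is not None:
            # A bound method also mixes in the owning object's identity.
            name = name + "." + str(obj.create_time) + "." + str(id(obj))
        return name + "." + str(arguments_key)

    def demo(x):
        return x

    # 123 stands in for generate_arguments_key(dic), which hashes only the
    # argument types/shapes (and, optionally, their values).
    phase = make_phase(demo, None, 123)
    if phase not in ms_compile_cache:
        ms_compile_cache[phase] = phase  # compile once; identical later calls reuse it

The design point is that generate_arguments_key now depends only on the argument dictionary, while everything identity-related lives in the readable phase prefix.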
@@ -437,10 +436,10 @@ class _Executor: def __init__(self): # create needed graph by lazy mode self.is_init = False - self._executor = Executor_.get_instance() + self._graph_executor = GraphExecutor_.get_instance() self.compile_cache = {} - self._executor.set_py_exe_path(sys.executable) - self._executor.set_kernel_build_server_dir(os.path.split(kernel_build_server.__file__)[0] + os.sep) + self._graph_executor.set_py_exe_path(sys.executable) + self._graph_executor.set_kernel_build_server_dir(os.path.split(kernel_build_server.__file__)[0] + os.sep) self.queue_name = "" def init_dataset(self, queue_name, dataset_size, batch_size, dataset_types, dataset_shapes, @@ -472,7 +471,7 @@ class _Executor: return True def _build_data_graph(self, obj, phase): - self._executor.build_data_graph(obj.parameters_dict(), phase, obj.parameters_broadcast_dict()) + self._graph_executor.build_data_graph(obj.parameters_dict(), phase, obj.parameters_broadcast_dict()) def _set_dataset_mode(self, args_list): """set dataset mode.""" @@ -501,12 +500,9 @@ class _Executor: args_names, args_list = _generate_pip_args(obj, *args) dic = dict(zip(args_names, args_list)) - key = generate_key(phase, dic) - obj.phase_prefix = str(key[1]) - if 'export' in phase: - phase = phase + '.' + obj.phase_prefix + '.' + str(obj.create_time) + '.' + str(id(obj)) - else: - phase = obj.phase_prefix + phase + '.' + str(obj.create_time) + '.' + str(id(obj)) + key = generate_arguments_key(dic) + obj.arguments_key = str(key) + phase = phase + '.' + str(obj.create_time) + '.' + str(id(obj)) + '.' + obj.arguments_key if phase in self.compile_cache.keys(): logger.debug("%r graph has existed.", phase) @@ -524,11 +520,11 @@ class _Executor: enable_debug_runtime = context.get_context("enable_debug_runtime") enable_ge = context.get_context("enable_ge") use_vm = not enable_ge or (enable_debug_runtime and context.get_context("mode") == context.PYNATIVE_MODE) - result = self._executor.compile(obj, args_list, phase, use_vm, self.queue_name) + result = self._graph_executor.compile(obj, args_list, phase, use_vm, self.queue_name) self.compile_cache[phase] = phase if not result: raise RuntimeError("Executor compile failed.") - graph = self._executor.get_func_graph(phase) + graph = self._graph_executor.get_func_graph(phase) if graph is None: raise RuntimeError("Compile graph failed for phase {}.".format(phase)) @@ -541,7 +537,6 @@ class _Executor: # the following GE init process is not needed when use vm or ms backend if enable_ge: self._build_data_graph(obj, phase) - if "export" not in phase: init_phase = "init_subgraph." + str(obj.create_time) + "." 
+ str(id(obj)) _exec_init_graph(obj, init_phase) @@ -559,8 +554,8 @@ class _Executor: self._updata_param_node_default_input(phase, replace) return - obj.parameter_layout_dict = self._executor.get_parameter_layout(phase) - obj.parallel_parameter_name_list = self._executor.get_parallel_parameter_name_list(phase) + obj.parameter_layout_dict = self._graph_executor.get_parameter_layout(phase) + obj.parallel_parameter_name_list = self._graph_executor.get_parallel_parameter_name_list(phase) replace = obj.init_parameters_data(auto_parallel_mode=True) if _get_pipeline_stages() > 1 and (not hasattr(obj, "is_first_iteration") or not obj.is_first_iteration): obj.remove_redundant_parameters() @@ -575,19 +570,19 @@ class _Executor: def _updata_param_node_default_input(self, phase, replace): new_param = {x.name: replace[x] for x in replace if id(x) != id(replace[x])} - return self._executor.updata_param_node_default_input(phase, new_param) + return self._graph_executor.updata_param_node_default_input(phase, new_param) def _get_shard_strategy(self, obj): - real_phase = obj.phase_prefix + obj.phase + '.' + str(obj.create_time) + '.' + str(id(obj)) - return self._executor.get_strategy(real_phase) + real_phase = obj.phase + '.' + str(obj.create_time) + '.' + str(id(obj)) + '.' + obj.arguments_key + return self._graph_executor.get_strategy(real_phase) def _get_num_parallel_ops(self, obj): - real_phase = obj.phase_prefix + obj.phase + '.' + str(obj.create_time) + '.' + str(id(obj)) - return self._executor.get_num_parallel_ops(real_phase) + real_phase = obj.phase + '.' + str(obj.create_time) + '.' + str(id(obj)) + '.' + obj.arguments_key + return self._graph_executor.get_num_parallel_ops(real_phase) def _get_allreduce_fusion(self, obj): - real_phase = obj.phase_prefix + obj.phase + '.' + str(obj.create_time) + '.' + str(id(obj)) - return self._executor.get_allreduce_fusion(real_phase) + real_phase = obj.phase + '.' + str(obj.create_time) + '.' + str(id(obj)) + '.' + obj.arguments_key + return self._graph_executor.get_allreduce_fusion(real_phase) def has_compiled(self, phase='predict'): """ @@ -599,7 +594,7 @@ class _Executor: Returns: bool, specifies whether the specific graph has been compiled. """ - return self._executor.has_compiled(phase) + return self._graph_executor.has_compiled(phase) def __call__(self, obj, *args, phase='predict'): if context.get_context("precompile_only") or _is_role_pserver(): @@ -615,7 +610,7 @@ class _Executor: raise RuntimeError('Process method parameter is failure') args_list = tuple(arguments_dict.values()) obj.__parse_method__ = parse_method - return self._executor(args_list, phase) + return self._graph_executor(args_list, phase) def run(self, obj, *args, phase='predict'): """ @@ -628,23 +623,23 @@ class _Executor: Tensor/Tuple, return execute result. """ if phase == 'save': - return self._executor((), phase + '.' + str(obj.create_time) + '.' + str(id(obj))) + return self._graph_executor((), phase + '.' + str(obj.create_time) + '.' + str(id(obj))) - phase_real = obj.phase_prefix + phase + '.' + str(obj.create_time) + '.' + str(id(obj)) + phase_real = phase + '.' + str(obj.create_time) + '.' + str(id(obj)) + '.' 
+ obj.arguments_key if self.has_compiled(phase_real): return self._exec_pip(obj, *args, phase=phase_real) raise KeyError('{} graph is not exist.'.format(phase_real)) def del_net_res(self, net_id): - self._executor.del_net_res(net_id) + self._graph_executor.del_net_res(net_id) def _get_func_graph_proto(self, obj, exec_id, ir_type="onnx_ir", use_prefix=False): """Get graph proto from pipeline.""" if use_prefix: - exec_id = obj.phase_prefix + exec_id - if self._executor.has_compiled(exec_id) is False: + exec_id = exec_id + '.' + obj.arguments_key + if self._graph_executor.has_compiled(exec_id) is False: return None - return self._executor.get_func_graph_proto(exec_id, ir_type) + return self._graph_executor.get_func_graph_proto(exec_id, ir_type) def export(self, file_name, graph_id): """ @@ -659,12 +654,12 @@ class _Executor: def fetch_info_for_quant_export(self, exec_id): """Get graph proto from pipeline.""" - if self._executor.has_compiled(exec_id) is False: + if self._graph_executor.has_compiled(exec_id) is False: return None - return self._executor.fetch_info_for_quant_export(exec_id) + return self._graph_executor.fetch_info_for_quant_export(exec_id) -_executor = _Executor() -_pynative_exec = _PynativeExecutor() +_cell_graph_executor = _CellGraphExecutor() +_pynative_executor = _PynativeExecutor() __all__ = ['ms_function'] diff --git a/mindspore/compression/export/quant_export.py b/mindspore/compression/export/quant_export.py index 4c6a5468fc0..4badbc619ce 100644 --- a/mindspore/compression/export/quant_export.py +++ b/mindspore/compression/export/quant_export.py @@ -22,7 +22,7 @@ from ... import nn, ops from ..._checkparam import Validator from ...common import Tensor from ...common import dtype as mstype -from ...common.api import _executor +from ...common.api import _cell_graph_executor as _executor from ...common.parameter import Parameter from ...nn import Cell from ...nn.layer import quant diff --git a/mindspore/nn/cell.py b/mindspore/nn/cell.py index 54bcf01f932..b43f05de83b 100755 --- a/mindspore/nn/cell.py +++ b/mindspore/nn/cell.py @@ -30,7 +30,7 @@ from .. import context from .._c_expression import init_pipeline, Cell_, FuncGraph from .._checkparam import Validator from ..common import dtype as mstype -from ..common.api import _executor, _pynative_exec +from ..common.api import _cell_graph_executor, _pynative_executor from ..common.parameter import Parameter, ParameterTuple from ..common.tensor import Tensor from ..ops.operations import HookBackward, Cast @@ -98,7 +98,7 @@ class Cell(Cell_): self._parallel_parameter_name_list = () self._parallel_parameter_merge_net_dict = {} self._create_time = int(time.time() * 1e9) - self.phase_prefix = "" + self.arguments_key = "" self.parameter_broadcast_done = False init_pipeline() @@ -264,8 +264,8 @@ class Cell(Cell_): def get_func_graph_proto(self): """Return graph binary proto.""" - return _executor._get_func_graph_proto(self, self.phase + "." + str(self.create_time) + '.' + str(id(self)), - "anf_ir", True) + exec_id = self.phase + "." + str(self.create_time) + '.' 
+ str(id(self)) + return _cell_graph_executor._get_func_graph_proto(self, exec_id, "anf_ir", True) def __getattr__(self, name): if '_params' in self.__dict__: @@ -295,9 +295,9 @@ class Cell(Cell_): def __del__(self): if context.get_context is not None and context.get_context("mode") == context.PYNATIVE_MODE: - _pynative_exec.del_cell(str(id(self))) + _pynative_executor.del_cell(str(id(self))) if hasattr(self, "_create_time"): - _executor.del_net_res(str(self._create_time)) + _cell_graph_executor.del_net_res(str(self._create_time)) def __delattr__(self, name): if name in self._params: @@ -338,7 +338,7 @@ class Cell(Cell_): def do_parameter_broadcast(self): if context.get_auto_parallel_context("parallel_mode") == ParallelMode.DATA_PARALLEL: if not self.parameter_broadcast_done: - _pynative_exec.parameter_broadcast(self, self.phase, self._auto_parallel_mode) + _pynative_executor.parameter_broadcast(self, self.phase, self._auto_parallel_mode) self.parameter_broadcast_done = True def run_construct(self, cast_inputs, kwargs): @@ -381,6 +381,8 @@ class Cell(Cell_): bound_args = inspect.signature(self.construct).bind(*inputs, **kwargs) inputs = bound_args.args kwargs = bound_args.kwargs + + # Run in Graph mode. if context.get_context("mode") == context.GRAPH_MODE: self._check_construct_args(*inputs, **kwargs) if self.enable_hook: @@ -388,13 +390,14 @@ class Cell(Cell_): out = self.compile_and_run(*inputs) return out + # Run in PyNative mode. self.do_parameter_broadcast() for item in inputs: if isinstance(item, numpy.ndarray): raise TypeError("The cell inputs should not be numpy arrays.") if self.requires_grad is True: - _pynative_exec.set_grad_flag(True) - _pynative_exec.new_graph(self, *inputs, **kwargs) + _pynative_executor.set_grad_flag(True) + _pynative_executor.new_graph(self, *inputs, **kwargs) cast_inputs = list() if hasattr(self, "_mindspore_flags"): if self._mindspore_flags.get('fp16'): @@ -406,7 +409,7 @@ class Cell(Cell_): output = self.run_construct(cast_inputs, kwargs) if isinstance(output, Parameter): output = output.data - _pynative_exec.end_graph(self, output, *inputs, **kwargs) + _pynative_executor.end_graph(self, output, *inputs, **kwargs) return output def _add_attr(self, name, value): @@ -551,7 +554,7 @@ class Cell(Cell_): """ Replace parameters with sliced tensors by parallel strategies. - Please refer to the usage in source code of `mindspore.common._Executor.compile`. + Please refer to the usage in source code of `mindspore.common._CellGraphExecutor.compile`. Args: params (dict): The parameters dictionary used for initializing the data graph. @@ -635,7 +638,7 @@ class Cell(Cell_): Args: inputs (tuple): Inputs of the Cell object. 
""" - _executor.compile(self, *inputs, phase=self.phase, auto_parallel_mode=self._auto_parallel_mode) + _cell_graph_executor.compile(self, *inputs, phase=self.phase, auto_parallel_mode=self._auto_parallel_mode) def compile_and_run(self, *inputs): """ @@ -659,19 +662,19 @@ class Cell(Cell_): if self._auto_parallel_mode: if new_inputs and isinstance(new_inputs[0], Tensor) and inputs[0].virtual_flag: - # get parallel inputs in sink mode, parallel inputs set in _executor.compile + # get parallel inputs in sink mode, parallel inputs set in _cell_graph_executor.compile parallel_inputs_run = self._parallel_inputs_run else: parallel_inputs_run = new_inputs - return _executor(self, *parallel_inputs_run, phase=self.phase) - return _executor(self, *new_inputs, phase=self.phase) + return _cell_graph_executor(self, *parallel_inputs_run, phase=self.phase) + return _cell_graph_executor(self, *new_inputs, phase=self.phase) def auto_parallel_compile_and_run(self): return self._auto_parallel_compile_and_run def exec_checkpoint_graph(self): """Executes saving checkpoint graph operation.""" - _executor(self, phase='save') + _cell_graph_executor(self, phase='save') def insert_param_to_cell(self, param_name, param, check_name=True): """ diff --git a/mindspore/ops/composite/base.py b/mindspore/ops/composite/base.py index 9265058f8bd..a2f2ed4df95 100644 --- a/mindspore/ops/composite/base.py +++ b/mindspore/ops/composite/base.py @@ -23,7 +23,7 @@ from mindspore import context from ..._c_expression import EnvInstance_, GradOperation_, HyperMap_, Map_, MultitypeFuncGraph_, Tail_, \ TupleAdd_, TupleSlice_, UnpackCall_, ZipOperation_, ListAppend_, TupleGetItemTensor_ from ...common import dtype as mstype -from ...common.api import ms_function, _pynative_exec, _wrap_func +from ...common.api import ms_function, _pynative_executor, _wrap_func from ..primitive import Primitive from ..operations import _grad_ops from .. 
import operations as P @@ -341,14 +341,14 @@ class GradOperation(GradOperation_): new_kwargs = kwargs.copy() new_kwargs.pop('sens') if isinstance(fn, FunctionType): - if not _pynative_exec.check_run(fn, *args, **new_kwargs): - _pynative_exec.set_grad_flag(True) - _pynative_exec.new_graph(fn, *args, **new_kwargs) + if not _pynative_executor.check_run(fn, *args, **new_kwargs): + _pynative_executor.set_grad_flag(True) + _pynative_executor.new_graph(fn, *args, **new_kwargs) output = fn(*args, **new_kwargs) - _pynative_exec.end_graph(fn, output, *args, **new_kwargs) + _pynative_executor.end_graph(fn, output, *args, **new_kwargs) else: # Check if fn have run already - if not _pynative_exec.check_run(fn, *args, **new_kwargs): + if not _pynative_executor.check_run(fn, *args, **new_kwargs): fn.set_grad() fn(*args, **new_kwargs) @@ -368,12 +368,12 @@ class GradOperation(GradOperation_): else: @_wrap_func def after_grad(*args, **kwargs): - if _pynative_exec.check_graph(fn, *args, **kwargs): + if _pynative_executor.check_graph(fn, *args, **kwargs): print("Another grad step is running") self._pynative_forward_run(args, kwargs, fn) - _pynative_exec.grad(grad_, fn, weights, *args, **kwargs) - out = _pynative_exec(fn, *args, **kwargs) - _pynative_exec.clear_grad(fn, *args, **kwargs) + _pynative_executor.grad(grad_, fn, weights, *args, **kwargs) + out = _pynative_executor(fn, *args, **kwargs) + _pynative_executor.clear_grad(fn, *args, **kwargs) return out self.grad_fn = after_grad self.fn = fn diff --git a/mindspore/train/_utils.py b/mindspore/train/_utils.py index 413e27366aa..91866a5d580 100644 --- a/mindspore/train/_utils.py +++ b/mindspore/train/_utils.py @@ -22,7 +22,7 @@ from mindspore.common.tensor import Tensor from mindspore.common.dtype import dtype_to_nptype, pytype_to_dtype from mindspore.common import dtype as mstype from mindspore import log as logger -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.train.mind_ir_pb2 import ModelProto as mindir_model from mindspore.train.checkpoint_pb2 import Checkpoint from mindspore.train.node_strategy_pb2 import ParallelStrategyMap as ckpt_strategy @@ -64,13 +64,13 @@ def _exec_datagraph(exec_dataset, dataset_size, phase='dataset', create_data_inf send_epoch_end = bool(dataset_size == -1) exec_dataset = exec_dataset.device_que(send_epoch_end=send_epoch_end, create_data_info_queue=create_data_info_queue) - _executor.init_dataset(exec_dataset.queue_name, - dataset_size, - batch_size, - dataset_types, - dataset_shapes, - input_indexs, - phase=phase) + _cell_graph_executor.init_dataset(exec_dataset.queue_name, + dataset_size, + batch_size, + dataset_types, + dataset_shapes, + input_indexs, + phase=phase) return exec_dataset diff --git a/mindspore/train/model.py b/mindspore/train/model.py index a0dc92fde17..64c9b72b654 100644 --- a/mindspore/train/model.py +++ b/mindspore/train/model.py @@ -37,7 +37,7 @@ from ..context import ParallelMode from ..parallel._cost_model_context import _set_multi_subgraphs from .dataset_helper import DatasetHelper, connect_network_with_dataset from . 
import amp -from ..common.api import _pynative_exec +from ..common.api import _pynative_executor def _transfer_tensor_to_tuple(inputs): @@ -53,7 +53,7 @@ def _transfer_tensor_to_tuple(inputs): class _StepSync(Callback): @staticmethod def step_end(run_context): - _pynative_exec.sync() + _pynative_executor.sync() class Model: diff --git a/mindspore/train/serialization.py b/mindspore/train/serialization.py index e4d54884d7a..cf70d6da262 100644 --- a/mindspore/train/serialization.py +++ b/mindspore/train/serialization.py @@ -37,7 +37,7 @@ from mindspore.train.mind_ir_pb2 import GraphProto as graph_proto from mindspore.common.tensor import Tensor from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor as _executor from mindspore.common import dtype as mstype from mindspore._checkparam import check_input_data, Validator from mindspore.compression.export import quant_export diff --git a/model_zoo/research/3d/DeepLM/lm_solver/solver.py b/model_zoo/research/3d/DeepLM/lm_solver/solver.py index 30873c6916b..76745d99426 100644 --- a/model_zoo/research/3d/DeepLM/lm_solver/solver.py +++ b/model_zoo/research/3d/DeepLM/lm_solver/solver.py @@ -20,7 +20,7 @@ from mindspore.ops import operations as P from mindspore.ops import functional as F from mindspore.ops.composite import GradOperation from mindspore.nn import Cell -from mindspore.common.api import _pynative_exec +from mindspore.common.api import _pynative_executor from . import jacobian as jb from . import listvec as lv @@ -231,7 +231,7 @@ class LMSolver: return mem def timing(self): - _pynative_exec.sync() + _pynative_executor.sync() return time() def initialize_variables(self): diff --git a/tests/mindspore_test_framework/utils/block_util.py b/tests/mindspore_test_framework/utils/block_util.py index d6f4f609787..cf76a144d0a 100644 --- a/tests/mindspore_test_framework/utils/block_util.py +++ b/tests/mindspore_test_framework/utils/block_util.py @@ -21,7 +21,7 @@ import numpy as np from mindspore import ParameterTuple from mindspore import nn, context -from mindspore.common.api import _executor, ms_function +from mindspore.common.api import _cell_graph_executor, ms_function from mindspore.common.tensor import Tensor from mindspore.ops import functional as F from mindspore.ops import operations as P @@ -45,7 +45,7 @@ def set_block_param_with_rand(net, rand_func=None): def compile_block(net, *inputs, rand_func=None, training=True): set_block_training(net, training) set_block_param_with_rand(net, rand_func) - return _executor.compile(net, *inputs) + return _cell_graph_executor.compile(net, *inputs) def run_block(net, *inputs, rand_func=None, training=True): diff --git a/tests/ops_common.py b/tests/ops_common.py index 7e042f57d46..ea2b8e515f3 100644 --- a/tests/ops_common.py +++ b/tests/ops_common.py @@ -20,7 +20,7 @@ import mindspore.ops.composite as C import mindspore.ops.functional as F import mindspore.ops.operations as P from mindspore import Tensor -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True) @@ -252,18 +252,18 @@ def get_loss_fun(construct_net, num_input, output_index): def build_construct_graph(net, *inputs, execute=True): net.set_train() - _executor.compile(net, *inputs) + _cell_graph_executor.compile(net, *inputs) if execute: - _executor(net, inputs) + _cell_graph_executor(net, inputs) 
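The ops_common.py helpers above are the pattern most of the updated tests follow; a standalone usage sketch of the renamed entry point (TinyNet is a made-up example Cell, not part of this patch):

    import numpy as np
    import mindspore.nn as nn
    from mindspore import Tensor
    from mindspore.common.api import _cell_graph_executor

    class TinyNet(nn.Cell):
        def construct(self, x):
            # Any trivial graph is enough to exercise compilation.
            return x + x

    net = TinyNet()
    net.set_train()
    # Same call chain as build_construct_graph: compile first, then execute.
    _cell_graph_executor.compile(net, Tensor(np.ones([2, 2], np.float32)))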
def build_backward_graph(net, output_shapes, inputs, execute=True): inputs = append_sens_to_inputs(output_shapes, inputs) net = gen_backward_net(net, len(inputs) - 1) net.set_train() - _executor.compile(net, inputs) + _cell_graph_executor.compile(net, inputs) if execute: - _executor(net, inputs) + _cell_graph_executor(net, inputs) def convert(shp, dtype=np.float32, scale=6): diff --git a/tests/perf_test/test_lenet.py b/tests/perf_test/test_lenet.py index 8b61e9be5ea..b2b741ce25e 100644 --- a/tests/perf_test/test_lenet.py +++ b/tests/perf_test/test_lenet.py @@ -22,7 +22,7 @@ import mindspore.nn as nn import mindspore.ops.composite as C from mindspore import Tensor from mindspore import context -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor context.set_context(mode=context.GRAPH_MODE) @@ -59,7 +59,7 @@ def test_compile(): height, weight) * 3, np.float32)) - _executor.compile(net, inp) + _cell_graph_executor.compile(net, inp) def test_compile_grad(): @@ -72,4 +72,4 @@ def test_compile_grad(): sens = Tensor(np.ones([batch_size, num_class]).astype(np.float32)) grad_op = LeNetGrad(net) - _executor.compile(grad_op, inp, sens) + _cell_graph_executor.compile(grad_op, inp, sens) diff --git a/tests/perf_test/test_resnet_infer.py b/tests/perf_test/test_resnet_infer.py index a12160f49ad..c57431f19f5 100644 --- a/tests/perf_test/test_resnet_infer.py +++ b/tests/perf_test/test_resnet_infer.py @@ -18,11 +18,11 @@ import numpy as np from mindspore import Tensor -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from .resnet_example import resnet50 def test_compile(): net = resnet50() inp = Tensor(np.ones([1, 3, 224, 224]).astype(np.float32)) - _executor.compile(net, inp) + _cell_graph_executor.compile(net, inp) diff --git a/tests/perf_test/test_resnet_train.py b/tests/perf_test/test_resnet_train.py index 7524c9bf43f..ebf8b0608a9 100644 --- a/tests/perf_test/test_resnet_train.py +++ b/tests/perf_test/test_resnet_train.py @@ -19,7 +19,7 @@ import numpy as np import mindspore.context as context from mindspore import Tensor -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from .resnet_example import resnet50 from ..train_step_wrap import train_step_with_loss_warp @@ -31,4 +31,4 @@ def test_train_step(): net.set_train() inp = Tensor(np.ones([1, 3, 224, 224], np.float32)) label = Tensor(np.zeros([1, 10], np.float32)) - _executor.compile(net, inp, label) + _cell_graph_executor.compile(net, inp, label) diff --git a/tests/st/data_transfer/test_tdt_data_transfer.py b/tests/st/data_transfer/test_tdt_data_transfer.py index 8b50908df74..555ea9e3760 100644 --- a/tests/st/data_transfer/test_tdt_data_transfer.py +++ b/tests/st/data_transfer/test_tdt_data_transfer.py @@ -17,7 +17,7 @@ import numpy as np import pytest from mindspore import context, nn, Tensor from mindspore import log as logger -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.common import dtype as mstype from mindspore.ops import operations as P import mindspore.dataset as de @@ -121,7 +121,7 @@ def op_network_with_step_num(dataset, step_num): net = SingleOpNetwork(dataset_shapes) net_with_dataset = NetWithTDT(net, dataset_types, dataset_shapes, queue_name) # when device type is Davinci, net should has get_next operation before call init_dataset - _executor.init_dataset(dataset.queue_name, 1, batch_size, dataset_types, dataset_shapes, (), "") + 
_cell_graph_executor.init_dataset(dataset.queue_name, 1, batch_size, dataset_types, dataset_shapes, (), "") dataset_send_tdt(dataset) return op_network_with_epoch(net_with_dataset, step_num) diff --git a/tests/st/gnn/test_gat_model.py b/tests/st/gnn/test_gat_model.py index ed511481ccc..2732db3efd0 100644 --- a/tests/st/gnn/test_gat_model.py +++ b/tests/st/gnn/test_gat_model.py @@ -18,7 +18,7 @@ import numpy as np import mindspore.nn as nn import mindspore.context as context from mindspore import Tensor -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from gat import GAT context.set_context(mode=context.GRAPH_MODE) @@ -44,4 +44,4 @@ def test_GAT(): ftr_drop=0.6, activation=activation, residual=residual) - _executor.compile(net, input_data, biases) + _cell_graph_executor.compile(net, input_data, biases) diff --git a/tests/st/gnn/test_gnn_aggregator.py b/tests/st/gnn/test_gnn_aggregator.py index fc1f682a782..e3effba3635 100644 --- a/tests/st/gnn/test_gnn_aggregator.py +++ b/tests/st/gnn/test_gnn_aggregator.py @@ -20,7 +20,7 @@ import mindspore.context as context import mindspore.nn as nn import mindspore.ops.composite as C from mindspore import Tensor -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor context.set_context(mode=context.GRAPH_MODE) @@ -45,7 +45,7 @@ def test_MeanAggregator(): """Compile MeanAggregator forward graph""" aggregator = MeanAggregator(32, 64, activation="relu", dropout_ratio=0.5) input_data = Tensor(np.array(np.random.rand(32, 3, 32), dtype=np.float32)) - _executor.compile(aggregator, input_data) + _cell_graph_executor.compile(aggregator, input_data) def test_MeanAggregator_grad(): @@ -54,7 +54,7 @@ def test_MeanAggregator_grad(): input_data = Tensor(np.array(np.random.rand(32, 3, 32), dtype=np.float32)) sens = Tensor(np.ones([32, 64]).astype(np.float32)) grad_op = MeanAggregatorGrad(aggregator) - _executor.compile(grad_op, input_data, sens) + _cell_graph_executor.compile(grad_op, input_data, sens) def test_AttentionHead(): @@ -66,11 +66,11 @@ def test_AttentionHead(): residual=False) input_data = Tensor(np.array(np.random.rand(1, 2708, 1433), dtype=np.float32)) biases = Tensor(np.array(np.random.rand(1, 2708, 2708), dtype=np.float32)) - _executor.compile(head, input_data, biases) + _cell_graph_executor.compile(head, input_data, biases) def test_AttentionAggregator(): input_data = Tensor(np.array(np.random.rand(1, 2708, 1433), dtype=np.float32)) biases = Tensor(np.array(np.random.rand(1, 2708, 2708), dtype=np.float32)) net = AttentionAggregator(1433, 8, 8) - _executor.compile(net, input_data, biases) + _cell_graph_executor.compile(net, input_data, biases) diff --git a/tests/st/ops/ascend/test_tdt_data_ms.py b/tests/st/ops/ascend/test_tdt_data_ms.py index cfebc2b956c..b11ecbe8e7c 100644 --- a/tests/st/ops/ascend/test_tdt_data_ms.py +++ b/tests/st/ops/ascend/test_tdt_data_ms.py @@ -19,7 +19,7 @@ import mindspore.context as context import mindspore.dataset as ds import mindspore.dataset.vision.c_transforms as vision import mindspore.nn as nn -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.common.tensor import Tensor from mindspore.dataset.vision import Inter from mindspore.ops import operations as P @@ -92,8 +92,8 @@ if __name__ == '__main__': net = dataiter() net.set_train() - _executor.init_dataset(ds1.queue_name, 39, batch_size, - dataset_types, dataset_shapes, (), 'dataset') + 
_cell_graph_executor.init_dataset(ds1.queue_name, 39, batch_size, + dataset_types, dataset_shapes, (), 'dataset') ds1.send() for data in data_set.create_tuple_iterator(output_numpy=True, num_epochs=1): diff --git a/tests/ut/cpp/python_input/gtest_input/pipeline/parse/parse_class.py b/tests/ut/cpp/python_input/gtest_input/pipeline/parse/parse_class.py index 8e5609bb562..5f965639c80 100644 --- a/tests/ut/cpp/python_input/gtest_input/pipeline/parse/parse_class.py +++ b/tests/ut/cpp/python_input/gtest_input/pipeline/parse/parse_class.py @@ -22,7 +22,7 @@ import logging import numpy as np import mindspore.nn as nn -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.common.parameter import Parameter from mindspore.common.tensor import Tensor @@ -82,4 +82,4 @@ def test_get_object_graph(): Y = Tensor(np.ones([2, 2, 2]).astype(np.float32)) network = SimpleNet(ResNet(X), Y, True) print(network.parameters_dict()) - return _executor.compile(network, X, Y) + return _cell_graph_executor.compile(network, X, Y) diff --git a/tests/ut/cpp/python_input/gtest_input/pipeline/parse/parser_integrate.py b/tests/ut/cpp/python_input/gtest_input/pipeline/parse/parser_integrate.py index 28bded64016..fc0562894a4 100644 --- a/tests/ut/cpp/python_input/gtest_input/pipeline/parse/parser_integrate.py +++ b/tests/ut/cpp/python_input/gtest_input/pipeline/parse/parser_integrate.py @@ -19,7 +19,7 @@ import numpy as np import mindspore._c_expression as me import mindspore.nn as nn from mindspore.common import dtype -from mindspore.common.api import ms_function, _executor +from mindspore.common.api import ms_function, _cell_graph_executor from mindspore.common.parameter import Parameter from mindspore.common.tensor import Tensor from mindspore.ops import functional as F @@ -153,7 +153,7 @@ class TestNet(nn.Cell): def test_compile_conv2d(): net = Net() inputs = Tensor(np.ones([1, 3, 16, 50]).astype(np.float32)) - _executor.compile(net, inputs) + _cell_graph_executor.compile(net, inputs) def test_none(x, y): diff --git a/tests/ut/cpp/utils/callback_test.cc b/tests/ut/cpp/utils/callback_test.cc index 1823c909850..698dd2573c8 100644 --- a/tests/ut/cpp/utils/callback_test.cc +++ b/tests/ut/cpp/utils/callback_test.cc @@ -38,7 +38,7 @@ class TestCallback : public UT::Common { * # ut and python static info not share TEST_F(TestCallback, test_get_anf_tensor_shape) { py::object obj = python_adapter::CallPyFn("gtest_input.pipeline.parse.parse_class", "test_get_object_graph"); - FuncGraphPtr func_graph = pipeline::ExecutorPy::GetInstance()->GetFuncGraphPy(obj); + FuncGraphPtr func_graph = pipeline::GraphExecutorPy::GetInstance()->GetFuncGraphPy(obj); transform::DfGraphManager::GetInstance().SetAnfGraph(func_graph); std::shared_ptr> param_shape_ptr = std::make_shared>(); bool get_shape = callbacks::GetParameterShape(func_graph, "weight", param_shape_ptr); @@ -47,7 +47,7 @@ TEST_F(TestCallback, test_get_anf_tensor_shape) { TEST_F(TestCallback, test_checkpoint_save_op) { py::object obj = python_adapter::CallPyFn("gtest_input.pipeline.parse.parse_class", "test_get_object_graph"); - FuncGraphPtr func_graph = pipeline::ExecutorPy::GetInstance()->GetFuncGraphPy(obj); + FuncGraphPtr func_graph = pipeline::GraphExecutorPy::GetInstance()->GetFuncGraphPy(obj); transform::DfGraphManager::GetInstance().SetAnfGraph(func_graph); #define DTYPE float diff --git a/tests/ut/python/communication/test_comm.py b/tests/ut/python/communication/test_comm.py index 9285b06b41e..090e9549812 100644 --- 
a/tests/ut/python/communication/test_comm.py +++ b/tests/ut/python/communication/test_comm.py @@ -18,7 +18,7 @@ import numpy as np import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.communication._comm_helper import Backend from mindspore.communication.management import HCCL_WORLD_COMM_GROUP, NCCL_WORLD_COMM_GROUP, GlobalComm, init from mindspore.nn import Dense @@ -154,7 +154,7 @@ def run_allreduce(op): momentum=0.9) network = WithLossCell(network, loss_fn) network = TrainOneStepCell(network, optimizer) - _executor.compile(network, input_tensor, label_tensor) + _cell_graph_executor.compile(network, input_tensor, label_tensor) def test_allreduce(): @@ -178,7 +178,7 @@ def test_allgather(): momentum=0.9) network = WithLossCell(network, loss_fn) network = TrainOneStepCell(network, optimizer) - _executor.compile(network, input_tensor, label_tensor) + _cell_graph_executor.compile(network, input_tensor, label_tensor) def test_allswap(): """run_allswap""" @@ -192,7 +192,7 @@ def test_allswap(): momentum=0.9) network = WithLossCell(network, loss_fn) network = TrainOneStepCell(network, optimizer) - _executor.compile(network, input_tensor, label_tensor) + _cell_graph_executor.compile(network, input_tensor, label_tensor) def run_reducescatter(op): @@ -207,7 +207,7 @@ def run_reducescatter(op): momentum=0.9) network = WithLossCell(network, loss_fn) network = TrainOneStepCell(network, optimizer) - _executor.compile(network, input_tensor, label_tensor) + _cell_graph_executor.compile(network, input_tensor, label_tensor) def test_reducescatter(): @@ -228,7 +228,7 @@ def test_broadcast(): momentum=0.9) network = WithLossCell(network, loss_fn) network = TrainOneStepCell(network, optimizer) - _executor.compile(network, input_tensor_1, label_tensor) + _cell_graph_executor.compile(network, input_tensor_1, label_tensor) def test_alltoall(): @@ -243,4 +243,4 @@ def test_alltoall(): momentum=0.9) network = WithLossCell(network, loss_fn) network = TrainOneStepCell(network, optimizer) - _executor.compile(network, input_tensor, label_tensor) + _cell_graph_executor.compile(network, input_tensor, label_tensor) diff --git a/tests/ut/python/communication/test_data_parallel_dense.py b/tests/ut/python/communication/test_data_parallel_dense.py index c80f4c5a2f0..df615810d32 100644 --- a/tests/ut/python/communication/test_data_parallel_dense.py +++ b/tests/ut/python/communication/test_data_parallel_dense.py @@ -21,7 +21,7 @@ import numpy as np import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.nn import Momentum from mindspore.nn import TrainOneStepCell, WithLossCell from mindspore.ops import operations as P @@ -70,5 +70,5 @@ def test_data_parallel_dense(): net = WithLossCell(net, loss_fn) net = TrainOneStepCell(net, optimizer) - _executor.compile(net, inp, label) + _cell_graph_executor.compile(net, inp, label) context.reset_auto_parallel_context() diff --git a/tests/ut/python/exec/test_eval.py b/tests/ut/python/exec/test_eval.py index 6d6e0ba20fc..4fa1ce5aab9 100644 --- a/tests/ut/python/exec/test_eval.py +++ b/tests/ut/python/exec/test_eval.py @@ -19,7 +19,7 @@ import mindspore as ms import mindspore.nn as nn from mindspore import Tensor from mindspore import context -from mindspore.common.api import _executor +from 
mindspore.common.api import _cell_graph_executor from ..ut_filter import non_graph_engine @@ -63,7 +63,7 @@ def test_compile_train_eval(): train_input_data = Tensor(np.ones([1, 3, 32, 32]).astype(np.float32) * 0.01) context.set_context(mode=context.GRAPH_MODE) - ms_executor = _executor + ms_executor = _cell_graph_executor ms_executor.init_dataset("train", 1, 1, [ms.float32], [[1, 3, 32, 32]], (), 'dataset') diff --git a/tests/ut/python/ir/test_tensor.py b/tests/ut/python/ir/test_tensor.py index 4f2e29c0a1e..6f5ef0233ac 100644 --- a/tests/ut/python/ir/test_tensor.py +++ b/tests/ut/python/ir/test_tensor.py @@ -286,7 +286,7 @@ def test_return_tensor(): net = Net(0) input_data = ms.Tensor(np.array([[1.2, 2.1], [2.2, 3.2]]).astype('float32')) input_data.set_dtype(ms.float32) - exe = me._executor + exe = me._cell_graph_executor exe.compile(net, input_data) tensor_ = exe(net, input_data) diff --git a/tests/ut/python/ir/test_tensor_py.py b/tests/ut/python/ir/test_tensor_py.py index 69842b32c69..f568096be6f 100644 --- a/tests/ut/python/ir/test_tensor_py.py +++ b/tests/ut/python/ir/test_tensor_py.py @@ -17,7 +17,7 @@ import numpy as np import mindspore as ms import mindspore.common.initializer as init -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.nn import Cell from mindspore.ops import operations as P from ..ut_filter import non_graph_engine @@ -133,7 +133,7 @@ def test_tensor_method_sub(): x = ms.Tensor(np.ones([5, 3], np.float32)) y = ms.Tensor(np.ones([8, 5, 3], np.float32)) - _executor.compile(net, x, y) + _cell_graph_executor.compile(net, x, y) def test_tensor_method_mul(): @@ -152,7 +152,7 @@ def test_tensor_method_mul(): x = ms.Tensor(np.ones([5, 3], np.float32)) y = ms.Tensor(np.ones([8, 5, 3], np.float32)) - _executor.compile(net, x, y) + _cell_graph_executor.compile(net, x, y) def test_tensor_method_div(): @@ -171,4 +171,4 @@ def test_tensor_method_div(): x = ms.Tensor(np.ones([5, 3], np.float32)) y = ms.Tensor(np.ones([8, 5, 3], np.float32)) - _executor.compile(net, x, y) + _cell_graph_executor.compile(net, x, y) diff --git a/tests/ut/python/model/res18_example.py b/tests/ut/python/model/res18_example.py index 20924baadf6..354bc15c0c4 100644 --- a/tests/ut/python/model/res18_example.py +++ b/tests/ut/python/model/res18_example.py @@ -19,7 +19,7 @@ import numpy as np import mindspore.nn as nn # pylint: disable=C0414 from mindspore import Tensor -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.ops.operations import Add from ...train_step_wrap import train_step_with_loss_warp @@ -244,14 +244,14 @@ def resnet9(): def test_compile(): net = resnet18() input_data = Tensor(np.ones([1, 3, 224, 224])) - _executor.compile(net, input_data) + _cell_graph_executor.compile(net, input_data) def test_train_step(): net = train_step_with_loss_warp(resnet9()) input_data = Tensor(np.ones([1, 3, 224, 224])) label = Tensor(np.zeros([1, 10])) - _executor.compile(net, input_data, label) + _cell_graph_executor.compile(net, input_data, label) def test_train_step_training(): @@ -259,4 +259,4 @@ def test_train_step_training(): input_data = Tensor(np.ones([1, 3, 224, 224])) label = Tensor(np.zeros([1, 10])) net.set_train() - _executor.compile(net, input_data, label) + _cell_graph_executor.compile(net, input_data, label) diff --git a/tests/ut/python/model/test_lenet.py b/tests/ut/python/model/test_lenet.py index cc679c29740..41f46611a89 100644 --- a/tests/ut/python/model/test_lenet.py 
+++ b/tests/ut/python/model/test_lenet.py @@ -18,7 +18,7 @@ import numpy as np import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.ops import operations as P from ....train_step_wrap import train_step_with_loss_warp, train_step_with_sens @@ -53,14 +53,14 @@ def test_lenet5_train_step(): predict = Tensor(np.ones([1, 1, 32, 32]).astype(np.float32) * 0.01) label = Tensor(np.zeros([1, 10]).astype(np.float32)) net = train_step_with_loss_warp(LeNet5()) - _executor.compile(net, predict, label) + _cell_graph_executor.compile(net, predict, label) def test_lenet5_train_sens(): predict = Tensor(np.ones([1, 1, 32, 32]).astype(np.float32) * 0.01) sens = Tensor(np.ones([1, 10]).astype(np.float32)) net = train_step_with_sens(LeNet5(), sens) - _executor.compile(net, predict) + _cell_graph_executor.compile(net, predict) def test_lenet5_train_step_training(): @@ -68,4 +68,4 @@ def test_lenet5_train_step_training(): label = Tensor(np.zeros([1, 10]).astype(np.float32)) net = train_step_with_loss_warp(LeNet5()) net.set_train() - _executor.compile(net, predict, label) + _cell_graph_executor.compile(net, predict, label) diff --git a/tests/ut/python/model/test_lenet_core_after_exception.py b/tests/ut/python/model/test_lenet_core_after_exception.py index fde3147d79b..dfd47ac599b 100644 --- a/tests/ut/python/model/test_lenet_core_after_exception.py +++ b/tests/ut/python/model/test_lenet_core_after_exception.py @@ -17,7 +17,7 @@ import numpy as np import pytest import mindspore.nn as nn -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.common.tensor import Tensor from mindspore.ops import operations as P from ....train_step_wrap import train_step_with_loss_warp @@ -54,5 +54,5 @@ def test_lenet5_exception(): label = Tensor(in2) net = train_step_with_loss_warp(LeNet5()) with pytest.raises(RuntimeError) as info: - _executor.compile(net, predict, label) + _cell_graph_executor.compile(net, predict, label) assert "x_shape[C_in] / group must equal to w_shape[C_in] = " in str(info.value) diff --git a/tests/ut/python/model/test_mix_precision.py b/tests/ut/python/model/test_mix_precision.py index 94ca0e8a970..9f097f76831 100644 --- a/tests/ut/python/model/test_mix_precision.py +++ b/tests/ut/python/model/test_mix_precision.py @@ -19,7 +19,7 @@ import mindspore.common.dtype as mstype import mindspore.nn as nn from mindspore import Tensor, context from mindspore.common import ParameterTuple -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.common.parameter import Parameter from mindspore.nn import Momentum from mindspore.nn import TrainOneStepCell, WithLossCell @@ -111,7 +111,7 @@ def test_data_parallel_with_cast(): net = WithLossCell(net, loss_fn) net = TrainOneStepCell(net, optimizer) - _executor.compile(net, predict, label) + _cell_graph_executor.compile(net, predict, label) context.reset_auto_parallel_context() @@ -128,7 +128,7 @@ def test_nn_prelu(): x = Tensor(np.ones([1, 16, 10, 10]).astype(np.float32) * 0.01) net = NetForPReLU().set_train() net.add_flags_recursive(fp16=True) - _executor.compile(net, x) + _cell_graph_executor.compile(net, x) class NetForCast(nn.Cell): diff --git a/tests/ut/python/nn/optim/test_ada_grad.py b/tests/ut/python/nn/optim/test_ada_grad.py index fb27a294e5d..2e43bd7acd7 100644 --- 
a/tests/ut/python/nn/optim/test_ada_grad.py +++ b/tests/ut/python/nn/optim/test_ada_grad.py @@ -19,7 +19,7 @@ import numpy as np import mindspore.nn as nn from mindspore import Tensor, Parameter, context -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.nn import TrainOneStepCell, WithLossCell from mindspore.nn.optim import Adagrad from mindspore.ops import operations as P @@ -55,4 +55,4 @@ def test_ada_grad(): optimizer = Adagrad(net.trainable_params(), weight_decay=0.9, loss_scale=1024.0) net_with_loss = WithLossCell(net, loss) train_network = TrainOneStepCell(net_with_loss, optimizer) - _executor.compile(train_network, inputs, label) + _cell_graph_executor.compile(train_network, inputs, label) diff --git a/tests/ut/python/nn/optim/test_adafactor.py b/tests/ut/python/nn/optim/test_adafactor.py index 351665cf12b..21847778602 100644 --- a/tests/ut/python/nn/optim/test_adafactor.py +++ b/tests/ut/python/nn/optim/test_adafactor.py @@ -18,7 +18,7 @@ import pytest import mindspore.nn as nn from mindspore import Tensor, Parameter, context -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.nn import TrainOneStepCell, WithLossCell from mindspore.nn.optim.adafactor import AdaFactor from mindspore.ops import operations as P @@ -82,7 +82,7 @@ def test_adafactor_compile1(): net_with_loss = WithLossCell(net, loss) train_network = TrainOneStepCell(net_with_loss, optimizer) - _executor.compile(train_network, inputs, label) + _cell_graph_executor.compile(train_network, inputs, label) def test_adafactor_compile2(): @@ -97,7 +97,7 @@ def test_adafactor_compile2(): net_with_loss = WithLossCell(net, loss) train_network = TrainOneStepCell(net_with_loss, optimizer) - _executor.compile(train_network, inputs, label) + _cell_graph_executor.compile(train_network, inputs, label) def test_adafactor_compile3(): @@ -113,7 +113,7 @@ def test_adafactor_compile3(): warmup_init=False, compression=False) net_with_loss = WithLossCell(net, loss) train_network = TrainOneStepCell(net_with_loss, optimizer) - _executor.compile(train_network, inputs, label) + _cell_graph_executor.compile(train_network, inputs, label) def test_adafactor_compile4(): @@ -133,7 +133,7 @@ def test_adafactor_compile4(): warmup_init=warmup_init, compression=compression) net_with_loss = WithLossCell(net, loss) train_network = TrainOneStepCell(net_with_loss, optimizer) - _executor.compile(train_network, inputs, label) + _cell_graph_executor.compile(train_network, inputs, label) def test_adafactor_compile5(): @@ -153,7 +153,7 @@ def test_adafactor_compile5(): warmup_init=warmup_init, compression=compression) net_with_loss = WithLossCell(net, loss) train_network = TrainOneStepCell(net_with_loss, optimizer) - _executor.compile(train_network, inputs, label) + _cell_graph_executor.compile(train_network, inputs, label) def test_adafactor_compile6(): @@ -173,7 +173,7 @@ def test_adafactor_compile6(): warmup_init=warmup_init, compression=compression) net_with_loss = WithLossCell(net, loss) train_network = TrainOneStepCell(net_with_loss, optimizer) - _executor.compile(train_network, inputs, label) + _cell_graph_executor.compile(train_network, inputs, label) def test_adafactor_group1(): @@ -192,7 +192,7 @@ def test_adafactor_group1(): optimizer = AdaFactor(group_params, learning_rate=poly_decay_lr, relative_step=False) train_network = TrainOneStepCell(net_with_loss, optimizer) - _executor.compile(train_network, inputs, label) + 
_cell_graph_executor.compile(train_network, inputs, label) def test_adafactor_group2(): @@ -210,7 +210,7 @@ def test_adafactor_group2(): {'params': [all_params[1]]}] optimizer = AdaFactor(group_params, learning_rate=schedule_lr, relative_step=False) train_network = TrainOneStepCell(net_with_loss, optimizer) - _executor.compile(train_network, inputs, label) + _cell_graph_executor.compile(train_network, inputs, label) def test_adafactor_group3(): @@ -227,7 +227,7 @@ def test_adafactor_group3(): optimizer = AdaFactor(group_params, learning_rate=None) train_network = TrainOneStepCell(net_with_loss, optimizer) - _executor.compile(train_network, inputs, label) + _cell_graph_executor.compile(train_network, inputs, label) def test_adafactor_group4(): @@ -244,7 +244,7 @@ def test_adafactor_group4(): {'params': [all_params[1]]}] optimizer = AdaFactor(group_params, learning_rate=None) train_network = TrainOneStepCell(net_with_loss, optimizer) - _executor.compile(train_network, inputs, label) + _cell_graph_executor.compile(train_network, inputs, label) def test_adafactor_group5(): @@ -261,7 +261,7 @@ def test_adafactor_group5(): {'params': [all_params[1]]}] optimizer = AdaFactor(group_params, learning_rate=None, beta1=0.1) train_network = TrainOneStepCell(net_with_loss, optimizer) - _executor.compile(train_network, inputs, label) + _cell_graph_executor.compile(train_network, inputs, label) def test_adafactor_group6(): @@ -278,4 +278,4 @@ def test_adafactor_group6(): {'params': [all_params[1]]}] optimizer = AdaFactor(group_params, learning_rate=None, beta1=0.2) train_network = TrainOneStepCell(net_with_loss, optimizer) - _executor.compile(train_network, inputs, label) + _cell_graph_executor.compile(train_network, inputs, label) diff --git a/tests/ut/python/nn/optim/test_adam.py b/tests/ut/python/nn/optim/test_adam.py index 44fa10357f1..c23fe6db1d1 100644 --- a/tests/ut/python/nn/optim/test_adam.py +++ b/tests/ut/python/nn/optim/test_adam.py @@ -18,7 +18,7 @@ import pytest import mindspore.nn as nn from mindspore import Tensor, Parameter, context -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.nn import TrainOneStepCell, WithLossCell from mindspore.nn.optim import Adam, AdamWeightDecay from mindspore.ops import operations as P @@ -86,7 +86,7 @@ def test_adamw_compile(): net_with_loss = WithLossCell(net, loss) train_network = TrainOneStepCell(net_with_loss, optimizer) - _executor.compile(train_network, inputs, label) + _cell_graph_executor.compile(train_network, inputs, label) def test_adam_compile(): @@ -101,7 +101,7 @@ def test_adam_compile(): net_with_loss = WithLossCell(net, loss) train_network = TrainOneStepCell(net_with_loss, optimizer) - _executor.compile(train_network, inputs, label) + _cell_graph_executor.compile(train_network, inputs, label) def test_sparse_adam_compile(): @@ -114,7 +114,7 @@ def test_sparse_adam_compile(): optimizer = Adam(net.trainable_params(), learning_rate=0.1, loss_scale=1024.0, weight_decay=0.9) optimizer.target = 'CPU' train_network = TrainOneStepCell(net, optimizer) - _executor.compile(train_network, indices, label) + _cell_graph_executor.compile(train_network, indices, label) def test_sparse_adam(): @@ -126,7 +126,7 @@ def test_sparse_adam(): optimizer = Adam(net.trainable_params(), learning_rate=0.1, loss_scale=1024.0, weight_decay=0.9) train_network = TrainOneStepCell(net, optimizer) - _executor.compile(train_network, indices, label) + _cell_graph_executor.compile(train_network, indices, label) def 
test_adam_group1(): @@ -146,7 +146,7 @@ def test_adam_group1(): optimizer = nn.Adam(group_params, learning_rate=0.1) train_network = TrainOneStepCell(net_with_loss, optimizer) - _executor.compile(train_network, inputs, label) + _cell_graph_executor.compile(train_network, inputs, label) def test_adam_group2(): @@ -164,7 +164,7 @@ def test_adam_group2(): {'params': [all_params[1]]}] optimizer = nn.Adam(group_params, learning_rate=schedule_lr) train_network = TrainOneStepCell(net_with_loss, optimizer) - _executor.compile(train_network, inputs, label) + _cell_graph_executor.compile(train_network, inputs, label) def test_adamweightdecay_group(): @@ -182,7 +182,7 @@ def test_adamweightdecay_group(): {'params': [all_params[1]]}] optimizer = nn.AdamWeightDecay(group_params, learning_rate=schedule_lr) train_network = TrainOneStepCell(net_with_loss, optimizer) - _executor.compile(train_network, inputs, label) + _cell_graph_executor.compile(train_network, inputs, label) def test_adamoffload_group(): @@ -200,7 +200,7 @@ def test_adamoffload_group(): {'params': [all_params[1]]}] optimizer = nn.AdamOffload(group_params, learning_rate=schedule_lr) train_network = TrainOneStepCell(net_with_loss, optimizer) - _executor.compile(train_network, inputs, label) + _cell_graph_executor.compile(train_network, inputs, label) def test_AdamWeightDecay_beta1(): diff --git a/tests/ut/python/nn/optim/test_ftrl.py b/tests/ut/python/nn/optim/test_ftrl.py index a2d8c2efb34..ab849591c1c 100644 --- a/tests/ut/python/nn/optim/test_ftrl.py +++ b/tests/ut/python/nn/optim/test_ftrl.py @@ -18,7 +18,7 @@ import numpy as np import mindspore.nn as nn from mindspore import Tensor, Parameter, context -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.nn import TrainOneStepCell, WithLossCell from mindspore.nn.optim import FTRL from mindspore.ops import operations as P @@ -66,7 +66,7 @@ def test_ftrl(): optimizer = FTRL(net.trainable_params(), weight_decay=0.9, loss_scale=2.0) net_with_loss = WithLossCell(net, loss) train_network = TrainOneStepCell(net_with_loss, optimizer) - _executor.compile(train_network, inputs, label) + _cell_graph_executor.compile(train_network, inputs, label) def test_spares_ftrl_compile(): @@ -79,7 +79,7 @@ def test_spares_ftrl_compile(): optimizer = FTRL(net.trainable_params(), weight_decay=0.9, loss_scale=2.0) optimizer.target = 'CPU' train_network = TrainOneStepCell(net, optimizer) - _executor.compile(train_network, indices, label) + _cell_graph_executor.compile(train_network, indices, label) def test_spares_ftrl(): @@ -92,4 +92,4 @@ def test_spares_ftrl(): optimizer = FTRL(net.trainable_params(), weight_decay=0.9, loss_scale=2.0) optimizer.target = 'Ascend' train_network = TrainOneStepCell(net, optimizer) - _executor.compile(train_network, indices, label) + _cell_graph_executor.compile(train_network, indices, label) diff --git a/tests/ut/python/nn/optim/test_lamb.py b/tests/ut/python/nn/optim/test_lamb.py index b2963fc9501..2acef4b5d67 100644 --- a/tests/ut/python/nn/optim/test_lamb.py +++ b/tests/ut/python/nn/optim/test_lamb.py @@ -17,7 +17,7 @@ import numpy as np import mindspore.nn as nn from mindspore import Tensor, Parameter -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.nn import TrainOneStepCell, WithLossCell from mindspore.nn.optim import Lamb from mindspore.ops import operations as P @@ -83,7 +83,7 @@ def test_lamb_compile_dynamic_lr(): net_with_loss = WithLossCell(net, 
loss) train_network = TrainOneStepCell(net_with_loss, optimizer) - _executor.compile(train_network, inputs, label) + _cell_graph_executor.compile(train_network, inputs, label) def test_lamb_compile(): @@ -98,7 +98,7 @@ def test_lamb_compile(): net_with_loss = WithLossCell(net, loss) train_network = TrainOneStepCell(net_with_loss, optimizer) - _executor.compile(train_network, inputs, label) + _cell_graph_executor.compile(train_network, inputs, label) def test_lamb_group(): @@ -116,4 +116,4 @@ def test_lamb_group(): net_with_loss = WithLossCell(net, loss) train_network = TrainOneStepCell(net_with_loss, optimizer) - _executor.compile(train_network, inputs, label) + _cell_graph_executor.compile(train_network, inputs, label) diff --git a/tests/ut/python/nn/optim/test_lars.py b/tests/ut/python/nn/optim/test_lars.py index 1373691b72b..a3030b57d35 100644 --- a/tests/ut/python/nn/optim/test_lars.py +++ b/tests/ut/python/nn/optim/test_lars.py @@ -18,7 +18,7 @@ import numpy as np import mindspore.nn as nn from mindspore import Tensor, Parameter from mindspore.common import dtype as mstype -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.nn import TrainOneStepCell, WithLossCell from mindspore.nn.optim import LARS, Momentum from mindspore.ops import operations as P @@ -61,7 +61,7 @@ def test_lars_multi_step_lr(): net_with_loss = WithLossCell(net, loss) train_network = TrainOneStepCell(net_with_loss, optimizer) - _executor.compile(train_network, inputs, label) + _cell_graph_executor.compile(train_network, inputs, label) def test_lars_float_lr(): @@ -78,4 +78,4 @@ def test_lars_float_lr(): net_with_loss = WithLossCell(net, loss) train_network = TrainOneStepCell(net_with_loss, optimizer) - _executor.compile(train_network, inputs, label) + _cell_graph_executor.compile(train_network, inputs, label) diff --git a/tests/ut/python/nn/optim/test_lazyadam.py b/tests/ut/python/nn/optim/test_lazyadam.py index 2d52003d28b..b004b65cce2 100644 --- a/tests/ut/python/nn/optim/test_lazyadam.py +++ b/tests/ut/python/nn/optim/test_lazyadam.py @@ -18,7 +18,7 @@ import pytest import mindspore.nn as nn from mindspore import Tensor, Parameter, context -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.nn import TrainOneStepCell, WithLossCell from mindspore.nn.optim import LazyAdam from mindspore.ops import operations as P @@ -70,7 +70,7 @@ def test_lazy_adam_compile(): net_with_loss = WithLossCell(net, loss) train_network = TrainOneStepCell(net_with_loss, optimizer) - _executor.compile(train_network, inputs, label) + _cell_graph_executor.compile(train_network, inputs, label) def test_spares_lazy_adam_compile(): @@ -83,7 +83,7 @@ def test_spares_lazy_adam_compile(): optimizer = LazyAdam(net.trainable_params(), learning_rate=0.1, weight_decay=0.9, loss_scale=2.0) optimizer.target = 'CPU' train_network = TrainOneStepCell(net, optimizer) - _executor.compile(train_network, indices, label) + _cell_graph_executor.compile(train_network, indices, label) def test_spares_lazy_adam(): @@ -96,7 +96,7 @@ def test_spares_lazy_adam(): optimizer = LazyAdam(net.trainable_params(), learning_rate=0.1, weight_decay=0.9, loss_scale=2.0) optimizer.target = 'Ascend' train_network = TrainOneStepCell(net, optimizer) - _executor.compile(train_network, indices, label) + _cell_graph_executor.compile(train_network, indices, label) def test_lazy_adam_error(): diff --git a/tests/ut/python/nn/optim/test_momentum.py 
b/tests/ut/python/nn/optim/test_momentum.py index d3f1c4c218a..127894c3bce 100644 --- a/tests/ut/python/nn/optim/test_momentum.py +++ b/tests/ut/python/nn/optim/test_momentum.py @@ -17,7 +17,7 @@ import numpy as np import mindspore.nn as nn from mindspore import Tensor, Parameter -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.nn import TrainOneStepCell, WithLossCell from mindspore.nn.optim import Momentum from mindspore.ops import operations as P @@ -50,4 +50,4 @@ def test_momentum_compile(): net_with_loss = WithLossCell(net, loss) train_network = TrainOneStepCell(net_with_loss, optimizer) - _executor.compile(train_network, inputs, label) + _cell_graph_executor.compile(train_network, inputs, label) diff --git a/tests/ut/python/nn/optim/test_proximal_ada_grad.py b/tests/ut/python/nn/optim/test_proximal_ada_grad.py index 674cddae25b..4995cd71536 100644 --- a/tests/ut/python/nn/optim/test_proximal_ada_grad.py +++ b/tests/ut/python/nn/optim/test_proximal_ada_grad.py @@ -18,7 +18,7 @@ import numpy as np import mindspore.nn as nn from mindspore import Tensor, Parameter, context -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.nn import TrainOneStepCell, WithLossCell from mindspore.nn.optim import ProximalAdagrad from mindspore.ops import operations as P @@ -66,7 +66,7 @@ def test_proximal_ada_grad(): optimizer = ProximalAdagrad(net.trainable_params(), weight_decay=0.9, loss_scale=1024.0) net_with_loss = WithLossCell(net, loss) train_network = TrainOneStepCell(net_with_loss, optimizer) - _executor.compile(train_network, inputs, label) + _cell_graph_executor.compile(train_network, inputs, label) def test_spares_proximal_ada_grad_compile(): @@ -79,7 +79,7 @@ def test_spares_proximal_ada_grad_compile(): optimizer = ProximalAdagrad(net.trainable_params(), weight_decay=0.9, loss_scale=1024.0) optimizer.target = 'CPU' train_network = TrainOneStepCell(net, optimizer) - _executor.compile(train_network, indices, label) + _cell_graph_executor.compile(train_network, indices, label) def test_spares_proximal_ada_grad(): @@ -91,4 +91,4 @@ def test_spares_proximal_ada_grad(): optimizer = ProximalAdagrad(net.trainable_params(), weight_decay=0.9, loss_scale=1024.0) train_network = TrainOneStepCell(net, optimizer) - _executor.compile(train_network, indices, label) + _cell_graph_executor.compile(train_network, indices, label) diff --git a/tests/ut/python/nn/optim/test_rmsprop.py b/tests/ut/python/nn/optim/test_rmsprop.py index 683220eefe5..077c30df4ab 100644 --- a/tests/ut/python/nn/optim/test_rmsprop.py +++ b/tests/ut/python/nn/optim/test_rmsprop.py @@ -18,7 +18,7 @@ import pytest import mindspore.nn as nn from mindspore import Tensor, Parameter -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.nn import TrainOneStepCell, WithLossCell from mindspore.nn.optim import RMSProp from mindspore.ops import operations as P @@ -51,7 +51,7 @@ def test_rmsprop_compile(): net_with_loss = WithLossCell(net, loss) train_network = TrainOneStepCell(net_with_loss, optimizer) - _executor.compile(train_network, inputs, label) + _cell_graph_executor.compile(train_network, inputs, label) def test_rmsprop_e(): diff --git a/tests/ut/python/nn/probability/dpn/test_vae.py b/tests/ut/python/nn/probability/dpn/test_vae.py index aff129574e3..3122fc6bd8a 100644 --- a/tests/ut/python/nn/probability/dpn/test_vae.py +++ 
b/tests/ut/python/nn/probability/dpn/test_vae.py @@ -18,7 +18,7 @@ import numpy as np import mindspore.common.dtype as mstype import mindspore.nn as nn from mindspore import Tensor -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.nn.probability.dpn import VAE @@ -54,4 +54,4 @@ def test_vae(): decoder = Decoder() net = VAE(encoder, decoder, hidden_size=3, latent_size=2) input_data = Tensor(np.random.rand(32, 6), dtype=mstype.float32) - _executor.compile(net, input_data) + _cell_graph_executor.compile(net, input_data) diff --git a/tests/ut/python/nn/test_activation.py b/tests/ut/python/nn/test_activation.py index 1a8cb25fe0e..4264be58cf4 100755 --- a/tests/ut/python/nn/test_activation.py +++ b/tests/ut/python/nn/test_activation.py @@ -17,7 +17,7 @@ import numpy as np import mindspore.nn as nn from mindspore import Tensor -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from ..ut_filter import non_graph_engine @@ -74,7 +74,7 @@ class Net1(nn.Cell): def test_compile_relu(): net = Net1() input_data = Tensor(np.array([[1.2, 2.1], [2.2, 3.2]], dtype=np.float32)) - _executor.compile(net, input_data) + _cell_graph_executor.compile(net, input_data) class Net_gelu(nn.Cell): @@ -89,7 +89,7 @@ class Net_gelu(nn.Cell): def test_compile_gelu(): net = Net_gelu() input_data = Tensor(np.array([[1.2, 2.1], [2.2, 3.2]], dtype=np.float32)) - _executor.compile(net, input_data) + _cell_graph_executor.compile(net, input_data) class NetLeakyReLU(nn.Cell): @@ -104,4 +104,4 @@ class NetLeakyReLU(nn.Cell): def test_compile_leaky_relu(): net = NetLeakyReLU(alpha=0.1) input_data = Tensor(np.array([[1.6, 0, 0.6], [6, 0, -6]], dtype=np.float32)) - _executor.compile(net, input_data) + _cell_graph_executor.compile(net, input_data) diff --git a/tests/ut/python/nn/test_batchnorm.py b/tests/ut/python/nn/test_batchnorm.py index 100a91a44b0..0d81a06a29e 100644 --- a/tests/ut/python/nn/test_batchnorm.py +++ b/tests/ut/python/nn/test_batchnorm.py @@ -18,7 +18,7 @@ import pytest import mindspore.nn as nn from mindspore import Tensor, Parameter -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor def test_bn_pars_valid1(): @@ -55,7 +55,7 @@ class Net(nn.Cell): def test_compile(): net = Net() input_data = Tensor(np.random.randint(0, 255, [1, 3, 224, 224]).astype(np.float32)) - _executor.compile(net, input_data) + _cell_graph_executor.compile(net, input_data) class GroupNet(nn.Cell): @@ -70,4 +70,4 @@ class GroupNet(nn.Cell): def test_compile_groupnorm(): net = nn.GroupNorm(16, 64) input_data = Tensor(np.random.rand(1, 64, 256, 256).astype(np.float32)) - _executor.compile(net, input_data) + _cell_graph_executor.compile(net, input_data) diff --git a/tests/ut/python/nn/test_cell.py b/tests/ut/python/nn/test_cell.py index f15cc5c65a7..7aeb26f5854 100644 --- a/tests/ut/python/nn/test_cell.py +++ b/tests/ut/python/nn/test_cell.py @@ -19,7 +19,7 @@ import pytest import mindspore.nn as nn from mindspore import Tensor, Parameter -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor class ModA(nn.Cell): @@ -299,4 +299,4 @@ def test_cell_names(): ta = Tensor(np.ones([2, 3])) mn = ModelName(ta) with pytest.raises(ValueError): - _executor.compile(mn) + _cell_graph_executor.compile(mn) diff --git a/tests/ut/python/nn/test_cell_wrapper.py b/tests/ut/python/nn/test_cell_wrapper.py index 773d482705a..cd84f1553e6 100755 --- 
a/tests/ut/python/nn/test_cell_wrapper.py +++ b/tests/ut/python/nn/test_cell_wrapper.py @@ -18,7 +18,7 @@ import pytest import mindspore.nn as nn from mindspore import Tensor, Parameter from mindspore.common import dtype as mstype -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.nn import TrainOneStepCell, WithLossCell, ParameterUpdate from mindspore.nn.optim import Momentum from mindspore.ops import operations as P @@ -50,7 +50,7 @@ def test_parameter_update_int32_and_tensor(): train_network.set_train() inputs = Tensor(np.ones([1, 64]).astype(np.float32)) label = Tensor(np.zeros([1, 10]).astype(np.float32)) - _executor.compile(train_network, inputs, label) + _cell_graph_executor.compile(train_network, inputs, label) # test tensor param_lr = train_network.parameters_dict()['learning_rate'] @@ -58,14 +58,14 @@ def test_parameter_update_int32_and_tensor(): update_network.phase = 'update_param' input_lr = Tensor(np.array([0.2, 0.02, 0.002]), mstype.float32) - _executor.compile(update_network, input_lr) + _cell_graph_executor.compile(update_network, input_lr) # test int32 param_step = train_network.parameters_dict()['global_step'] update_global_step = ParameterUpdate(param_step) input_step = Tensor(np.array([1000]), mstype.int32) - _executor.compile(update_global_step, input_step) + _cell_graph_executor.compile(update_global_step, input_step) def test_parameter_update_float32(): @@ -81,7 +81,7 @@ def test_parameter_update_float32(): train_network.set_train() inputs = Tensor(np.ones([1, 64]).astype(np.float32)) label = Tensor(np.zeros([1, 10]).astype(np.float32)) - _executor.compile(train_network, inputs, label) + _cell_graph_executor.compile(train_network, inputs, label) # construct and compile update graph param_lr = train_network.parameters_dict()['learning_rate'] @@ -89,7 +89,7 @@ def test_parameter_update_float32(): update_network.phase = 'update_param' input_lr = Tensor(0.0001, mstype.float32) - _executor.compile(update_network, input_lr) + _cell_graph_executor.compile(update_network, input_lr) def test_parameter_update_error(): diff --git a/tests/ut/python/nn/test_central_crop.py b/tests/ut/python/nn/test_central_crop.py index dc9f438f952..2d2fae54ee3 100644 --- a/tests/ut/python/nn/test_central_crop.py +++ b/tests/ut/python/nn/test_central_crop.py @@ -21,7 +21,7 @@ import pytest import mindspore.nn as nn from mindspore import Tensor from mindspore.common import dtype as mstype -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor class CentralCropNet(nn.Cell): @@ -37,14 +37,14 @@ def test_compile_3d_central_crop(): central_fraction = 0.2 net = CentralCropNet(central_fraction) image = Tensor(np.random.random((3, 16, 16)), mstype.float32) - _executor.compile(net, image) + _cell_graph_executor.compile(net, image) def test_compile_4d_central_crop(): central_fraction = 0.5 net = CentralCropNet(central_fraction) image = Tensor(np.random.random((8, 3, 16, 16)), mstype.float32) - _executor.compile(net, image) + _cell_graph_executor.compile(net, image) def test_central_fraction_bool(): @@ -71,4 +71,4 @@ def test_central_crop_invalid_5d_input(): net = CentralCropNet(central_fraction=0.5) with pytest.raises(ValueError): - _executor.compile(net, invalid_image) + _cell_graph_executor.compile(net, invalid_image) diff --git a/tests/ut/python/nn/test_dense.py b/tests/ut/python/nn/test_dense.py index 57f4ed80830..29fb3281d57 100644 --- a/tests/ut/python/nn/test_dense.py +++ 
b/tests/ut/python/nn/test_dense.py @@ -20,7 +20,7 @@ import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor from mindspore.ops import operations as P -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from ..ut_filter import non_graph_engine @@ -115,12 +115,12 @@ def test_compile(): bias = Tensor(np.random.randint(0, 255, [8]).astype(np.float32)) net = Net(64, 8, weight=weight, bias=bias) input_data = Tensor(np.random.randint(0, 255, [128, 64]).astype(np.float32)) - _executor.compile(net, input_data) + _cell_graph_executor.compile(net, input_data) # training net_train = Net(64, 8, weight=weight, bias=bias) net_train.set_train() - _executor.compile(net_train, input_data) + _cell_graph_executor.compile(net_train, input_data) def test_compile_2(): @@ -129,12 +129,12 @@ def test_compile_2(): weight = Tensor(np.random.randint(0, 255, [8, 64]).astype(np.float32)) net = Net(64, 8, weight=weight, has_bias=False) input_data = Tensor(np.random.randint(0, 255, [128, 64]).astype(np.float32)) - _executor.compile(net, input_data) + _cell_graph_executor.compile(net, input_data) # training net_train = Net(64, 8, weight=weight, has_bias=False) net_train.set_train() - _executor.compile(net_train, input_data) + _cell_graph_executor.compile(net_train, input_data) def test_compile_3(): @@ -144,12 +144,12 @@ def test_compile_3(): context.set_context(mode=context.GRAPH_MODE) net = Net(128, 10) input_data = Tensor(np.random.randint(0, 255, [128, 128]).astype(np.float32)) - _executor.compile(net, input_data) + _cell_graph_executor.compile(net, input_data) # training net_train = Net(128, 10) net_train.set_train() - _executor.compile(net_train, input_data) + _cell_graph_executor.compile(net_train, input_data) def test_compile_4(): @@ -159,9 +159,9 @@ def test_compile_4(): context.set_context(mode=context.GRAPH_MODE) net = Net(128, 10, has_bias=False) input_data = Tensor(np.random.randint(0, 255, [128, 128]).astype(np.float32)) - _executor.compile(net, input_data) + _cell_graph_executor.compile(net, input_data) # training net_train = Net(128, 10, has_bias=False) net_train.set_train() - _executor.compile(net_train, input_data) + _cell_graph_executor.compile(net_train, input_data) diff --git a/tests/ut/python/nn/test_flatten.py b/tests/ut/python/nn/test_flatten.py index c4b4cbefc37..70846188985 100644 --- a/tests/ut/python/nn/test_flatten.py +++ b/tests/ut/python/nn/test_flatten.py @@ -19,7 +19,7 @@ import numpy as np import mindspore.nn as nn from mindspore import Tensor -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor class Net(nn.Cell): @@ -34,4 +34,4 @@ class Net(nn.Cell): def test_compile(): net = Net() input_data = Tensor(np.array([[1.2, 2.1], [2.2, 3.2]]).astype(np.float32)) - _executor.compile(net, input_data) + _cell_graph_executor.compile(net, input_data) diff --git a/tests/ut/python/nn/test_image_gradients.py b/tests/ut/python/nn/test_image_gradients.py index 7971a8deb07..b33605450d0 100644 --- a/tests/ut/python/nn/test_image_gradients.py +++ b/tests/ut/python/nn/test_image_gradients.py @@ -20,7 +20,7 @@ import mindspore.common.dtype as mstype import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.common.api import ms_function context.set_context(device_target="Ascend") @@ -40,7 +40,7 @@ def test_compile(): # input shape 1 x 1 x 
2 x 2 image = Tensor(np.array([[[[1, 2], [3, 4]]]]), dtype=mstype.int32) net = Net() - _executor.compile(net, image) + _cell_graph_executor.compile(net, image) def test_compile_multi_channel(): @@ -51,7 +51,7 @@ def test_compile_multi_channel(): [[[5, 10], [15, 20]], [[25, 30], [35, 40]]], [[[10, 20], [30, 40]], [[50, 60], [70, 80]]]]), dtype=dtype) net = Net() - _executor.compile(net, image) + _cell_graph_executor.compile(net, image) def test_invalid_5d_input(): @@ -59,4 +59,4 @@ def test_invalid_5d_input(): image = Tensor(np.random.random([4, 1, 16, 16, 1]), dtype=dtype) net = Net() with pytest.raises(ValueError): - _executor.compile(net, image) + _cell_graph_executor.compile(net, image) diff --git a/tests/ut/python/nn/test_learning_rate_schedule.py b/tests/ut/python/nn/test_learning_rate_schedule.py index 390e8858990..c540142491d 100644 --- a/tests/ut/python/nn/test_learning_rate_schedule.py +++ b/tests/ut/python/nn/test_learning_rate_schedule.py @@ -17,7 +17,7 @@ import pytest from mindspore import Tensor from mindspore.nn import learning_rate_schedule as lr_schedules -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor import mindspore.common.dtype as mstype @@ -124,34 +124,34 @@ class TestInit: def test_exponential_decay(): lr_schedule = lr_schedules.ExponentialDecayLR(learning_rate, decay_rate, decay_steps, True) - _executor.compile(lr_schedule, global_step) + _cell_graph_executor.compile(lr_schedule, global_step) def test_enatural_exp_decay(): lr_schedule = lr_schedules.NaturalExpDecayLR(learning_rate, decay_rate, decay_steps, True) - _executor.compile(lr_schedule, global_step) + _cell_graph_executor.compile(lr_schedule, global_step) def test_inverse_decay(): lr_schedule = lr_schedules.InverseDecayLR(learning_rate, decay_rate, decay_steps, True) - _executor.compile(lr_schedule, global_step) + _cell_graph_executor.compile(lr_schedule, global_step) def test_cosine_decay(): lr_schedule = lr_schedules.CosineDecayLR(min_lr, max_lr, decay_steps) - _executor.compile(lr_schedule, global_step) + _cell_graph_executor.compile(lr_schedule, global_step) def test_polynomial_decay(): lr_schedule = lr_schedules.PolynomialDecayLR(learning_rate, end_learning_rate, decay_steps, power) - _executor.compile(lr_schedule, global_step) + _cell_graph_executor.compile(lr_schedule, global_step) def test_polynomial_decay2(): lr_schedule = lr_schedules.PolynomialDecayLR(learning_rate, end_learning_rate, decay_steps, power, True) - _executor.compile(lr_schedule, global_step) + _cell_graph_executor.compile(lr_schedule, global_step) def test_warmup(): lr_schedule = lr_schedules.WarmUpLR(learning_rate, warmup_steps) - _executor.compile(lr_schedule, global_step) + _cell_graph_executor.compile(lr_schedule, global_step) diff --git a/tests/ut/python/nn/test_msssim.py b/tests/ut/python/nn/test_msssim.py index b85d13c927e..5bd98a9b41e 100644 --- a/tests/ut/python/nn/test_msssim.py +++ b/tests/ut/python/nn/test_msssim.py @@ -21,7 +21,7 @@ import pytest import mindspore.common.dtype as mstype import mindspore.nn as nn from mindspore import Tensor -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor _MSSSIM_WEIGHTS = (0.0448, 0.2856, 0.3001, 0.2363, 0.1333) @@ -39,7 +39,7 @@ def test_compile(): net = MSSSIMNet(power_factors=factors) img1 = Tensor(np.random.random((8, 3, 128, 128))) img2 = Tensor(np.random.random((8, 3, 128, 128))) - _executor.compile(net, img1, img2) + _cell_graph_executor.compile(net, img1, img2) def 
test_compile_grayscale(): @@ -48,7 +48,7 @@ def test_compile_grayscale(): net = MSSSIMNet(max_val=max_val, power_factors=factors) img1 = Tensor(np.random.randint(0, 256, (8, 3, 128, 128), np.uint8)) img2 = Tensor(np.random.randint(0, 256, (8, 3, 128, 128), np.uint8)) - _executor.compile(net, img1, img2) + _cell_graph_executor.compile(net, img1, img2) def test_msssim_max_val_negative(): @@ -102,7 +102,7 @@ def test_msssim_different_shape(): img2 = Tensor(np.random.random(shape_2)) net = MSSSIMNet(power_factors=factors) with pytest.raises(ValueError): - _executor.compile(net, img1, img2) + _cell_graph_executor.compile(net, img1, img2) def test_msssim_different_dtype(): @@ -113,7 +113,7 @@ def test_msssim_different_dtype(): img2 = Tensor(np.random.random((8, 3, 128, 128)), dtype=dtype_2) net = MSSSIMNet(power_factors=factors) with pytest.raises(TypeError): - _executor.compile(net, img1, img2) + _cell_graph_executor.compile(net, img1, img2) def test_msssim_invalid_5d_input(): @@ -128,8 +128,8 @@ def test_msssim_invalid_5d_input(): net = MSSSIMNet(power_factors=factors) with pytest.raises(ValueError): - _executor.compile(net, invalid_img1, img2) + _cell_graph_executor.compile(net, invalid_img1, img2) with pytest.raises(ValueError): - _executor.compile(net, img1, invalid_img2) + _cell_graph_executor.compile(net, img1, invalid_img2) with pytest.raises(ValueError): - _executor.compile(net, invalid_img1, invalid_img2) + _cell_graph_executor.compile(net, invalid_img1, invalid_img2) diff --git a/tests/ut/python/nn/test_nn_embedding.py b/tests/ut/python/nn/test_nn_embedding.py index 8a8dcef841c..39d25e475d3 100755 --- a/tests/ut/python/nn/test_nn_embedding.py +++ b/tests/ut/python/nn/test_nn_embedding.py @@ -18,7 +18,7 @@ import pytest from mindspore import Tensor from mindspore.common import dtype -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.nn import Embedding, MultiFieldEmbeddingLookup from ..ut_filter import non_graph_engine @@ -27,21 +27,21 @@ from ..ut_filter import non_graph_engine def test_check_embedding_1(): net = Embedding(20000, 768, False) input_data = Tensor(np.ones([8, 128]), dtype.int32) - _executor.compile(net, input_data) + _cell_graph_executor.compile(net, input_data) @non_graph_engine def test_check_embedding_2(): net = Embedding(20000, 768, True) input_data = Tensor(np.ones([8, 128]), dtype.int32) - _executor.compile(net, input_data) + _cell_graph_executor.compile(net, input_data) @non_graph_engine def test_check_embedding_3(): net = Embedding(20000, 768, True, "zeros") input_data = Tensor(np.ones([8, 128]), dtype.int32) - _executor.compile(net, input_data) + _cell_graph_executor.compile(net, input_data) def compile_multi_field_embedding(shape_id, shape_value, shape_field, @@ -50,7 +50,7 @@ def compile_multi_field_embedding(shape_id, shape_value, shape_field, input_data = Tensor(np.ones(shape_id), type_id) input_value = Tensor(np.ones(shape_value), type_value) input_field = Tensor(np.ones(shape_field), type_field) - _executor.compile(net, input_data, input_value, input_field) + _cell_graph_executor.compile(net, input_data, input_value, input_field) @non_graph_engine diff --git a/tests/ut/python/nn/test_norm.py b/tests/ut/python/nn/test_norm.py index a46ff5e3e0a..f1628b017fb 100644 --- a/tests/ut/python/nn/test_norm.py +++ b/tests/ut/python/nn/test_norm.py @@ -17,7 +17,7 @@ import numpy as np import mindspore.nn as nn from mindspore import Tensor -from mindspore.common.api import _executor +from 
mindspore.common.api import _cell_graph_executor from ..ut_filter import non_graph_engine @@ -34,4 +34,4 @@ class NormNet(nn.Cell): def test_compile_norm(): net = NormNet() x = Tensor(np.array([2.0, 1.0])) - _executor.compile(net, x) + _cell_graph_executor.compile(net, x) diff --git a/tests/ut/python/nn/test_pooling.py b/tests/ut/python/nn/test_pooling.py index 38b0b55e556..61acf1aef9e 100644 --- a/tests/ut/python/nn/test_pooling.py +++ b/tests/ut/python/nn/test_pooling.py @@ -19,7 +19,7 @@ import numpy as np import mindspore.nn as nn from mindspore import Tensor -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor class AvgNet(nn.Cell): @@ -36,7 +36,7 @@ class AvgNet(nn.Cell): def test_compile_avg(): net = AvgNet(3, 1) x = Tensor(np.ones([1, 3, 16, 50]).astype(np.float32)) - _executor.compile(net, x) + _cell_graph_executor.compile(net, x) class MaxNet(nn.Cell): @@ -58,7 +58,7 @@ class MaxNet(nn.Cell): def test_compile_max(): net = MaxNet(3, stride=1, padding=0) x = Tensor(np.random.randint(0, 255, [1, 3, 6, 6]).astype(np.float32)) - _executor.compile(net, x) + _cell_graph_executor.compile(net, x) class Avg1dNet(nn.Cell): @@ -75,4 +75,4 @@ class Avg1dNet(nn.Cell): def test_avg1d(): net = Avg1dNet(6, 1) input_ = Tensor(np.random.randint(0, 255, [1, 3, 6]).astype(np.float32)) - _executor.compile(net, input_) + _cell_graph_executor.compile(net, input_) diff --git a/tests/ut/python/nn/test_psnr.py b/tests/ut/python/nn/test_psnr.py index b045516442a..addca7edb0e 100644 --- a/tests/ut/python/nn/test_psnr.py +++ b/tests/ut/python/nn/test_psnr.py @@ -21,7 +21,7 @@ import pytest import mindspore.nn as nn from mindspore import Tensor from mindspore.common import dtype as mstype -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor class PSNRNet(nn.Cell): @@ -38,7 +38,7 @@ def test_compile_psnr(): net = PSNRNet(max_val) img1 = Tensor(np.random.random((8, 3, 16, 16))) img2 = Tensor(np.random.random((8, 3, 16, 16))) - _executor.compile(net, img1, img2) + _cell_graph_executor.compile(net, img1, img2) def test_compile_psnr_grayscale(): @@ -46,7 +46,7 @@ def test_compile_psnr_grayscale(): net = PSNRNet(max_val) img1 = Tensor(np.random.randint(0, 256, (8, 1, 16, 16), np.uint8)) img2 = Tensor(np.random.randint(0, 256, (8, 1, 16, 16), np.uint8)) - _executor.compile(net, img1, img2) + _cell_graph_executor.compile(net, img1, img2) def test_psnr_max_val_negative(): @@ -74,7 +74,7 @@ def test_psnr_different_shape(): img2 = Tensor(np.random.random(shape_2)) net = PSNRNet() with pytest.raises(ValueError): - _executor.compile(net, img1, img2) + _cell_graph_executor.compile(net, img1, img2) def test_psnr_different_dtype(): @@ -84,7 +84,7 @@ def test_psnr_different_dtype(): img2 = Tensor(np.random.random((8, 3, 16, 16)), dtype=dtype_2) net = PSNRNet() with pytest.raises(TypeError): - _executor.compile(net, img1, img2) + _cell_graph_executor.compile(net, img1, img2) def test_psnr_invalid_5d_input(): @@ -98,8 +98,8 @@ def test_psnr_invalid_5d_input(): net = PSNRNet() with pytest.raises(ValueError): - _executor.compile(net, invalid_img1, img2) + _cell_graph_executor.compile(net, invalid_img1, img2) with pytest.raises(ValueError): - _executor.compile(net, img1, invalid_img2) + _cell_graph_executor.compile(net, img1, invalid_img2) with pytest.raises(ValueError): - _executor.compile(net, invalid_img1, invalid_img2) + _cell_graph_executor.compile(net, invalid_img1, invalid_img2) diff --git a/tests/ut/python/nn/test_ssim.py 
b/tests/ut/python/nn/test_ssim.py index 8e6866cb593..445fd8be41b 100644 --- a/tests/ut/python/nn/test_ssim.py +++ b/tests/ut/python/nn/test_ssim.py @@ -21,7 +21,7 @@ import pytest import mindspore.common.dtype as mstype import mindspore.nn as nn from mindspore import Tensor -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor class SSIMNet(nn.Cell): @@ -37,7 +37,7 @@ def test_compile(): net = SSIMNet() img1 = Tensor(np.random.random((8, 3, 16, 16)), mstype.float32) img2 = Tensor(np.random.random((8, 3, 16, 16)), mstype.float32) - _executor.compile(net, img1, img2) + _cell_graph_executor.compile(net, img1, img2) def test_ssim_max_val_negative(): @@ -85,7 +85,7 @@ def test_ssim_different_shape(): img2 = Tensor(np.random.random(shape_2)) net = SSIMNet() with pytest.raises(TypeError): - _executor.compile(net, img1, img2) + _cell_graph_executor.compile(net, img1, img2) def test_ssim_different_dtype(): @@ -95,7 +95,7 @@ def test_ssim_different_dtype(): img2 = Tensor(np.random.random((8, 3, 16, 16)), dtype=dtype_2) net = SSIMNet() with pytest.raises(TypeError): - _executor.compile(net, img1, img2) + _cell_graph_executor.compile(net, img1, img2) def test_ssim_invalid_5d_input(): @@ -109,8 +109,8 @@ def test_ssim_invalid_5d_input(): net = SSIMNet() with pytest.raises(TypeError): - _executor.compile(net, invalid_img1, img2) + _cell_graph_executor.compile(net, invalid_img1, img2) with pytest.raises(TypeError): - _executor.compile(net, img1, invalid_img2) + _cell_graph_executor.compile(net, img1, invalid_img2) with pytest.raises(TypeError): - _executor.compile(net, invalid_img1, invalid_img2) + _cell_graph_executor.compile(net, invalid_img1, invalid_img2) diff --git a/tests/ut/python/nn/test_transformer.py b/tests/ut/python/nn/test_transformer.py index a2f5e552813..256bc37fb92 100644 --- a/tests/ut/python/nn/test_transformer.py +++ b/tests/ut/python/nn/test_transformer.py @@ -19,7 +19,7 @@ from mindspore import Tensor from mindspore.common import dtype from mindspore.parallel.nn import MultiHeadAttention, FeedForward, TransformerEncoderLayer, TransformerEncoder, \ TransformerDecoder, TransformerDecoderLayer, Transformer, CrossEntropyLoss, AttentionMask, FixedSparseAttention -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor def test_transformer_encoder_only(): @@ -34,7 +34,7 @@ def test_transformer_encoder_only(): encoder_input_value = Tensor(np.ones((2, 20, 64)), dtype.float32) encoder_input_mask = Tensor(np.ones((2, 20, 20)), dtype.float16) - _executor.compile(model, encoder_input_value, encoder_input_mask) + _cell_graph_executor.compile(model, encoder_input_value, encoder_input_mask) def test_transformer_encoder_log_softmax(): @@ -51,7 +51,7 @@ def test_transformer_encoder_log_softmax(): encoder_input_value = Tensor(np.ones((2, 20, 64)), dtype.float32) encoder_input_mask = Tensor(np.ones((2, 20, 20)), dtype.float16) - _executor.compile(model, encoder_input_value, encoder_input_mask) + _cell_graph_executor.compile(model, encoder_input_value, encoder_input_mask) def test_transformer_encoder_leakyrelu(): @@ -67,7 +67,7 @@ def test_transformer_encoder_leakyrelu(): encoder_input_value = Tensor(np.ones((2, 20, 64)), dtype.float32) encoder_input_mask = Tensor(np.ones((2, 20, 20)), dtype.float16) - _executor.compile(model, encoder_input_value, encoder_input_mask) + _cell_graph_executor.compile(model, encoder_input_value, encoder_input_mask) def test_transformer_encoder_logsigmoid(): @@ -83,7 +83,7 @@ def 
test_transformer_encoder_logsigmoid(): encoder_input_value = Tensor(np.ones((2, 20, 64)), dtype.float32) encoder_input_mask = Tensor(np.ones((2, 20, 20)), dtype.float16) - _executor.compile(model, encoder_input_value, encoder_input_mask) + _cell_graph_executor.compile(model, encoder_input_value, encoder_input_mask) def test_encoder_and_decoder(): @@ -102,10 +102,10 @@ def test_encoder_and_decoder(): decoder_input_mask = Tensor(np.ones((2, 10, 10)), dtype.float16) memory_mask = Tensor(np.ones((2, 10, 20)), dtype.float16) - _executor.compile(model, encoder_input_value, encoder_input_mask, - decoder_input_value, - decoder_input_mask, - memory_mask) + _cell_graph_executor.compile(model, encoder_input_value, encoder_input_mask, + decoder_input_value, + decoder_input_mask, + memory_mask) def test_transformer_encoder(): @@ -119,9 +119,9 @@ def test_transformer_encoder(): encoder_input_value = Tensor(np.ones((2, 16, 8)), dtype.float32) encoder_input_mask = Tensor(np.ones((2, 16, 16)), dtype.float16) - _executor.compile(model, - encoder_input_value, - encoder_input_mask) + _cell_graph_executor.compile(model, + encoder_input_value, + encoder_input_mask) def test_transformer_encoder_layer(): @@ -131,9 +131,9 @@ def test_transformer_encoder_layer(): encoder_input_value = Tensor(np.ones((2, 16, 8)), dtype.float32) encoder_input_mask = Tensor(np.ones((2, 16, 16)), dtype.float16) - _executor.compile(model, - encoder_input_value, - encoder_input_mask) + _cell_graph_executor.compile(model, + encoder_input_value, + encoder_input_mask) def test_transformer_encoder_layer_post_ture(): @@ -145,9 +145,9 @@ def test_transformer_encoder_layer_post_ture(): encoder_input_value = Tensor(np.ones((2, 16, 8)), dtype.float32) encoder_input_mask = Tensor(np.ones((2, 16, 16)), dtype.float16) - _executor.compile(model, - encoder_input_value, - encoder_input_mask) + _cell_graph_executor.compile(model, + encoder_input_value, + encoder_input_mask) def test_transformer_decoder(): @@ -165,9 +165,9 @@ def test_transformer_decoder(): decoder_input_mask = Tensor(np.ones((2, 10, 10)), dtype.float16) memory_mask = Tensor(np.ones((2, 10, 20)), dtype.float16) - _executor.compile(model, decoder_input_value, decoder_input_mask, - encoder_input_value, - memory_mask) + _cell_graph_executor.compile(model, decoder_input_value, decoder_input_mask, + encoder_input_value, + memory_mask) def test_transformer_decoder_layer(): @@ -185,9 +185,9 @@ def test_transformer_decoder_layer(): decoder_input_mask = Tensor(np.ones((2, 10, 10)), dtype.float16) memory_mask = Tensor(np.ones((2, 10, 20)), dtype.float16) - _executor.compile(model, decoder_input_value, decoder_input_mask, - encoder_input_value, - memory_mask) + _cell_graph_executor.compile(model, decoder_input_value, decoder_input_mask, + encoder_input_value, + memory_mask) def test_multihead_attention(): @@ -200,7 +200,7 @@ def test_multihead_attention(): to_tensor = Tensor(np.ones((2, 20, 15)), dtype.float16) attention_mask = Tensor(np.ones((2, 20, 20)), dtype.float16) - _executor.compile(model, from_tensor, to_tensor, to_tensor, attention_mask) + _cell_graph_executor.compile(model, from_tensor, to_tensor, to_tensor, attention_mask) def test_multihead_attention_wrong_batch(): @@ -214,7 +214,7 @@ def test_multihead_attention_wrong_batch(): attention_mask = Tensor(np.ones((3, 20, 20)), dtype.float16) with pytest.raises(ValueError): - _executor.compile(model, from_tensor, to_tensor, to_tensor, attention_mask) + _cell_graph_executor.compile(model, from_tensor, to_tensor, to_tensor, attention_mask) 
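Every call-site change in the hunks above is mechanical: the private _executor alias is swapped for _cell_graph_executor while the compile(obj, *args) call shape stays the same. A minimal sketch of the pattern these tests follow, with a hypothetical TinyNet standing in for whatever Cell a given test builds (TinyNet is not part of this patch):

    import numpy as np
    import mindspore.nn as nn
    from mindspore import Tensor
    from mindspore.common.api import _cell_graph_executor  # previously imported as _executor

    class TinyNet(nn.Cell):
        # Hypothetical stand-in for the networks constructed in these tests.
        def __init__(self):
            super(TinyNet, self).__init__()
            self.relu = nn.ReLU()

        def construct(self, x):
            return self.relu(x)

    net = TinyNet()
    input_data = Tensor(np.ones([2, 3]).astype(np.float32))
    # Same call shape as before the rename: compile(network, *inputs).
    _cell_graph_executor.compile(net, input_data)
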
def test_feedforward_layer(): @@ -224,7 +224,7 @@ def test_feedforward_layer(): hidden_act='relu') tensor = Tensor(np.ones((2, 20, 15)), dtype.float32) - _executor.compile(model, tensor) + _cell_graph_executor.compile(model, tensor) def test_cross_entroy(): @@ -233,13 +233,13 @@ def test_cross_entroy(): labels_np = np.array([1]).astype(np.int32) input_mask = Tensor(np.ones(1).astype(np.float32)) labels = Tensor(labels_np) - _executor.compile(model, logits, labels, input_mask) + _cell_graph_executor.compile(model, logits, labels, input_mask) def test_attention_mask(): model = AttentionMask(seq_length=19) inputs = Tensor(np.ones((2, 19)), dtype.float32) - _executor.compile(model, inputs) + _cell_graph_executor.compile(model, inputs) def test_sparse_attention(): @@ -252,4 +252,4 @@ def test_sparse_attention(): k = Tensor(np.ones((2, 1024, 512)), dtype.float16) v = Tensor(np.ones((2, 1024, 512)), dtype.float16) mask = Tensor(np.ones((2, 1024)), dtype.float32) - _executor.compile(model, q, k, v, mask) + _cell_graph_executor.compile(model, q, k, v, mask) diff --git a/tests/ut/python/ops/test_ops_check.py b/tests/ut/python/ops/test_ops_check.py index efbb1bb8307..2fe6ca01ed5 100644 --- a/tests/ut/python/ops/test_ops_check.py +++ b/tests/ut/python/ops/test_ops_check.py @@ -21,7 +21,7 @@ import pytest import mindspore.context as context from mindspore import Tensor from mindspore import nn -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.ops import operations as P from ..ut_filter import non_graph_engine from ....mindspore_test_framework.mindspore_test import mindspore_test @@ -63,7 +63,7 @@ def test_net_without_construct(): """ test_net_without_construct """ net = NetMissConstruct() inp = Tensor(np.ones([1, 1, 32, 32]).astype(np.float32)) - _executor.compile(net, inp) + _cell_graph_executor.compile(net, inp) class NetWithRaise(nn.Cell): @@ -83,7 +83,7 @@ def test_net_with_raise(): net = NetWithRaise() inp = Tensor(np.ones([1, 1, 32, 32]).astype(np.float32)) with pytest.raises(RuntimeError) as err: - _executor.compile(net, inp) + _cell_graph_executor.compile(net, inp) assert "Unsupported syntax 'Raise'." 
in str(err.value) diff --git a/tests/ut/python/optimizer/test_optimizer_with_parameter_groups.py b/tests/ut/python/optimizer/test_optimizer_with_parameter_groups.py index fccccafb0f9..80ff0c06eee 100644 --- a/tests/ut/python/optimizer/test_optimizer_with_parameter_groups.py +++ b/tests/ut/python/optimizer/test_optimizer_with_parameter_groups.py @@ -18,7 +18,7 @@ import pytest import mindspore.common.dtype as mstype import mindspore.nn as nn from mindspore import context -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.common.tensor import Tensor from mindspore.nn import TrainOneStepCell, WithLossCell from mindspore.nn.optim import Momentum, SGD, RMSProp, Adam @@ -80,7 +80,7 @@ def test_group_lr(): net_with_loss = WithLossCell(net, loss) train_network = TrainOneStepCell(net_with_loss, opt) - _executor.compile(train_network, inputs, label) + _cell_graph_executor.compile(train_network, inputs, label) def test_group_dynamic_1(): @@ -114,7 +114,7 @@ def test_group_dynamic_1(): net_with_loss = WithLossCell(net, loss) train_network = TrainOneStepCell(net_with_loss, opt) - _executor.compile(train_network, inputs, label) + _cell_graph_executor.compile(train_network, inputs, label) def test_group_dynamic_2(): @@ -144,7 +144,7 @@ def test_group_dynamic_2(): net_with_loss = WithLossCell(net, loss) train_network = TrainOneStepCell(net_with_loss, opt) - _executor.compile(train_network, inputs, label) + _cell_graph_executor.compile(train_network, inputs, label) def test_group_dynamic_no_same_size(): @@ -214,7 +214,7 @@ def test_weight_decay(): net_with_loss = WithLossCell(net, loss) train_network = TrainOneStepCell(net_with_loss, opt) - _executor.compile(train_network, inputs, label) + _cell_graph_executor.compile(train_network, inputs, label) def test_group_repeat_param(): diff --git a/tests/ut/python/optimizer/test_python_pass.py b/tests/ut/python/optimizer/test_python_pass.py index 610f3dd8272..a492fe7df79 100644 --- a/tests/ut/python/optimizer/test_python_pass.py +++ b/tests/ut/python/optimizer/test_python_pass.py @@ -23,7 +23,7 @@ from mindspore.ops import _constants as Constants from mindspore.graph_utils.python_pass import register_pass, unregister_pass, set_renorm, gen_new_parameter,\ cancel_new_parameter, set_reopt from mindspore.common.api import _generate_pip_args -from mindspore._c_expression import generate_key, Executor_ +from mindspore._c_expression import generate_arguments_key, GraphExecutor_ from mindspore.graph_utils.graph_pattern import OneOf, Prim, Call, NoneOf, Any, NewTensor, NewParameter, Imm context.set_context(mode=context.GRAPH_MODE) @@ -31,13 +31,10 @@ context.set_context(mode=context.GRAPH_MODE) def get_func_graph(obj, *args, phase="validate"): args_names, args_list = _generate_pip_args(obj, *args) dic = dict(zip(args_names, args_list)) - key = generate_key(phase, dic) - phase_prefix = str(key[1]) - if phase == 'export': - phase = phase + '.' + phase_prefix + '.' + str(obj.create_time) - else: - phase = phase_prefix + phase + '.' + str(obj.create_time) - _executor = Executor_.get_instance() + key = generate_arguments_key(dic) + obj.arguments_key = str(key) + phase = phase + '.' + str(obj.create_time) + '.' + str(id(obj)) + '.' 
+ obj.arguments_key + _executor = GraphExecutor_.get_instance() _executor.compile(obj, args_list, phase, False, "") return _executor.get_func_graph(phase) diff --git a/tests/ut/python/parallel/test_adafactor.py b/tests/ut/python/parallel/test_adafactor.py index 101c3cd88d9..db04ef748a6 100644 --- a/tests/ut/python/parallel/test_adafactor.py +++ b/tests/ut/python/parallel/test_adafactor.py @@ -16,7 +16,7 @@ import numpy as np import mindspore as ms from mindspore import context, Tensor, Parameter -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.nn import Cell, TrainOneStepCell from mindspore.nn.optim.adafactor import AdaFactor from mindspore.ops import operations as P @@ -53,7 +53,7 @@ def compile_net(net): train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() train_net.set_train() - _executor.compile(train_net, _x, _b) + _cell_graph_executor.compile(train_net, _x, _b) context.reset_auto_parallel_context() diff --git a/tests/ut/python/parallel/test_add_relu_redistribution.py b/tests/ut/python/parallel/test_add_relu_redistribution.py index b7e09621b7f..92de4b48e6d 100644 --- a/tests/ut/python/parallel/test_add_relu_redistribution.py +++ b/tests/ut/python/parallel/test_add_relu_redistribution.py @@ -17,7 +17,7 @@ import mindspore as ms import mindspore.nn as nn from mindspore import Tensor from mindspore import context -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.ops import composite as C from mindspore.ops import operations as P from tests.ut.python.ops.test_math_ops import VirtualLoss @@ -60,7 +60,7 @@ class Grad(nn.Cell): def compile_net(net, x, y): net.set_auto_parallel() net.set_train() - _executor.compile(net, x, y) + _cell_graph_executor.compile(net, x, y) def test_add_relu_stride_slice(): diff --git a/tests/ut/python/parallel/test_allreduce_fusion.py b/tests/ut/python/parallel/test_allreduce_fusion.py index b7147945a94..717fc64be40 100644 --- a/tests/ut/python/parallel/test_allreduce_fusion.py +++ b/tests/ut/python/parallel/test_allreduce_fusion.py @@ -18,7 +18,7 @@ import pytest import mindspore as ms import mindspore.nn as nn from mindspore import Tensor, context -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits from mindspore.nn.optim.momentum import Momentum from mindspore.parallel import _cost_model_context as cost_model_context @@ -120,7 +120,7 @@ def train_common(net): model = Model(net, loss, opt) model.train(epoch_size, dataset, dataset_sink_mode=False) - allreduce_fusion_dict = _executor._get_allreduce_fusion(model._train_network) + allreduce_fusion_dict = _cell_graph_executor._get_allreduce_fusion(model._train_network) print(allreduce_fusion_dict) return allreduce_fusion_dict diff --git a/tests/ut/python/parallel/test_alltoall.py b/tests/ut/python/parallel/test_alltoall.py index df7537342d8..2f5f2fbfb78 100644 --- a/tests/ut/python/parallel/test_alltoall.py +++ b/tests/ut/python/parallel/test_alltoall.py @@ -20,7 +20,7 @@ import mindspore as ms import mindspore.nn as nn from mindspore import Tensor from mindspore import context -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.common.parameter import Parameter from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits from mindspore.nn.optim.momentum import Momentum @@ -97,7 +97,7 @@ def 
all_to_all_common(strategy1): model = Model(net, loss, opt) model.train(epoch_size, dataset, dataset_sink_mode=False) - strategys = _executor._get_shard_strategy(model._train_network) + strategys = _cell_graph_executor._get_shard_strategy(model._train_network) return strategys @@ -137,7 +137,7 @@ def test_all_to_all_success(): return out net = Net() - _executor.compile(net, _x1) + _cell_graph_executor.compile(net, _x1) def test_all_to_all_invalid_split_count_value_failed(): @@ -159,7 +159,7 @@ def test_all_to_all_invalid_split_count_value_failed(): with pytest.raises(ValueError): net = Net() - _executor.compile(net, _x1) + _cell_graph_executor.compile(net, _x1) def test_all_to_all_invalid_split_count_type_failed(): @@ -181,7 +181,7 @@ def test_all_to_all_invalid_split_count_type_failed(): with pytest.raises(TypeError): net = Net() - _executor.compile(net, _x1) + _cell_graph_executor.compile(net, _x1) def test_all_to_all_invalid_split_dim_value_failed(): @@ -203,7 +203,7 @@ def test_all_to_all_invalid_split_dim_value_failed(): with pytest.raises(IndexError): net = Net() - _executor.compile(net, _x1) + _cell_graph_executor.compile(net, _x1) def test_all_to_all_invalid_split_dim_type_failed(): @@ -225,7 +225,7 @@ def test_all_to_all_invalid_split_dim_type_failed(): with pytest.raises(TypeError): net = Net() - _executor.compile(net, _x1) + _cell_graph_executor.compile(net, _x1) def test_all_to_all_invalid_concat_dim_value_failed(): @@ -247,7 +247,7 @@ def test_all_to_all_invalid_concat_dim_value_failed(): with pytest.raises(IndexError): net = Net() - _executor.compile(net, _x1) + _cell_graph_executor.compile(net, _x1) def test_all_to_all_invalid_concat_dim_type_failed(): @@ -269,7 +269,7 @@ def test_all_to_all_invalid_concat_dim_type_failed(): with pytest.raises(TypeError): net = Net() - _executor.compile(net, _x1) + _cell_graph_executor.compile(net, _x1) def test_all_to_all_invalid_split_count_cannot_be_divisible_failed(): @@ -291,7 +291,7 @@ def test_all_to_all_invalid_split_count_cannot_be_divisible_failed(): with pytest.raises(ValueError): net = Net() - _executor.compile(net, _x1) + _cell_graph_executor.compile(net, _x1) def test_all_to_all_invalid_group_type_failed(): @@ -313,7 +313,7 @@ def test_all_to_all_invalid_group_type_failed(): with pytest.raises(TypeError): net = Net() - _executor.compile(net, _x1) + _cell_graph_executor.compile(net, _x1) if __name__ == '__main__': diff --git a/tests/ut/python/parallel/test_arithmetic.py b/tests/ut/python/parallel/test_arithmetic.py index 85d73ea8bb6..89efe20884b 100644 --- a/tests/ut/python/parallel/test_arithmetic.py +++ b/tests/ut/python/parallel/test_arithmetic.py @@ -17,7 +17,7 @@ import numpy as np import mindspore as ms import mindspore.nn as nn from mindspore import Parameter, Tensor, context -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.ops import composite as C from mindspore.ops import operations as P from tests.ut.python.ops.test_math_ops import VirtualLoss @@ -49,7 +49,7 @@ class GradWrap(nn.Cell): def compile_net(net, x, y, b): net.set_auto_parallel() net.set_train() - _executor.compile(net, x, y, b) + _cell_graph_executor.compile(net, x, y, b) def test_matmul_sub(): @@ -651,7 +651,7 @@ def test_assign_sub(): def compile_sub_net(net, x): net.set_auto_parallel() net.set_train() - _executor.compile(net, x) + _cell_graph_executor.compile(net, x) context.set_auto_parallel_context(device_num=64, global_rank=15) 
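Assembled from the test_python_pass.py hunk above, the rewritten get_func_graph helper shows how compile phases are now keyed: generate_key(phase, dic) and its phase prefix are gone, and the phase string instead embeds the object's create_time, its id(), and an arguments key computed from the inputs alone:

    from mindspore.common.api import _generate_pip_args
    from mindspore._c_expression import generate_arguments_key, GraphExecutor_

    def get_func_graph(obj, *args, phase="validate"):
        args_names, args_list = _generate_pip_args(obj, *args)
        dic = dict(zip(args_names, args_list))
        key = generate_arguments_key(dic)
        obj.arguments_key = str(key)
        # Phase is keyed by creation time, object identity and the arguments key.
        phase = phase + '.' + str(obj.create_time) + '.' + str(id(obj)) + '.' + obj.arguments_key
        _executor = GraphExecutor_.get_instance()
        _executor.compile(obj, args_list, phase, False, "")
        return _executor.get_func_graph(phase)

Since the key is derived only from the arguments, it is the object's identity and creation time that keep phases of different Cells distinct.
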
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") @@ -699,7 +699,7 @@ def test_assign_add(): def compile_sub_net(net, x): net.set_auto_parallel() net.set_train() - _executor.compile(net, x) + _cell_graph_executor.compile(net, x) context.set_auto_parallel_context(device_num=64, global_rank=15) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") @@ -747,7 +747,7 @@ def test_assign(): def compile_sub_net(net, x): net.set_auto_parallel() net.set_train() - _executor.compile(net, x) + _cell_graph_executor.compile(net, x) context.set_auto_parallel_context(device_num=64, global_rank=15) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") diff --git a/tests/ut/python/parallel/test_auto_parallel_BN_PReLU.py b/tests/ut/python/parallel/test_auto_parallel_BN_PReLU.py index 07b843b6146..f5dbf582a03 100644 --- a/tests/ut/python/parallel/test_auto_parallel_BN_PReLU.py +++ b/tests/ut/python/parallel/test_auto_parallel_BN_PReLU.py @@ -18,7 +18,7 @@ import mindspore as ms import mindspore.nn as nn from mindspore import Tensor from mindspore import context -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.ops import composite as C from tests.ut.python.ops.test_math_ops import VirtualLoss @@ -74,4 +74,4 @@ def test_auto_parallel_bn_with_prelu(): net = GradWrap(NetWithLoss(Net())) net.set_auto_parallel() net.set_train() - _executor.compile(net, x) + _cell_graph_executor.compile(net, x) diff --git a/tests/ut/python/parallel/test_auto_parallel_activation.py b/tests/ut/python/parallel/test_auto_parallel_activation.py index 2be2ae35540..4902772abb3 100644 --- a/tests/ut/python/parallel/test_auto_parallel_activation.py +++ b/tests/ut/python/parallel/test_auto_parallel_activation.py @@ -16,7 +16,7 @@ import numpy as np import mindspore as ms from mindspore import context, Tensor, Parameter -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.nn import Cell, TrainOneStepCell, Momentum from mindspore.ops import operations as P @@ -44,7 +44,7 @@ def compile_net(net): train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() train_net.set_train() - _executor.compile(train_net, _x, _b) + _cell_graph_executor.compile(train_net, _x, _b) context.reset_auto_parallel_context() diff --git a/tests/ut/python/parallel/test_auto_parallel_arithmetic.py b/tests/ut/python/parallel/test_auto_parallel_arithmetic.py index 3ff2ac81d8d..a3f24bb910e 100644 --- a/tests/ut/python/parallel/test_auto_parallel_arithmetic.py +++ b/tests/ut/python/parallel/test_auto_parallel_arithmetic.py @@ -19,7 +19,7 @@ import mindspore as ms import mindspore.nn as nn from mindspore import Tensor from mindspore import context -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.ops import composite as C from mindspore.ops import operations as P from mindspore.parallel._utils import _reset_op_id as reset_op_id @@ -54,7 +54,7 @@ class GradWrap(nn.Cell): def compile_net(net, x, y, b, phase): net.set_auto_parallel() net.set_train() - _executor.compile(net, x, y, b, phase=phase) + _cell_graph_executor.compile(net, x, y, b, phase=phase) def test_auto_parallel_arithmetic(): @@ -78,7 +78,7 @@ def test_auto_parallel_arithmetic(): y = Tensor(np.ones([32, 128]), dtype=ms.float32) b = Tensor(np.ones([64, 128]), dtype=ms.float32) compile_net(net, x, y, b, phase='train') - strategies = _executor._get_shard_strategy(net) 
+ strategies = _cell_graph_executor._get_shard_strategy(net) for (k, v) in strategies.items(): if re.search('FloorDiv-op', k) is not None: assert v == [[2, 4], [2, 4]] @@ -107,7 +107,7 @@ def test_auto_parallel_arithmetic_broadcast_both(): y = Tensor(np.ones([32, 1]), dtype=ms.float32) b = Tensor(np.ones([1, 64]), dtype=ms.float32) compile_net(net, x, y, b, phase='train') - strategies = _executor._get_shard_strategy(net) + strategies = _cell_graph_executor._get_shard_strategy(net) for (k, v) in strategies.items(): if re.search('FloorDiv-op', k) is not None: assert v == [[8, 1], [1, 1]] @@ -136,7 +136,7 @@ def test_auto_parallel_arithmetic_broadcast_right(): y = Tensor(np.ones([32, 32]), dtype=ms.float32) b = Tensor(np.ones([32]), dtype=ms.float32) compile_net(net, x, y, b, phase='train') - strategies = _executor._get_shard_strategy(net) + strategies = _cell_graph_executor._get_shard_strategy(net) for (k, v) in strategies.items(): if re.search('FloorDiv-op', k) is not None: assert v == [[4, 2], [2]] @@ -165,7 +165,7 @@ def test_auto_parallel_arithmetic_broadcast_left(): y = Tensor(np.ones([32, 32]), dtype=ms.float32) b = Tensor(np.ones([128, 64, 32]), dtype=ms.float32) compile_net(net, x, y, b, phase="train") - strategies = _executor._get_shard_strategy(net) + strategies = _cell_graph_executor._get_shard_strategy(net) for (k, v) in strategies.items(): if re.search('FloorDiv-op', k) is not None: assert v == [[4, 2], [1, 4, 2]] diff --git a/tests/ut/python/parallel/test_auto_parallel_assign_sub_with_ref_key.py b/tests/ut/python/parallel/test_auto_parallel_assign_sub_with_ref_key.py index 3bd389db7da..ccca4c88ada 100644 --- a/tests/ut/python/parallel/test_auto_parallel_assign_sub_with_ref_key.py +++ b/tests/ut/python/parallel/test_auto_parallel_assign_sub_with_ref_key.py @@ -19,7 +19,7 @@ import mindspore as ms import mindspore.nn as nn from mindspore import Tensor from mindspore import context -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.ops import composite as C from mindspore.parallel._utils import _reset_op_id as reset_op_id from tests.ut.python.ops.test_math_ops import VirtualLoss @@ -62,8 +62,8 @@ def test_auto_parallel_assign_sub_with_ref_key(): reset_op_id() net.set_train() - _executor.compile(net, x, phase="train") - strategies = _executor._get_shard_strategy(net) + _cell_graph_executor.compile(net, x, phase="train") + strategies = _cell_graph_executor._get_shard_strategy(net) for (k, v) in strategies.items(): if re.search('PReLU-op', k) is not None: assert v == [[1, 1, 1, 8], [1]] diff --git a/tests/ut/python/parallel/test_auto_parallel_cast.py b/tests/ut/python/parallel/test_auto_parallel_cast.py index a67bf7f9eb7..bb92b5a20b0 100644 --- a/tests/ut/python/parallel/test_auto_parallel_cast.py +++ b/tests/ut/python/parallel/test_auto_parallel_cast.py @@ -19,7 +19,7 @@ import mindspore.nn as nn from mindspore import Tensor from mindspore import context from mindspore.common import dtype as mstype -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.ops import composite as C from mindspore.ops import operations as P from mindspore.parallel._utils import _reset_op_id as reset_op_id @@ -82,8 +82,8 @@ def test_double_star_graph(): reset_op_id() net.set_train() - _executor.compile(net, x, y, z, w, phase='train') - strategies = _executor._get_shard_strategy(net) + _cell_graph_executor.compile(net, x, y, z, w, phase='train') + strategies = 
_cell_graph_executor._get_shard_strategy(net) expected_strategies = {'Default/network-Net/Cast-op1': [[8, 1]], 'Default/network-Net/Cast-op3': [[1, 8]], 'Default/network-Net/MatMul-op2': [[8, 1], [1, 1]], diff --git a/tests/ut/python/parallel/test_auto_parallel_common_parameter.py b/tests/ut/python/parallel/test_auto_parallel_common_parameter.py index 33059a7a48e..6ce99a7c0a9 100644 --- a/tests/ut/python/parallel/test_auto_parallel_common_parameter.py +++ b/tests/ut/python/parallel/test_auto_parallel_common_parameter.py @@ -18,7 +18,7 @@ import mindspore as ms import mindspore.nn as nn from mindspore import Tensor from mindspore import context -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.ops import composite as C from mindspore.ops import operations as P from tests.ut.python.ops.test_math_ops import VirtualLoss @@ -73,4 +73,4 @@ def test_common_parameter(): context.set_auto_parallel_context(parallel_mode="auto_parallel") net.set_auto_parallel() net.set_train() - _executor.compile(net, x, y, z) + _cell_graph_executor.compile(net, x, y, z) diff --git a/tests/ut/python/parallel/test_auto_parallel_double_sources.py b/tests/ut/python/parallel/test_auto_parallel_double_sources.py index 9e361e71061..8b64c7dd22a 100644 --- a/tests/ut/python/parallel/test_auto_parallel_double_sources.py +++ b/tests/ut/python/parallel/test_auto_parallel_double_sources.py @@ -18,7 +18,7 @@ import mindspore as ms import mindspore.nn as nn from mindspore import Tensor from mindspore import context -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.ops import composite as C from mindspore.ops import operations as P from tests.ut.python.ops.test_math_ops import VirtualLoss @@ -80,7 +80,7 @@ def test_double_source_graph(): context.set_auto_parallel_context(parallel_mode="auto_parallel") net.set_auto_parallel() net.set_train() - _executor.compile(net, x, y, z, w, a) + _cell_graph_executor.compile(net, x, y, z, w, a) def test_double_source_complex_graph(): @@ -116,4 +116,4 @@ def test_double_source_complex_graph(): context.set_auto_parallel_context(parallel_mode="auto_parallel") net.set_auto_parallel() net.set_train() - _executor.compile(net, x, y, z, w, a) + _cell_graph_executor.compile(net, x, y, z, w, a) diff --git a/tests/ut/python/parallel/test_auto_parallel_double_star.py b/tests/ut/python/parallel/test_auto_parallel_double_star.py index b6b43a6d26b..52d5beca5f1 100644 --- a/tests/ut/python/parallel/test_auto_parallel_double_star.py +++ b/tests/ut/python/parallel/test_auto_parallel_double_star.py @@ -18,7 +18,7 @@ import mindspore as ms import mindspore.nn as nn from mindspore import Tensor from mindspore import context -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.ops import composite as C from mindspore.ops import operations as P from tests.ut.python.ops.test_math_ops import VirtualLoss @@ -84,4 +84,4 @@ def test_double_star_graph(): context.set_auto_parallel_context(parallel_mode="auto_parallel") net.set_auto_parallel() net.set_train() - _executor.compile(net, x, y, z, w, a, b, c) + _cell_graph_executor.compile(net, x, y, z, w, a, b, c) diff --git a/tests/ut/python/parallel/test_auto_parallel_double_subgraphs.py b/tests/ut/python/parallel/test_auto_parallel_double_subgraphs.py index 15f47068129..d2242c39cad 100644 --- a/tests/ut/python/parallel/test_auto_parallel_double_subgraphs.py +++ 
b/tests/ut/python/parallel/test_auto_parallel_double_subgraphs.py @@ -19,7 +19,7 @@ import mindspore as ms import mindspore.nn as nn from mindspore import Tensor, Parameter, ParameterTuple from mindspore import context, Model -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.nn.optim import Adam, FTRL from mindspore.ops import composite as C from mindspore.ops import functional as F @@ -114,8 +114,8 @@ def test_double_subgraphs(): x = Tensor(np.ones([8, 8, 8, 8]), dtype=ms.float32) reset_op_id() net.set_train() - _executor.compile(net, x, phase='train') - strategies = _executor._get_shard_strategy(net) + _cell_graph_executor.compile(net, x, phase='train') + strategies = _cell_graph_executor._get_shard_strategy(net) for (k, v) in strategies.items(): if re.search('ReduceMean-op', k) is not None: assert v == [[8, 1, 1, 1]] @@ -165,7 +165,7 @@ def test_double_subgraphs_train(): ds_train = DatasetLenet(Tensor(batch_ids), None) model = Model(net) model.train(1, ds_train, dataset_sink_mode=False) - strategies = _executor._get_shard_strategy(net) + strategies = _cell_graph_executor._get_shard_strategy(net) for (k, v) in strategies.items(): if re.search('ReduceMean-op', k) is not None: assert v == [[1, 1, 1, 1]] diff --git a/tests/ut/python/parallel/test_auto_parallel_fc_nobias.py b/tests/ut/python/parallel/test_auto_parallel_fc_nobias.py index b64f8fec5d1..71f8b41a7ff 100644 --- a/tests/ut/python/parallel/test_auto_parallel_fc_nobias.py +++ b/tests/ut/python/parallel/test_auto_parallel_fc_nobias.py @@ -18,7 +18,7 @@ import mindspore as ms import mindspore.nn as nn from mindspore import Tensor from mindspore import context -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.ops import composite as C from mindspore.ops import operations as P from tests.ut.python.ops.test_math_ops import VirtualLoss @@ -71,4 +71,4 @@ def test_two_matmul(): context.set_auto_parallel_context(parallel_mode="auto_parallel") net.set_auto_parallel() net.set_train() - _executor.compile(net, x, y, b) + _cell_graph_executor.compile(net, x, y, b) diff --git a/tests/ut/python/parallel/test_auto_parallel_for_loop.py b/tests/ut/python/parallel/test_auto_parallel_for_loop.py index b97a70587f2..eae68a3499d 100644 --- a/tests/ut/python/parallel/test_auto_parallel_for_loop.py +++ b/tests/ut/python/parallel/test_auto_parallel_for_loop.py @@ -21,7 +21,7 @@ import mindspore.nn as nn from mindspore.ops import operations as P, functional as F from mindspore.common.initializer import initializer import mindspore.common.dtype as mstype -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from tests.dataset_mock import MindData @@ -123,7 +123,7 @@ def test_auto_parallel(): net = Full(_w1, 3) net.set_auto_parallel() net.set_train() - _executor.compile(net, _x, phase='train') - num_ops = _executor._get_num_parallel_ops(net) + _cell_graph_executor.compile(net, _x, phase='train') + num_ops = _cell_graph_executor._get_num_parallel_ops(net) expected_num = 16 assert num_ops == expected_num diff --git a/tests/ut/python/parallel/test_auto_parallel_for_loop_multi_subgraph.py b/tests/ut/python/parallel/test_auto_parallel_for_loop_multi_subgraph.py index c990e02a788..cb5c87d3ce5 100644 --- a/tests/ut/python/parallel/test_auto_parallel_for_loop_multi_subgraph.py +++ b/tests/ut/python/parallel/test_auto_parallel_for_loop_multi_subgraph.py @@ -18,7 +18,7 @@ import mindspore as ms import 
diff --git a/tests/ut/python/parallel/test_auto_parallel_for_loop_multi_subgraph.py b/tests/ut/python/parallel/test_auto_parallel_for_loop_multi_subgraph.py
index c990e02a788..cb5c87d3ce5 100644
--- a/tests/ut/python/parallel/test_auto_parallel_for_loop_multi_subgraph.py
+++ b/tests/ut/python/parallel/test_auto_parallel_for_loop_multi_subgraph.py
@@ -18,7 +18,7 @@ import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor, Parameter, ParameterTuple
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.nn.optim import Adam, FTRL
 from mindspore.ops import composite as C
 from mindspore.ops import functional as F
@@ -130,7 +130,7 @@ def test_double_subgraphs():
     x = Tensor(np.ones([8, 8, 8, 8]), dtype=ms.float32)
     reset_op_id()
     net.set_train()
-    _executor.compile(net, x, phase='train')
-    num_ops = _executor._get_num_parallel_ops(net)
+    _cell_graph_executor.compile(net, x, phase='train')
+    num_ops = _cell_graph_executor._get_num_parallel_ops(net)
     expected_num = 7
     assert expected_num == num_ops
diff --git a/tests/ut/python/parallel/test_auto_parallel_for_loop_reshape.py b/tests/ut/python/parallel/test_auto_parallel_for_loop_reshape.py
index 00c410179b2..66d05693075 100644
--- a/tests/ut/python/parallel/test_auto_parallel_for_loop_reshape.py
+++ b/tests/ut/python/parallel/test_auto_parallel_for_loop_reshape.py
@@ -21,7 +21,7 @@ import mindspore.nn as nn
 from mindspore.ops import operations as P, functional as F
 from mindspore.common.initializer import initializer
 import mindspore.common.dtype as mstype
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from tests.dataset_mock import MindData


@@ -130,7 +130,7 @@ def test_auto_parallel():
     net = Full(_w1, 3)
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, _x, phase='train')
-    num_ops = _executor._get_num_parallel_ops(net)
+    _cell_graph_executor.compile(net, _x, phase='train')
+    num_ops = _cell_graph_executor._get_num_parallel_ops(net)
     expected_num = 16
     assert num_ops == expected_num
diff --git a/tests/ut/python/parallel/test_auto_parallel_four_matmul.py b/tests/ut/python/parallel/test_auto_parallel_four_matmul.py
index 0590ff062b9..a09355aee62 100644
--- a/tests/ut/python/parallel/test_auto_parallel_four_matmul.py
+++ b/tests/ut/python/parallel/test_auto_parallel_four_matmul.py
@@ -18,7 +18,7 @@ import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss
@@ -50,7 +50,7 @@ class GradWrap(nn.Cell):
 def compile_net(net, x, y, z, w, b):
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x, y, z, w, b)
+    _cell_graph_executor.compile(net, x, y, z, w, b)


 # model_parallel test
diff --git a/tests/ut/python/parallel/test_auto_parallel_l2normalize.py b/tests/ut/python/parallel/test_auto_parallel_l2normalize.py
index 2de76ab7d93..a79e331f69b 100644
--- a/tests/ut/python/parallel/test_auto_parallel_l2normalize.py
+++ b/tests/ut/python/parallel/test_auto_parallel_l2normalize.py
@@ -18,7 +18,7 @@ import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
 from mindspore.parallel._utils import _reset_op_id as reset_op_id
@@ -74,4 +74,4 @@ def test_auto_parallel_l2normalize():
     y = Tensor(np.ones([128, 64, 64]), dtype=ms.float32)
     b = Tensor(np.ones([128, 64, 64]), dtype=ms.float32)
     net.set_train()
-    _executor.compile(net, x, y, b, phase='train')
+    _cell_graph_executor.compile(net, x, y, b, phase='train')
diff --git a/tests/ut/python/parallel/test_auto_parallel_matmul_drop.py b/tests/ut/python/parallel/test_auto_parallel_matmul_drop.py
index 35012641f4d..7618c197413 100644
--- a/tests/ut/python/parallel/test_auto_parallel_matmul_drop.py
+++ b/tests/ut/python/parallel/test_auto_parallel_matmul_drop.py
@@ -18,7 +18,7 @@ import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss
@@ -71,4 +71,4 @@ def test_two_matmul_dropout():
     y = Tensor(np.ones([32, 64]), dtype=ms.float32)
     b = Tensor(np.ones([64, 64]), dtype=ms.float32)
     net.set_train()
-    _executor.compile(net, x, y, b)
+    _cell_graph_executor.compile(net, x, y, b)
diff --git a/tests/ut/python/parallel/test_auto_parallel_matmul_prelu.py b/tests/ut/python/parallel/test_auto_parallel_matmul_prelu.py
index b11836a435a..e2b9372c5aa 100644
--- a/tests/ut/python/parallel/test_auto_parallel_matmul_prelu.py
+++ b/tests/ut/python/parallel/test_auto_parallel_matmul_prelu.py
@@ -19,7 +19,7 @@ import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
 from mindspore.parallel._utils import _reset_op_id as reset_op_id
@@ -75,8 +75,8 @@ def test_matmul_prelu():

     reset_op_id()
     net.set_train()
-    _executor.compile(net, x, y, b, phase='train')
-    strategies = _executor._get_shard_strategy(net)
+    _cell_graph_executor.compile(net, x, y, b, phase='train')
+    strategies = _cell_graph_executor._get_shard_strategy(net)
     for (k, v) in strategies.items():
         if re.search('PReLU-op', k) is not None:
             assert v == [[16, 1, 1, 1], [1]]
diff --git a/tests/ut/python/parallel/test_auto_parallel_multi_graph.py b/tests/ut/python/parallel/test_auto_parallel_multi_graph.py
index ab71e6cb6fa..42178168f9e 100644
--- a/tests/ut/python/parallel/test_auto_parallel_multi_graph.py
+++ b/tests/ut/python/parallel/test_auto_parallel_multi_graph.py
@@ -15,7 +15,7 @@
 import numpy as np
 import mindspore as ms
 import mindspore.context as context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore import Tensor, Parameter
 from mindspore.nn import Cell, TrainOneStepCell, Momentum
 from mindspore.ops import operations as P
@@ -59,7 +59,7 @@ def compile_net(net):
     train_net = TrainOneStepCell(net, optimizer)
     train_net.set_auto_parallel()
     train_net.set_train()
-    _executor.compile(train_net, inputs_, label_)
+    _cell_graph_executor.compile(train_net, inputs_, label_)
     context.reset_auto_parallel_context()
diff --git a/tests/ut/python/parallel/test_auto_parallel_onehot.py b/tests/ut/python/parallel/test_auto_parallel_onehot.py
index 59c6ed7271e..1e8357ce381 100644
--- a/tests/ut/python/parallel/test_auto_parallel_onehot.py
+++ b/tests/ut/python/parallel/test_auto_parallel_onehot.py
@@ -18,7 +18,7 @@ import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.common.parameter import Parameter
 from mindspore.nn.optim.momentum import Momentum
 from mindspore.ops import composite as C
@@ -100,7 +100,7 @@ def test_auto_parallel_arithmetic():
     y = Tensor(np.ones([32, 64]), dtype=ms.float32)
     b = Tensor(np.ones([64]), dtype=ms.int32)
     net.set_train()
-    _executor.compile(net, x, y, b)
+    _cell_graph_executor.compile(net, x, y, b)


 def test_auto_parallel_arithmetic_model():
diff --git a/tests/ut/python/parallel/test_auto_parallel_parameter_cast.py b/tests/ut/python/parallel/test_auto_parallel_parameter_cast.py
index 0890dfd3c02..2b619103b19 100644
--- a/tests/ut/python/parallel/test_auto_parallel_parameter_cast.py
+++ b/tests/ut/python/parallel/test_auto_parallel_parameter_cast.py
@@ -20,7 +20,7 @@ import mindspore.nn as nn
 from mindspore import Tensor, Parameter
 from mindspore import context
 from mindspore.common import dtype as mstype
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import operations as P
 from mindspore.parallel import set_algo_parameters
 from mindspore.parallel._utils import _reset_op_id as reset_op_id
@@ -69,8 +69,8 @@ def test_common_parameter():

     reset_op_id()
     net.set_train()
-    _executor.compile(net, x, y, phase='train')
-    strategies = _executor._get_shard_strategy(net)
+    _cell_graph_executor.compile(net, x, y, phase='train')
+    strategies = _cell_graph_executor._get_shard_strategy(net)
     for (k, v) in strategies.items():
         if re.search('MatMul-op', k) is not None:
             assert v == [[8, 1], [1, 1]]
diff --git a/tests/ut/python/parallel/test_auto_parallel_partial_strategy.py b/tests/ut/python/parallel/test_auto_parallel_partial_strategy.py
index 3011f44b8bd..cd366117303 100644
--- a/tests/ut/python/parallel/test_auto_parallel_partial_strategy.py
+++ b/tests/ut/python/parallel/test_auto_parallel_partial_strategy.py
@@ -18,7 +18,7 @@ import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss
@@ -78,4 +78,4 @@ def test_four_matmul_linear():
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x, y, z, w, b)
+    _cell_graph_executor.compile(net, x, y, z, w, b)
diff --git a/tests/ut/python/parallel/test_auto_parallel_reduce_method.py b/tests/ut/python/parallel/test_auto_parallel_reduce_method.py
index 2161b17d6d5..4f871cb6cfb 100644
--- a/tests/ut/python/parallel/test_auto_parallel_reduce_method.py
+++ b/tests/ut/python/parallel/test_auto_parallel_reduce_method.py
@@ -18,7 +18,7 @@ import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss
@@ -50,7 +50,7 @@ class GradWrap(nn.Cell):
 def compile_net(net, x, y, b):
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x, y, b)
+    _cell_graph_executor.compile(net, x, y, b)


 # model_parallel test
diff --git a/tests/ut/python/parallel/test_auto_parallel_reshape.py b/tests/ut/python/parallel/test_auto_parallel_reshape.py
index 8707ca01b30..61ab6c6058f 100644
--- a/tests/ut/python/parallel/test_auto_parallel_reshape.py
+++ b/tests/ut/python/parallel/test_auto_parallel_reshape.py
@@ -18,7 +18,7 @@ import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.common.parameter import Parameter
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
@@ -69,7 +69,7 @@ def test_reshape_matmul():
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x)
+    _cell_graph_executor.compile(net, x)

 def test_reshape_reshape():
     class Net(nn.Cell):
@@ -92,7 +92,7 @@ def test_reshape_reshape():
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x)
+    _cell_graph_executor.compile(net, x)


 def test_reshape_auto_1():
@@ -118,7 +118,7 @@ def test_reshape_auto_1():
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x)
+    _cell_graph_executor.compile(net, x)


 def test_reshape_auto_2():
@@ -147,7 +147,7 @@ def test_reshape_auto_2():
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x)
+    _cell_graph_executor.compile(net, x)


 def test_reshape_auto_3():
@@ -173,7 +173,7 @@ def test_reshape_auto_3():
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x)
+    _cell_graph_executor.compile(net, x)


 def test_reshape_auto_4():
@@ -200,7 +200,7 @@ def test_reshape_auto_4():
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x)
+    _cell_graph_executor.compile(net, x)


 def test_reshape_auto_5():
@@ -251,7 +251,7 @@ def test_reshape_auto_5():
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x, y)
+    _cell_graph_executor.compile(net, x, y)

 def test_reshape_auto_6():
     class NetWithLoss6(nn.Cell):
@@ -299,7 +299,7 @@ def test_reshape_auto_6():
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x, y)
+    _cell_graph_executor.compile(net, x, y)

 def test_reshape_auto_7():
     class Net(nn.Cell):
@@ -322,7 +322,7 @@ def test_reshape_auto_7():
     context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x)
+    _cell_graph_executor.compile(net, x)

 def test_reshape_depend_reshape():
     class Net(nn.Cell):
@@ -371,9 +371,9 @@ def test_reshape_depend_reshape():
     context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x, y)
+    _cell_graph_executor.compile(net, x, y)
     net_auto = GradWrap1(NetWithLoss1(Net()))
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
     net_auto.set_auto_parallel()
     net_auto.set_train()
-    _executor.compile(net_auto, x, y)
+    _cell_graph_executor.compile(net_auto, x, y)
diff --git a/tests/ut/python/parallel/test_auto_parallel_resnet.py b/tests/ut/python/parallel/test_auto_parallel_resnet.py
index f11e915849e..72d625dcd70 100644
--- a/tests/ut/python/parallel/test_auto_parallel_resnet.py
+++ b/tests/ut/python/parallel/test_auto_parallel_resnet.py
@@ -20,7 +20,7 @@ import mindspore.nn as nn
 import mindspore.ops.functional as F
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.common.initializer import TruncatedNormal
 from mindspore.communication.management import init
 from mindspore.nn.loss.loss import LossBase
@@ -302,7 +302,7 @@ def test_train_32k_8p(batch_size=32, num_classes=32768):
     opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01, 0.9)
     model = Model(net, loss_fn=loss, optimizer=opt)
     model.train(5, dataset, dataset_sink_mode=False)
-    strategies = _executor._get_shard_strategy(model._train_network)
+    strategies = _cell_graph_executor._get_shard_strategy(model._train_network)
     for (k, v) in strategies.items():
         if re.search('Conv2D-op', k) is not None:
             assert v[0][0] == dev_num
@@ -311,7 +311,7 @@ def test_train_32k_8p(batch_size=32, num_classes=32768):
         elif re.search('ReduceSum-op', k) is not None:
             assert v == [[dev_num, 1]]

-    allreduce_fusion_dict = _executor._get_allreduce_fusion(model._train_network)
+    allreduce_fusion_dict = _cell_graph_executor._get_allreduce_fusion(model._train_network)
     print(allreduce_fusion_dict)
     return allreduce_fusion_dict
@@ -678,7 +678,7 @@ def test_train_64k_8p(batch_size=32, num_classes=65536):  # 1048576 #131072 #327
     opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01, 0.9)
     model = Model(net, loss_fn=loss, optimizer=opt)
     model.train(5, dataset, dataset_sink_mode=False)
-    strategies = _executor._get_shard_strategy(model._train_network)
+    strategies = _cell_graph_executor._get_shard_strategy(model._train_network)
     for (k, v) in strategies.items():
         if re.search('Conv2D-op', k) is not None:
             assert v[0][0] == dev_num
@@ -706,7 +706,7 @@ def test_train_8k_8p_gpu(batch_size=32, num_classes=8192):
     opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01, 0.9)
     model = Model(net, loss_fn=loss, optimizer=opt)
     model.train(5, dataset, dataset_sink_mode=False)
-    strategies = _executor._get_shard_strategy(model._train_network)
+    strategies = _cell_graph_executor._get_shard_strategy(model._train_network)
     for (k, v) in strategies.items():
         if re.search('Conv2D-op', k) is not None:
             assert v[0][0] == dev_num
@@ -732,7 +732,7 @@ def test_train_8k_8p_gpu_approxi(batch_size=32, num_classes=8192):
     opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01, 0.9)
     model = Model(net, loss_fn=loss, optimizer=opt)
     model.train(5, dataset, dataset_sink_mode=False)
-    strategies = _executor._get_shard_strategy(model._train_network)
+    strategies = _cell_graph_executor._get_shard_strategy(model._train_network)
     for (k, v) in strategies.items():
         if re.search('Conv2D-op', k) is not None:
             assert v[0][0] == dev_num
@@ -758,7 +758,7 @@ def test_train_4k_8p_gpu(batch_size=32, num_classes=4096):
     opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01, 0.9)
     model = Model(net, loss_fn=loss, optimizer=opt)
     model.train(5, dataset, dataset_sink_mode=False)
-    strategies = _executor._get_shard_strategy(model._train_network)
+    strategies = _cell_graph_executor._get_shard_strategy(model._train_network)
     for (k, v) in strategies.items():
         if re.search('Conv2D-op', k) is not None:
             assert v[0][0] == dev_num
diff --git a/tests/ut/python/parallel/test_auto_parallel_resnet_sharding_propagation.py b/tests/ut/python/parallel/test_auto_parallel_resnet_sharding_propagation.py
index f021485b07f..f7a89c85ec0 100644
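Reviewer note: the ResNet hunks above also exercise the query side of the renamed executor. A minimal sketch of that assertion idiom, assuming _get_shard_strategy still returns a dict keyed by op identifiers such as 'Default/network-Net/Conv2D-op0' (as these tests rely on); the helper name check_conv2d_strategy is invented for illustration:

    import re
    from mindspore.common.api import _cell_graph_executor

    def check_conv2d_strategy(train_network, dev_num):
        # Strategy dict: op key -> list of per-input shard strategies.
        strategies = _cell_graph_executor._get_shard_strategy(train_network)
        for k, v in strategies.items():
            if re.search('Conv2D-op', k) is not None:
                # The first dimension of Conv2D's first input is split across all devices.
                assert v[0][0] == dev_num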
--- a/tests/ut/python/parallel/test_auto_parallel_resnet_sharding_propagation.py
+++ b/tests/ut/python/parallel/test_auto_parallel_resnet_sharding_propagation.py
@@ -20,7 +20,7 @@ import mindspore.nn as nn
 import mindspore.ops.functional as F
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.common.initializer import TruncatedNormal
 from mindspore.communication.management import init
 from mindspore.nn.loss.loss import _Loss
@@ -307,7 +307,7 @@ def test_train_64k_8p(batch_size=32, num_classes=65536):  # 1048576 #131072 #327
     opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01, 0.9)
     model = Model(net, loss_fn=loss, optimizer=opt)
     model.train(5, dataset, dataset_sink_mode=False)
-    strategies = _executor._get_shard_strategy(model._train_network)
+    strategies = _cell_graph_executor._get_shard_strategy(model._train_network)
     for (k, v) in strategies.items():
         if re.search('Conv2D-op', k) is not None:
             assert v[0][0] == dev_num
diff --git a/tests/ut/python/parallel/test_auto_parallel_resnet_sharding_propagation2.py b/tests/ut/python/parallel/test_auto_parallel_resnet_sharding_propagation2.py
index 977dc8d0a23..f8490d86250 100644
--- a/tests/ut/python/parallel/test_auto_parallel_resnet_sharding_propagation2.py
+++ b/tests/ut/python/parallel/test_auto_parallel_resnet_sharding_propagation2.py
@@ -20,7 +20,7 @@ import mindspore.nn as nn
 import mindspore.ops.functional as F
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.common.initializer import TruncatedNormal
 from mindspore.communication.management import init
 from mindspore.nn.loss.loss import _Loss
@@ -304,7 +304,7 @@ def test_train_32k_8p(batch_size=32, num_classes=32768):
     opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01, 0.9)
     model = Model(net, loss_fn=loss, optimizer=opt)
     model.train(5, dataset, dataset_sink_mode=False)
-    strategies = _executor._get_shard_strategy(model._train_network)
+    strategies = _cell_graph_executor._get_shard_strategy(model._train_network)
     for (k, v) in strategies.items():
         if re.search('Conv2D-op', k) is not None:
             assert v[0][0] == dev_num
diff --git a/tests/ut/python/parallel/test_auto_parallel_rhombus.py b/tests/ut/python/parallel/test_auto_parallel_rhombus.py
index 97e821c0e3c..70bc3fa10ad 100644
--- a/tests/ut/python/parallel/test_auto_parallel_rhombus.py
+++ b/tests/ut/python/parallel/test_auto_parallel_rhombus.py
@@ -18,7 +18,7 @@ import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor, Parameter
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss
@@ -50,7 +50,7 @@ class GradWrap(nn.Cell):
 def compile_net(net, x, y, b):
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x, y, b)
+    _cell_graph_executor.compile(net, x, y, b)


 def test_rhombus1():
diff --git a/tests/ut/python/parallel/test_auto_parallel_segment_min.py b/tests/ut/python/parallel/test_auto_parallel_segment_min.py
index 36dde644f4f..896fb17ad8b 100644
--- a/tests/ut/python/parallel/test_auto_parallel_segment_min.py
+++ b/tests/ut/python/parallel/test_auto_parallel_segment_min.py
@@ -18,7 +18,7 @@ import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 import mindspore.ops as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss
@@ -68,4 +68,4 @@ def test_auto_parallel_unsortedsegmentmin():
     net = GradWrap(NetWithLoss(Net(16)))
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x, indices)
+    _cell_graph_executor.compile(net, x, indices)
diff --git a/tests/ut/python/parallel/test_auto_parallel_segment_sum.py b/tests/ut/python/parallel/test_auto_parallel_segment_sum.py
index 5b5d2291dc0..bb755f22998 100644
--- a/tests/ut/python/parallel/test_auto_parallel_segment_sum.py
+++ b/tests/ut/python/parallel/test_auto_parallel_segment_sum.py
@@ -18,7 +18,7 @@ import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 import mindspore.ops as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss
@@ -68,4 +68,4 @@ def test_auto_parallel_unsortedsegmentsum():
     net = GradWrap(NetWithLoss(Net(16)))
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x, indices)
+    _cell_graph_executor.compile(net, x, indices)
diff --git a/tests/ut/python/parallel/test_auto_parallel_shard_propagation.py b/tests/ut/python/parallel/test_auto_parallel_shard_propagation.py
index bbdf3152354..83716e9f318 100644
--- a/tests/ut/python/parallel/test_auto_parallel_shard_propagation.py
+++ b/tests/ut/python/parallel/test_auto_parallel_shard_propagation.py
@@ -16,7 +16,7 @@
 import numpy as np
 import pytest
 import mindspore as ms
 from mindspore import context, Tensor, Parameter
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.nn import Cell, TrainOneStepCell, Momentum
 from mindspore.ops import operations as P
@@ -46,7 +46,7 @@ def compile_net(net):
     train_net = TrainOneStepCell(net, optimizer)
     train_net.set_auto_parallel()
     train_net.set_train()
-    _executor.compile(train_net, _x, _b)
+    _cell_graph_executor.compile(train_net, _x, _b)
     context.reset_auto_parallel_context()
diff --git a/tests/ut/python/parallel/test_auto_parallel_shard_propagation2.py b/tests/ut/python/parallel/test_auto_parallel_shard_propagation2.py
index eeab1ef774f..55d6a481474 100644
--- a/tests/ut/python/parallel/test_auto_parallel_shard_propagation2.py
+++ b/tests/ut/python/parallel/test_auto_parallel_shard_propagation2.py
@@ -16,7 +16,7 @@
 import numpy as np
 import mindspore as ms
 import mindspore.common.dtype as mstype
 from mindspore import context, Tensor, Parameter
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.nn import Cell, TrainOneStepCell, Momentum
 from mindspore.ops import operations as P
@@ -46,7 +46,7 @@ def compile_net(net):
     train_net = TrainOneStepCell(net, optimizer)
     train_net.set_auto_parallel()
     train_net.set_train()
-    _executor.compile(train_net, _x, _b)
+    _cell_graph_executor.compile(train_net, _x, _b)
     context.reset_auto_parallel_context()
diff --git a/tests/ut/python/parallel/test_auto_parallel_shard_propagation3.py b/tests/ut/python/parallel/test_auto_parallel_shard_propagation3.py
index 91b91bfce77..8abb691109e 100644
--- a/tests/ut/python/parallel/test_auto_parallel_shard_propagation3.py
+++ b/tests/ut/python/parallel/test_auto_parallel_shard_propagation3.py
@@ -16,7 +16,7 @@
 import numpy as np
 import pytest
 import mindspore as ms
 from mindspore import context, Tensor, Parameter
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.nn import Cell, TrainOneStepCell, Momentum
 from mindspore.ops import operations as P
@@ -48,7 +48,7 @@ def compile_net(net):
     train_net = TrainOneStepCell(net, optimizer)
     train_net.set_auto_parallel()
     train_net.set_train()
-    _executor.compile(train_net, _x, _b)
+    _cell_graph_executor.compile(train_net, _x, _b)
     context.reset_auto_parallel_context()
diff --git a/tests/ut/python/parallel/test_auto_parallel_softmax_loss.py b/tests/ut/python/parallel/test_auto_parallel_softmax_loss.py
index 165a7e1ce35..e2e0a5c2087 100644
--- a/tests/ut/python/parallel/test_auto_parallel_softmax_loss.py
+++ b/tests/ut/python/parallel/test_auto_parallel_softmax_loss.py
@@ -18,7 +18,7 @@ import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
@@ -67,4 +67,4 @@ def test_softmax_cross_entropy_loss_auto_parallel():
     y = Tensor(np.ones([64, 32]), dtype=ms.float32)
     b = Tensor(np.ones([64, 64]), dtype=ms.float32)
     net.set_train()
-    _executor.compile(net, x, y, b)
+    _cell_graph_executor.compile(net, x, y, b)
diff --git a/tests/ut/python/parallel/test_auto_parallel_star_partial_strategy.py b/tests/ut/python/parallel/test_auto_parallel_star_partial_strategy.py
index 9fe72f7abf8..8ff0eac04a6 100644
--- a/tests/ut/python/parallel/test_auto_parallel_star_partial_strategy.py
+++ b/tests/ut/python/parallel/test_auto_parallel_star_partial_strategy.py
@@ -16,7 +16,7 @@ import numpy as np
 import pytest
 import mindspore as ms
 import mindspore.nn as nn
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
 from mindspore.parallel._utils import _reset_op_id as reset_op_id
@@ -89,7 +89,7 @@ def test_star_strategy_consistency1():
     net.set_auto_parallel()
     reset_op_id()
     net.set_train()
-    _executor.compile(net, x, phase='train')
+    _cell_graph_executor.compile(net, x, phase='train')


 def test_star_strategy_consistency2():
@@ -104,7 +104,7 @@ def test_star_strategy_consistency2():
     net.set_auto_parallel()
     reset_op_id()
     net.set_train()
-    _executor.compile(net, x, phase='train')
+    _cell_graph_executor.compile(net, x, phase='train')


 def test_star_strategy_consistency3():
@@ -119,7 +119,7 @@ def test_star_strategy_consistency3():
     net.set_auto_parallel()
     reset_op_id()
     net.set_train()
-    _executor.compile(net, x, phase='train')
+    _cell_graph_executor.compile(net, x, phase='train')


 def test_star_strategy_consistency4():
@@ -135,4 +135,4 @@ def test_star_strategy_consistency4():
     reset_op_id()
     with pytest.raises(RuntimeError):
         net.set_train()
-        _executor.compile(net, x, phase='train')
+        _cell_graph_executor.compile(net, x, phase='train')
diff --git a/tests/ut/python/parallel/test_auto_parallel_transformer.py b/tests/ut/python/parallel/test_auto_parallel_transformer.py
index 6e7751cf0a4..dfaf1793672 100644
--- a/tests/ut/python/parallel/test_auto_parallel_transformer.py
+++ b/tests/ut/python/parallel/test_auto_parallel_transformer.py
@@ -17,7 +17,7 @@
 import numpy as np
 import mindspore.nn as nn
 from mindspore import Tensor, Parameter
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss
@@ -113,4 +113,4 @@ def test_dmnet_train_step():
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, input_)
+    _cell_graph_executor.compile(net, input_)
diff --git a/tests/ut/python/parallel/test_auto_parallel_transpose.py b/tests/ut/python/parallel/test_auto_parallel_transpose.py
index c8fa9dc2040..2db2ed3e08b 100644
--- a/tests/ut/python/parallel/test_auto_parallel_transpose.py
+++ b/tests/ut/python/parallel/test_auto_parallel_transpose.py
@@ -18,7 +18,7 @@ import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
 from mindspore.parallel._utils import _reset_op_id as reset_op_id
@@ -77,8 +77,8 @@ def test_two_matmul_transpose():

     reset_op_id()
     net.set_train()
-    _executor.compile(net, x, y, b, phase='train')
-    strategies = _executor._get_shard_strategy(net)
+    _cell_graph_executor.compile(net, x, y, b, phase='train')
+    strategies = _cell_graph_executor._get_shard_strategy(net)
     print(strategies)
     expected_strategies = {'Default/network-Net/Transpose-op0': [[1, 16]],
                            'Default/network-Net/Transpose-op1': [[16, 1]],
diff --git a/tests/ut/python/parallel/test_auto_parallel_triangle_overwrite.py b/tests/ut/python/parallel/test_auto_parallel_triangle_overwrite.py
index 4389a39393f..7e3ecf4d3c7 100644
--- a/tests/ut/python/parallel/test_auto_parallel_triangle_overwrite.py
+++ b/tests/ut/python/parallel/test_auto_parallel_triangle_overwrite.py
@@ -15,7 +15,7 @@
 import numpy as np
 import mindspore as ms
 import mindspore.nn as nn
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
 from mindspore.parallel._utils import _reset_op_id as reset_op_id
@@ -71,4 +71,4 @@ def test_triangle_strategy_consistency():

     reset_op_id()
     net.set_train()
-    _executor.compile(net, x, phase='train')
+    _cell_graph_executor.compile(net, x, phase='train')
diff --git a/tests/ut/python/parallel/test_auto_parallel_tuple_depend.py b/tests/ut/python/parallel/test_auto_parallel_tuple_depend.py
index 94f1660ce8d..b7216410438 100644
--- a/tests/ut/python/parallel/test_auto_parallel_tuple_depend.py
+++ b/tests/ut/python/parallel/test_auto_parallel_tuple_depend.py
@@ -18,7 +18,7 @@ import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
 from mindspore.ops.operations.comm_ops import _VirtualDataset
@@ -79,4 +79,4 @@ def test_virtual_dataset_3_input():
     y = Tensor(np.ones([32, 64]), dtype=ms.float32)
     b = Tensor(np.ones([64, 2048]), dtype=ms.float32)
     net.set_train()
-    _executor.compile(net, x, y, b)
+    _cell_graph_executor.compile(net, x, y, b)
diff --git a/tests/ut/python/parallel/test_auto_parallel_two_bn.py b/tests/ut/python/parallel/test_auto_parallel_two_bn.py
index 8b55ba2fa8d..c0d4c71ad8a 100644
--- a/tests/ut/python/parallel/test_auto_parallel_two_bn.py
+++ b/tests/ut/python/parallel/test_auto_parallel_two_bn.py
@@ -19,7 +19,7 @@ import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import operations as P
 from mindspore.parallel import set_algo_parameters
 from mindspore.parallel._utils import _reset_op_id as reset_op_id
@@ -78,8 +78,8 @@ def test_two_bn():
     set_algo_parameters(elementwise_op_strategy_follow=True)
     reset_op_id()

-    _executor.compile(net, x, phase='train')
-    strategies = _executor._get_shard_strategy(net)
+    _cell_graph_executor.compile(net, x, phase='train')
+    strategies = _cell_graph_executor._get_shard_strategy(net)
     assert len(strategies) == 4

     for (k, v) in strategies.items():
diff --git a/tests/ut/python/parallel/test_auto_parallel_two_matmul.py b/tests/ut/python/parallel/test_auto_parallel_two_matmul.py
index ce624c7e3a1..cb7e6b0ce91 100644
--- a/tests/ut/python/parallel/test_auto_parallel_two_matmul.py
+++ b/tests/ut/python/parallel/test_auto_parallel_two_matmul.py
@@ -19,7 +19,7 @@ import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
 from mindspore.parallel import _cost_model_context as cost_model_context
@@ -160,8 +160,8 @@ def test_two_matmul():

     reset_op_id()
     net.set_train()
-    _executor.compile(net, x, y, b, phase='train')
-    strategies = _executor._get_shard_strategy(net)
+    _cell_graph_executor.compile(net, x, y, b, phase='train')
+    strategies = _cell_graph_executor._get_shard_strategy(net)
     for (k, v) in strategies.items():
         if re.search('MatMul-op', k) is not None:
             assert v == [[16, 1], [1, 1]]
diff --git a/tests/ut/python/parallel/test_auto_parallel_two_partial_matmul.py b/tests/ut/python/parallel/test_auto_parallel_two_partial_matmul.py
index 951f06e9f86..792ee626079 100644
--- a/tests/ut/python/parallel/test_auto_parallel_two_partial_matmul.py
+++ b/tests/ut/python/parallel/test_auto_parallel_two_partial_matmul.py
@@ -18,7 +18,7 @@ import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor, Parameter
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss
@@ -72,4 +72,4 @@ def test_four_matmul_linear():
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x, y)
+    _cell_graph_executor.compile(net, x, y)
diff --git a/tests/ut/python/parallel/test_auto_parallel_zig_zag.py b/tests/ut/python/parallel/test_auto_parallel_zig_zag.py
index 530e142b13e..2d8106dfe5e 100644
--- a/tests/ut/python/parallel/test_auto_parallel_zig_zag.py
+++ b/tests/ut/python/parallel/test_auto_parallel_zig_zag.py
@@ -18,7 +18,7 @@ import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss
@@ -78,4 +78,4 @@ def test_zig_zag_graph():
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x, y, z, w, a)
+    _cell_graph_executor.compile(net, x, y, z, w, a)
diff --git a/tests/ut/python/parallel/test_auto_star_elimination.py b/tests/ut/python/parallel/test_auto_star_elimination.py
index e67d562e379..636306c9351 100644
--- a/tests/ut/python/parallel/test_auto_star_elimination.py
+++ b/tests/ut/python/parallel/test_auto_star_elimination.py
@@ -19,7 +19,7 @@ import mindspore.nn as nn
 from mindspore import Tensor, Parameter
 from mindspore import context
 from mindspore.common import dtype as mstype
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.nn.loss.loss import LossBase
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
@@ -90,4 +90,4 @@ def test_marin_loss():
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x, y)
+    _cell_graph_executor.compile(net, x, y)
diff --git a/tests/ut/python/parallel/test_batch_matmul.py b/tests/ut/python/parallel/test_batch_matmul.py
index c40d4d257a7..9e9642022ad 100644
--- a/tests/ut/python/parallel/test_batch_matmul.py
+++ b/tests/ut/python/parallel/test_batch_matmul.py
@@ -16,7 +16,7 @@
 import numpy as np
 import mindspore as ms
 from mindspore import context, Tensor, Parameter
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.nn import Cell, TrainOneStepCell, Momentum
 from mindspore.ops import operations as P
@@ -46,7 +46,7 @@ def compile_net(net):
     train_net = TrainOneStepCell(net, optimizer)
     train_net.set_auto_parallel()
     train_net.set_train()
-    _executor.compile(train_net, _x, _b)
+    _cell_graph_executor.compile(train_net, _x, _b)
     context.reset_auto_parallel_context()
diff --git a/tests/ut/python/parallel/test_batch_parallel.py b/tests/ut/python/parallel/test_batch_parallel.py
index 91f0f4e7b20..4783e3f5d01 100644
--- a/tests/ut/python/parallel/test_batch_parallel.py
+++ b/tests/ut/python/parallel/test_batch_parallel.py
@@ -18,7 +18,7 @@ import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss
@@ -109,7 +109,7 @@ def test_batch():
     w1 = Tensor(np.ones([128, 8, 32, 32]), dtype=ms.float32)
     w2 = Tensor(np.ones([128, 64, 24, 24]), dtype=ms.float32)
     net.set_train()
-    _executor.compile(net, x, w1, w2)
+    _cell_graph_executor.compile(net, x, w1, w2)


 if __name__ == '__main__':
diff --git a/tests/ut/python/parallel/test_batch_parallel_dropout.py b/tests/ut/python/parallel/test_batch_parallel_dropout.py
index 3a4ed04759a..25c9d9de729 100644
--- a/tests/ut/python/parallel/test_batch_parallel_dropout.py
+++ b/tests/ut/python/parallel/test_batch_parallel_dropout.py
@@ -18,7 +18,7 @@ import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss
@@ -71,4 +71,4 @@ def test_batch_parallel_dropout():
     y = Tensor(np.ones([32, 64]), dtype=ms.float32)
     b = Tensor(np.ones([64, 64]), dtype=ms.float32)
     net.set_train()
-    _executor.compile(net, x, y, b)
+    _cell_graph_executor.compile(net, x, y, b)
diff --git a/tests/ut/python/parallel/test_batch_parallel_tensoradd.py b/tests/ut/python/parallel/test_batch_parallel_tensoradd.py
index a1004db1184..83721d19667 100644
--- a/tests/ut/python/parallel/test_batch_parallel_tensoradd.py
+++ b/tests/ut/python/parallel/test_batch_parallel_tensoradd.py
@@ -18,7 +18,7 @@ import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss
@@ -69,4 +69,4 @@ def test_matmul_add():
     y = Tensor(np.ones([32, 64]), dtype=ms.float32)
     b = Tensor(np.ones([64]), dtype=ms.float32)
     net.set_train()
-    _executor.compile(net, x, y, b)
+    _cell_graph_executor.compile(net, x, y, b)
diff --git a/tests/ut/python/parallel/test_batchmm.py b/tests/ut/python/parallel/test_batchmm.py
index 99969e2d7ae..27cce89d52d 100644
--- a/tests/ut/python/parallel/test_batchmm.py
+++ b/tests/ut/python/parallel/test_batchmm.py
@@ -17,7 +17,7 @@
 import mindspore as ms
 import mindspore.context as context
 from mindspore import Tensor, Parameter
 import mindspore.nn as nn
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.nn import TrainOneStepCell, Momentum
 from mindspore.ops import operations as P
@@ -59,7 +59,7 @@ def compile_net(net):
     train_net = TrainOneStepCell(net, optimizer)
     train_net.set_auto_parallel()
     train_net.set_train()
-    _executor.compile(train_net, _x)
+    _cell_graph_executor.compile(train_net, _x)
     context.reset_auto_parallel_context()
diff --git a/tests/ut/python/parallel/test_batchnorm.py b/tests/ut/python/parallel/test_batchnorm.py
index f9a26f742ef..486275d7a96 100644
--- a/tests/ut/python/parallel/test_batchnorm.py
+++ b/tests/ut/python/parallel/test_batchnorm.py
@@ -16,7 +16,7 @@
 import numpy as np
 import mindspore as ms
 from mindspore import context, Tensor, Parameter
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.nn import Cell, TrainOneStepCell, Momentum, BatchNorm2d, BatchNorm1d
 from mindspore.ops import operations as P
@@ -47,7 +47,7 @@ def compile_net(net):
     train_net = TrainOneStepCell(net, optimizer)
     train_net.set_auto_parallel()
     train_net.set_train()
-    _executor.compile(train_net, _x, _b)
+    _cell_graph_executor.compile(train_net, _x, _b)
     context.reset_auto_parallel_context()
@@ -97,7 +97,7 @@ def compile_net2(net):
     train_net = TrainOneStepCell(net, optimizer)
     train_net.set_auto_parallel()
     train_net.set_train()
-    _executor.compile(train_net, _x1, _b1)
+    _cell_graph_executor.compile(train_net, _x1, _b1)
     context.reset_auto_parallel_context()
diff --git a/tests/ut/python/parallel/test_broadcast_to.py b/tests/ut/python/parallel/test_broadcast_to.py
index 8c763a49899..69f4f975b94 100644
--- a/tests/ut/python/parallel/test_broadcast_to.py
+++ b/tests/ut/python/parallel/test_broadcast_to.py
@@ -17,7 +17,7 @@
 import mindspore as ms
 import mindspore.context as context
 from mindspore import Tensor, Parameter
 import mindspore.nn as nn
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.nn import TrainOneStepCell, Momentum
 from mindspore.ops import operations as P
@@ -69,7 +69,7 @@ def compile_net(net):
     train_net = TrainOneStepCell(net, optimizer)
     train_net.set_auto_parallel()
     train_net.set_train()
-    _executor.compile(train_net, _x1)
+    _cell_graph_executor.compile(train_net, _x1)
     context.reset_auto_parallel_context()
@@ -79,7 +79,7 @@ def compile_net2(net):
     train_net = TrainOneStepCell(net, optimizer)
     train_net.set_auto_parallel()
     train_net.set_train()
-    _executor.compile(train_net, _x1, _x2)
+    _cell_graph_executor.compile(train_net, _x1, _x2)
     context.reset_auto_parallel_context()
diff --git a/tests/ut/python/parallel/test_comm_not_recompute.py b/tests/ut/python/parallel/test_comm_not_recompute.py
index 9472a85f218..d5c7d438814 100644
--- a/tests/ut/python/parallel/test_comm_not_recompute.py
+++ b/tests/ut/python/parallel/test_comm_not_recompute.py
@@ -17,7 +17,7 @@ import numpy as np
 import mindspore.nn as nn
 import mindspore as ms
 from mindspore import Tensor, context, Parameter
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import operations as P
 from mindspore.common.initializer import initializer
 from mindspore.context import _Context
@@ -83,5 +83,5 @@ def test_dmnet_train_step():
     net = train_step_with_loss_warp(DenseMutMulNet())
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, input_, label)
+    _cell_graph_executor.compile(net, input_, label)
     _Context().set_backend_policy("ge")
diff --git a/tests/ut/python/parallel/test_comparison_function_info.py b/tests/ut/python/parallel/test_comparison_function_info.py
index 62e9a19bc41..15c3880cbaa 100644
--- a/tests/ut/python/parallel/test_comparison_function_info.py
+++ b/tests/ut/python/parallel/test_comparison_function_info.py
@@ -18,7 +18,7 @@ import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss
@@ -50,7 +50,7 @@ class GradWrap(nn.Cell):
 def compile_net(net, x, y, b):
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x, y, b)
+    _cell_graph_executor.compile(net, x, y, b)


 def test_matmul_equal():
diff --git a/tests/ut/python/parallel/test_concat.py b/tests/ut/python/parallel/test_concat.py
index 1c5da3f401c..4c44f2e7424 100644
--- a/tests/ut/python/parallel/test_concat.py
+++ b/tests/ut/python/parallel/test_concat.py
@@ -16,7 +16,7 @@
 import numpy as np
 import mindspore as ms
 from mindspore import context, Tensor, Parameter
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.nn import Cell, TrainOneStepCell, Momentum
 from mindspore.ops import operations as P
@@ -85,7 +85,7 @@ def compile_net(net):
     train_net = TrainOneStepCell(net, optimizer)
     train_net.set_auto_parallel()
     train_net.set_train()
-    _executor.compile(train_net, _x, _b)
+    _cell_graph_executor.compile(train_net, _x, _b)
     context.reset_auto_parallel_context()
diff --git a/tests/ut/python/parallel/test_conv2d.py b/tests/ut/python/parallel/test_conv2d.py
index d89929c0b0b..7068baa363f 100644
--- a/tests/ut/python/parallel/test_conv2d.py
+++ b/tests/ut/python/parallel/test_conv2d.py
@@ -17,7 +17,7 @@
 import pytest
 import mindspore as ms
 from mindspore import context, Tensor, Parameter
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.nn import Cell, TrainOneStepCell, Momentum
 from mindspore.ops import operations as P
@@ -52,7 +52,7 @@ def compile_net(net, input_x=_x):
     train_net = TrainOneStepCell(net, optimizer)
     train_net.set_auto_parallel()
     train_net.set_train()
-    _executor.compile(train_net, input_x, _b)
+    _cell_graph_executor.compile(train_net, input_x, _b)
     context.reset_auto_parallel_context()
diff --git a/tests/ut/python/parallel/test_conv2d_transpose.py b/tests/ut/python/parallel/test_conv2d_transpose.py
index aa7202fe656..fe9111a59d7 100644
--- a/tests/ut/python/parallel/test_conv2d_transpose.py
+++ b/tests/ut/python/parallel/test_conv2d_transpose.py
@@ -17,7 +17,7 @@
 import pytest
 import mindspore as ms
 from mindspore import context, Tensor, Parameter
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.nn import Cell, TrainOneStepCell, Momentum
 from mindspore.ops import operations as P
@@ -65,7 +65,7 @@ def compile_net(net):
     train_net = TrainOneStepCell(net, optimizer)
     train_net.set_auto_parallel()
     train_net.set_train()
-    _executor.compile(train_net, _x, _b)
+    _cell_graph_executor.compile(train_net, _x, _b)
     context.reset_auto_parallel_context()
diff --git a/tests/ut/python/parallel/test_dataset.py b/tests/ut/python/parallel/test_dataset.py
index 968de044307..9cbaeaed8b1 100644
--- a/tests/ut/python/parallel/test_dataset.py
+++ b/tests/ut/python/parallel/test_dataset.py
@@ -16,7 +16,7 @@
 import numpy as np
 import mindspore.nn as nn
 from mindspore import Tensor
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import operations as P
 from mindspore.ops.operations.comm_ops import _VirtualDataset
@@ -41,4 +41,4 @@ def test_virtual_dataset():
     y = Tensor(np.ones([32, 64], dtype=np.float32))
     z = Tensor(np.ones([64, 64], dtype=np.float32))
     network = VirtualDatasetNet()
-    _executor.compile(network, x, y, z)
+    _cell_graph_executor.compile(network, x, y, z)
diff --git a/tests/ut/python/parallel/test_dense_matmul.py b/tests/ut/python/parallel/test_dense_matmul.py
index f98d32d3815..c381b771f0a 100644
--- a/tests/ut/python/parallel/test_dense_matmul.py
+++ b/tests/ut/python/parallel/test_dense_matmul.py
@@ -16,7 +16,7 @@
 import numpy as np
 import mindspore.nn as nn
 from mindspore import Tensor, context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import operations as P
 from ....train_step_wrap import train_step_with_loss_warp
@@ -52,4 +52,4 @@ def test_dmnet_train_step():
     net = DenseMutMulNet()
     net = train_step_with_loss_warp(DenseMutMulNet())
     net.set_train()
-    _executor.compile(net, input_, label)
+    _cell_graph_executor.compile(net, input_, label)
diff --git a/tests/ut/python/parallel/test_different_type_for_div_op.py b/tests/ut/python/parallel/test_different_type_for_div_op.py
index 0c894cea9ac..e5bc28e96c2 100644
--- a/tests/ut/python/parallel/test_different_type_for_div_op.py
+++ b/tests/ut/python/parallel/test_different_type_for_div_op.py
@@ -18,7 +18,7 @@ import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
@@ -38,7 +38,7 @@ class GradWrap(nn.Cell):
 def compile_net(net, x, y):
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x, y)
+    _cell_graph_executor.compile(net, x, y)


 def test_sum_as_loss_float16():
diff --git a/tests/ut/python/parallel/test_dropout.py b/tests/ut/python/parallel/test_dropout.py
index e8aed7e2a89..21a46ca6976 100644
--- a/tests/ut/python/parallel/test_dropout.py
+++ b/tests/ut/python/parallel/test_dropout.py
@@ -16,7 +16,7 @@
 import numpy as np
 import mindspore as ms
 from mindspore import context, Tensor, Parameter
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.nn import Cell, TrainOneStepCell, Momentum
 from mindspore.ops import operations as P
@@ -50,7 +50,7 @@ def compile_net(net):
     train_net = TrainOneStepCell(net, optimizer)
     train_net.set_auto_parallel()
     train_net.set_train()
-    _executor.compile(train_net, _x, _b)
+    _cell_graph_executor.compile(train_net, _x, _b)
     context.reset_auto_parallel_context()
diff --git a/tests/ut/python/parallel/test_dropout_do_mask.py b/tests/ut/python/parallel/test_dropout_do_mask.py
index 4c32f41e503..53e297e1f5f 100644
--- a/tests/ut/python/parallel/test_dropout_do_mask.py
+++ b/tests/ut/python/parallel/test_dropout_do_mask.py
@@ -16,7 +16,7 @@
 import numpy as np
 import mindspore as ms
 from mindspore import context, Tensor, Parameter
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.nn import Cell, TrainOneStepCell, Momentum
 from mindspore.ops import operations as P
@@ -52,7 +52,7 @@ def compile_net(net):
     train_net = TrainOneStepCell(net, optimizer)
     train_net.set_auto_parallel()
     train_net.set_train()
-    _executor.compile(train_net, _x, _b)
+    _cell_graph_executor.compile(train_net, _x, _b)
     context.reset_auto_parallel_context()
diff --git a/tests/ut/python/parallel/test_dynamic_shape.py b/tests/ut/python/parallel/test_dynamic_shape.py
index 427a0fca486..e3dffff629d 100644
--- a/tests/ut/python/parallel/test_dynamic_shape.py
+++ b/tests/ut/python/parallel/test_dynamic_shape.py
@@ -18,7 +18,7 @@ import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.common.parameter import Parameter
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
@@ -81,7 +81,7 @@ def test_unique_column_split():
     train_net = TrainOneStepCell(net, optimizer)
     train_net.set_auto_parallel()
     train_net.set_train()
-    _executor.compile(train_net, x)
+    _cell_graph_executor.compile(train_net, x)

 def test_unique_row_split():
     class Net(nn.Cell):
@@ -115,4 +115,4 @@ def test_unique_row_split():
     train_net = TrainOneStepCell(net, optimizer)
     train_net.set_auto_parallel()
     train_net.set_train()
-    _executor.compile(train_net, x)
+    _cell_graph_executor.compile(train_net, x)
diff --git a/tests/ut/python/parallel/test_element_wise_function.py b/tests/ut/python/parallel/test_element_wise_function.py
index 775d3913679..b92bc6adcb3 100644
--- a/tests/ut/python/parallel/test_element_wise_function.py
+++ b/tests/ut/python/parallel/test_element_wise_function.py
@@ -18,7 +18,7 @@ import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss
@@ -50,7 +50,7 @@ class GradWrap(nn.Cell):
 def compile_net(net, x, y, b):
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x, y, b)
+    _cell_graph_executor.compile(net, x, y, b)


 def test_matmul_pow():
diff --git a/tests/ut/python/parallel/test_embeddinglookup.py b/tests/ut/python/parallel/test_embeddinglookup.py
index d001063057a..959d2bef230 100644
--- a/tests/ut/python/parallel/test_embeddinglookup.py
+++ b/tests/ut/python/parallel/test_embeddinglookup.py
@@ -16,7 +16,7 @@
 import numpy as np
 import mindspore as ms
 import mindspore.nn as nn
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import operations as P
 from mindspore.ops import composite as C
 from mindspore import Tensor, context
@@ -67,7 +67,7 @@ def test_embeddinglookup_reducescatter_false():
     x = Tensor(np.ones([64, 32]), dtype=ms.float32)
     y = Tensor(np.ones([8, 32, 8]), dtype=ms.float32)
     net.set_train()
-    _executor.compile(net, x, y)
+    _cell_graph_executor.compile(net, x, y)


 def test_embeddinglookup_reducescatter_true():
@@ -79,7 +79,7 @@ def test_embeddinglookup_reducescatter_true():
     x = Tensor(np.ones([64, 32]), dtype=ms.float32)
     y = Tensor(np.ones([8, 32, 8]), dtype=ms.float32)
     net.set_train()
-    _executor.compile(net, x, y)
+    _cell_graph_executor.compile(net, x, y)


 def test_embeddinglookup_reducescatter_false_grad():
@@ -91,7 +91,7 @@ def test_embeddinglookup_reducescatter_false_grad():
     x = Tensor(np.ones([64, 32]), dtype=ms.float32)
     y = Tensor(np.ones([8, 32, 8]), dtype=ms.float32)
     net.set_train()
-    _executor.compile(net, x, y)
+    _cell_graph_executor.compile(net, x, y)


 def test_embeddinglookup_reducescatter_true_grad():
@@ -104,7 +104,7 @@ def test_embeddinglookup_reducescatter_true_grad():
     x = Tensor(np.ones([64, 32]), dtype=ms.float32)
     y = Tensor(np.ones([8, 32, 8]), dtype=ms.float32)
     net.set_train()
-    _executor.compile(net, x, y)
+    _cell_graph_executor.compile(net, x, y)


 def test_embeddinglookup_semi_auto1():
@@ -119,7 +119,7 @@ def test_embeddinglookup_semi_auto1():
     x = Tensor(np.ones([64, 64]), dtype=ms.float32)
     y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
     net.set_train()
-    _executor.compile(net, x, y)
+    _cell_graph_executor.compile(net, x, y)


 def test_embeddinglookup_semi_auto2():
@@ -134,4 +134,4 @@ def test_embeddinglookup_semi_auto2():
     x = Tensor(np.ones([64, 64]), dtype=ms.float32)
     y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
     net.set_train()
-    _executor.compile(net, x, y)
+    _cell_graph_executor.compile(net, x, y)
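Reviewer note: the next diff, test_eval.py, compiles one network under two phases, which is where the fixed key generation matters most. A minimal sketch of that dual-phase pattern, assuming the keyword arguments shown in the hunks (phase and auto_parallel_mode) keep their current meaning; compile_train_then_eval is an invented name:

    from mindspore.common.api import _cell_graph_executor

    def compile_train_then_eval(net, eval_net, *inputs):
        # The phase string distinguishes the two compiled graphs of the same cell.
        net.set_train()
        _cell_graph_executor.compile(net, *inputs, phase='train', auto_parallel_mode=True)
        eval_net.set_train(mode=False)
        _cell_graph_executor.compile(eval_net, *inputs, phase='eval', auto_parallel_mode=True)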
diff --git a/tests/ut/python/parallel/test_eval.py b/tests/ut/python/parallel/test_eval.py
index c6c076c32c3..0d0ce6de273 100644
--- a/tests/ut/python/parallel/test_eval.py
+++ b/tests/ut/python/parallel/test_eval.py
@@ -16,7 +16,7 @@
 import numpy as np
 import mindspore as ms
 from mindspore import context, Tensor, Parameter
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.nn import Cell
 from mindspore.ops import operations as P
@@ -60,11 +60,11 @@ def test_train_and_eval():
     eval_net = EvalNet(net, strategy2=strategy2)
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, _x, _b, phase='train', auto_parallel_mode=True)
+    _cell_graph_executor.compile(net, _x, _b, phase='train', auto_parallel_mode=True)

     eval_net.set_train(mode=False)
     eval_net.set_auto_parallel()
-    _executor.compile(eval_net, _x, _b, phase='eval', auto_parallel_mode=True)
+    _cell_graph_executor.compile(eval_net, _x, _b, phase='eval', auto_parallel_mode=True)

     context.reset_auto_parallel_context()
@@ -77,10 +77,10 @@ def test_train_and_eval_auto():
     eval_net = EvalNet(net, strategy2=strategy2)
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, _x, _b, phase='train', auto_parallel_mode=True)
+    _cell_graph_executor.compile(net, _x, _b, phase='train', auto_parallel_mode=True)
     eval_net.set_train(mode=False)
     eval_net.set_auto_parallel()
-    _executor.compile(eval_net, _x, _b, phase='eval', auto_parallel_mode=True)
+    _cell_graph_executor.compile(eval_net, _x, _b, phase='eval', auto_parallel_mode=True)

     context.reset_auto_parallel_context()
diff --git a/tests/ut/python/parallel/test_expand_dims.py b/tests/ut/python/parallel/test_expand_dims.py
index e93f974f84d..aa96f79eb37 100644
--- a/tests/ut/python/parallel/test_expand_dims.py
+++ b/tests/ut/python/parallel/test_expand_dims.py
@@ -16,7 +16,7 @@
 import numpy as np
 import mindspore as ms
 from mindspore import context, Tensor, Parameter
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.nn import Cell, TrainOneStepCell, Momentum
 from mindspore.ops import operations as P
@@ -59,7 +59,7 @@ def compile_net(net):
     train_net = TrainOneStepCell(net, optimizer)
     train_net.set_auto_parallel()
     train_net.set_train()
-    _executor.compile(train_net, _x, _b)
+    _cell_graph_executor.compile(train_net, _x, _b)
     context.reset_auto_parallel_context()
diff --git a/tests/ut/python/parallel/test_forward_graph.py b/tests/ut/python/parallel/test_forward_graph.py
index 4780f0cd077..3c40be1d8a6 100644
--- a/tests/ut/python/parallel/test_forward_graph.py
+++ b/tests/ut/python/parallel/test_forward_graph.py
@@ -16,7 +16,7 @@
 import numpy as np
 import mindspore as ms
 from mindspore import context, Tensor, Parameter
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.nn import Cell
 from mindspore.ops import operations as P
@@ -42,7 +42,7 @@ _b = Tensor(np.ones([128, 64, 32]), dtype=ms.float32)
 def compile_net(net):
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, _x, _b)
+    _cell_graph_executor.compile(net, _x, _b)
     context.reset_auto_parallel_context()
diff --git a/tests/ut/python/parallel/test_gather_v2.py b/tests/ut/python/parallel/test_gather_v2.py
index 7635477fd51..113aad35243 100644
--- a/tests/ut/python/parallel/test_gather_v2.py
+++ b/tests/ut/python/parallel/test_gather_v2.py
@@ -17,7 +17,7 @@ import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss
@@ -72,7 +72,7 @@ def test_gatherv2_semi_auto0():
     x = Tensor(np.ones([64, 64]), dtype=ms.float32)
     y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
     net.set_train()
-    _executor.compile(net, x, y)
+    _cell_graph_executor.compile(net, x, y)


 def test_gatherv2_semi_auto1():
@@ -85,7 +85,7 @@ def test_gatherv2_semi_auto1():
     x = Tensor(np.ones([64, 64]), dtype=ms.float32)
     y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
     net.set_train()
-    _executor.compile(net, x, y)
+    _cell_graph_executor.compile(net, x, y)


 def test_gatherv2_semi_auto2():
@@ -98,7 +98,7 @@ def test_gatherv2_semi_auto2():
     x = Tensor(np.ones([64, 64]), dtype=ms.float32)
     y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
     net.set_train()
-    _executor.compile(net, x, y)
_cell_graph_executor.compile(net, x, y) def test_gatherv2_semi_auto3(): @@ -111,7 +111,7 @@ def test_gatherv2_semi_auto3(): x = Tensor(np.ones([64, 64]), dtype=ms.float32) y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32) net.set_train() - _executor.compile(net, x, y) + _cell_graph_executor.compile(net, x, y) def test_gatherv2_semi_auto4(): @@ -124,7 +124,7 @@ def test_gatherv2_semi_auto4(): x = Tensor(np.ones([64, 32]), dtype=ms.float32) y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32) net.set_train() - _executor.compile(net, x, y) + _cell_graph_executor.compile(net, x, y) def test_gatherv2_semi_auto5(): @@ -137,7 +137,7 @@ def test_gatherv2_semi_auto5(): x = Tensor(np.ones([64, 32]), dtype=ms.float32) y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32) net.set_train() - _executor.compile(net, x, y) + _cell_graph_executor.compile(net, x, y) def test_gatherv2_semi_auto6(): @@ -149,7 +149,7 @@ def test_gatherv2_semi_auto6(): x = Tensor(np.ones([64, 32]), dtype=ms.float32) y = Tensor(np.ones([64, 64, 32]), dtype=ms.float32) net.set_train() - _executor.compile(net, x, y) + _cell_graph_executor.compile(net, x, y) def test_gatherv2_semi_auto7(): @@ -161,7 +161,7 @@ def test_gatherv2_semi_auto7(): x = Tensor(np.ones([64, 32]), dtype=ms.float32) y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32) net.set_train() - _executor.compile(net, x, y) + _cell_graph_executor.compile(net, x, y) def test_gatherv2_semi_auto8(): @@ -174,7 +174,7 @@ def test_gatherv2_semi_auto8(): x = Tensor(np.ones([64]), dtype=ms.float32) y = Tensor(np.ones([64, 64]), dtype=ms.float32) net.set_train() - _executor.compile(net, x, y) + _cell_graph_executor.compile(net, x, y) def test_gatherv2_forward_all_reduce(): @@ -187,7 +187,7 @@ def test_gatherv2_forward_all_reduce(): x = Tensor(np.ones([64, 64]), dtype=ms.float32) y = Tensor(np.ones([2, 64, 64]), dtype=ms.float32) net.set_train() - _executor.compile(net, x, y) + _cell_graph_executor.compile(net, x, y) def test_gatherv2_shard_batch_and_axis(): @@ -200,7 +200,7 @@ def test_gatherv2_shard_batch_and_axis(): x = Tensor(np.ones([64, 64]), dtype=ms.float32) y = Tensor(np.ones([2, 64, 64]), dtype=ms.float32) net.set_train() - _executor.compile(net, x, y) + _cell_graph_executor.compile(net, x, y) def test_gatherv2_split_axis_0_repeat_calc(): @@ -213,7 +213,7 @@ def test_gatherv2_split_axis_0_repeat_calc(): x = Tensor(np.ones([64, 64]), dtype=ms.float32) y = Tensor(np.ones([2, 64, 64]), dtype=ms.float32) net.set_train() - _executor.compile(net, x, y) + _cell_graph_executor.compile(net, x, y) def test_gatherv2_auto0(): @@ -223,7 +223,7 @@ def test_gatherv2_auto0(): x = Tensor(np.ones([64, 32]), dtype=ms.float32) y = Tensor(np.ones([64, 64, 32]), dtype=ms.float32) net.set_train() - _executor.compile(net, x, y) + _cell_graph_executor.compile(net, x, y) def test_gatherv2_auto1(): @@ -233,4 +233,4 @@ def test_gatherv2_auto1(): x = Tensor(np.ones([64, 32]), dtype=ms.float32) y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32) net.set_train() - _executor.compile(net, x, y) + _cell_graph_executor.compile(net, x, y) diff --git a/tests/ut/python/parallel/test_gatherd.py b/tests/ut/python/parallel/test_gatherd.py index abdcdd69391..dec1fcb47c4 100644 --- a/tests/ut/python/parallel/test_gatherd.py +++ b/tests/ut/python/parallel/test_gatherd.py @@ -16,7 +16,7 @@ import numpy as np import mindspore as ms from mindspore import context, Tensor, Parameter -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.nn import Cell, 
TrainOneStepCell, Momentum from mindspore.ops import operations as P @@ -45,7 +45,7 @@ def compile_net(net): train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() train_net.set_train() - _executor.compile(train_net, _x, _b) + _cell_graph_executor.compile(train_net, _x, _b) context.reset_auto_parallel_context() diff --git a/tests/ut/python/parallel/test_get_next.py b/tests/ut/python/parallel/test_get_next.py index 655678d0718..05c81d6a6bb 100644 --- a/tests/ut/python/parallel/test_get_next.py +++ b/tests/ut/python/parallel/test_get_next.py @@ -16,7 +16,7 @@ import mindspore as ms import mindspore.nn as nn from mindspore import Tensor from mindspore import context -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter, ParameterTuple from mindspore.ops import composite as C @@ -57,7 +57,7 @@ class GradWrap(nn.Cell): def compile_net(net): net.set_auto_parallel() - _executor.compile(net) + _cell_graph_executor.compile(net) def test_get_next_single(): @@ -74,7 +74,7 @@ def test_get_next_single(): return x net = GradWrap(NetWithLoss(Net(), [ms.float32, ms.int32], [[32, 64], [32]], 2)) - _executor.compile(net) + _cell_graph_executor.compile(net) def test_get_next_semi_auto_parallel(): diff --git a/tests/ut/python/parallel/test_get_parameter_layout.py b/tests/ut/python/parallel/test_get_parameter_layout.py index 6c3100390a9..f4a444db2ba 100644 --- a/tests/ut/python/parallel/test_get_parameter_layout.py +++ b/tests/ut/python/parallel/test_get_parameter_layout.py @@ -48,7 +48,7 @@ def test_get_parameter_layout(): net = Net(strategy1, strategy2, weight) net.set_auto_parallel() net.set_train() - exe = me._executor + exe = me._cell_graph_executor exe.compile(net, x, phase='train', auto_parallel_mode=True) x_layout = ([2, 4], [1, -1], [16, 32], 0, True, '') # device_arrangement = [2, 4], tensor_map = [1, -1] weight_layout = ([2, 4], [0, -1], [16, 32], 0, True, '') # device_arrangement = [2, 4], tensor_map = [0, -1] diff --git a/tests/ut/python/parallel/test_gpu_dropout.py b/tests/ut/python/parallel/test_gpu_dropout.py index 2dabc207c6d..75e552bbe33 100644 --- a/tests/ut/python/parallel/test_gpu_dropout.py +++ b/tests/ut/python/parallel/test_gpu_dropout.py @@ -17,7 +17,7 @@ import mindspore as ms import mindspore.nn as nn from mindspore import Tensor from mindspore import context -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.ops import composite as C from mindspore.ops import operations as P from tests.ut.python.ops.test_math_ops import VirtualLoss @@ -66,7 +66,7 @@ def test_dropout_semi_auto(): x = Tensor(np.ones([64, 32]), dtype=ms.float32) y = Tensor(np.ones([32, 128]), dtype=ms.float32) net.set_train() - _executor.compile(net, x, y) + _cell_graph_executor.compile(net, x, y) def test_dropout_semi_auto2(): @@ -79,7 +79,7 @@ def test_dropout_semi_auto2(): x = Tensor(np.ones([64, 32]), dtype=ms.float32) y = Tensor(np.ones([32, 128]), dtype=ms.float32) net.set_train() - _executor.compile(net, x, y) + _cell_graph_executor.compile(net, x, y) def test_dropout_semi_auto3(): @@ -92,7 +92,7 @@ def test_dropout_semi_auto3(): x = Tensor(np.ones([64, 32]), dtype=ms.float32) y = Tensor(np.ones([32, 128]), dtype=ms.float32) net.set_train() - _executor.compile(net, x, y) + _cell_graph_executor.compile(net, x, y) def test_dropout_auto(): @@ -103,4 +103,4 @@ def test_dropout_auto(): x = 
Tensor(np.ones([64, 32]), dtype=ms.float32) y = Tensor(np.ones([32, 128]), dtype=ms.float32) net.set_train() - _executor.compile(net, x, y) + _cell_graph_executor.compile(net, x, y) diff --git a/tests/ut/python/parallel/test_hybird_parallel_activation.py b/tests/ut/python/parallel/test_hybird_parallel_activation.py index 5c9f6ec731d..b8138163998 100644 --- a/tests/ut/python/parallel/test_hybird_parallel_activation.py +++ b/tests/ut/python/parallel/test_hybird_parallel_activation.py @@ -18,7 +18,7 @@ import mindspore as ms import mindspore.nn as nn from mindspore import Tensor from mindspore import context -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.ops import composite as C from mindspore.ops import operations as P from tests.ut.python.ops.test_math_ops import VirtualLoss @@ -50,7 +50,7 @@ class GradWrap(nn.Cell): def compile_net(net, x, y, b): net.set_auto_parallel() net.set_train() - _executor.compile(net, x, y, b) + _cell_graph_executor.compile(net, x, y, b) def test_matmul_tanh(): diff --git a/tests/ut/python/parallel/test_initializer_weight_slice.py b/tests/ut/python/parallel/test_initializer_weight_slice.py index cd68b9ebf4e..3b4f58c23b0 100644 --- a/tests/ut/python/parallel/test_initializer_weight_slice.py +++ b/tests/ut/python/parallel/test_initializer_weight_slice.py @@ -47,7 +47,7 @@ def check_initializer_weight_slice(init_name="Uniform"): strategy1 = ((2, 1), (4, 1)) strategy2 = ((2, 4),) context.set_context(mode=context.GRAPH_MODE) - exe = me._executor + exe = me._cell_graph_executor x = Tensor(np.ones([32, 32]), dtype=ms.float32) weight = initializer(init_name, [64, 32], ms.float32) @@ -84,7 +84,7 @@ def test_wrong_order_set_parallel_mode_with_initializer(): strategy1 = ((2, 1), (4, 1)) strategy2 = ((2, 4),) net = Net(strategy1, strategy2, weight) - exe = me._executor + exe = me._cell_graph_executor x = Tensor(np.ones([32, 32]), dtype=ms.float32) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0) net.set_auto_parallel() @@ -97,7 +97,7 @@ def test_wrong_order_set_same_parallel_mode_with_initializer(): strategy1 = ((2, 1), (4, 1)) strategy2 = ((2, 4),) net = Net(strategy1, strategy2, weight) - exe = me._executor + exe = me._cell_graph_executor x = Tensor(np.ones([32, 32]), dtype=ms.float32) context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=8, global_rank=0) net.set_auto_parallel() @@ -108,7 +108,7 @@ def test_wrong_order_set_parallel_mode_without_initializer(): strategy1 = ((2, 1), (4, 1)) strategy2 = ((2, 4),) net = Net(strategy1, strategy2, weight) - exe = me._executor + exe = me._cell_graph_executor x = Tensor(np.ones([32, 32]), dtype=ms.float32) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0) net.set_auto_parallel() @@ -126,7 +126,7 @@ def test_check_initializer_weight_slice_seed(init_name="Uniform"): strategy1 = ((2, 1), (4, 1)) strategy2 = ((2, 4),) context.set_context(mode=context.GRAPH_MODE) - exe = me._executor + exe = me._cell_graph_executor x = Tensor(np.ones([32, 32]), dtype=ms.float32) weight = initializer(init_name, [64, 32], ms.float32) diff --git a/tests/ut/python/parallel/test_l2normalize.py b/tests/ut/python/parallel/test_l2normalize.py index ff1d4f8924e..268f45e0109 100644 --- a/tests/ut/python/parallel/test_l2normalize.py +++ b/tests/ut/python/parallel/test_l2normalize.py @@ -18,7 +18,7 @@ import mindspore as ms import mindspore.nn as nn from mindspore import Tensor 
from mindspore import context -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.ops import composite as C from mindspore.ops import operations as P from tests.ut.python.ops.test_math_ops import VirtualLoss @@ -76,4 +76,4 @@ def test_l2normalize_matmul(): y = Tensor(np.ones([128, 32, 64]), dtype=ms.float32) b = Tensor(np.ones([128, 32, 64]), dtype=ms.float32) net.set_train() - _executor.compile(net, x, y, b) + _cell_graph_executor.compile(net, x, y, b) diff --git a/tests/ut/python/parallel/test_layer_norm.py b/tests/ut/python/parallel/test_layer_norm.py index a00b9b4d89f..a724e13b3a6 100644 --- a/tests/ut/python/parallel/test_layer_norm.py +++ b/tests/ut/python/parallel/test_layer_norm.py @@ -17,7 +17,7 @@ import pytest import mindspore as ms from mindspore import context, Tensor, Parameter -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.common.initializer import initializer from mindspore.nn import Cell, TrainOneStepCell, Momentum from mindspore.ops import operations as P @@ -53,7 +53,7 @@ def compile_net(net): train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() train_net.set_train() - _executor.compile(train_net, _x, _b) + _cell_graph_executor.compile(train_net, _x, _b) context.reset_auto_parallel_context() diff --git a/tests/ut/python/parallel/test_layer_norm_further.py b/tests/ut/python/parallel/test_layer_norm_further.py index 13bd3ae6721..f6151f59879 100644 --- a/tests/ut/python/parallel/test_layer_norm_further.py +++ b/tests/ut/python/parallel/test_layer_norm_further.py @@ -17,7 +17,7 @@ import pytest import mindspore as ms from mindspore import context, Tensor, Parameter -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.common.initializer import initializer from mindspore.nn import Cell, TrainOneStepCell, Momentum from mindspore.ops import operations as P @@ -151,7 +151,7 @@ def compile_net(net): train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() train_net.set_train() - _executor.compile(train_net, _x, _b) + _cell_graph_executor.compile(train_net, _x, _b) context.reset_auto_parallel_context() diff --git a/tests/ut/python/parallel/test_linear.py b/tests/ut/python/parallel/test_linear.py index ce2c4d3692c..c09b9cb8351 100644 --- a/tests/ut/python/parallel/test_linear.py +++ b/tests/ut/python/parallel/test_linear.py @@ -18,7 +18,7 @@ import mindspore as ms import mindspore.nn as nn from mindspore import Tensor from mindspore import context -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.ops import composite as C from mindspore.ops import operations as P @@ -74,4 +74,4 @@ def test_linear(): bias = Tensor(np.ones([64]), dtype=ms.float32) label = Tensor(np.ones([64, 64]), dtype=ms.float32) net.set_train() - _executor.compile(net, x, y, bias, label) + _cell_graph_executor.compile(net, x, y, bias, label) diff --git a/tests/ut/python/parallel/test_loop_two_matmul.py b/tests/ut/python/parallel/test_loop_two_matmul.py index 5c162a6d8f5..2703ebac173 100644 --- a/tests/ut/python/parallel/test_loop_two_matmul.py +++ b/tests/ut/python/parallel/test_loop_two_matmul.py @@ -19,7 +19,7 @@ import mindspore as ms import mindspore.nn as nn from mindspore import Tensor from mindspore import context -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from 
mindspore.ops import composite as C from mindspore.ops import operations as P from tests.ut.python.ops.test_math_ops import VirtualLoss @@ -96,5 +96,5 @@ def test_two_matmul(): context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") net.set_auto_parallel() net.set_train() - _executor.compile(net, x, y, b) + _cell_graph_executor.compile(net, x, y, b) count = count + 1 diff --git a/tests/ut/python/parallel/test_loss_and_optimizer.py b/tests/ut/python/parallel/test_loss_and_optimizer.py index 03c641e59a3..c2c3578ef1f 100644 --- a/tests/ut/python/parallel/test_loss_and_optimizer.py +++ b/tests/ut/python/parallel/test_loss_and_optimizer.py @@ -18,7 +18,7 @@ import mindspore as ms import mindspore.nn as nn from mindspore import Tensor, Parameter from mindspore import context -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.nn import TrainOneStepCell from mindspore.nn.optim import Momentum, LARS from mindspore.ops import operations as P @@ -38,7 +38,7 @@ class NetWithLoss(nn.Cell): def compile_net(net, x, b): net.set_auto_parallel() net.set_train() - _executor.compile(net, x, b) + _cell_graph_executor.compile(net, x, b) def test_momentum(): diff --git a/tests/ut/python/parallel/test_manual_embedding_lookup.py b/tests/ut/python/parallel/test_manual_embedding_lookup.py index dd19b99ed18..1ee815ffca8 100644 --- a/tests/ut/python/parallel/test_manual_embedding_lookup.py +++ b/tests/ut/python/parallel/test_manual_embedding_lookup.py @@ -17,7 +17,7 @@ import numpy as np import pytest import mindspore as ms from mindspore import context, Tensor, Parameter -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.nn import Cell, TrainOneStepCell, LazyAdam from mindspore.ops import operations as P from mindspore.common.initializer import initializer @@ -72,7 +72,7 @@ def compile_net(net): train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() train_net.set_train() - _executor.compile(train_net, _x, _b, auto_parallel_mode=True) + _cell_graph_executor.compile(train_net, _x, _b, auto_parallel_mode=True) context.reset_auto_parallel_context() diff --git a/tests/ut/python/parallel/test_manual_gatherv2.py b/tests/ut/python/parallel/test_manual_gatherv2.py index 0cf8a39691c..a41bf0a53c1 100644 --- a/tests/ut/python/parallel/test_manual_gatherv2.py +++ b/tests/ut/python/parallel/test_manual_gatherv2.py @@ -17,7 +17,7 @@ import numpy as np import pytest import mindspore as ms from mindspore import context, Tensor, Parameter -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.nn import Cell, TrainOneStepCell, Momentum from mindspore.ops import operations as P from mindspore.common.initializer import initializer @@ -65,7 +65,7 @@ def compile_net(net): train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() train_net.set_train() - _executor.compile(train_net, _x, _b, auto_parallel_mode=True) + _cell_graph_executor.compile(train_net, _x, _b, auto_parallel_mode=True) context.reset_auto_parallel_context() diff --git a/tests/ut/python/parallel/test_matmul_dropout.py b/tests/ut/python/parallel/test_matmul_dropout.py index 0bea7d34f04..3cde941ea6e 100644 --- a/tests/ut/python/parallel/test_matmul_dropout.py +++ b/tests/ut/python/parallel/test_matmul_dropout.py @@ -20,7 +20,7 @@ from mindspore import Tensor from mindspore import context import mindspore.common.dtype as mstype from 
mindspore.common.seed import _get_graph_seed -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore._checkparam import Validator from mindspore.ops.primitive import constexpr from mindspore.ops import composite as C @@ -128,4 +128,4 @@ def test_two_matmul_dropout(): y = Tensor(np.ones([32, 64]), dtype=ms.float32) b = Tensor(np.ones([64, 64]), dtype=ms.float32) net.set_train() - _executor.compile(net, x, y, b) + _cell_graph_executor.compile(net, x, y, b) diff --git a/tests/ut/python/parallel/test_matmul_tensor.py b/tests/ut/python/parallel/test_matmul_tensor.py index e052ed05c04..2e1e3aa3202 100644 --- a/tests/ut/python/parallel/test_matmul_tensor.py +++ b/tests/ut/python/parallel/test_matmul_tensor.py @@ -19,7 +19,7 @@ import mindspore.common.dtype as mstype import mindspore.nn as nn from mindspore import Tensor from mindspore import context -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.context import set_auto_parallel_context from mindspore.ops import composite as C from mindspore.ops import operations as P @@ -52,7 +52,7 @@ class GradWrap(nn.Cell): def compile_net(net, x, y): net.set_auto_parallel() net.set_train() - _executor.compile(net, x, y) + _cell_graph_executor.compile(net, x, y) # model_parallel test diff --git a/tests/ut/python/parallel/test_maxpool_avgpool.py b/tests/ut/python/parallel/test_maxpool_avgpool.py index b88d2370fc1..43dd701b336 100644 --- a/tests/ut/python/parallel/test_maxpool_avgpool.py +++ b/tests/ut/python/parallel/test_maxpool_avgpool.py @@ -17,7 +17,7 @@ import pytest import mindspore as ms from mindspore import context, Tensor, Parameter -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.nn import Cell, TrainOneStepCell, Momentum from mindspore.ops import operations as P @@ -63,7 +63,7 @@ def compile_net(net, inputs=_x): train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() train_net.set_train() - _executor.compile(train_net, inputs, _b) + _cell_graph_executor.compile(train_net, inputs, _b) context.reset_auto_parallel_context() diff --git a/tests/ut/python/parallel/test_mix_precision_hybrid_parallel.py b/tests/ut/python/parallel/test_mix_precision_hybrid_parallel.py index d30d27bf190..759c7f4be67 100644 --- a/tests/ut/python/parallel/test_mix_precision_hybrid_parallel.py +++ b/tests/ut/python/parallel/test_mix_precision_hybrid_parallel.py @@ -18,7 +18,7 @@ import mindspore as ms import mindspore.nn as nn from mindspore import Tensor from mindspore import context -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.ops import composite as C from mindspore.ops import operations as P from tests.ut.python.ops.test_math_ops import VirtualLoss @@ -88,4 +88,4 @@ def test_two_matmul(): z = Tensor(np.ones([64, 64]), dtype=ms.float32) net.set_train() - _executor.compile(net, x, y, b, z) + _cell_graph_executor.compile(net, x, y, b, z) diff --git a/tests/ut/python/parallel/test_mul_div_bn.py b/tests/ut/python/parallel/test_mul_div_bn.py index 9c0f6360cff..f2419f43005 100644 --- a/tests/ut/python/parallel/test_mul_div_bn.py +++ b/tests/ut/python/parallel/test_mul_div_bn.py @@ -15,7 +15,7 @@ import numpy as np import mindspore as ms import mindspore.context as context -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore import Tensor, Parameter 
import mindspore.nn as nn from mindspore.nn import Cell, TrainOneStepCell, Momentum @@ -66,7 +66,7 @@ def compile_net(net): train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() train_net.set_train() - _executor.compile(train_net, inputs_, label_) + _cell_graph_executor.compile(train_net, inputs_, label_) context.reset_auto_parallel_context() diff --git a/tests/ut/python/parallel/test_multi_field_embedding.py b/tests/ut/python/parallel/test_multi_field_embedding.py index f858caf2624..8ebad2fb14c 100644 --- a/tests/ut/python/parallel/test_multi_field_embedding.py +++ b/tests/ut/python/parallel/test_multi_field_embedding.py @@ -17,7 +17,7 @@ import numpy as np import mindspore as ms import mindspore.nn as nn -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.ops import operations as P from mindspore.ops import composite as C from mindspore import Tensor, context @@ -78,7 +78,7 @@ def compile_net(net, shape): train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() train_net.set_train() - _executor.compile(train_net, x, y, z) + _cell_graph_executor.compile(train_net, x, y, z) context.reset_auto_parallel_context() diff --git a/tests/ut/python/parallel/test_neg.py b/tests/ut/python/parallel/test_neg.py index 6afcfe251f4..a4710484f75 100644 --- a/tests/ut/python/parallel/test_neg.py +++ b/tests/ut/python/parallel/test_neg.py @@ -16,7 +16,7 @@ import numpy as np import mindspore as ms from mindspore import context, Tensor, Parameter -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.nn import Cell, TrainOneStepCell, Momentum from mindspore.ops import operations as P @@ -44,7 +44,7 @@ def compile_net(net): train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() train_net.set_train() - _executor.compile(train_net, _x, _b) + _cell_graph_executor.compile(train_net, _x, _b) context.reset_auto_parallel_context() diff --git a/tests/ut/python/parallel/test_neighborexchange.py b/tests/ut/python/parallel/test_neighborexchange.py index a2963186506..1c178187daf 100644 --- a/tests/ut/python/parallel/test_neighborexchange.py +++ b/tests/ut/python/parallel/test_neighborexchange.py @@ -18,7 +18,7 @@ import mindspore as ms import mindspore.context as context from mindspore import Tensor, Parameter import mindspore.nn as nn -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.nn import TrainOneStepCell, Momentum from mindspore.ops import operations as P from mindspore.ops.operations._inner_ops import NeighborExchange @@ -33,7 +33,7 @@ def compile_net(net): optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) train_net = TrainOneStepCell(net, optimizer) train_net.set_train() - _executor.compile(train_net, _x1, _x2) + _cell_graph_executor.compile(train_net, _x1, _x2) def test_NeighborExchange_two_inputs_success(): @@ -110,7 +110,7 @@ def test_NeighborExchange_empty_send_success(): return x1 net = Net() - _executor.compile(net, _x1) + _cell_graph_executor.compile(net, _x1) def test_NeighborExchange_empty_recv_success(): @@ -132,7 +132,7 @@ def test_NeighborExchange_empty_recv_success(): return x1 net = Net() - _executor.compile(net, _x1) + _cell_graph_executor.compile(net, _x1) def test_NeighborExchange_empty_send_empty_recv_success(): @@ -154,7 +154,7 @@ def test_NeighborExchange_empty_send_empty_recv_success(): return x1 net = Net() - 
_executor.compile(net, _x1) + _cell_graph_executor.compile(net, _x1) def test_NeighborExchange_recv_shape_num_diff_with_recv_rank_size_failed(): @@ -291,7 +291,7 @@ def test_NeighborExchange_attr_check_send_rank_ids_is_tuple_failed(): net = Net() with pytest.raises(TypeError): - _executor.compile(net, _x1) + _cell_graph_executor.compile(net, _x1) def test_NeighborExchange_attr_check_send_rank_ids_is_float_failed(): @@ -315,7 +315,7 @@ def test_NeighborExchange_attr_check_send_rank_ids_is_float_failed(): net = Net() with pytest.raises(TypeError): - _executor.compile(net, _x1) + _cell_graph_executor.compile(net, _x1) def test_NeighborExchange_attr_check_recv_rank_ids_is_tuple_failed(): @@ -339,7 +339,7 @@ def test_NeighborExchange_attr_check_recv_rank_ids_is_tuple_failed(): net = Net() with pytest.raises(TypeError): - _executor.compile(net, _x1) + _cell_graph_executor.compile(net, _x1) def test_NeighborExchange_attr_check_recv_rank_ids_is_float_failed(): @@ -363,7 +363,7 @@ def test_NeighborExchange_attr_check_recv_rank_ids_is_float_failed(): net = Net() with pytest.raises(TypeError): - _executor.compile(net, _x1) + _cell_graph_executor.compile(net, _x1) def test_NeighborExchange_attr_check_send_shape_not_tuple_failed(): @@ -387,7 +387,7 @@ def test_NeighborExchange_attr_check_send_shape_not_tuple_failed(): net = Net() with pytest.raises(TypeError): - _executor.compile(net, _x1) + _cell_graph_executor.compile(net, _x1) def test_NeighborExchange_attr_check_recv_type_numpy_failed(): @@ -411,7 +411,7 @@ def test_NeighborExchange_attr_check_recv_type_numpy_failed(): net = Net() with pytest.raises(TypeError): - _executor.compile(net, _x1) + _cell_graph_executor.compile(net, _x1) def test_NeighborExchange_attr_invalid_grpup_failed(): @@ -435,4 +435,4 @@ def test_NeighborExchange_attr_invalid_grpup_failed(): net = Net() with pytest.raises(TypeError): - _executor.compile(net, _x1) + _cell_graph_executor.compile(net, _x1) diff --git a/tests/ut/python/parallel/test_one_dev.py b/tests/ut/python/parallel/test_one_dev.py index 336a3b4e4bc..94a8055a6d0 100644 --- a/tests/ut/python/parallel/test_one_dev.py +++ b/tests/ut/python/parallel/test_one_dev.py @@ -20,7 +20,7 @@ import mindspore.nn as nn from mindspore import Tensor from mindspore import context import mindspore.common.dtype as mstype -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.common.parameter import Parameter from mindspore.nn.loss.loss import LossBase from mindspore.nn.optim.momentum import Momentum @@ -116,7 +116,7 @@ def all_to_all_common(): model = Model(net, loss, opt) model.train(epoch_size, dataset, dataset_sink_mode=False) - strategys = _executor._get_shard_strategy(model._train_network) + strategys = _cell_graph_executor._get_shard_strategy(model._train_network) return strategys diff --git a/tests/ut/python/parallel/test_one_hot_net.py b/tests/ut/python/parallel/test_one_hot_net.py index 81d7dcf701a..1ac44e4da23 100644 --- a/tests/ut/python/parallel/test_one_hot_net.py +++ b/tests/ut/python/parallel/test_one_hot_net.py @@ -19,7 +19,7 @@ import mindspore.nn as nn from mindspore import Tensor, Parameter from mindspore import context from mindspore.common import dtype as mstype -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.nn.cell import Cell from mindspore.nn.optim.momentum import Momentum from mindspore.ops import composite as C @@ -279,7 +279,7 @@ def test_bn_reshape_dense_bn_train_loss(): 
net.set_auto_parallel() net.set_train() - _executor.compile(net, input_, label) + _cell_graph_executor.compile(net, input_, label) def test_semi_one_hot_net_batch(): @@ -294,7 +294,7 @@ def test_semi_one_hot_net_batch(): net.set_auto_parallel() net.set_train() - _executor.compile(net, input_, label) + _cell_graph_executor.compile(net, input_, label) def test_semi_one_hot_net_model(): diff --git a/tests/ut/python/parallel/test_one_weight_parameter.py b/tests/ut/python/parallel/test_one_weight_parameter.py index 6cffd4e7dd9..05724dc83de 100644 --- a/tests/ut/python/parallel/test_one_weight_parameter.py +++ b/tests/ut/python/parallel/test_one_weight_parameter.py @@ -18,7 +18,7 @@ import mindspore as ms import mindspore.nn as nn from mindspore import Tensor, Parameter, ParameterTuple from mindspore import context -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.ops import composite as C from mindspore.ops import operations as P @@ -77,4 +77,4 @@ def test_one_weight_parameter(): train_net.set_auto_parallel() train_net.set_train() - _executor.compile(train_net, x, b) + _cell_graph_executor.compile(train_net, x, b) diff --git a/tests/ut/python/parallel/test_onehot.py b/tests/ut/python/parallel/test_onehot.py index 8d9ccab475e..16f322d3e0f 100644 --- a/tests/ut/python/parallel/test_onehot.py +++ b/tests/ut/python/parallel/test_onehot.py @@ -18,7 +18,7 @@ import mindspore as ms import mindspore.nn as nn from mindspore import Tensor from mindspore import context -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.ops import composite as C from mindspore.ops import operations as P from mindspore.nn.wrap.cell_wrapper import _VirtualDatasetCell @@ -77,7 +77,7 @@ def compile_graph(strategy1, strategy2, strategy3, strategy4, auto=False, onthot y = Tensor(np.ones([32, 64]), dtype=ms.float32) b = Tensor(np.ones([64]), dtype=ms.int32) net.set_train() - _executor.compile(net, x, y, b) + _cell_graph_executor.compile(net, x, y, b) def test_onehot_model_parallel(): diff --git a/tests/ut/python/parallel/test_onehot_2dim.py b/tests/ut/python/parallel/test_onehot_2dim.py index 26d4bb22a3b..16b6c9cddb2 100644 --- a/tests/ut/python/parallel/test_onehot_2dim.py +++ b/tests/ut/python/parallel/test_onehot_2dim.py @@ -17,7 +17,7 @@ import mindspore as ms import mindspore.context as context from mindspore import Tensor, Parameter import mindspore.nn as nn -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.nn import TrainOneStepCell, Momentum from mindspore.ops import operations as P @@ -50,7 +50,7 @@ def compile_net(net): train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() train_net.set_train() - _executor.compile(train_net, _x) + _cell_graph_executor.compile(train_net, _x) context.reset_auto_parallel_context() diff --git a/tests/ut/python/parallel/test_optimizer.py b/tests/ut/python/parallel/test_optimizer.py index 55a22ddf608..2fd9743fb85 100644 --- a/tests/ut/python/parallel/test_optimizer.py +++ b/tests/ut/python/parallel/test_optimizer.py @@ -17,7 +17,7 @@ import numpy as np import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor, Parameter -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.communication.management import init from mindspore.nn import Dense from mindspore.nn import Momentum @@ -62,4 +62,4 @@ 
def test_dense_gen_graph(): predict = Tensor(np.ones([64, 512]).astype(np.float32) * 0.01) label = Tensor(np.zeros([64, 32]).astype(np.float32)) network.set_auto_parallel() - _executor.compile(network, predict, label) + _cell_graph_executor.compile(network, predict, label) diff --git a/tests/ut/python/parallel/test_optimizer_clone_weight.py b/tests/ut/python/parallel/test_optimizer_clone_weight.py index fa6e3c32b4f..3ded6d51f07 100644 --- a/tests/ut/python/parallel/test_optimizer_clone_weight.py +++ b/tests/ut/python/parallel/test_optimizer_clone_weight.py @@ -18,7 +18,7 @@ import mindspore as ms import mindspore.nn as nn from mindspore import Tensor, Parameter from mindspore import context -from mindspore.common.api import _Executor +from mindspore.common.api import _CellGraphExecutor from mindspore.nn import TrainOneStepCell from mindspore.nn.optim import AdamWeightDecay from mindspore.ops import operations as P @@ -37,7 +37,7 @@ class NetWithLoss(nn.Cell): def compile_net(net, x, b): net.set_auto_parallel() - _Executor().compile(net, x, b) + _CellGraphExecutor().compile(net, x, b) def test_optimizer_clone_weight(): diff --git a/tests/ut/python/parallel/test_pack.py b/tests/ut/python/parallel/test_pack.py index df7887913e1..431f80f9f83 100644 --- a/tests/ut/python/parallel/test_pack.py +++ b/tests/ut/python/parallel/test_pack.py @@ -17,7 +17,7 @@ import mindspore as ms import mindspore.context as context from mindspore import Tensor, Parameter import mindspore.nn as nn -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.nn import TrainOneStepCell, Momentum from mindspore.ops import operations as P from mindspore.nn import Dense, Flatten @@ -141,7 +141,7 @@ def compile_net(net): train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() train_net.set_train() - _executor.compile(train_net, _x) + _cell_graph_executor.compile(train_net, _x) context.reset_auto_parallel_context() @@ -151,7 +151,7 @@ def compile_net1(net): train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() train_net.set_train() - _executor.compile(train_net, _x1) + _cell_graph_executor.compile(train_net, _x1) context.reset_auto_parallel_context() @@ -161,7 +161,7 @@ def compile_net2(net): train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() train_net.set_train() - _executor.compile(train_net, _x2) + _cell_graph_executor.compile(train_net, _x2) context.reset_auto_parallel_context() @@ -170,7 +170,7 @@ def compile_net_con(net): optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() - _executor.compile(train_net, _x_c) + _cell_graph_executor.compile(train_net, _x_c) context.reset_auto_parallel_context() diff --git a/tests/ut/python/parallel/test_parallel_optimizer.py b/tests/ut/python/parallel/test_parallel_optimizer.py index 9f8ce85b096..9c30e06fd2a 100644 --- a/tests/ut/python/parallel/test_parallel_optimizer.py +++ b/tests/ut/python/parallel/test_parallel_optimizer.py @@ -18,7 +18,7 @@ import pytest import mindspore.nn as nn from mindspore import Tensor, Parameter -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.nn import TrainOneStepCell, WithLossCell from mindspore.nn.wrap.cell_wrapper import _VirtualDatasetCell from mindspore.nn.optim import Adam, AdamWeightDecay, Lamb, Momentum @@ -92,7 +92,7 @@ def auto_parallel_compile_net(mode, dev_num, 
net, strategy1=None, strategy2=None train_network = TrainOneStepCell(net, optimizer).set_comm_fusion(4) train_network.set_auto_parallel() train_network.set_train() - _executor.compile(train_network, inputs, label, phase="train", auto_parallel_mode=True) + _cell_graph_executor.compile(train_network, inputs, label, phase="train", auto_parallel_mode=True) context.reset_auto_parallel_context() return train_network @@ -154,7 +154,7 @@ def test_AdamWeightDecay(): net_with_loss = WithLossCell(net, loss) train_network = TrainOneStepCell(net_with_loss, optimizer) - _executor.compile(train_network, inputs, label) + _cell_graph_executor.compile(train_network, inputs, label) context.reset_auto_parallel_context() @@ -170,7 +170,7 @@ def test_lamb_compile(): net_with_loss = WithLossCell(net, loss) train_network = TrainOneStepCell(net_with_loss, optimizer) - _executor.compile(train_network, inputs, label) + _cell_graph_executor.compile(train_network, inputs, label) context.reset_auto_parallel_context() @@ -187,7 +187,7 @@ def test_lamb_split_fusion(): net_with_loss = WithLossCell(net, loss) train_network = TrainOneStepCell(net_with_loss, optimizer) - _executor.compile(train_network, inputs, label) + _cell_graph_executor.compile(train_network, inputs, label) context.reset_auto_parallel_context() diff --git a/tests/ut/python/parallel/test_parameter_multi_users.py b/tests/ut/python/parallel/test_parameter_multi_users.py index 12ac342db17..2bfbf6e00b5 100644 --- a/tests/ut/python/parallel/test_parameter_multi_users.py +++ b/tests/ut/python/parallel/test_parameter_multi_users.py @@ -16,7 +16,7 @@ import numpy as np import pytest import mindspore as ms from mindspore import context, Tensor, Parameter -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.nn import Cell, TrainOneStepCell, Momentum from mindspore.ops import operations as P @@ -70,7 +70,7 @@ def compile_net(net): train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() train_net.set_train() - _executor.compile(train_net, _x, _b) + _cell_graph_executor.compile(train_net, _x, _b) context.reset_auto_parallel_context() diff --git a/tests/ut/python/parallel/test_prelu.py b/tests/ut/python/parallel/test_prelu.py index 47328237812..67853329e47 100644 --- a/tests/ut/python/parallel/test_prelu.py +++ b/tests/ut/python/parallel/test_prelu.py @@ -18,7 +18,7 @@ import mindspore as ms import mindspore.nn as nn from mindspore import Tensor from mindspore import context -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.ops import composite as C from mindspore.ops import operations as P from tests.ut.python.ops.test_math_ops import VirtualLoss @@ -50,7 +50,7 @@ class GradWrap(nn.Cell): def compile_net(net, x, y): net.set_auto_parallel() net.set_train() - _executor.compile(net, x, y) + _cell_graph_executor.compile(net, x, y) def test_prelu_single_success1(): @@ -168,7 +168,7 @@ def test_prelu_parallel_success3(): net = GradWrap3(NetWithLoss3(Net(strategy1, strategy2))) net.set_auto_parallel() net.set_train() - _executor.compile(net, x, y, w) + _cell_graph_executor.compile(net, x, y, w) def test_prelu_parallel_success4(): diff --git a/tests/ut/python/parallel/test_print.py b/tests/ut/python/parallel/test_print.py index ed1bb908d33..9da75777f86 100644 --- a/tests/ut/python/parallel/test_print.py +++ b/tests/ut/python/parallel/test_print.py @@ -16,7 +16,7 @@ import numpy as np import mindspore as ms from mindspore import 
context, Tensor, Parameter -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.nn import Cell, TrainOneStepCell, Momentum, BatchNorm2d, BatchNorm1d from mindspore.ops import operations as P @@ -49,7 +49,7 @@ def compile_net(net): train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() train_net.set_train() - _executor.compile(train_net, _x, _b) + _cell_graph_executor.compile(train_net, _x, _b) context.reset_auto_parallel_context() @@ -99,7 +99,7 @@ def compile_net2(net): train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() train_net.set_train() - _executor.compile(train_net, _x1, _b1) + _cell_graph_executor.compile(train_net, _x1, _b1) context.reset_auto_parallel_context() diff --git a/tests/ut/python/parallel/test_reduce_method_info.py b/tests/ut/python/parallel/test_reduce_method_info.py index ddf44098c48..ab18c08a423 100644 --- a/tests/ut/python/parallel/test_reduce_method_info.py +++ b/tests/ut/python/parallel/test_reduce_method_info.py @@ -18,7 +18,7 @@ import mindspore as ms import mindspore.nn as nn from mindspore import Tensor from mindspore import context -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.ops import composite as C from mindspore.ops import operations as P from tests.ut.python.ops.test_math_ops import VirtualLoss @@ -70,13 +70,13 @@ class GradWrap(nn.Cell): def compile_net_no_bias(net, x, y): net.set_auto_parallel() net.set_train() - _executor.compile(net, x, y) + _cell_graph_executor.compile(net, x, y) def compile_net(net, x, y, b): net.set_auto_parallel() net.set_train() - _executor.compile(net, x, y, b) + _cell_graph_executor.compile(net, x, y, b) # model_parallel test diff --git a/tests/ut/python/parallel/test_reluv2.py b/tests/ut/python/parallel/test_reluv2.py index 74827b495a7..e02c950dcb9 100644 --- a/tests/ut/python/parallel/test_reluv2.py +++ b/tests/ut/python/parallel/test_reluv2.py @@ -17,7 +17,7 @@ import mindspore as ms import mindspore.context as context from mindspore import Tensor, Parameter import mindspore.nn as nn -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.nn import TrainOneStepCell, Momentum from mindspore.ops import operations as P @@ -45,7 +45,7 @@ def compile_net(net): train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() train_net.set_train() - _executor.compile(train_net, _x) + _cell_graph_executor.compile(train_net, _x) context.reset_auto_parallel_context() diff --git a/tests/ut/python/parallel/test_repeated_calc.py b/tests/ut/python/parallel/test_repeated_calc.py index ebba125ee56..52104fbe1fd 100644 --- a/tests/ut/python/parallel/test_repeated_calc.py +++ b/tests/ut/python/parallel/test_repeated_calc.py @@ -18,7 +18,7 @@ import mindspore as ms import mindspore.nn as nn from mindspore import Tensor from mindspore import context -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.ops import composite as C from mindspore.ops import operations as P from tests.ut.python.ops.test_math_ops import VirtualLoss @@ -50,7 +50,7 @@ class GradWrap(nn.Cell): def compile_net(net, x, y, b): net.set_auto_parallel() net.set_train() - _executor.compile(net, x, y, b) + _cell_graph_executor.compile(net, x, y, b) # it has not redistribution diff --git a/tests/ut/python/parallel/test_reshape.py b/tests/ut/python/parallel/test_reshape.py index 
9f1b81b057b..b5a190215fc 100644 --- a/tests/ut/python/parallel/test_reshape.py +++ b/tests/ut/python/parallel/test_reshape.py @@ -18,7 +18,7 @@ import mindspore as ms import mindspore.nn as nn from mindspore import Tensor from mindspore import context -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.common.parameter import Parameter from mindspore.common.parameter import ParameterTuple from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits @@ -305,7 +305,7 @@ class ReshapeNet6(nn.Cell): def compile_net(net, input_): net.set_auto_parallel() net.set_train() - _executor.compile(net, input_) + _cell_graph_executor.compile(net, input_) def reshape_net2(backbone): diff --git a/tests/ut/python/parallel/test_reshape_optimized.py b/tests/ut/python/parallel/test_reshape_optimized.py index e9021db799f..c6d7eb97158 100644 --- a/tests/ut/python/parallel/test_reshape_optimized.py +++ b/tests/ut/python/parallel/test_reshape_optimized.py @@ -16,7 +16,7 @@ import numpy as np import mindspore as ms from mindspore import context, Tensor, Parameter -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.nn import Cell, TrainOneStepCell, Momentum from mindspore.ops import operations as P @@ -45,7 +45,7 @@ def compile_net(net): train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() train_net.set_train() - _executor.compile(train_net, _x, _b) + _cell_graph_executor.compile(train_net, _x, _b) context.reset_auto_parallel_context() diff --git a/tests/ut/python/parallel/test_reshape_parameter.py b/tests/ut/python/parallel/test_reshape_parameter.py index 9d6740cad22..e502684e359 100644 --- a/tests/ut/python/parallel/test_reshape_parameter.py +++ b/tests/ut/python/parallel/test_reshape_parameter.py @@ -18,7 +18,7 @@ import mindspore as ms import mindspore.nn as nn from mindspore import Tensor from mindspore import context -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.ops import composite as C from mindspore.ops import operations as P from tests.ut.python.ops.test_math_ops import VirtualLoss @@ -64,7 +64,7 @@ class Net(nn.Cell): def compile_net(net, x, y): net.set_auto_parallel() net.set_train() - _executor.compile(net, x, y) + _cell_graph_executor.compile(net, x, y) def test_reshape_parameter_data_parallel(): diff --git a/tests/ut/python/parallel/test_reshape_skip_redistribution.py b/tests/ut/python/parallel/test_reshape_skip_redistribution.py index d50484e33bc..872d985d3d6 100644 --- a/tests/ut/python/parallel/test_reshape_skip_redistribution.py +++ b/tests/ut/python/parallel/test_reshape_skip_redistribution.py @@ -16,7 +16,7 @@ import numpy as np import mindspore as ms from mindspore import context, Tensor, Parameter -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.nn import Cell, TrainOneStepCell, Momentum from mindspore.ops import operations as P @@ -48,7 +48,7 @@ def compile_net(net): train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() train_net.set_train() - _executor.compile(train_net, _x, _b) + _cell_graph_executor.compile(train_net, _x, _b) context.reset_auto_parallel_context() diff --git a/tests/ut/python/parallel/test_reshape_unexpand.py b/tests/ut/python/parallel/test_reshape_unexpand.py index 49bf1e9b3fd..4f542c6995c 100644 --- a/tests/ut/python/parallel/test_reshape_unexpand.py +++ 
b/tests/ut/python/parallel/test_reshape_unexpand.py @@ -18,7 +18,7 @@ import mindspore as ms import mindspore.nn as nn from mindspore import Tensor from mindspore import context -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.common.parameter import Parameter from mindspore.ops import composite as C from mindspore.ops import operations as P @@ -68,7 +68,7 @@ def test_reshape_unexpand(): context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") net.set_auto_parallel() net.set_train() - _executor.compile(net, x) + _cell_graph_executor.compile(net, x) def test_reshape_unexpand_1(): class Net(nn.Cell): @@ -91,7 +91,7 @@ def test_reshape_unexpand_1(): context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") net.set_auto_parallel() net.set_train() - _executor.compile(net, x) + _cell_graph_executor.compile(net, x) def test_reshape_unexpand_2(): class Net(nn.Cell): @@ -114,7 +114,7 @@ def test_reshape_unexpand_2(): context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") net.set_auto_parallel() net.set_train() - _executor.compile(net, x) + _cell_graph_executor.compile(net, x) def test_reshape_unexpand_3(): class Net(nn.Cell): @@ -138,7 +138,7 @@ def test_reshape_unexpand_3(): context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") net.set_auto_parallel() net.set_train() - _executor.compile(net, x) + _cell_graph_executor.compile(net, x) def test_reshape_unexpand_4(): class Net(nn.Cell): @@ -162,7 +162,7 @@ def test_reshape_unexpand_4(): context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") net.set_auto_parallel() net.set_train() - _executor.compile(net, x) + _cell_graph_executor.compile(net, x) def test_reshape_unexpand_5(): class Net(nn.Cell): @@ -186,7 +186,7 @@ def test_reshape_unexpand_5(): context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") net.set_auto_parallel() net.set_train() - _executor.compile(net, x) + _cell_graph_executor.compile(net, x) def test_reshape_unexpand_6(): class Net(nn.Cell): @@ -210,7 +210,7 @@ def test_reshape_unexpand_6(): context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") net.set_auto_parallel() net.set_train() - _executor.compile(net, x) + _cell_graph_executor.compile(net, x) def test_reshape_unexpand_7(): class Net(nn.Cell): @@ -244,7 +244,7 @@ def test_reshape_unexpand_7(): net = GradWrap(NetWithLoss(Net())) net.set_auto_parallel() net.set_train() - _executor.compile(net, x) + _cell_graph_executor.compile(net, x) def test_reshape_unexpand_8(): class Net(nn.Cell): @@ -267,4 +267,4 @@ def test_reshape_unexpand_8(): context.set_auto_parallel_context(parallel_mode="auto_parallel") net.set_auto_parallel() net.set_train() - _executor.compile(net, x) + _cell_graph_executor.compile(net, x) diff --git a/tests/ut/python/parallel/test_scalar_loss.py b/tests/ut/python/parallel/test_scalar_loss.py index f25e7737ba7..004ebb04595 100644 --- a/tests/ut/python/parallel/test_scalar_loss.py +++ b/tests/ut/python/parallel/test_scalar_loss.py @@ -18,7 +18,7 @@ import mindspore as ms import mindspore.nn as nn from mindspore import Tensor from mindspore import context -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.ops import composite as C from mindspore.ops import functional as F from mindspore.ops import operations as P @@ -61,4 +61,4 @@ def test_sum_as_loss(): x = Tensor(np.ones([64, 32]), dtype=ms.float32) y = Tensor(np.ones([64, 32]), 
dtype=ms.float32) net.set_train() - _executor.compile(net, x, y) + _cell_graph_executor.compile(net, x, y) diff --git a/tests/ut/python/parallel/test_self_attention.py b/tests/ut/python/parallel/test_self_attention.py index abba5b84cf8..1114eee30cd 100644 --- a/tests/ut/python/parallel/test_self_attention.py +++ b/tests/ut/python/parallel/test_self_attention.py @@ -18,7 +18,7 @@ import mindspore as ms import mindspore.nn as nn from mindspore import Tensor from mindspore import context -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.context import set_auto_parallel_context from mindspore.ops import composite as C from mindspore.ops import operations as P @@ -53,7 +53,7 @@ class GradWrap(nn.Cell): def compile_net(net, x): net.set_auto_parallel() net.set_train() - _executor.compile(net, x) + _cell_graph_executor.compile(net, x) class Net(nn.Cell): diff --git a/tests/ut/python/parallel/test_semi_auto_two_subgraphs.py b/tests/ut/python/parallel/test_semi_auto_two_subgraphs.py index 97aa17b26ab..d27df51d2a0 100644 --- a/tests/ut/python/parallel/test_semi_auto_two_subgraphs.py +++ b/tests/ut/python/parallel/test_semi_auto_two_subgraphs.py @@ -17,7 +17,7 @@ import numpy as np import mindspore as ms from mindspore import Tensor, Parameter, ParameterTuple, context from mindspore import nn -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.nn.optim import Adam, FTRL from mindspore.ops import composite as C from mindspore.ops import functional as F @@ -108,4 +108,4 @@ def test_two_subgraphs(): input_x = Tensor(np.ones([8, 8, 8, 8]), dtype=ms.float32) net.set_auto_parallel() net.set_train() - _executor.compile(net, input_x) + _cell_graph_executor.compile(net, input_x) diff --git a/tests/ut/python/parallel/test_shared_param_and_mix_precision.py b/tests/ut/python/parallel/test_shared_param_and_mix_precision.py index 2403ad5ad3a..6acba54914f 100644 --- a/tests/ut/python/parallel/test_shared_param_and_mix_precision.py +++ b/tests/ut/python/parallel/test_shared_param_and_mix_precision.py @@ -17,7 +17,7 @@ import numpy as np import mindspore.nn as nn import mindspore.common.dtype as mstype from mindspore import Tensor, Parameter -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from mindspore.nn import TrainOneStepCell from mindspore.nn.wrap.cell_wrapper import _VirtualDatasetCell from mindspore.nn.optim import Momentum @@ -71,7 +71,7 @@ def auto_parallel_compile_net(mode, dev_num, net, strategy1=None, strategy2=None train_network = TrainOneStepCell(net, optimizer).set_comm_fusion(4) train_network.set_auto_parallel() train_network.set_train() - _executor.compile(train_network, inputs, label, phase="train", auto_parallel_mode=True) + _cell_graph_executor.compile(train_network, inputs, label, phase="train", auto_parallel_mode=True) context.reset_auto_parallel_context() return train_network diff --git a/tests/ut/python/parallel/test_sigmoid_cross_entropy_with_logits.py b/tests/ut/python/parallel/test_sigmoid_cross_entropy_with_logits.py index d8e00c32c3f..b7a7653c5bd 100644 --- a/tests/ut/python/parallel/test_sigmoid_cross_entropy_with_logits.py +++ b/tests/ut/python/parallel/test_sigmoid_cross_entropy_with_logits.py @@ -16,7 +16,7 @@ import numpy as np import mindspore as ms from mindspore import context, Tensor, Parameter -from mindspore.common.api import _executor +from mindspore.common.api import _cell_graph_executor from 
mindspore.nn import Cell, TrainOneStepCell, Momentum
 from mindspore.ops import operations as P
@@ -44,7 +44,7 @@ def compile_net(net):
     train_net = TrainOneStepCell(net, optimizer)
     train_net.set_auto_parallel()
     train_net.set_train()
-    _executor.compile(train_net, _x, _b)
+    _cell_graph_executor.compile(train_net, _x, _b)
     context.reset_auto_parallel_context()
diff --git a/tests/ut/python/parallel/test_slice.py b/tests/ut/python/parallel/test_slice.py
index 84dc926033e..ba096fa1ccc 100644
--- a/tests/ut/python/parallel/test_slice.py
+++ b/tests/ut/python/parallel/test_slice.py
@@ -17,7 +17,7 @@
 import pytest
 
 import mindspore as ms
 from mindspore import context, Tensor, Parameter
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.nn import Cell, TrainOneStepCell, Momentum
 from mindspore.ops import operations as P
@@ -69,7 +69,7 @@ def compile_net(net):
     train_net = TrainOneStepCell(net, optimizer)
     train_net.set_auto_parallel()
     train_net.set_train()
-    _executor.compile(train_net, _x, _b)
+    _cell_graph_executor.compile(train_net, _x, _b)
     context.reset_auto_parallel_context()
diff --git a/tests/ut/python/parallel/test_softmax_cross_entropy_loss.py b/tests/ut/python/parallel/test_softmax_cross_entropy_loss.py
index b4d6d61bb7e..48537ba282f 100644
--- a/tests/ut/python/parallel/test_softmax_cross_entropy_loss.py
+++ b/tests/ut/python/parallel/test_softmax_cross_entropy_loss.py
@@ -18,7 +18,7 @@
 import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
@@ -49,7 +49,7 @@ class GradWrap(nn.Cell):
 def compile_net(net, x, y, b):
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x, y, b)
+    _cell_graph_executor.compile(net, x, y, b)
 
 
 def test_softmax_cross_entropy_loss():
diff --git a/tests/ut/python/parallel/test_sparse_feature_bprop.py b/tests/ut/python/parallel/test_sparse_feature_bprop.py
index 549a64d71a3..7941ebf7d25 100644
--- a/tests/ut/python/parallel/test_sparse_feature_bprop.py
+++ b/tests/ut/python/parallel/test_sparse_feature_bprop.py
@@ -23,7 +23,7 @@
 from mindspore.common.parameter import Parameter
 from mindspore.common.tensor import Tensor
 from mindspore.ops import composite as C, operations as P
 from mindspore.ops.operations.comm_ops import AllReduce
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.nn import TrainOneStepCell, Adam
@@ -69,7 +69,7 @@ def test_bprop_with_sparse_feature_allreduce(test_context):
 
     x = Tensor(np.ones([64, 64]), dtype=ms.float32)
     net.set_train()
-    _executor.compile(net, x)
+    _cell_graph_executor.compile(net, x)
 
 
 def test_bprop_with_sparse_feature_mirror(test_context):
@@ -96,7 +96,7 @@ def test_bprop_with_sparse_feature_mirror(test_context):
         optimizer = Adam(net.trainable_params(), learning_rate=0.1, loss_scale=1024.0, weight_decay=0.9)
         train_net = TrainOneStepCell(net, optimizer)
         train_net.set_train()
-        _executor.compile(train_net, _x, _b)
+        _cell_graph_executor.compile(train_net, _x, _b)
 
     net = Net()
     compile_net(net)
@@ -128,7 +128,7 @@ def test_bprop_with_sparse_feature_dataparallel(test_context):
         optimizer = Adam(net.trainable_params(), learning_rate=0.1, loss_scale=1024.0, weight_decay=0.9)
         train_net = TrainOneStepCell(net, optimizer)
        train_net.set_train()
-        _executor.compile(train_net, _x, _b)
+        _cell_graph_executor.compile(train_net, _x, _b)
 
     net = Net()
     compile_net(net)
diff --git a/tests/ut/python/parallel/test_sparse_gather_v2.py b/tests/ut/python/parallel/test_sparse_gather_v2.py
index e80f3c08322..cd724421bd3 100644
--- a/tests/ut/python/parallel/test_sparse_gather_v2.py
+++ b/tests/ut/python/parallel/test_sparse_gather_v2.py
@@ -18,7 +18,7 @@
 import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss
@@ -73,7 +73,7 @@ def test_gatherv2_semi_auto0():
     x = Tensor(np.ones([64, 64]), dtype=ms.float32)
     y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
     net.set_train()
-    _executor.compile(net, x, y)
+    _cell_graph_executor.compile(net, x, y)
 
 
 def test_gatherv2_semi_auto1():
@@ -86,7 +86,7 @@
     x = Tensor(np.ones([64, 64]), dtype=ms.float32)
     y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
     net.set_train()
-    _executor.compile(net, x, y)
+    _cell_graph_executor.compile(net, x, y)
 
 
 def test_gatherv2_semi_auto2():
@@ -99,7 +99,7 @@
     x = Tensor(np.ones([64, 64]), dtype=ms.float32)
     y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
     net.set_train()
-    _executor.compile(net, x, y)
+    _cell_graph_executor.compile(net, x, y)
 
 
 def test_gatherv2_semi_auto3():
@@ -112,7 +112,7 @@
     x = Tensor(np.ones([64, 64]), dtype=ms.float32)
     y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
     net.set_train()
-    _executor.compile(net, x, y)
+    _cell_graph_executor.compile(net, x, y)
 
 
 def test_gatherv2_semi_auto4():
@@ -125,7 +125,7 @@
     x = Tensor(np.ones([64, 32]), dtype=ms.float32)
     y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
     net.set_train()
-    _executor.compile(net, x, y)
+    _cell_graph_executor.compile(net, x, y)
 
 
 def test_gatherv2_semi_auto5():
@@ -138,7 +138,7 @@
     x = Tensor(np.ones([64, 32]), dtype=ms.float32)
     y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
     net.set_train()
-    _executor.compile(net, x, y)
+    _cell_graph_executor.compile(net, x, y)
 
 
 def test_gatherv2_semi_auto6():
@@ -150,7 +150,7 @@
     x = Tensor(np.ones([64, 32]), dtype=ms.float32)
     y = Tensor(np.ones([64, 64, 32]), dtype=ms.float32)
     net.set_train()
-    _executor.compile(net, x, y)
+    _cell_graph_executor.compile(net, x, y)
 
 
 def test_gatherv2_semi_auto7():
@@ -162,7 +162,7 @@
     x = Tensor(np.ones([64, 32]), dtype=ms.float32)
     y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
     net.set_train()
-    _executor.compile(net, x, y)
+    _cell_graph_executor.compile(net, x, y)
 
 
 def test_gatherv2_auto0():
@@ -172,7 +172,7 @@
     x = Tensor(np.ones([64, 32]), dtype=ms.float32)
    y = Tensor(np.ones([64, 64, 32]), dtype=ms.float32)
     net.set_train()
-    _executor.compile(net, x, y)
+    _cell_graph_executor.compile(net, x, y)
 
 
 def test_gatherv2_auto1():
@@ -182,7 +182,7 @@
     x = Tensor(np.ones([64, 32]), dtype=ms.float32)
     y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
     net.set_train()
-    _executor.compile(net, x, y)
+    _cell_graph_executor.compile(net, x, y)
 
 
 def test_gatherv2_cpu0():
@@ -195,7 +195,7 @@
     x = Tensor(np.ones([64, 64]), dtype=ms.float32)
     y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
     net.set_train()
-    _executor.compile(net, x, y)
+    _cell_graph_executor.compile(net, x, y)
 
 
 def test_gatherv2_cpu1():
@@ -208,7 +208,7 @@
     x = Tensor(np.ones([64, 64]), dtype=ms.float32)
     y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
     net.set_train()
-    _executor.compile(net, x, y)
+    _cell_graph_executor.compile(net, x, y)
 
 
 def test_gatherv2_cpu2():
@@ -221,4 +221,4 @@
     x = Tensor(np.ones([64, 64]), dtype=ms.float32)
     y = Tensor(np.ones([64, 64, 64]), dtype=ms.float32)
     net.set_train()
-    _executor.compile(net, x, y)
+    _cell_graph_executor.compile(net, x, y)
diff --git a/tests/ut/python/parallel/test_split.py b/tests/ut/python/parallel/test_split.py
index cb6744a075b..677d511765c 100644
--- a/tests/ut/python/parallel/test_split.py
+++ b/tests/ut/python/parallel/test_split.py
@@ -17,7 +17,7 @@
 import mindspore as ms
 import mindspore.context as context
 from mindspore import Tensor, Parameter
 import mindspore.nn as nn
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.nn import TrainOneStepCell, Momentum
 from mindspore.ops import operations as P
@@ -80,7 +80,7 @@ def compile_net(net):
     train_net = TrainOneStepCell(net, optimizer)
     train_net.set_auto_parallel()
     train_net.set_train()
-    _executor.compile(train_net, _x)
+    _cell_graph_executor.compile(train_net, _x)
     context.reset_auto_parallel_context()
@@ -90,7 +90,7 @@ def compile_net1(net):
     train_net = TrainOneStepCell(net, optimizer)
     train_net.set_auto_parallel()
     train_net.set_train()
-    _executor.compile(train_net, _x1)
+    _cell_graph_executor.compile(train_net, _x1)
     context.reset_auto_parallel_context()
diff --git a/tests/ut/python/parallel/test_split_grad_sens.py b/tests/ut/python/parallel/test_split_grad_sens.py
index 077dd9540e0..0bbc7968f81 100644
--- a/tests/ut/python/parallel/test_split_grad_sens.py
+++ b/tests/ut/python/parallel/test_split_grad_sens.py
@@ -19,7 +19,7 @@
 import mindspore.common.dtype as mstype
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
@@ -67,12 +67,12 @@ class GradWrap4(nn.Cell):
 def compile_net(net, x, y, b):
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x, y, b)
+    _cell_graph_executor.compile(net, x, y, b)
 
 def compile_net_no_bias(net, x, y):
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x, y)
+    _cell_graph_executor.compile(net, x, y)
 
 def test_no_grad():
     class Net(nn.Cell):
@@ -123,7 +123,7 @@ def test_grad_sens_parameter_type():
     sens = Tensor(np.ones([128, 64]), dtype=ms.float32)
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x, y, b, sens, phase='train', auto_parallel_mode=True)
+    _cell_graph_executor.compile(net, x, y, b, sens, phase='train', auto_parallel_mode=True)
     x_layout = ([8, 8], [1, -1], [16, 32], 0, True, '')
     y_layout = ([8, 8], [-1, 0], [32, 8], 0, True, '')
     b_layout = ([8, 8], [0, -1], [8, 64], 0, True, '')
diff --git a/tests/ut/python/parallel/test_square.py b/tests/ut/python/parallel/test_square.py
index a354395c7d6..65fdf2a83ca 100644
--- a/tests/ut/python/parallel/test_square.py
+++ b/tests/ut/python/parallel/test_square.py
@@ -16,7 +16,7 @@
 import numpy as np
 
 import mindspore as ms
 from mindspore import context, Tensor, Parameter
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.nn import Cell, TrainOneStepCell, Momentum
 from mindspore.ops import operations as P
@@ -46,7 +46,7 @@ def compile_net(net):
     train_net = TrainOneStepCell(net, optimizer)
     train_net.set_auto_parallel()
     train_net.set_train()
-    _executor.compile(train_net, _x, _b)
+    _cell_graph_executor.compile(train_net, _x, _b)
     context.reset_auto_parallel_context()
diff --git a/tests/ut/python/parallel/test_squeeze_info.py b/tests/ut/python/parallel/test_squeeze_info.py
index 76bedeb0000..a7e3453ff3a 100644
--- a/tests/ut/python/parallel/test_squeeze_info.py
+++ b/tests/ut/python/parallel/test_squeeze_info.py
@@ -16,7 +16,7 @@
 import numpy as np
 
 import mindspore as ms
 from mindspore import context, Tensor
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.nn import Cell
 from mindspore.ops import operations as P
@@ -40,7 +40,7 @@ _b = Tensor(np.ones([64, 32]), dtype=ms.float32)
 def compile_net(net):
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, _x, _b)
+    _cell_graph_executor.compile(net, _x, _b)
     context.reset_auto_parallel_context()
diff --git a/tests/ut/python/parallel/test_step_parallel.py b/tests/ut/python/parallel/test_step_parallel.py
index 5eb9ba7157d..a6c6827d124 100644
--- a/tests/ut/python/parallel/test_step_parallel.py
+++ b/tests/ut/python/parallel/test_step_parallel.py
@@ -18,7 +18,7 @@
 import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss
@@ -77,4 +77,4 @@ def test_two_matmul():
     a = Tensor(np.ones([128, 128]), dtype=ms.float32)
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x, y, b, a)
+    _cell_graph_executor.compile(net, x, y, b, a)
diff --git a/tests/ut/python/parallel/test_strategy_checkpoint.py b/tests/ut/python/parallel/test_strategy_checkpoint.py
index d6bb9eeffd8..9b918b54c6e 100644
--- a/tests/ut/python/parallel/test_strategy_checkpoint.py
+++ b/tests/ut/python/parallel/test_strategy_checkpoint.py
@@ -18,7 +18,7 @@
 import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor, Parameter
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.context import set_auto_parallel_context, reset_auto_parallel_context
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
@@ -89,7 +89,7 @@ def test_six_matmul_save():
     x1 = Tensor(np.ones([32, 32]), dtype=ms.float32)
     x6 = Tensor(np.ones([128, 32]), dtype=ms.float32)
     net.set_train()
-    _executor.compile(net, x1, x6)
+    _cell_graph_executor.compile(net, x1, x6)
 
 
 # remove matmul2, add matmul7
@@ -153,7 +153,7 @@ def test_six_matmul_load():
     x6 = Tensor(np.ones([128, 32]), dtype=ms.float32)
     x7 = Tensor(np.ones([32, 32]), dtype=ms.float32)
     net.set_train()
-    _executor.compile(net, x1, x6, x7)
+    _cell_graph_executor.compile(net, x1, x6, x7)
 
 
 # model_parallel test
@@ -210,7 +210,7 @@ def test_six_matmul_save_auto():
     x1 = Tensor(np.ones([32, 32]), dtype=ms.float32)
     x6 = Tensor(np.ones([128, 32]), dtype=ms.float32)
     net.set_train()
-    _executor.compile(net, x1, x6)
+    _cell_graph_executor.compile(net, x1, x6)
 
 
 # remove matmul2, add matmul7
@@ -271,4 +271,4 @@ def test_six_matmul_load_auto():
     x6 = Tensor(np.ones([128, 32]), dtype=ms.float32)
     x7 = Tensor(np.ones([32, 32]), dtype=ms.float32)
     net.set_train()
-    _executor.compile(net, x1, x6, x7)
+    _cell_graph_executor.compile(net, x1, x6, x7)
diff --git a/tests/ut/python/parallel/test_stridedslice.py b/tests/ut/python/parallel/test_stridedslice.py
index d8ee43dea48..6532b5d351c 100644
--- a/tests/ut/python/parallel/test_stridedslice.py
+++ b/tests/ut/python/parallel/test_stridedslice.py
@@ -17,7 +17,7 @@
 import pytest
 
 import mindspore as ms
 from mindspore import context, Tensor, Parameter
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.nn import Cell, TrainOneStepCell, Momentum
 from mindspore.ops import operations as P
@@ -72,7 +72,7 @@ def compile_net(net):
     train_net = TrainOneStepCell(net, optimizer)
     train_net.set_auto_parallel()
     train_net.set_train()
-    _executor.compile(train_net, _x, _b)
+    _cell_graph_executor.compile(train_net, _x, _b)
     context.reset_auto_parallel_context()
diff --git a/tests/ut/python/parallel/test_sum_as_loss.py b/tests/ut/python/parallel/test_sum_as_loss.py
index 35a5197385f..8200be98b34 100644
--- a/tests/ut/python/parallel/test_sum_as_loss.py
+++ b/tests/ut/python/parallel/test_sum_as_loss.py
@@ -18,7 +18,7 @@
 import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
@@ -38,7 +38,7 @@ class GradWrap(nn.Cell):
 def compile_net(net, x, y):
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x, y)
+    _cell_graph_executor.compile(net, x, y)
 
 
 def test_sum_as_loss():
diff --git a/tests/ut/python/parallel/test_tile.py b/tests/ut/python/parallel/test_tile.py
index 64731bf6dc4..a53c4fbc100 100644
--- a/tests/ut/python/parallel/test_tile.py
+++ b/tests/ut/python/parallel/test_tile.py
@@ -16,7 +16,7 @@
 import numpy as np
 
 import mindspore as ms
 from mindspore import context, Tensor, Parameter
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.nn import Cell, TrainOneStepCell, Momentum
 from mindspore.ops import operations as P
@@ -83,7 +83,7 @@ def compile_net(net, x=_b, b=_b):
     train_net = TrainOneStepCell(net, optimizer)
     train_net.set_auto_parallel()
     train_net.set_train()
-    _executor.compile(train_net, x, b)
+    _cell_graph_executor.compile(train_net, x, b)
     context.reset_auto_parallel_context()
diff --git a/tests/ut/python/parallel/test_train_and_eval.py b/tests/ut/python/parallel/test_train_and_eval.py
index 2820dca5799..1af8d2c27c2 100644
--- a/tests/ut/python/parallel/test_train_and_eval.py
+++ b/tests/ut/python/parallel/test_train_and_eval.py
@@ -16,7 +16,7 @@
 import numpy as np
 
 import mindspore as ms
 from mindspore import context, Tensor, Parameter
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.nn import Cell
 from mindspore.ops import operations as P
@@ -60,10 +60,10 @@ def test_train_and_eval():
     eval_net = EvalNet(net, strategy2=strategy2)
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, _x, _b, phase='train', auto_parallel_mode=True)
+    _cell_graph_executor.compile(net, _x, _b, phase='train', auto_parallel_mode=True)
     eval_net.set_train(mode=False)
     eval_net.set_auto_parallel()
-    _executor.compile(eval_net, _x, _b, phase='eval', auto_parallel_mode=True)
+    _cell_graph_executor.compile(eval_net, _x, _b, phase='eval', auto_parallel_mode=True)
 
     context.reset_auto_parallel_context()
diff --git a/tests/ut/python/parallel/test_two_matmul.py b/tests/ut/python/parallel/test_two_matmul.py
index 59994469d29..3505d406558 100644
--- a/tests/ut/python/parallel/test_two_matmul.py
+++ b/tests/ut/python/parallel/test_two_matmul.py
@@ -18,7 +18,7 @@
 import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss
@@ -50,7 +50,7 @@ class GradWrap(nn.Cell):
 def compile_net(net, x, y, b):
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x, y, b)
+    _cell_graph_executor.compile(net, x, y, b)
 
 
 # model_parallel test
diff --git a/tests/ut/python/parallel/test_two_weights_parameter.py b/tests/ut/python/parallel/test_two_weights_parameter.py
index cebee4ce806..8041589968d 100644
--- a/tests/ut/python/parallel/test_two_weights_parameter.py
+++ b/tests/ut/python/parallel/test_two_weights_parameter.py
@@ -18,7 +18,7 @@
 import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor, Parameter, ParameterTuple
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
@@ -81,4 +81,4 @@ def test_two_weights_parameter():
     context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
     train_net.set_auto_parallel()
     train_net.set_train()
-    _executor.compile(train_net, x, b)
+    _cell_graph_executor.compile(train_net, x, b)
diff --git a/tests/ut/python/parallel/test_uniform_candidate_sampler.py b/tests/ut/python/parallel/test_uniform_candidate_sampler.py
index 8e6171a69d7..405797ba2e8 100644
--- a/tests/ut/python/parallel/test_uniform_candidate_sampler.py
+++ b/tests/ut/python/parallel/test_uniform_candidate_sampler.py
@@ -18,7 +18,7 @@
 import mindspore as ms
 import mindspore.context as context
 from mindspore import Tensor, Parameter
 import mindspore.nn as nn
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.nn import TrainOneStepCell, Momentum
 from mindspore.ops import operations as P
@@ -78,7 +78,7 @@ def compile_net(net):
     train_net = TrainOneStepCell(net, optimizer)
     train_net.set_auto_parallel()
     train_net.set_train()
-    _executor.compile(train_net, _x)
+    _cell_graph_executor.compile(train_net, _x)
     context.reset_auto_parallel_context()
diff --git a/tests/ut/python/parallel/test_unsortedsegmentmax.py b/tests/ut/python/parallel/test_unsortedsegmentmax.py
index b13b83cdd06..3c8242b0f76 100644
--- a/tests/ut/python/parallel/test_unsortedsegmentmax.py
+++ b/tests/ut/python/parallel/test_unsortedsegmentmax.py
@@ -19,7 +19,7 @@
 import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
 from mindspore.ops.operations.comm_ops import _VirtualDataset
@@ -71,7 +71,7 @@ def compile_graph(x, y, segments, strategy1, strategy2, auto=False):
         context.set_auto_parallel_context(parallel_mode="auto_parallel")
     else:
         context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
-    _executor.compile(net, x, y)
+    _cell_graph_executor.compile(net, x, y)
 
 
 def test_UnsortedSegmentMax_model_parallel_slice_1d():
diff --git a/tests/ut/python/parallel/test_unsortedsegmentmin.py b/tests/ut/python/parallel/test_unsortedsegmentmin.py
index e0fbf943a14..c4998b3879d 100644
--- a/tests/ut/python/parallel/test_unsortedsegmentmin.py
+++ b/tests/ut/python/parallel/test_unsortedsegmentmin.py
@@ -19,7 +19,7 @@
 import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
 from mindspore.ops.operations.comm_ops import _VirtualDataset
@@ -71,7 +71,7 @@ def compile_graph(x, y, segments, strategy1, strategy2, auto=False):
         context.set_auto_parallel_context(parallel_mode="auto_parallel")
     else:
         context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
-    _executor.compile(net, x, y)
+    _cell_graph_executor.compile(net, x, y)
 
 
 def test_unsortedsegmentmin_model_parallel_slice_1d():
diff --git a/tests/ut/python/parallel/test_unsortedsegmentsum.py b/tests/ut/python/parallel/test_unsortedsegmentsum.py
index 3085be87d62..9090ff09319 100644
--- a/tests/ut/python/parallel/test_unsortedsegmentsum.py
+++ b/tests/ut/python/parallel/test_unsortedsegmentsum.py
@@ -20,7 +20,7 @@
 import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss
@@ -70,7 +70,7 @@ def compile_graph(x, y, segments, strategy1, strategy2, auto=False):
     net = GradWrap(NetWithLoss(Net(strategy1, strategy2, segments)))
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x, y)
+    _cell_graph_executor.compile(net, x, y)
 
 
 def test_unsortedsegmentsum_model_parallel_slice_1d():
diff --git a/tests/ut/python/parallel/test_virtual_dataset_3_input.py b/tests/ut/python/parallel/test_virtual_dataset_3_input.py
index f5e99343527..2fc5250ec51 100644
--- a/tests/ut/python/parallel/test_virtual_dataset_3_input.py
+++ b/tests/ut/python/parallel/test_virtual_dataset_3_input.py
@@ -18,7 +18,7 @@
 import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.nn.wrap.cell_wrapper import VirtualDatasetCellTriple
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
@@ -69,7 +69,7 @@ def test_virtualdataset_cell_3_inputs():
     b = Tensor(np.ones([64, 2048]), dtype=ms.float32)
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x, y, b)
+    _cell_graph_executor.compile(net, x, y, b)
 
 
 if __name__ == '__main__':
diff --git a/tests/ut/python/parallel/test_virtual_dataset_with_strategy.py b/tests/ut/python/parallel/test_virtual_dataset_with_strategy.py
index 100dda6296a..d163ae9d6ae 100644
--- a/tests/ut/python/parallel/test_virtual_dataset_with_strategy.py
+++ b/tests/ut/python/parallel/test_virtual_dataset_with_strategy.py
@@ -18,7 +18,7 @@
 import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
 from mindspore.ops.operations.comm_ops import _VirtualDataset
@@ -78,7 +78,7 @@ class Net2(nn.Cell):
 def compile_net(net, x, y, b):
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x, y, b)
+    _cell_graph_executor.compile(net, x, y, b)
 
 
 def test_virtual_dataset_model_parallel_semi_auto_parallel():
diff --git a/tests/ut/python/parallel/test_virtual_output.py b/tests/ut/python/parallel/test_virtual_output.py
index 3d2067cc287..fc5031abdd0 100644
--- a/tests/ut/python/parallel/test_virtual_output.py
+++ b/tests/ut/python/parallel/test_virtual_output.py
@@ -19,7 +19,7 @@
 import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import operations as P
 from mindspore.common.parameter import Parameter
@@ -118,15 +118,15 @@ class ParallelMulNet(nn.Cell):
 def compile_graph(x, net):
     net.set_auto_parallel()
     net.set_train(False)
-    _executor.compile(net, x, auto_parallel_mode=True)
-    strategies = _executor._get_shard_strategy(net)
+    _cell_graph_executor.compile(net, x, auto_parallel_mode=True)
+    strategies = _cell_graph_executor._get_shard_strategy(net)
     return strategies
 
 
 def compile_graph_two_input(x, y, net):
     net.set_auto_parallel()
     net.set_train(False)
-    _executor.compile(net, x, y, auto_parallel_mode=True)
-    strategies = _executor._get_shard_strategy(net)
+    _cell_graph_executor.compile(net, x, y, auto_parallel_mode=True)
+    strategies = _cell_graph_executor._get_shard_strategy(net)
     return strategies
diff --git a/tests/ut/python/pipeline/parse/test_fix_bug.py b/tests/ut/python/pipeline/parse/test_fix_bug.py
index 59e5fdd5de6..48faa67019d 100644
--- a/tests/ut/python/pipeline/parse/test_fix_bug.py
+++ b/tests/ut/python/pipeline/parse/test_fix_bug.py
@@ -21,7 +21,7 @@
 from mindspore import Tensor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
 from mindspore.common import dtype as ms
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 
 
 class assignment1_Net(nn.Cell):
@@ -63,7 +63,7 @@ def assignment_operator_base(number):
         net = assignment1_Net(x)
     else:
        net = assignment2_Net(x)
-    _executor.compile(net, input_me)
+    _cell_graph_executor.compile(net, input_me)
 
 
 def test_ME_assignment_operator_0010():
@@ -95,7 +95,7 @@ def test_compile_unspported():
     input_me = Tensor(input_np)
     net = unsupported_method_net()
     with pytest.raises(RuntimeError):
-        _executor.compile(net, input_me)
+        _cell_graph_executor.compile(net, input_me)
 
 
 def test_parser_map_0002():
diff --git a/tests/ut/python/pipeline/parse/test_list.py b/tests/ut/python/pipeline/parse/test_list.py
index f86c844e087..67deb132e13 100644
--- a/tests/ut/python/pipeline/parse/test_list.py
+++ b/tests/ut/python/pipeline/parse/test_list.py
@@ -17,7 +17,7 @@
 import numpy as np
 
 import mindspore.nn as nn
 from mindspore import Tensor
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.nn import Cell
@@ -37,11 +37,11 @@ def test_list1():
     input_np = np.random.randn(2, 3, 4, 5).astype(np.float32)
     input_me = Tensor(input_np)
     net = Net1([1])
-    _executor.compile(net, input_me)
+    _cell_graph_executor.compile(net, input_me)
 
 
 def test_list2():
     input_np = np.random.randn(2, 3, 4, 5).astype(np.float32)
     input_me = Tensor(input_np)
     net = Net1([1, 2])
-    _executor.compile(net, input_me)
+    _cell_graph_executor.compile(net, input_me)
diff --git a/tests/ut/python/pipeline/parse/test_parse.py b/tests/ut/python/pipeline/parse/test_parse.py
index 4b7ebb59fbf..9e942d48004 100644
--- a/tests/ut/python/pipeline/parse/test_parse.py
+++ b/tests/ut/python/pipeline/parse/test_parse.py
@@ -28,7 +28,7 @@
 from mindspore import Tensor
 from mindspore import context
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
-from mindspore.common.api import ms_function, _executor
+from mindspore.common.api import ms_function, _cell_graph_executor
 from mindspore.ops._grad.grad_base import bprop_getters
 from mindspore.ops.primitive import prim_attr_register, PrimitiveWithInfer
 from mindspore.ops.functional import tensor_add
@@ -107,7 +107,7 @@ def test_var_parameter_case2():
     np2 = np.random.randn(2, 3, 4, 5).astype(np.float32)
     input2 = ms.Tensor(np2)
 
-    _executor.compile(net, input_data, input1, input2)
+    _cell_graph_executor.compile(net, input_data, input1, input2)
 
 
 # Test case: test the global flag