From ba66c0d491b322145ee56a203f9dc07b807d586c Mon Sep 17 00:00:00 2001
From: huanghui
Date: Thu, 22 Jul 2021 15:48:12 +0800
Subject: [PATCH] add security isolation for save_graphs

Wrap all save_graphs-driven IR dumping (DumpIR, DumpIRProto, draw::Draw,
ExportIR) in #ifdef ENABLE_DUMP_IR, guard execute-order and .dot dumping
with #ifndef ENABLE_SECURITY, and make MsContext reject setting
save_graphs/save_graphs_path when compiled in security mode. Update the
tests accordingly: drop save_graphs from context.set_context calls and
mark dump-dependent tests with security_off_wrap.
---
 .../ascend/ascend_backend_optimization.cc     | 16 +++-
 .../common/common_backend_optimization.cc     | 10 +++
 .../backend/optimizer/common/pass_manager.cc  |  4 +
 .../backend/session/ascend_auto_monad.cc      |  8 ++
 .../ccsrc/backend/session/ascend_session.cc   | 18 ++++-
 .../ccsrc/backend/session/gpu_session.cc      |  8 +-
 .../ccsrc/backend/session/session_basic.cc    |  2 +
 mindspore/ccsrc/debug/trace.cc                |  4 +-
 .../ccsrc/frontend/optimizer/ad/dfunctor.cc   |  2 +
 mindspore/ccsrc/frontend/optimizer/ad/grad.cc |  6 ++
 .../ccsrc/frontend/optimizer/ad/kpynative.cc  |  2 +
 mindspore/ccsrc/frontend/optimizer/opt.cc     |  3 +-
 .../ccsrc/frontend/optimizer/optimizer.h      |  2 +
 .../parallel/graph_util/graph_info.cc         |  2 +
 .../frontend/parallel/step_auto_parallel.cc   |  3 +-
 mindspore/ccsrc/pipeline/jit/action.cc        |  2 +
 mindspore/ccsrc/pipeline/jit/pipeline.cc      |  8 +-
 mindspore/ccsrc/pipeline/jit/pipeline_ge.cc   |  6 +-
 .../pipeline/pynative/pynative_execute.cc     |  4 +
 .../device/ascend/tasksink/task_generator.cc  |  2 +-
 .../ccsrc/runtime/framework/graph_compiler.cc |  6 +-
 mindspore/ccsrc/transform/graph_ir/convert.cc |  2 +
 mindspore/ccsrc/transform/graph_ir/convert.h  |  5 ++
 mindspore/core/utils/ms_context.cc            |  6 ++
 mindspore/core/utils/ms_context.h             | 10 +++
 tests/st/auto_monad/test_auto_monad.py        | 10 +--
 tests/st/auto_monad/test_auto_monad_gpu.py    | 13 ++--
 .../st/control/inner/test_001_single_while.py |  2 +-
 .../st/control/inner/test_011_if_in_while.py  |  2 +-
 .../inner/test_011_if_in_while_break.py       |  2 +-
 tests/st/control/inner/test_012_if_in_for.py  |  2 +-
 .../control/inner/test_012_if_in_for_break.py |  2 +-
 .../st/control/inner/test_020_while_in_if.py  |  2 +-
 .../inner/test_021_while_while_normal.py      |  2 +-
 .../inner/test_022_for_while_normal.py        |  2 +-
 .../control/inner/test_101_if_after_while.py  |  2 +-
 .../inner/test_111_if_after_if_in_while.py    |  2 +-
 .../inner/test_120_if_after_while_in_if.py    |  2 +-
 .../inner/test_121_if_after_while_in_while.py |  2 +-
 .../inner/test_122_if_after_while_in_for.py   |  2 +-
 .../control/inner/test_200_while_after_if.py  |  2 +-
 .../st/control/inner/test_201_for_n_while.py  |  2 +-
 .../control/inner/test_202_while_n_while.py   |  2 +-
 .../inner/test_210_while_after_if_in_if.py    |  2 +-
 .../inner/test_211_while_after_if_in_while.py |  2 +-
 .../inner/test_212_while_after_if_in_for.py   |  2 +-
 .../inner/test_220_while_after_while_in_if.py |  2 +-
 .../inner/test_221_while_while_while.py       |  2 +-
 .../control/inner/test_222_for_while_while.py |  2 +-
 .../inner/test_230_while_after_for_in_if.py   |  2 +-
 .../control/inner/test_231_while_for_while.py |  2 +-
 .../control/inner/test_232_for_for_while.py   |  2 +-
 .../inner/test_301_while_normal_for.py        |  2 +-
 .../st/control/inner/test_311_while_if_for.py |  2 +-
 .../inner/test_321_while_while_in_while.py    |  2 +-
 tests/st/control/test_cont_grad.py            |  5 --
 tests/st/control/test_if_mindir.py            |  2 +-
 tests/st/control/test_while_grad.py           |  2 +-
 tests/st/fl/albert/cloud_train.py             |  2 +-
 .../src/model_utils/moxing_adapter.py         |  2 -
 .../test_fl_fasterrcnn.py                     |  2 +-
 .../test_cross_silo_femnist.py                |  2 +-
 .../cross_silo_lenet/test_cross_silo_lenet.py |  2 +-
 .../hybrid_lenet/test_hybrid_train_lenet.py   |  2 +-
 tests/st/fl/mobile/test_mobile_lenet.py       |  2 +-
 ...st_ub_fusion_matmul_confusion_transpose.py |  1 -
 .../test_unsorted_segment_sum_fission.py      |  1 -
 tests/st/gnn/gcn/test_gcn.py                  |  3 +-
 .../st/heterogeneous_excutor/test_control.py  |  1 -
 tests/st/mix_precision/test_mix_precision.py  |  4 +-
 .../train_and_test_multinpu_ci.py             |  2 +-
 ...rain_and_test_multinpu_ci_data_parallel.py |  2 +-
 .../bert/bert_performance/test_bert_thor.py   |  2 +-
 .../models/resnet50/test_resnet50_imagenet.py |  6 +-
 tests/st/ops/ascend/test_ops_infer.py         |  2 +-
 tests/st/ops/cpu/test_cache_ops.py            |  3 +-
 tests/st/ops/cpu/test_l1_regularizer_op.py    |  2 +-
 tests/st/ops/cpu/test_masked_select_op.py     |  4 +-
 tests/st/ops/cpu/test_scatter_nd_update_op.py |  2 +-
 tests/st/ops/cpu/test_smoothl1loss_op.py      |  4 +-
 tests/st/ops/gpu/test_adam_fusion.py          |  2 +-
 tests/st/ops/gpu/test_maximum_op.py           |  4 +-
 tests/st/ops/gpu/test_minimum_op.py           |  8 +-
 tests/st/ops/gpu/test_relu_v2.py              |  6 +-
 tests/st/ops/gpu/test_smoothl1loss_op.py      |  4 +-
 .../graph_kernel/test_cast_matmul_fusion.py   |  2 +-
 .../probability/bnn_layers/test_bnn_layer.py  |  2 +-
 tests/st/probability/dpn/test_gpu_svi_cvae.py |  2 +-
 tests/st/probability/dpn/test_gpu_svi_vae.py  |  2 +-
 tests/st/probability/dpn/test_gpu_vae_gan.py  |  2 +-
 .../transforms/test_transform_bnn_layer.py    |  2 +-
 .../transforms/test_transform_bnn_model.py    |  2 +-
 tests/st/tbe_networks/test_resnet_cifar_8p.py |  3 +-
 tests/syntax/simple_expression/test_assign.py |  2 +-
 tests/syntax/simple_expression/test_call.py   |  2 +-
 .../syntax/simple_expression/test_compare.py  |  2 +-
 tests/syntax/simple_expression/test_invert.py |  2 +-
 .../simple_expression/test_parse_exception.py |  2 +-
 tests/ut/cpp/ir/clone_test.cc                 |  3 -
 .../cpp/operator/grad_implementations_test.cc |  1 -
 tests/ut/cpp/optimizer/ad/kpynative_test.cc   |  6 --
 tests/ut/cpp/optimizer/clean_test.cc          | 17 ----
 tests/ut/cpp/optimizer/lib_test.cc            |  7 +-
 tests/ut/cpp/optimizer/opt_test.cc            |  5 --
 tests/ut/cpp/optimizer/optimizer_test.cc      |  3 -
 tests/ut/cpp/parallel/step_parallel_test.cc   |  1 -
 .../pipeline/parse/parser_abnormal_test.cc    | 16 ----
 .../cpp/pipeline/parse/parser_class_test.cc   | 17 ----
 .../pipeline/parse/parser_integrate_test.cc   |  1 -
 .../pipeline/parse/parser_primitive_test.cc   | 43 ----------
 tests/ut/cpp/pipeline/parse/parser_test.cc    | 16 ----
 tests/ut/cpp/pipeline/parse/resolve_test.cc   | 17 ----
 .../static_analysis/evaluator_test.cc         |  2 -
 .../cpp/pipeline/static_analysis/prim_test.cc |  3 -
 .../static_analysis/specialize_test.cc        |  9 ---
 tests/ut/cpp/transform/convert_test.cc        | 78 ------------------
 tests/ut/cpp/transform/graph_runner_test.cc   | 19 +++--
 tests/ut/python/automl/test_case.py           |  2 +-
 tests/ut/python/dtype/test_tuple.py           |  2 +-
 tests/ut/python/nn/test_l1_regularizer.py     |  2 +-
 tests/ut/python/ops/test_control_ops.py       |  2 +-
 tests/ut/python/ops/test_dynamic_shape.py     |  2 +-
 tests/ut/python/ops/test_nn_ops.py            |  4 +-
 tests/ut/python/ops/test_ops.py               |  2 +-
 tests/ut/python/ops/test_ops_attr_infer.py    |  2 +-
 tests/ut/python/ops/test_tensor_getitem.py    | 16 ++--
 tests/ut/python/ops/test_tensor_slice.py      |  2 +-
 tests/ut/python/optimizer/test_auto_grad.py   |  6 +-
 .../optimizer/test_while_ScatterNdUpdate.py   |  2 +-
 tests/ut/python/parallel/__init__.py          |  2 +-
 tests/ut/python/parallel/conftest.py          |  2 +-
 tests/ut/python/parallel/test_alltoall.py     |  3 +-
 .../test_auto_parallel_double_subgraphs.py    |  2 -
 .../parallel/test_auto_parallel_for_loop.py   |  1 -
 ...t_auto_parallel_for_loop_multi_subgraph.py |  1 -
 .../test_auto_parallel_for_loop_reshape.py    |  1 -
 .../test_auto_parallel_for_loop_simplify.py   |  1 -
 .../parallel/test_auto_parallel_two_bn.py     |  1 -
 tests/ut/python/parallel/test_batchmm.py      |  2 +-
 tests/ut/python/parallel/test_broadcast_to.py |  4 +-
 tests/ut/python/parallel/test_concat.py       |  1 -
 .../python/parallel/test_embeddinglookup.py   |  1 -
 tests/ut/python/parallel/test_eval.py         |  4 +-
 tests/ut/python/parallel/test_full_batch.py   |  4 +-
 .../parallel/test_gather_v2_primitive.py      |  2 +-
 tests/ut/python/parallel/test_gathernd.py     |  1 -
 .../python/parallel/test_gathernd_further.py  |  1 -
 .../python/parallel/test_loss_and_o2_level.py |  1 -
 tests/ut/python/parallel/test_loss_scale.py   |  2 +-
 .../parallel/test_manual_embedding_lookup.py  |  3 -
 .../python/parallel/test_manual_gatherv2.py   |  2 -
 .../python/parallel/test_model_with_loss.py   |  1 -
 .../parallel/test_model_without_loss.py       |  1 -
 .../python/parallel/test_neighborexchange.py  |  2 +-
 tests/ut/python/parallel/test_o2_level.py     |  1 -
 tests/ut/python/parallel/test_onehot_2dim.py  |  2 +-
 tests/ut/python/parallel/test_pack.py         |  8 +-
 tests/ut/python/parallel/test_parallel_moe.py |  3 +-
 .../python/parallel/test_parameter_merge.py   |  1 -
 tests/ut/python/parallel/test_range.py        |  1 -
 tests/ut/python/parallel/test_reluv2.py       |  2 +-
 .../ut/python/parallel/test_repeated_calc.py  |  2 -
 .../python/parallel/test_reshape_optimized.py |  1 -
 .../test_reshape_skip_redistribution.py       |  1 -
 .../ut/python/parallel/test_scatter_update.py |  6 +-
 tests/ut/python/parallel/test_select.py       |  1 -
 tests/ut/python/parallel/test_split.py        |  4 +-
 tests/ut/python/parallel/test_stridedslice.py |  1 -
 tests/ut/python/parallel/test_tile.py         |  1 -
 tests/ut/python/parallel/test_topk.py         |  1 -
 .../ut/python/parallel/test_train_and_eval.py |  2 +-
 tests/ut/python/parallel/test_two_matmul.py   |  2 -
 .../test_uniform_candidate_sampler.py         |  2 +-
 .../parallel/test_virtual_dataset_3_input.py  |  1 -
 .../parameter_feature/test_parameter.py       |  2 +-
 .../python/parameter_feature/test_var_grad.py |  2 +-
 .../python/pipeline/infer/test_auto_monad.py  |  2 +-
 .../parse/test_call_innetr_net_attr.py        |  2 +-
 .../parse/test_grammar_constraints.py         |  1 -
 ...ed_name_or_unsupported_builtin_function.py |  2 +-
 .../python/pipeline/parse/test_while_param.py |  6 +-
 tests/ut/python/pynative_mode/test_backend.py |  3 +-
 tests/ut/python/pynative_mode/test_context.py | 19 +++++
 .../pynative_mode/test_insert_grad_of.py      |  2 +-
 .../pynative_mode/test_multigraph_sink.py     |  2 +-
 .../pynative_mode/test_tuple_parameter.py     |  3 +-
 tests/ut/python/utils/test_initializer.py     |  2 +-
 187 files changed, 310 insertions(+), 468 deletions(-)

diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ascend_backend_optimization.cc b/mindspore/ccsrc/backend/optimizer/ascend/ascend_backend_optimization.cc
index d211ec3e1cc..d4998c40d80 100644
--- a/mindspore/ccsrc/backend/optimizer/ascend/ascend_backend_optimization.cc
+++ b/mindspore/ccsrc/backend/optimizer/ascend/ascend_backend_optimization.cc
@@ -275,12 +275,14 @@ void AscendMixPrecision(const std::shared_ptr &kernel_grap
 void AscendBackendIRFusionOptimization(const std::shared_ptr &kernel_graph) {
   auto context_ptr = MsContext::GetInstance();
   MS_EXCEPTION_IF_NULL(context_ptr);
+#ifdef ENABLE_DUMP_IR
   bool save_graphs = context_ptr->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);
   if (save_graphs) {
     std::string file_name = "hwopt_d_ir_fusion_before_graph_" + std::to_string(kernel_graph->graph_id()) + ".ir";
     DumpIR(file_name, kernel_graph);
     DumpIRProto(kernel_graph, "before_hwopt_" + std::to_string(kernel_graph->graph_id()));
   }
+#endif
   auto optimizer = std::make_shared();
   auto ir_fusion_pm = std::make_shared("ir_fusion_pm");
   ir_fusion_pm->AddPass(std::make_shared());
@@ -307,10 +309,12 @@ void AscendBackendIRFusionOptimization(const std::shared_ptr
   optimizer->AddPassManager(ir_fusion_pm);
   (void)optimizer->Optimize(kernel_graph);
   kernel_graph->SetExecOrderByDefault();
+#ifdef ENABLE_DUMP_IR
   if (save_graphs) {
     std::string file_name = "hwopt_d_ir_fusion_after_graph_" + std::to_string(kernel_graph->graph_id()) + ".ir";
     DumpIR(file_name, kernel_graph);
   }
+#endif
 }
 
 void RunOpAscendBackendIRFusionOptimization(const std::shared_ptr &kernel_graph) {
@@ -320,10 +324,12 @@ void RunOpAscendBackendIRFusionOptimization(const std::shared_ptr
+#ifdef ENABLE_DUMP_IR
   bool save_graphs = context_ptr->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);
   if (save_graphs) {
     DumpIR("hwopt_d_ir_fusion_before.ir", kernel_graph);
   }
+#endif
   auto optimizer = std::make_shared();
   auto ir_fusion_pm = std::make_shared("ir_fusion_pm");
   ir_fusion_pm->AddPass(std::make_shared());
@@ -353,9 +359,11 @@ void RunOpAscendBackendIRFusionOptimization(const std::shared_ptr
   optimizer->AddPassManager(ir_fusion_pm);
   (void)optimizer->Optimize(kernel_graph);
   kernel_graph->SetExecOrderByDefault();
+#ifdef ENABLE_DUMP_IR
   if (save_graphs) {
     DumpIR("hwopt_d_ir_fusion_after.ir", kernel_graph);
   }
+#endif
 }
 
 void RunOpAscendBackendOptimization(const std::shared_ptr &kernel_graph) {
@@ -375,11 +383,13 @@ void RunOpAscendBackendOptimization(const std::shared_ptr
 
 void AscendBackendOptimization(const std::shared_ptr &kernel_graph) {
   auto context_ptr = MsContext::GetInstance();
   MS_EXCEPTION_IF_NULL(context_ptr);
+#ifdef ENABLE_DUMP_IR
   bool save_graphs = context_ptr->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);
   if (save_graphs) {
     std::string file_name = "hwopt_d_before_graph_" + std::to_string(kernel_graph->graph_id()) + ".ir";
     DumpIR(file_name, kernel_graph);
   }
+#endif
   // data layout optimization
   AscendDataLayout(kernel_graph);
   // mixed precision optimization
@@ -425,13 +435,13 @@ void AscendBackendOptimization(const std::shared_ptr &kern
     const std::vector &exec_order = kernel_graph->execution_order();
     std::string exec_order_name = "graph_exec_order." + std::to_string(kernel_graph->graph_id());
     (void)mindspore::RDR::RecordGraphExecOrder(SubModuleId::SM_OPTIMIZER, exec_order_name, exec_order);
-#endif
   if (save_graphs) {
     std::string file_name = "hwopt_d_end_graph_" + std::to_string(kernel_graph->graph_id()) + ".ir";
     DumpIR(file_name, kernel_graph, true, kWholeStack);
     DumpIRProto(kernel_graph, "after_hwopt_" + std::to_string(kernel_graph->graph_id()));
     kernel_graph->DumpFuncGraph("hwopt_d_end");
   }
+#endif
 }
 
 void AscendBackendUBFusionOptimization(const std::shared_ptr &kernel_graph) {
@@ -454,11 +464,13 @@ void AscendBackendUBFusionOptimization(const std::shared_ptr
+#ifdef ENABLE_DUMP_IR
   bool save_graphs = context_ptr->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);
   if (save_graphs) {
     std::string file_name = "hwopt_d_ub_fusion_before_graph_" + std::to_string(kernel_graph->graph_id()) + ".ir";
     DumpIR(file_name, kernel_graph);
   }
+#endif
   auto fusion_id_allocator = std::make_shared();
   MS_EXCEPTION_IF_NULL(fusion_id_allocator);
   fusion_id_allocator->Init();
@@ -487,10 +499,12 @@ void AscendBackendUBFusionOptimization(const std::shared_ptr
   optimizer->AddPassManager(ub_fusion_pm);
   (void)optimizer->Optimize(kernel_graph);
   kernel_graph->SetExecOrderByDefault();
+#ifdef ENABLE_DUMP_IR
   if (save_graphs) {
     std::string file_name = "hwopt_d_ub_fusion_after_graph_" + std::to_string(kernel_graph->graph_id()) + ".ir";
     DumpIR(file_name, kernel_graph);
   }
+#endif
 }
 }  // namespace opt
 }  // namespace mindspore
diff --git a/mindspore/ccsrc/backend/optimizer/common/common_backend_optimization.cc b/mindspore/ccsrc/backend/optimizer/common/common_backend_optimization.cc
index 67f72911cc1..0690305ccda 100644
--- a/mindspore/ccsrc/backend/optimizer/common/common_backend_optimization.cc
+++ b/mindspore/ccsrc/backend/optimizer/common/common_backend_optimization.cc
@@ -35,6 +35,7 @@ namespace opt {
 void BackendCommonOptimization(const std::shared_ptr &kernel_graph) {
   MS_EXCEPTION_IF_NULL(kernel_graph);
   MS_LOG(INFO) << "start common opt graph:" << kernel_graph->graph_id();
+#ifdef ENABLE_DUMP_IR
   auto context_ptr = MsContext::GetInstance();
   MS_EXCEPTION_IF_NULL(context_ptr);
   bool save_graphs = context_ptr->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);
@@ -42,6 +43,7 @@ void BackendCommonOptimization(const std::shared_ptr &kern
     std::string file_name = "hwopt_common_before_graph_" + std::to_string(kernel_graph->graph_id()) + ".ir";
     DumpIR(file_name, kernel_graph);
   }
+#endif
   auto optimizer = std::make_shared();
   auto common_pm = std::make_shared("common_pm");
   common_pm->AddPass(std::make_shared());
@@ -55,10 +57,12 @@ void BackendCommonOptimization(const std::shared_ptr &kern
   optimizer->AddPassManager(common_pm);
   (void)optimizer->Optimize(kernel_graph);
   kernel_graph->SetExecOrderByDefault();
+#ifdef ENABLE_DUMP_IR
   if (save_graphs) {
     std::string file_name = "hwopt_common_after_graph_" + std::to_string(kernel_graph->graph_id()) + ".ir";
     DumpIR(file_name, kernel_graph);
   }
+#endif
 }
 
 void CommonFinalOptimization(const std::shared_ptr &kernel_graph) {
@@ -70,6 +74,7 @@ void CommonFinalOptimization(const std::shared_ptr &kernel
   optimizer->AddPassManager(pm);
   (void)optimizer->Optimize(kernel_graph);
   kernel_graph->SetExecOrderByDefault();
+#ifdef ENABLE_DUMP_IR
   // Dump IR if save_graphs is set.
   auto context = MsContext::GetInstance();
   MS_EXCEPTION_IF_NULL(context);
@@ -78,11 +83,13 @@ void CommonFinalOptimization(const std::shared_ptr &kernel
     std::string filename = "hwopt_common_final_graph_" + std::to_string(kernel_graph->graph_id()) + ".ir";
     DumpIR(filename, kernel_graph);
   }
+#endif
 }
 
 void CommonUnifyMindIROptimization(const std::shared_ptr &kernel_graph) {
   MS_EXCEPTION_IF_NULL(kernel_graph);
   MS_LOG(INFO) << "start common unify mindir opt graph:" << kernel_graph->graph_id();
+#ifdef ENABLE_DUMP_IR
   auto context_ptr = MsContext::GetInstance();
   MS_EXCEPTION_IF_NULL(context_ptr);
   bool save_graphs = context_ptr->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);
@@ -91,16 +98,19 @@ void CommonUnifyMindIROptimization(const std::shared_ptr &
       "hwopt_common_unify_mindir_before_graph_" + std::to_string(kernel_graph->graph_id()) + ".ir";
     DumpIR(file_name, kernel_graph);
   }
+#endif
   auto opt = std::make_shared();
   auto pm = std::make_shared("common_unify_mindir_pm");
   pm->AddPass(std::make_shared());
   opt->AddPassManager(pm);
   (void)opt->Optimize(kernel_graph);
   kernel_graph->SetExecOrderByDefault();
+#ifdef ENABLE_DUMP_IR
   if (save_graphs) {
     std::string file_name =
       "hwopt_common_unify_mindir_after_graph_" + std::to_string(kernel_graph->graph_id()) + ".ir";
     DumpIR(file_name, kernel_graph);
   }
+#endif
 }
 }  // namespace opt
 }  // namespace mindspore
diff --git a/mindspore/ccsrc/backend/optimizer/common/pass_manager.cc b/mindspore/ccsrc/backend/optimizer/common/pass_manager.cc
index 8c38ca34457..6e876ba3380 100644
--- a/mindspore/ccsrc/backend/optimizer/common/pass_manager.cc
+++ b/mindspore/ccsrc/backend/optimizer/common/pass_manager.cc
@@ -126,6 +126,7 @@ std::string PassManager::GetPassFullname(size_t pass_id, const PassPtr &pass) co
 }
 
 void PassManager::DumpPassIR(const FuncGraphPtr &func_graph, const std::string &pass_fullname) const {
+#ifdef ENABLE_DUMP_IR
   auto context_ptr = MsContext::GetInstance();
   MS_EXCEPTION_IF_NULL(context_ptr);
   bool save_graphs = context_ptr->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);
@@ -137,6 +138,7 @@ void PassManager::DumpPassIR(const FuncGraphPtr &func_graph, const std::string &
     oss << pass_fullname + ".ir";
     DumpIR(oss.str(), func_graph, true);
   }
+#endif
 }
 
 bool PassManager::Run(const FuncGraphPtr &func_graph, const std::vector &passes) const {
@@ -149,7 +151,9 @@ bool PassManager::Run(const FuncGraphPtr &func_graph, const std::vector
     if (pass != nullptr) {
       pass->SetCacheManager(cache_manager_);
       changed = RunPass(func_graph, num, pass) || changed;
+#ifdef ENABLE_DUMP_IR
       DumpPassIR(func_graph, GetPassFullname(num, pass));
+#endif
       num++;
     }
   }
diff --git a/mindspore/ccsrc/backend/session/ascend_auto_monad.cc b/mindspore/ccsrc/backend/session/ascend_auto_monad.cc
index 2b2b5d35be1..64b6a91c35d 100644
--- a/mindspore/ccsrc/backend/session/ascend_auto_monad.cc
+++ b/mindspore/ccsrc/backend/session/ascend_auto_monad.cc
@@ -58,6 +58,7 @@ const char OUTPUT[] = "output";
 // Attribute to indicate that the node is last node in an iteration.
 const char ITEREND[] = "PROFILING_ITER_END";
 
+#ifdef ENABLE_DUMP_IR
 bool IsSaveGraph() {
   auto context_ptr = MsContext::GetInstance();
   MS_EXCEPTION_IF_NULL(context_ptr);
@@ -85,8 +86,10 @@ void DumpGraphForDebug(NotNull kg) {
     DumpAllGraphs(kg, &memo);
   }
 }
+#endif
 
 void DumpExecuteOrder(NotNull kg) {
+#ifndef ENABLE_SECURITY
   if (!IsSaveGraph()) {
     return;
   }
@@ -135,6 +138,7 @@ void DumpExecuteOrder(NotNull kg) {
     index++;
   }
   fout.close();
+#endif
 }
 
 // Return kNoLabel when label id attribute not set for the graph.
@@ -1859,7 +1863,9 @@ void AscendAutoMonad::Run() {
   kernel_graph_->set_recursive_call(context.HasRecursiveCall());
   kernel_graph_->set_subgraph_multi_call(context.HasSubgraphMultiCall());
   MS_LOG(DEBUG) << "Ascend auto-monad finish.";
+#ifdef ENABLE_DUMP_IR
   DumpGraphForDebug(kernel_graph_);
+#endif
 }
 
 void AscendAutoMonad::GenerateExecuteOrder() {
@@ -1868,7 +1874,9 @@ void AscendAutoMonad::GenerateExecuteOrder() {
   ExecuteOrderGenerator generator(context, kernel_graph_.get());
   generator.Run();
   MS_LOG(DEBUG) << "Ascend generate execute order finish.";
+#ifndef ENABLE_SECURITY
   DumpExecuteOrder(kernel_graph_);
+#endif
 }
 }  // namespace session
 }  // namespace mindspore
diff --git a/mindspore/ccsrc/backend/session/ascend_session.cc b/mindspore/ccsrc/backend/session/ascend_session.cc
index c2cb0033ed0..7b27f081fe7 100644
--- a/mindspore/ccsrc/backend/session/ascend_session.cc
+++ b/mindspore/ccsrc/backend/session/ascend_session.cc
@@ -337,12 +337,14 @@ void AscendSession::UnifyMindIR(const KernelGraphPtr &graph) {
   SessionBasic::UnifyMindIR(graph);
   auto context_ptr = MsContext::GetInstance();
   MS_EXCEPTION_IF_NULL(context_ptr);
+#ifdef ENABLE_DUMP_IR
   bool save_graphs = context_ptr->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);
   if (save_graphs) {
     std::string file_name = "hwopt_d_before_unify_mindir_graph_" + std::to_string(graph->graph_id()) + ".ir";
     DumpIR(file_name, graph);
     DumpIRProto(graph, "before_unify_mindir_hwopt_" + std::to_string(graph->graph_id()));
   }
+#endif
   auto optimizer = std::make_shared();
   auto unify_mindir_pm = std::make_shared("unify_mindir_pm");
   unify_mindir_pm->AddPass(std::make_shared());
@@ -384,10 +386,12 @@ void AscendSession::UnifyMindIR(const KernelGraphPtr &graph) {
   optimizer->AddPassManager(unify_mindir_pm);
   (void)optimizer->Optimize(graph);
   graph->SetExecOrderByDefault();
+#ifdef ENABLE_DUMP_IR
   if (save_graphs) {
     std::string file_name = "hwopt_d_after_unify_mindir_graph_" + std::to_string(graph->graph_id()) + ".ir";
     DumpIR(file_name, graph);
   }
+#endif
 }
 
 void AscendSession::LoadInputData(const std::shared_ptr &kernel_graph,
@@ -533,6 +537,7 @@ GraphId AscendSession::CompileGraphImpl(NotNull func_graph) {
   AnfAlgo::InsertMakeTupleForOutput(NOT_NULL(root_graph));
   // root root_graph valiate,include genearte execute order and so on
   RootGraphExecutorValidate(NOT_NULL(root_graph));
+#ifdef ENABLE_DUMP_IR
   // dump graph before remove nop nodes
   auto context_ptr = MsContext::GetInstance();
   MS_EXCEPTION_IF_NULL(context_ptr);
@@ -540,6 +545,7 @@ GraphId AscendSession::CompileGraphImpl(NotNull func_graph) {
   if (save_graphs) {
     DumpIRProto(root_graph, "before_removeNop_" + std::to_string(graph_sum_));
   }
+#endif
   // adjust kernel
   AdjustKernel(root_graph);
@@ -659,6 +665,7 @@ void AscendSession::CompileChildGraph(const KernelGraphPtr &child_graph) {
   MS_LOG(INFO) << "CompileChildGraph " << child_graph->ToString();
   opt::AscendBackendIRFusionOptimization(child_graph);
   child_graph->SetExecOrderByDefault();
+#ifdef ENABLE_DUMP_IR
   auto context_ptr = MsContext::GetInstance();
   MS_EXCEPTION_IF_NULL(context_ptr);
   bool save_graphs = context_ptr->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);
@@ -666,12 +673,15 @@ void AscendSession::CompileChildGraph(const KernelGraphPtr &child_graph) {
     std::string file_name = "select_kernel_before_graph_" + std::to_string(child_graph->graph_id()) + ".ir";
     DumpIR(file_name, child_graph);
   }
+#endif
   // select kernel build info
   SelectKernel(*child_graph);
+#ifdef ENABLE_DUMP_IR
   if (save_graphs) {
     std::string file_name = "select_kernel_after_graph_" + std::to_string(child_graph->graph_id()) + ".ir";
     DumpIR(file_name, child_graph);
   }
+#endif
   // optimize graph
   HardwareOptimize(child_graph);
   // assign static memory of parameters
@@ -1197,12 +1207,14 @@ void AscendSession::AdjustKernel(const std::shared_ptr &kernel_grap
   BuildKernel(kernel_graph);
   device::ascend::KernelBuildPreprocess(kernel_graph.get());
   device::KernelAdjust::GetInstance().InsertSwitchLoop(kernel_graph);
+#ifdef ENABLE_DUMP_IR
   auto context_ptr = MsContext::GetInstance();
   MS_EXCEPTION_IF_NULL(context_ptr);
   bool save_graphs = context_ptr->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);
   if (save_graphs) {
     DumpIR("after_adjust_kernel.ir", kernel_graph);
   }
+#endif
   MS_LOG(INFO) << "Finish!";
 }
@@ -1678,7 +1690,7 @@ void AscendSession::IrFusionPass(const NotNull graph, NotNull
   memo->insert(graph.get());
   opt::AscendBackendIRFusionOptimization(graph);
   graph->SetExecOrderByDefault();
-
+#ifdef ENABLE_DUMP_IR
   auto context_ptr = MsContext::GetInstance();
   MS_EXCEPTION_IF_NULL(context_ptr);
   bool save_graphs = context_ptr->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);
@@ -1686,6 +1698,7 @@ void AscendSession::IrFusionPass(const NotNull graph, NotNullgraph_id()) + ".ir";
     DumpIR(file_name, graph.get());
   }
+#endif
 
   for (auto &child_graph : graph->child_graph_order()) {
     IrFusionPass(NOT_NULL(child_graph.lock()), memo);
@@ -1744,7 +1757,7 @@ void AscendSession::RecurseSelectKernelInfo(NotNull graph,
       (*reduce_precision_count)++;
     }
   }
-
+#ifdef ENABLE_DUMP_IR
   auto context_ptr = MsContext::GetInstance();
   MS_EXCEPTION_IF_NULL(context_ptr);
   bool save_graphs = context_ptr->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);
@@ -1752,6 +1765,7 @@ void AscendSession::RecurseSelectKernelInfo(NotNull graph,
     std::string file_name = "select_kernel_after_graph_" + std::to_string(graph->graph_id()) + ".ir";
     DumpIR(file_name, graph.get());
   }
+#endif
   MS_LOG(INFO) << "Finish selecting kernel info in graph: " << graph->graph_id();
 }
diff --git a/mindspore/ccsrc/backend/session/gpu_session.cc b/mindspore/ccsrc/backend/session/gpu_session.cc
index 2f4eebc6458..f5f757d94e6 100644
--- a/mindspore/ccsrc/backend/session/gpu_session.cc
+++ b/mindspore/ccsrc/backend/session/gpu_session.cc
@@ -401,17 +401,19 @@ GraphId GPUSession::CompileGraphImpl(KernelGraphPtr graph) {
   // Prepare ms context info for dump .pb graph
   auto context_ptr = MsContext::GetInstance();
   MS_EXCEPTION_IF_NULL(context_ptr);
-  bool save_graphs = context_ptr->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);
   auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
   MS_EXCEPTION_IF_NULL(runtime_instance);
 #ifndef ENABLE_SECURITY
   auto &json_parser = DumpJsonParser::GetInstance();
   json_parser.Parse();
 #endif
+#ifdef ENABLE_DUMP_IR
+  bool save_graphs = context_ptr->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);
   // Dump .pb graph before graph optimization
   if (save_graphs) {
     DumpIRProto(graph, "before_opt_" + std::to_string(graph->graph_id()));
   }
+#endif
   // Graph optimization irrelevant to device data format
   Optimize(graph);
   // Select kernel build info
@@ -429,10 +431,12 @@ GraphId GPUSession::CompileGraphImpl(KernelGraphPtr graph) {
 #endif
   // Assign CUDA streams
   AssignStream(graph);
+#ifdef ENABLE_DUMP_IR
   // Dump .pb graph before remove nop nodes
   if (save_graphs) {
     DumpIRProto(graph, "before_removeNop_" + std::to_string(graph->graph_id()));
   }
+#endif
   // Update Graph Dynamic Shape Attr.
   UpdateGraphDynamicShapeAttr(NOT_NULL(graph));
   graph->UpdateGraphDynamicAttr();
@@ -454,9 +458,11 @@ GraphId GPUSession::CompileGraphImpl(KernelGraphPtr graph) {
   // Get summary nodes.
   SetSummaryNodes(graph.get());
   // Dump .pb graph after graph optimization
+#ifdef ENABLE_DUMP_IR
   if (save_graphs) {
     DumpIRProto(graph, "after_opt_" + std::to_string(graph->graph_id()));
   }
+#endif
 #ifndef ENABLE_SECURITY
   if (json_parser.e2e_dump_enabled()) {
     graph->set_root_graph_id(graph->graph_id());
diff --git a/mindspore/ccsrc/backend/session/session_basic.cc b/mindspore/ccsrc/backend/session/session_basic.cc
index 216a2c9b997..7f34c9bf281 100644
--- a/mindspore/ccsrc/backend/session/session_basic.cc
+++ b/mindspore/ccsrc/backend/session/session_basic.cc
@@ -1566,7 +1566,9 @@ std::shared_ptr SessionBasic::ConstructKernelGraph(const FuncGraphP
     }
     // Create cnode
     if (!CreateCNodeOfKernelGraph(node, graph.get())) {
+#ifdef ENABLE_DUMP_IR
       DumpIR("construct_kernel_graph_fail.ir", func_graph);
+#endif
       MS_LOG(EXCEPTION) << "Construct func graph " << func_graph->ToString() << " failed." << trace::DumpSourceLines(node);
     }
diff --git a/mindspore/ccsrc/debug/trace.cc b/mindspore/ccsrc/debug/trace.cc
index 92ee04c7446..e2246e3be26 100644
--- a/mindspore/ccsrc/debug/trace.cc
+++ b/mindspore/ccsrc/debug/trace.cc
@@ -427,12 +427,14 @@ void GetEvalStackInfo(std::ostringstream &oss) {
     MS_LOG(INFO) << "Length of analysis information stack is empty.";
     return;
   }
+  oss << "\nThe function call stack";
+#ifndef ENABLE_SECURITY
   std::string file_name = GetEvalFailDatPath();
   auto ret = OutputAnalyzedGraphWithType(file_name);
-  oss << "\nThe function call stack";
   if (ret) {
     oss << " (See file '" << file_name << "' for more details)";
   }
+#endif
   oss << ":\n";
   int index = 0;
diff --git a/mindspore/ccsrc/frontend/optimizer/ad/dfunctor.cc b/mindspore/ccsrc/frontend/optimizer/ad/dfunctor.cc
index 42fbefbf0de..335fc9ed159 100644
--- a/mindspore/ccsrc/frontend/optimizer/ad/dfunctor.cc
+++ b/mindspore/ccsrc/frontend/optimizer/ad/dfunctor.cc
@@ -761,7 +761,9 @@ CNodePtr GetJUser(const NodeUsersMap &node_user_map, const CNodePtr &cnode, int
   for (auto &user : j_users) {
     user_info << "    user: " << user.first->DebugString() << ", index: " << user.second << "\n";
   }
+#ifdef ENABLE_DUMP_IR
   DumpIR("J_User_Ex_" + cnode->func_graph()->ToString() + ".ir", cnode->func_graph());
+#endif
   MS_LOG(EXCEPTION) << "Incorrect J CNode user size: " << size << ", of {" << cnode->DebugString(2) << "/" << index << "}\nUser Info:\n"
                     << user_info.str();
diff --git a/mindspore/ccsrc/frontend/optimizer/ad/grad.cc b/mindspore/ccsrc/frontend/optimizer/ad/grad.cc
index 7adb1f5800f..2fda88c57db 100644
--- a/mindspore/ccsrc/frontend/optimizer/ad/grad.cc
+++ b/mindspore/ccsrc/frontend/optimizer/ad/grad.cc
@@ -42,22 +42,28 @@ FuncGraphPtr PartialEliminateOptPass(const ResourcePtr &resource, const FuncGrap
 }
 
 FuncGraphPtr LiftFv(const pipeline::ResourceBasePtr &resource, const FuncGraphPtr &func_graph) {
+#ifdef ENABLE_DUMP_IR
   bool save_graphs_flag = MsContext::GetInstance()->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);
   if (save_graphs_flag) {
     DumpIR("before_lift_" + func_graph->ToString() + ".ir", func_graph);
   }
+#endif
   FuncGraphPtr new_fg = LiftingClone(func_graph);
+#ifdef ENABLE_DUMP_IR
   if (save_graphs_flag) {
     DumpIR("after_lift_" + new_fg->ToString() + ".ir", new_fg);
   }
+#endif
   auto new_res = std::dynamic_pointer_cast(resource);
   if (new_res == nullptr) {
     MS_LOG(EXCEPTION) << "Parameter resources is not a pipeline::Resource";
   }
   auto opt_fg = PartialEliminateOptPass(new_res, new_fg);
+#ifdef ENABLE_DUMP_IR
   if (save_graphs_flag) {
     DumpIR("after_opt_" + opt_fg->ToString() + ".ir", opt_fg);
   }
+#endif
   return opt_fg;
 }
 }  // namespace
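
Every hunk above follows the same two-level guard, and the rest of the patch repeats it: the dump code only exists in builds compiled with ENABLE_DUMP_IR, and it only runs when the user turned on save_graphs in the context. A minimal sketch of the pattern, assuming a FuncGraphPtr named func_graph is in scope (the file name "stage.ir" is a placeholder, not a dump name used by this patch):

    #ifdef ENABLE_DUMP_IR
      // Compile-time gate: absent entirely from builds without IR dump support.
      if (MsContext::GetInstance()->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG)) {
        // Runtime gate: only dump when save_graphs was enabled in the context.
        DumpIR("stage.ir", func_graph);
      }
    #endif

For dumps that can leak model structure even in dump-capable builds (execute order, .dot drawings, the analysis-failure .dat file), the patch instead uses #ifndef ENABLE_SECURITY, so security builds exclude them unconditionally.
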
diff --git a/mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc b/mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc
index d4abe16645d..0180384cf9a 100644
--- a/mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc
+++ b/mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc
@@ -365,10 +365,12 @@ FuncGraphPtr KPynativeCellImpl::Finish(const AnfNodePtrList &weights, bool grad_
   SetOutput(weights, grad_inputs, grad_weights);
   // Replace Parameter of primal funcgraph with parameter of tape_;
   ReplacePrimalParameter(weights, has_sens_arg);
+#ifdef ENABLE_DUMP_IR
   auto save_graphs_flg = MsContext::GetInstance()->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);
   if (save_graphs_flg) {
     DumpIR("before_final_opt.ir", tape_);
   }
+#endif
   return tape_;
 }
diff --git a/mindspore/ccsrc/frontend/optimizer/opt.cc b/mindspore/ccsrc/frontend/optimizer/opt.cc
index 30ec46304b3..ba045d00621 100644
--- a/mindspore/ccsrc/frontend/optimizer/opt.cc
+++ b/mindspore/ccsrc/frontend/optimizer/opt.cc
@@ -299,7 +299,7 @@ bool SubstitutionList::ApplySubstitutionsToIR(const OptimizerPtr &optimizer, con
       bool change = ApplySubstitutionToIR(optimizer, func_graph, substitution);
       changes = changes || change;
       loop = loop || change;
-
+#ifdef ENABLE_DUMP_IR
       static const auto enable_dump_pass_ir = (common::GetEnv("ENV_DUMP_PASS_IR") == "1");
       if (enable_dump_pass_ir && MsContext::GetInstance()->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG)) {
         auto fg_name = optimizer->name() + "_r" + std::to_string(optimizer->CurPass_.counter) + "_" +
@@ -310,6 +310,7 @@ bool SubstitutionList::ApplySubstitutionsToIR(const OptimizerPtr &optimizer, con
           ExportIR(fg_name + ".dat", func_graph);
         }
       }
+#endif
 
       // Record the status of each substitution
       if (optimizer->is_on_debug_) {
diff --git a/mindspore/ccsrc/frontend/optimizer/optimizer.h b/mindspore/ccsrc/frontend/optimizer/optimizer.h
index 9bc63257aff..e1e5be46d9c 100644
--- a/mindspore/ccsrc/frontend/optimizer/optimizer.h
+++ b/mindspore/ccsrc/frontend/optimizer/optimizer.h
@@ -194,6 +194,7 @@ class Optimizer : public std::enable_shared_from_this {
           }
         };
         use_profile ? (WITH(MsProfile::GetProfile()->Step(pass_names_[i])) opt_func) : opt_func();
+#ifdef ENABLE_DUMP_IR
         static const auto enable_dump_pass_ir = (common::GetEnv("ENV_DUMP_PASS_IR") == "1");
         if (enable_dump_pass_ir && MsContext::GetInstance()->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG)) {
           auto fg_name =
@@ -206,6 +207,7 @@ class Optimizer : public std::enable_shared_from_this {
           }
           MS_LOG(DEBUG) << "Dump " << pass_names_[i] << " func graph.";
         }
+#endif
       }
     };
     use_profile ?
      (WITH(MsProfile::GetProfile()->Lap(counter)) run_runc) : run_runc();
diff --git a/mindspore/ccsrc/frontend/parallel/graph_util/graph_info.cc b/mindspore/ccsrc/frontend/parallel/graph_util/graph_info.cc
index 6d8e10f710e..394dd584d87 100644
--- a/mindspore/ccsrc/frontend/parallel/graph_util/graph_info.cc
+++ b/mindspore/ccsrc/frontend/parallel/graph_util/graph_info.cc
@@ -46,11 +46,13 @@ std::vector FindPrimtive(const FuncGraphPtr &graph, const std::str
 }
 
 void DumpGraph(const FuncGraphPtr &root, const std::string &name) {
+#ifdef ENABLE_DUMP_IR
   if (MsContext::GetInstance()->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG)) {
     draw::Draw(name + ".dot", root);
     DumpIR(name + ".ir", root);
     ExportIR(name + ".dat", root);
   }
+#endif
 }
 
 // Return true if the cnode is in a for-loop and loop_index indicates the i-th loop;
diff --git a/mindspore/ccsrc/frontend/parallel/step_auto_parallel.cc b/mindspore/ccsrc/frontend/parallel/step_auto_parallel.cc
index 73514dd475f..48172eb4ef5 100644
--- a/mindspore/ccsrc/frontend/parallel/step_auto_parallel.cc
+++ b/mindspore/ccsrc/frontend/parallel/step_auto_parallel.cc
@@ -83,10 +83,11 @@ bool StepAutoParallel(const FuncGraphPtr &root, const opt::OptimizerPtr &) {
     0
   }, end_time{0};
   (void)gettimeofday(&start_time, nullptr);
-
+#ifdef ENABLE_DUMP_IR
   if (MsContext::GetInstance()->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG)) {
     draw::Draw(STEP_AUTO_PARALLEL_BEGIN, root);
   }
+#endif
   MS_LOG(INFO) << "Now entering step auto parallel";
   TOTAL_OPS = 0;
   AnfNodePtr ret = root->get_return();
diff --git a/mindspore/ccsrc/pipeline/jit/action.cc b/mindspore/ccsrc/pipeline/jit/action.cc
index 40b5a5714d1..3464fd51ee3 100644
--- a/mindspore/ccsrc/pipeline/jit/action.cc
+++ b/mindspore/ccsrc/pipeline/jit/action.cc
@@ -565,6 +565,7 @@ bool OptimizeAction(const ResourcePtr &res, const std::vector &passes)
     if (!result) {
       MS_LOG(EXCEPTION) << "Pass running to end, failed in pass:" << pass.first;
     }
+#ifdef ENABLE_DUMP_IR
     if (MsContext::GetInstance()->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG) && res->func_graph() != nullptr) {
       auto fg_name = "opt_pass_" + std::to_string(counter) + "_" + pass.first;
       auto func_graph = res->func_graph();
@@ -574,6 +575,7 @@ bool OptimizeAction(const ResourcePtr &res, const std::vector &passes)
       ExportIR(fg_name + ".dat", func_graph);
       MS_LOG(DEBUG) << "Dump " << fg_name << " func graph.";
     }
+#endif
     counter++;
     MS_LOG(DEBUG) << "Pass " << pass.first << " end.";
   };
diff --git a/mindspore/ccsrc/pipeline/jit/pipeline.cc b/mindspore/ccsrc/pipeline/jit/pipeline.cc
index 297ec3efcab..9524223535e 100644
--- a/mindspore/ccsrc/pipeline/jit/pipeline.cc
+++ b/mindspore/ccsrc/pipeline/jit/pipeline.cc
@@ -34,6 +34,7 @@
 #include "debug/anf_ir_dump.h"
 #include "debug/dump_proto.h"
 #include "debug/anf_ir_utils.h"
+#include "debug/common.h"
 #include "utils/config_manager.h"
 #include "utils/convert_utils.h"
 #include "utils/convert_utils_py.h"
@@ -45,7 +46,6 @@
 #include "backend/session/executor_manager.h"
 #include "debug/trace.h"
 #include "debug/draw.h"
-#include "debug/common.h"
 #include "pipeline/pynative/pynative_execute.h"
 #include "frontend/optimizer/py_pass_manager.h"
 #include "pybind_api/pybind_patch.h"
@@ -909,7 +909,6 @@ void Pipeline::Run(const std::string &phase) {
       }
       MS_LOG(INFO) << "Recording FuncGraph in pipeline end.";
     }
-#endif
 
     if (MsContext::GetInstance()->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG) && graph != nullptr) {
       user_graph = graph;
@@ -926,6 +925,7 @@ void Pipeline::Run(const std::string &phase) {
       // generate IR file in a heavily commented format, which can also be reloaded
       ExportIR(base_name + ".dat", graph);
     }
+#endif
     i++;
 #ifdef ENABLE_TIMELINE
     dump_time.Record(action.first, GetTime(), false);
@@ -937,9 +937,11 @@ void Pipeline::Run(const std::string &phase) {
   MsProfile::Reset();
 #endif
 
+#ifdef ENABLE_DUMP_IR
   if (MsContext::GetInstance()->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG) && (user_graph != nullptr)) {
     draw::DrawUserFuncGraph("ModelDigraph.dot", user_graph);
   }
+#endif
   MS_LOG(INFO) << "End";
 }
@@ -1345,12 +1347,14 @@ FuncGraphPtr LoadMindIR(const std::string &file_name, char *dec_key, const size_
                         const std::string &dec_mode) {
   auto func_graph = mindspore::LoadMindIR(file_name, false, reinterpret_cast(dec_key), key_len, dec_mode);
+#ifdef ENABLE_DUMP_IR
   auto context_ptr = MsContext::GetInstance();
   MS_EXCEPTION_IF_NULL(context_ptr);
   bool save_graphs = context_ptr->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);
   if (save_graphs) {
     DumpIR("load.ir", func_graph);
   }
+#endif
   return func_graph;
 }
diff --git a/mindspore/ccsrc/pipeline/jit/pipeline_ge.cc b/mindspore/ccsrc/pipeline/jit/pipeline_ge.cc
index 8bc69dc847d..daa988d18d5 100644
--- a/mindspore/ccsrc/pipeline/jit/pipeline_ge.cc
+++ b/mindspore/ccsrc/pipeline/jit/pipeline_ge.cc
@@ -213,12 +213,13 @@ bool AddDFGraph(const std::map &info, const py::di
     MS_LOG(ERROR) << "Convert df graph failed, err:" << converter.ErrCode();
     return false;
   }
-
+#ifdef ENABLE_DUMP_IR
   if (MsContext::GetInstance()->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG)) {
     converter.DrawComputeGraph(GetSaveGraphsPathName("ge_graph.dot"));                      // for debug
     converter.DrawInitGraph(GetSaveGraphsPathName("init_graph.dot"));                      // for debug
     converter.DrawSaveCheckpointGraph(GetSaveGraphsPathName("save_checkpoint_graph.dot"));  // for debug
   }
+#endif
   std::string init_graph = "init_subgraph." + net_id;
   std::string checkpoint_name = "save." + net_id;
   if (phase.find("train") != std::string::npos) {
@@ -243,11 +244,12 @@ FuncGraphPtr BuildDFGraph(const std::map &info, co
     MS_LOG(EXCEPTION) << "No phase in executor:" << GetPhasePrefix(phase);
   }
   FuncGraphPtr anf_graph = info.at(phase)->func_graph;
-
+#ifdef ENABLE_DUMP_IR
   if (MsContext::GetInstance()->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG)) {
     draw::Draw("anf_graph.dot", anf_graph);  // for debug
     DumpIR("anf_graph.ir", anf_graph, true);
   }
+#endif
 
   if (!AddDFGraph(info, init_params, phase, broadcast_params)) {
     MS_LOG(ERROR) << "GenConvertor failed";
diff --git a/mindspore/ccsrc/pipeline/pynative/pynative_execute.cc b/mindspore/ccsrc/pipeline/pynative/pynative_execute.cc
index aee450bbe83..af5e902157a 100644
--- a/mindspore/ccsrc/pipeline/pynative/pynative_execute.cc
+++ b/mindspore/ccsrc/pipeline/pynative/pynative_execute.cc
@@ -1981,9 +1981,11 @@ std::string GradExecutor::GetCellId(const py::object &cell, const py::args &args
 }
 
 void GradExecutor::DumpGraphIR(const std::string &filename, const FuncGraphPtr &graph) {
+#ifdef ENABLE_DUMP_IR
   if (MsContext::GetInstance()->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG)) {
     DumpIR(filename, graph);
   }
+#endif
 }
 
 inline bool GradExecutor::IsNestedGrad() const {
@@ -2319,7 +2321,9 @@ void GradExecutor::EndGraphInner(py::object *ret, const py::object &cell, const
   // Just only dump the last forward graph
   if (MsContext::GetInstance()->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG) && is_top_cell_end) {
     curr_g_->set_output(GetObjNode(out, out_id));
+#ifdef ENABLE_DUMP_IR
     DumpIR("fg.ir", curr_g_);
+#endif
   }
 
   // Reset grad flag and update output node of top cell
diff --git a/mindspore/ccsrc/runtime/device/ascend/tasksink/task_generator.cc b/mindspore/ccsrc/runtime/device/ascend/tasksink/task_generator.cc
index 1a5efe9c16b..699771bcbf5 100644
--- a/mindspore/ccsrc/runtime/device/ascend/tasksink/task_generator.cc
+++ b/mindspore/ccsrc/runtime/device/ascend/tasksink/task_generator.cc
@@ -44,7 +44,6 @@ bool TaskGenerator::GenTasks(const std::vector &anf_node_list, std::ve
 #ifdef ENABLE_DUMP_IR
   string task_info_name = "task_info_graph." + std::to_string(graph_id);
   (void)mindspore::RDR::RecordTaskDebugInfo(SUBMODULE_ID, task_info_name, task_debug_info_list_);
-#endif
   auto context_ptr = MsContext::GetInstance();
   MS_EXCEPTION_IF_NULL(context_ptr);
   bool save_graphs = context_ptr->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);
@@ -54,6 +53,7 @@ bool TaskGenerator::GenTasks(const std::vector &anf_node_list, std::ve
     DumpTaskInfo(file_path);
 #endif
   }
+#endif
   return true;
 }
diff --git a/mindspore/ccsrc/runtime/framework/graph_compiler.cc b/mindspore/ccsrc/runtime/framework/graph_compiler.cc
index 5184124e07c..1abaf70fa48 100644
--- a/mindspore/ccsrc/runtime/framework/graph_compiler.cc
+++ b/mindspore/ccsrc/runtime/framework/graph_compiler.cc
@@ -313,12 +313,13 @@ GraphId GraphCompiler::CompileGraphImpl(const KernelGraphPtr &graph, const Devic
   MS_EXCEPTION_IF_NULL(device_context);
   const auto &ms_context = MsContext::GetInstance();
   MS_EXCEPTION_IF_NULL(ms_context);
-
+#ifdef ENABLE_DUMP_IR
   bool save_graphs = ms_context->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);
   // Dump .pb graph before graph optimization.
   if (save_graphs) {
     DumpIRProto(graph, "before_opt_" + std::to_string(graph->graph_id()));
   }
+#endif
   MS_LOG(INFO) << "Get graph outputs before optimizer, graph id: " << graph->graph_id();
   auto outputs_before_optimizer = AnfAlgo::GetAllOutputWithIndex(graph->output());
@@ -350,11 +351,12 @@ GraphId GraphCompiler::CompileGraphImpl(const KernelGraphPtr &graph, const Devic
   session_->SetSummaryNodes(graph.get());
   SetSummaryNodesRefCount(graph.get());
-
+#ifdef ENABLE_DUMP_IR
   // Dump .pb graph after graph optimization.
   if (save_graphs) {
     DumpIRProto(graph, "after_opt_" + std::to_string(graph->graph_id()));
   }
+#endif
 
 #ifdef ENABLE_DEBUGGER
   auto debugger = Debugger::GetInstance();
diff --git a/mindspore/ccsrc/transform/graph_ir/convert.cc b/mindspore/ccsrc/transform/graph_ir/convert.cc
index 18648a83235..4287e4468da 100644
--- a/mindspore/ccsrc/transform/graph_ir/convert.cc
+++ b/mindspore/ccsrc/transform/graph_ir/convert.cc
@@ -1370,10 +1370,12 @@ void DfGraphConvertor::ProcessSubgraph(AnfNodePtr node, const std::vector
+#ifdef ENABLE_DUMP_IR
   std::string name = graph_node->ToString() + "_ge_graph.dot";
   if (MsContext::GetInstance()->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG)) {
     converter.DrawComputeGraph(name);
   }
+#endif
   branches_map_[node.get()] = *(converter.df_graph_);
 }
diff --git a/mindspore/ccsrc/transform/graph_ir/convert.h b/mindspore/ccsrc/transform/graph_ir/convert.h
index 6fa355c242c..b2db30bc400 100644
--- a/mindspore/ccsrc/transform/graph_ir/convert.h
+++ b/mindspore/ccsrc/transform/graph_ir/convert.h
@@ -76,6 +76,7 @@ class DfGraphConvertor {
   static void RegisterAdapter(const std::string &name, OpAdapterPtr train_adpt, OpAdapterPtr infer_adpt);
 
   void DrawComputeGraph(const std::string &name) {
+#ifndef ENABLE_SECURITY
     std::ofstream fout(name);
     if (!fout.is_open()) {
       MS_LOG(ERROR) << "Open file '" << name << "' failed!"
@@ -84,8 +85,11 @@ class DfGraphConvertor {
     }
     fout << compute_sout_.str();
     fout.close();
+#endif
   }
+
   void DrawInitGraph(const std::string &name) {
+#ifndef ENABLE_SECURITY
     std::ofstream fout(name);
     if (!fout.is_open()) {
       MS_LOG(ERROR) << "Open file '" << name << "' failed!"
@@ -94,6 +98,7 @@ class DfGraphConvertor {
     }
     fout << init_sout_.str();
     fout.close();
+#endif
   }
 
   void DrawSaveCheckpointGraph(const std::string &name) {
     std::ofstream fout(name);
diff --git a/mindspore/core/utils/ms_context.cc b/mindspore/core/utils/ms_context.cc
index 6c1903d0d70..10bd4c2b132 100644
--- a/mindspore/core/utils/ms_context.cc
+++ b/mindspore/core/utils/ms_context.cc
@@ -32,8 +32,14 @@ std::map MsContext::policy_map_ = {{"ge", kMsBacke
                                                    {"vm_prior", kMsBackendVmPrior}};
 
 MsContext::MsContext(const std::string &policy, const std::string &target) {
+#ifndef ENABLE_SECURITY
   set_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG, false);
   set_param<std::string>(MS_CTX_SAVE_GRAPHS_PATH, ".");
+#else
+  // Need to set a default value for the arrays even when running in security mode.
+  bool_params_[MS_CTX_SAVE_GRAPHS_FLAG - MS_CTX_TYPE_BOOL_BEGIN] = false;
+  string_params_[MS_CTX_SAVE_GRAPHS_PATH - MS_CTX_TYPE_STRING_BEGIN] = ".";
+#endif
   set_param<std::string>(MS_CTX_PYTHON_EXE_PATH, "python");
   set_param<std::string>(MS_CTX_KERNEL_BUILD_SERVER_DIR, "");
   set_param<bool>(MS_CTX_ENABLE_DUMP, false);
diff --git a/mindspore/core/utils/ms_context.h b/mindspore/core/utils/ms_context.h
index e1e7d2553f4..c02493915b1 100644
--- a/mindspore/core/utils/ms_context.h
+++ b/mindspore/core/utils/ms_context.h
@@ -199,6 +199,11 @@ class MsContext {
 // set method implementation for type bool/int/uint32_t/float/std::string
 template <>
 inline void MsContext::set_param<bool>(MsCtxParam param, const bool &value) {
+#ifdef ENABLE_SECURITY
+  if (param == MS_CTX_SAVE_GRAPHS_FLAG) {
+    MS_EXCEPTION(ValueError) << "save_graphs is not supported, please recompile the source without '-s on'.";
+  }
+#endif
   bool_params_[param - MS_CTX_TYPE_BOOL_BEGIN] = value;
 }
 
@@ -219,6 +224,11 @@ inline void MsContext::set_param(MsCtxParam param, const float &value) {
 
 template <>
 inline void MsContext::set_param<std::string>(MsCtxParam param, const std::string &value) {
+#ifdef ENABLE_SECURITY
+  if (param == MS_CTX_SAVE_GRAPHS_PATH) {
+    MS_EXCEPTION(ValueError) << "save_graphs is not supported, please recompile the source without '-s on'.";
+  }
+#endif
   if (seter_ != nullptr && param == MS_CTX_DEVICE_TARGET) {
     MS_LOG(INFO) << "ms set context device target:" << value;
     seter_(value);
diff --git a/tests/st/auto_monad/test_auto_monad.py b/tests/st/auto_monad/test_auto_monad.py
index 0b8c907bf71..4dcbe8fdb93 100644
--- a/tests/st/auto_monad/test_auto_monad.py
+++ b/tests/st/auto_monad/test_auto_monad.py
@@ -28,6 +28,7 @@ from mindspore.common.parameter import Parameter
 from mindspore.common.initializer import initializer
 from mindspore.ops.primitive import constexpr
 from capture import Capture, capture, check_output
+from tests.security_utils import security_off_wrap
 
 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
 
@@ -1256,7 +1257,6 @@ def use_build_train_network_check_cast_num(network, level, inputs, label, cast_n
 class AssignNet(Cell):
     def __init__(self):
         super().__init__()
-        #self._save_graphs(save_graph_flag=True, save_graph_path=".")
         self.relu = ReLU()
         self.mean = P.ReduceMean(keep_dims=False)
         self.assign_sub = P.AssignSub()
@@ -1269,14 +1269,14 @@ class AssignNet(Cell):
         x = self.mean(x, (2, 3))
         return x
 
-
+@security_off_wrap
 def test_auto_mixed_precision_train_1(pynative_save_graphs):
     net = AssignNet()
     input32 = Tensor(np.ones([1, 3, 2, 2]).astype(np.float32))
     label32 = Tensor(np.zeros([1, 3]).astype(np.float32))
     use_build_train_network_check_cast_num(net, "O0", input32, label32, 0)
 
-
+@security_off_wrap
 def test_auto_mixed_precision_train_2(pynative_save_graphs):
     net = AssignNet()
     input32 = Tensor(np.ones([1, 3, 2, 2]).astype(np.float32))
@@ -1287,7 +1286,6 @@ def test_auto_mixed_precision_train_2(pynative_save_graphs):
 class MixControlNet(Cell):
     def __init__(self, in_channel, x):
         super().__init__()
-        #self._save_graphs(save_graph_flag=True, save_graph_path=".")
         self.biasadd = P.BiasAdd()
         self.equal = P.Equal()
         self.addn = P.AddN()
@@ -1354,7 +1353,7 @@ def use_build_train_network_controlflow_check_cast_num(network, level, input_x,
     assert len(castnum) == cast_num
     return out_me
 
-
+@security_off_wrap
 def test_auto_mixed_precision_controlflow_auto(pynative_save_graphs):
     net = MixControlNet(3, 5)
     input_x = Tensor(
@@ -1392,7 +1391,6 @@ def test_if_cast():
         return out
 
-    context.set_context(save_graphs=False)
     net = Net(True)
     beta1 = Tensor(np.array([2]).astype(np.float32))
     beta2 = Tensor(np.array([10]).astype(np.float32))
diff --git a/tests/st/auto_monad/test_auto_monad_gpu.py b/tests/st/auto_monad/test_auto_monad_gpu.py
index b468239fe14..eff625372e2 100644
--- a/tests/st/auto_monad/test_auto_monad_gpu.py
+++ b/tests/st/auto_monad/test_auto_monad_gpu.py
@@ -28,6 +28,7 @@ from mindspore import context, Tensor
 from mindspore.common import ParameterTuple
 from mindspore.common.parameter import Parameter
 from mindspore.ops.composite import GradOperation
+from tests.security_utils import security_off_wrap
 
 context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
 
@@ -128,7 +129,7 @@ class SideEffectCastAll(Cell):
         out_b = self.cast(self.parameter_b, self.dtype)
         return out_a, out_b
 
-
+@security_off_wrap
 def test_side_effect_castall():
     clear_files()
     context.set_context(mode=context.GRAPH_MODE, save_graphs=True)
@@ -333,7 +334,7 @@ class InplaceNet(Cell):
         output = self.add(tmp_c1, tmp_c2)
         return output
 
-
+@security_off_wrap
 def test_ir_fusion_inplace_bn_conv_conv():
     clear_files()
     context.set_context(mode=context.GRAPH_MODE, save_graphs=True)
@@ -458,7 +459,7 @@ def use_build_train_network_controlflow_check_cast_num(network, level, input_x,
     assert len(castnum) == cast_num
     return out_me
 
-
+@security_off_wrap
 def test_auto_mixed_precision_controlflow_auto():
     context.set_context(mode=context.PYNATIVE_MODE, save_graphs=True)
     net = MixControlNet(3, 5)
@@ -472,7 +473,7 @@ def test_auto_mixed_precision_controlflow_auto():
     use_build_train_network_controlflow_check_cast_num(net, "auto", input_x,
                                                        label, cast_num)
 
-
+@security_off_wrap
 def test_updatestate_between_assigns():
     class UpdateState_Assigns(Cell):
         def __init__(self):
@@ -497,7 +498,7 @@ def test_updatestate_between_assigns():
     updatestate_num = re.findall('UpdateState', content)
     assert len(updatestate_num) == 1
 
-
+@security_off_wrap
 def test_updatestate_between_maketuple_assign():
     class UpdateState_MakeTuple_Assign(Cell):
         def __init__(self):
@@ -524,7 +525,7 @@ def test_updatestate_between_maketuple_assign():
     updatestate_num = re.findall('UpdateState', content)
     assert len(updatestate_num) == 1
 
-
+@security_off_wrap
 def test_updatestate_between_assign_maketuple():
     class UpdateState_Assign_MakeTuple(Cell):
         def __init__(self):
diff --git a/tests/st/control/inner/test_001_single_while.py b/tests/st/control/inner/test_001_single_while.py
index c000c311b7c..7784ab4da7b 100644
--- a/tests/st/control/inner/test_001_single_while.py
+++ b/tests/st/control/inner/test_001_single_while.py
@@ -19,7 +19,7 @@ from mindspore import Tensor
 from mindspore.ops import composite as C
 from mindspore import context
 
-context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
+context.set_context(mode=context.GRAPH_MODE)
 
 
 class ForwardNet(nn.Cell):
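
The remaining test edits all mirror the ms_context.h change above: a security build (ENABLE_SECURITY) rejects the save_graphs setting at the context layer, so the tests stop passing save_graphs to context.set_context, and tests that depend on dumped IR are wrapped with security_off_wrap. Condensed from the ms_context.h hunk, the rejection works like this:

    template <>
    inline void MsContext::set_param<bool>(MsCtxParam param, const bool &value) {
    #ifdef ENABLE_SECURITY
      // Security builds refuse to enable graph dumping at all.
      if (param == MS_CTX_SAVE_GRAPHS_FLAG) {
        MS_EXCEPTION(ValueError) << "save_graphs is not supported, please recompile the source without '-s on'.";
      }
    #endif
      bool_params_[param - MS_CTX_TYPE_BOOL_BEGIN] = value;
    }
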
diff --git a/tests/st/control/inner/test_011_if_in_while.py b/tests/st/control/inner/test_011_if_in_while.py
index baed2c41cb1..f318d2123ed 100644
--- a/tests/st/control/inner/test_011_if_in_while.py
+++ b/tests/st/control/inner/test_011_if_in_while.py
@@ -22,7 +22,7 @@ from mindspore import context
 from mindspore.ops import functional as F
 from mindspore.common.parameter import Parameter
 
-context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
+context.set_context(mode=context.GRAPH_MODE)
 
 
 class ForwardNet(nn.Cell):
diff --git a/tests/st/control/inner/test_011_if_in_while_break.py b/tests/st/control/inner/test_011_if_in_while_break.py
index bed6090afa3..4be61884210 100644
--- a/tests/st/control/inner/test_011_if_in_while_break.py
+++ b/tests/st/control/inner/test_011_if_in_while_break.py
@@ -20,7 +20,7 @@ from mindspore import Tensor
 from mindspore.ops import composite as C
 from mindspore import context
 
-context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
+context.set_context(mode=context.GRAPH_MODE)
 
 
 class ForwardNet(nn.Cell):
diff --git a/tests/st/control/inner/test_012_if_in_for.py b/tests/st/control/inner/test_012_if_in_for.py
index a11fba52fb3..26bed2813cc 100644
--- a/tests/st/control/inner/test_012_if_in_for.py
+++ b/tests/st/control/inner/test_012_if_in_for.py
@@ -22,7 +22,7 @@ from mindspore import context
 from mindspore.common.parameter import Parameter
 from mindspore.ops import functional as F
 
-context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
+context.set_context(mode=context.GRAPH_MODE)
 
 
 class ForwardNet(nn.Cell):
diff --git a/tests/st/control/inner/test_012_if_in_for_break.py b/tests/st/control/inner/test_012_if_in_for_break.py
index aeafb747939..74ebb97f049 100644
--- a/tests/st/control/inner/test_012_if_in_for_break.py
+++ b/tests/st/control/inner/test_012_if_in_for_break.py
@@ -20,7 +20,7 @@ from mindspore import Tensor
 from mindspore.ops import composite as C
 from mindspore import context
 
-context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
+context.set_context(mode=context.GRAPH_MODE)
 
 
 class ForwardNet(nn.Cell):
diff --git a/tests/st/control/inner/test_020_while_in_if.py b/tests/st/control/inner/test_020_while_in_if.py
index 7df3299073e..241c8c0d969 100644
--- a/tests/st/control/inner/test_020_while_in_if.py
+++ b/tests/st/control/inner/test_020_while_in_if.py
@@ -23,7 +23,7 @@ from mindspore import context
 from mindspore.common.parameter import Parameter
 from mindspore.ops import functional as F
 
-context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
+context.set_context(mode=context.GRAPH_MODE)
 
 
 class ForwardNet(nn.Cell):
diff --git a/tests/st/control/inner/test_021_while_while_normal.py b/tests/st/control/inner/test_021_while_while_normal.py
index 4098e705e6e..12aee2075cf 100644
--- a/tests/st/control/inner/test_021_while_while_normal.py
+++ b/tests/st/control/inner/test_021_while_while_normal.py
@@ -21,7 +21,7 @@ from mindspore import Tensor
 from mindspore.ops import composite as C
 from mindspore import context
 
-context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
+context.set_context(mode=context.GRAPH_MODE)
 
 
 class ForwardNet(nn.Cell):
diff --git a/tests/st/control/inner/test_022_for_while_normal.py b/tests/st/control/inner/test_022_for_while_normal.py
index cc77c8de99b..5c24575ad69 100644
--- a/tests/st/control/inner/test_022_for_while_normal.py
+++ b/tests/st/control/inner/test_022_for_while_normal.py
@@ -21,7 +21,7 @@ from mindspore import Tensor
 from mindspore.ops import composite as C
 from mindspore import context
 
-context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
+context.set_context(mode=context.GRAPH_MODE)
 
 
 class ForwardNet(nn.Cell):
diff --git a/tests/st/control/inner/test_101_if_after_while.py b/tests/st/control/inner/test_101_if_after_while.py
index ff9ec4d1145..537414de629 100644
--- a/tests/st/control/inner/test_101_if_after_while.py
+++ b/tests/st/control/inner/test_101_if_after_while.py
@@ -22,7 +22,7 @@ from mindspore import context
 from mindspore.common.parameter import Parameter
 from mindspore.ops import functional as F
 
-context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
+context.set_context(mode=context.GRAPH_MODE)
 
 
 class ForwardNet(nn.Cell):
diff --git a/tests/st/control/inner/test_111_if_after_if_in_while.py b/tests/st/control/inner/test_111_if_after_if_in_while.py
index dd8dff20a35..7ea0fb922a6 100644
--- a/tests/st/control/inner/test_111_if_after_if_in_while.py
+++ b/tests/st/control/inner/test_111_if_after_if_in_while.py
@@ -21,7 +21,7 @@ from mindspore.ops import composite as C
 from mindspore import context
 from mindspore.common.parameter import Parameter
 
-context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
+context.set_context(mode=context.GRAPH_MODE)
 
 
 class ForwardNet(nn.Cell):
diff --git a/tests/st/control/inner/test_120_if_after_while_in_if.py b/tests/st/control/inner/test_120_if_after_while_in_if.py
index 423dc4b0f2d..1ad976abdb5 100644
--- a/tests/st/control/inner/test_120_if_after_while_in_if.py
+++ b/tests/st/control/inner/test_120_if_after_while_in_if.py
@@ -22,7 +22,7 @@ from mindspore.ops import composite as C
 from mindspore import context
 from mindspore.common.parameter import Parameter
 
-context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
+context.set_context(mode=context.GRAPH_MODE)
 
 
 class ForwardNet(nn.Cell):
diff --git a/tests/st/control/inner/test_121_if_after_while_in_while.py b/tests/st/control/inner/test_121_if_after_while_in_while.py
index 137dd560ef4..762a6f38c92 100644
--- a/tests/st/control/inner/test_121_if_after_while_in_while.py
+++ b/tests/st/control/inner/test_121_if_after_while_in_while.py
@@ -23,7 +23,7 @@ from mindspore.ops import functional as F
 from mindspore import context
 from mindspore.common.parameter import Parameter
 
-context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
+context.set_context(mode=context.GRAPH_MODE)
 
 
 class ForwardNet(nn.Cell):
diff --git a/tests/st/control/inner/test_122_if_after_while_in_for.py b/tests/st/control/inner/test_122_if_after_while_in_for.py
index cc17110b2eb..2814891a64e 100644
--- a/tests/st/control/inner/test_122_if_after_while_in_for.py
+++ b/tests/st/control/inner/test_122_if_after_while_in_for.py
@@ -23,7 +23,7 @@ from mindspore.ops import functional as F
 from mindspore import context
 from mindspore.common.parameter import Parameter
 
-context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
+context.set_context(mode=context.GRAPH_MODE)
 
 
 class ForwardNet(nn.Cell):
diff --git a/tests/st/control/inner/test_200_while_after_if.py b/tests/st/control/inner/test_200_while_after_if.py
index 2d249f64641..b1f84acc6c5 100644
--- a/tests/st/control/inner/test_200_while_after_if.py
+++ b/tests/st/control/inner/test_200_while_after_if.py
@@ -21,7 +21,7 @@ from mindspore.ops import composite as C
 from mindspore import context
 from mindspore.common.parameter import Parameter
 
-context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
+context.set_context(mode=context.GRAPH_MODE)
 
 
 class ForwardNet(nn.Cell):
a/tests/st/control/inner/test_201_for_n_while.py b/tests/st/control/inner/test_201_for_n_while.py index b60f62df18b..7dee2f36505 100644 --- a/tests/st/control/inner/test_201_for_n_while.py +++ b/tests/st/control/inner/test_201_for_n_while.py @@ -21,7 +21,7 @@ from mindspore import Tensor from mindspore.ops import composite as C from mindspore import context -context.set_context(mode=context.GRAPH_MODE, save_graphs=False) +context.set_context(mode=context.GRAPH_MODE) class ForwardNet(nn.Cell): diff --git a/tests/st/control/inner/test_202_while_n_while.py b/tests/st/control/inner/test_202_while_n_while.py index d28f7b669b9..c6c21913b61 100644 --- a/tests/st/control/inner/test_202_while_n_while.py +++ b/tests/st/control/inner/test_202_while_n_while.py @@ -21,7 +21,7 @@ from mindspore import Tensor from mindspore.ops import composite as C from mindspore import context -context.set_context(mode=context.GRAPH_MODE, save_graphs=False) +context.set_context(mode=context.GRAPH_MODE) class ForwardNet(nn.Cell): diff --git a/tests/st/control/inner/test_210_while_after_if_in_if.py b/tests/st/control/inner/test_210_while_after_if_in_if.py index 1e8fbff51e4..09145371ec2 100644 --- a/tests/st/control/inner/test_210_while_after_if_in_if.py +++ b/tests/st/control/inner/test_210_while_after_if_in_if.py @@ -21,7 +21,7 @@ from mindspore.ops import composite as C from mindspore import context from mindspore.common.parameter import Parameter -context.set_context(mode=context.GRAPH_MODE, save_graphs=False) +context.set_context(mode=context.GRAPH_MODE) class ForwardNet(nn.Cell): diff --git a/tests/st/control/inner/test_211_while_after_if_in_while.py b/tests/st/control/inner/test_211_while_after_if_in_while.py index 3ea4e78ece9..245911a3fd5 100644 --- a/tests/st/control/inner/test_211_while_after_if_in_while.py +++ b/tests/st/control/inner/test_211_while_after_if_in_while.py @@ -21,7 +21,7 @@ from mindspore.ops import composite as C from mindspore import context from mindspore.common.parameter import Parameter -context.set_context(mode=context.GRAPH_MODE, save_graphs=False) +context.set_context(mode=context.GRAPH_MODE) class ForwardNet(nn.Cell): diff --git a/tests/st/control/inner/test_212_while_after_if_in_for.py b/tests/st/control/inner/test_212_while_after_if_in_for.py index f99752327ce..c8471dc4583 100644 --- a/tests/st/control/inner/test_212_while_after_if_in_for.py +++ b/tests/st/control/inner/test_212_while_after_if_in_for.py @@ -21,7 +21,7 @@ from mindspore.ops import composite as C from mindspore import context from mindspore.common.parameter import Parameter -context.set_context(mode=context.GRAPH_MODE, save_graphs=False) +context.set_context(mode=context.GRAPH_MODE) class ForwardNet(nn.Cell): diff --git a/tests/st/control/inner/test_220_while_after_while_in_if.py b/tests/st/control/inner/test_220_while_after_while_in_if.py index 804d3382d40..571e298a83f 100644 --- a/tests/st/control/inner/test_220_while_after_while_in_if.py +++ b/tests/st/control/inner/test_220_while_after_while_in_if.py @@ -22,7 +22,7 @@ from mindspore.ops import composite as C from mindspore import context from mindspore.common.parameter import Parameter -context.set_context(mode=context.GRAPH_MODE, save_graphs=False) +context.set_context(mode=context.GRAPH_MODE) class ForwardNet(nn.Cell): diff --git a/tests/st/control/inner/test_221_while_while_while.py b/tests/st/control/inner/test_221_while_while_while.py index 6b6c834dd3c..7c355158ab2 100644 --- a/tests/st/control/inner/test_221_while_while_while.py +++ 
b/tests/st/control/inner/test_221_while_while_while.py @@ -21,7 +21,7 @@ from mindspore import Tensor from mindspore.ops import composite as C from mindspore import context -context.set_context(mode=context.GRAPH_MODE, save_graphs=False) +context.set_context(mode=context.GRAPH_MODE) class ForwardNet(nn.Cell): diff --git a/tests/st/control/inner/test_222_for_while_while.py b/tests/st/control/inner/test_222_for_while_while.py index d937db81067..b2dfbb753ac 100644 --- a/tests/st/control/inner/test_222_for_while_while.py +++ b/tests/st/control/inner/test_222_for_while_while.py @@ -21,7 +21,7 @@ from mindspore import Tensor from mindspore.ops import composite as C from mindspore import context -context.set_context(mode=context.GRAPH_MODE, save_graphs=False) +context.set_context(mode=context.GRAPH_MODE) class ForwardNet(nn.Cell): diff --git a/tests/st/control/inner/test_230_while_after_for_in_if.py b/tests/st/control/inner/test_230_while_after_for_in_if.py index 8e9170b18f8..152c7bb670f 100644 --- a/tests/st/control/inner/test_230_while_after_for_in_if.py +++ b/tests/st/control/inner/test_230_while_after_for_in_if.py @@ -22,7 +22,7 @@ from mindspore.ops import composite as C from mindspore import context from mindspore.common.parameter import Parameter -context.set_context(mode=context.GRAPH_MODE, save_graphs=False) +context.set_context(mode=context.GRAPH_MODE) class ForwardNet(nn.Cell): diff --git a/tests/st/control/inner/test_231_while_for_while.py b/tests/st/control/inner/test_231_while_for_while.py index 16b1d6600dc..db4f3db9595 100644 --- a/tests/st/control/inner/test_231_while_for_while.py +++ b/tests/st/control/inner/test_231_while_for_while.py @@ -21,7 +21,7 @@ from mindspore import Tensor from mindspore.ops import composite as C from mindspore import context -context.set_context(mode=context.GRAPH_MODE, save_graphs=False) +context.set_context(mode=context.GRAPH_MODE) class ForwardNet(nn.Cell): diff --git a/tests/st/control/inner/test_232_for_for_while.py b/tests/st/control/inner/test_232_for_for_while.py index 94515ff0c61..07d8f32ea0d 100644 --- a/tests/st/control/inner/test_232_for_for_while.py +++ b/tests/st/control/inner/test_232_for_for_while.py @@ -21,7 +21,7 @@ from mindspore import Tensor from mindspore.ops import composite as C from mindspore import context -context.set_context(mode=context.GRAPH_MODE, save_graphs=False) +context.set_context(mode=context.GRAPH_MODE) class ForwardNet(nn.Cell): diff --git a/tests/st/control/inner/test_301_while_normal_for.py b/tests/st/control/inner/test_301_while_normal_for.py index 45365ca5c91..e437a4a498b 100644 --- a/tests/st/control/inner/test_301_while_normal_for.py +++ b/tests/st/control/inner/test_301_while_normal_for.py @@ -21,7 +21,7 @@ from mindspore import Tensor from mindspore.ops import composite as C from mindspore import context -context.set_context(mode=context.GRAPH_MODE, save_graphs=False) +context.set_context(mode=context.GRAPH_MODE) class ForwardNet(nn.Cell): diff --git a/tests/st/control/inner/test_311_while_if_for.py b/tests/st/control/inner/test_311_while_if_for.py index 02a1616211b..390208453cb 100644 --- a/tests/st/control/inner/test_311_while_if_for.py +++ b/tests/st/control/inner/test_311_while_if_for.py @@ -21,7 +21,7 @@ from mindspore import Tensor from mindspore.ops import composite as C from mindspore import context -context.set_context(mode=context.GRAPH_MODE, save_graphs=False) +context.set_context(mode=context.GRAPH_MODE) class ForwardNet(nn.Cell):
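The same substitution continues through the rest of the suite above. On builds that keep the feature, a developer can still opt in explicitly while debugging; a hypothetical sketch (the SAVE_GRAPHS environment variable is an assumption for illustration, not part of this patch):

    import os
    from mindspore import context

    context.set_context(mode=context.GRAPH_MODE)
    if os.getenv("SAVE_GRAPHS") == "1":  # assumed opt-in switch, illustration only
        context.set_context(save_graphs=True, save_graphs_path="./ir_dump")

diff --git 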
a/tests/st/control/inner/test_321_while_while_in_while.py b/tests/st/control/inner/test_321_while_while_in_while.py index 38d1e29cae6..19c023b138a 100644 --- a/tests/st/control/inner/test_321_while_while_in_while.py +++ b/tests/st/control/inner/test_321_while_while_in_while.py @@ -21,7 +21,7 @@ from mindspore import Tensor from mindspore.ops import composite as C from mindspore import context -context.set_context(mode=context.GRAPH_MODE, save_graphs=False) +context.set_context(mode=context.GRAPH_MODE) class ForwardNet(nn.Cell): diff --git a/tests/st/control/test_cont_grad.py b/tests/st/control/test_cont_grad.py index 4387875537d..de416ae7415 100644 --- a/tests/st/control/test_cont_grad.py +++ b/tests/st/control/test_cont_grad.py @@ -24,11 +24,6 @@ from mindspore.common.parameter import Parameter, ParameterTuple from mindspore.ops import composite as C from mindspore.ops import operations as P -# from tests.vm_impl.math_ops_vm_impl import * -# from tests.vm_impl.vm_interface import * -# from tests.vm_impl import * -# context.set_context(save_graphs=True) - grad_by_list = C.GradOperation(get_by_list=True) grad_all = C.GradOperation(get_all=True) diff --git a/tests/st/control/test_if_mindir.py b/tests/st/control/test_if_mindir.py index 42f8a2929a2..6fc368b7244 100644 --- a/tests/st/control/test_if_mindir.py +++ b/tests/st/control/test_if_mindir.py @@ -153,7 +153,7 @@ def test_load_mindir_and_run(): @pytest.mark.platform_arm_ascend_training @pytest.mark.env_onecard def test_single_if(): - context.set_context(mode=context.GRAPH_MODE, save_graphs=True, save_graphs_path="./ifir") + context.set_context(mode=context.GRAPH_MODE) network = SingleIfNet() x = Tensor(np.array([1]).astype(np.float32)) diff --git a/tests/st/control/test_while_grad.py b/tests/st/control/test_while_grad.py index 6d667cdc545..936ddd2a474 100644 --- a/tests/st/control/test_while_grad.py +++ b/tests/st/control/test_while_grad.py @@ -42,7 +42,7 @@ class GradNet(nn.Cell): @pytest.mark.platform_x86_ascend_training @pytest.mark.env_onecard def test_while_grad(): - context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=True) + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") x = Tensor([2.0], dtype=mstype.float32) y = Tensor([2.0], dtype=mstype.float32) GradNet(Net())(x, y) diff --git a/tests/st/fl/albert/cloud_train.py b/tests/st/fl/albert/cloud_train.py index 78f0fb80483..5b0bdf467be 100644 --- a/tests/st/fl/albert/cloud_train.py +++ b/tests/st/fl/albert/cloud_train.py @@ -145,7 +145,7 @@ def server_train(args): os.makedirs(output_dir) # mindspore context - context.set_context(mode=context.GRAPH_MODE, device_target=device_target, save_graphs=True) + context.set_context(mode=context.GRAPH_MODE, device_target=device_target) context.set_fl_context(**fl_ctx) print('Context setting is done! 
Time cost: {}'.format(time() - start)) sys.stdout.flush() diff --git a/tests/st/fl/cross_silo_faster_rcnn/src/model_utils/moxing_adapter.py b/tests/st/fl/cross_silo_faster_rcnn/src/model_utils/moxing_adapter.py index 830d19a6fc9..761ea896867 100644 --- a/tests/st/fl/cross_silo_faster_rcnn/src/model_utils/moxing_adapter.py +++ b/tests/st/fl/cross_silo_faster_rcnn/src/model_utils/moxing_adapter.py @@ -17,7 +17,6 @@ import os import functools -from mindspore import context from mindspore.profiler import Profiler from .config import config @@ -93,7 +92,6 @@ def moxing_wrapper(pre_process=None, post_process=None): sync_data(config.train_url, config.output_path) print("Workspace downloaded: ", os.listdir(config.output_path)) - context.set_context(save_graphs_path=os.path.join(config.output_path, str(get_rank_id()))) config.device_num = get_device_num() config.device_id = get_device_id() if not os.path.exists(config.output_path): diff --git a/tests/st/fl/cross_silo_faster_rcnn/test_fl_fasterrcnn.py b/tests/st/fl/cross_silo_faster_rcnn/test_fl_fasterrcnn.py index e3342b7d79d..8f732cd5990 100644 --- a/tests/st/fl/cross_silo_faster_rcnn/test_fl_fasterrcnn.py +++ b/tests/st/fl/cross_silo_faster_rcnn/test_fl_fasterrcnn.py @@ -86,7 +86,7 @@ ctx = { "encrypt_type": encrypt_type } -context.set_context(mode=context.GRAPH_MODE, device_target=device_target, save_graphs=False) +context.set_context(mode=context.GRAPH_MODE, device_target=device_target) context.set_fl_context(**ctx) # print(**ctx, flush=True) # context.set_context(mode=context.GRAPH_MODE, device_target="GPU", device_id=get_device_id()) diff --git a/tests/st/fl/cross_silo_femnist/test_cross_silo_femnist.py b/tests/st/fl/cross_silo_femnist/test_cross_silo_femnist.py index 67e66c14c46..c7981d3873e 100644 --- a/tests/st/fl/cross_silo_femnist/test_cross_silo_femnist.py +++ b/tests/st/fl/cross_silo_femnist/test_cross_silo_femnist.py @@ -114,7 +114,7 @@ ctx = { "encrypt_type": encrypt_type } -context.set_context(mode=context.GRAPH_MODE, device_target=device_target, save_graphs=False) +context.set_context(mode=context.GRAPH_MODE, device_target=device_target) context.set_fl_context(**ctx) diff --git a/tests/st/fl/cross_silo_lenet/test_cross_silo_lenet.py b/tests/st/fl/cross_silo_lenet/test_cross_silo_lenet.py index 96b22c9ccd3..f54f9776b02 100644 --- a/tests/st/fl/cross_silo_lenet/test_cross_silo_lenet.py +++ b/tests/st/fl/cross_silo_lenet/test_cross_silo_lenet.py @@ -95,7 +95,7 @@ ctx = { "encrypt_type": encrypt_type } -context.set_context(mode=context.GRAPH_MODE, device_target=device_target, save_graphs=False) +context.set_context(mode=context.GRAPH_MODE, device_target=device_target) context.set_fl_context(**ctx) if __name__ == "__main__": diff --git a/tests/st/fl/hybrid_lenet/test_hybrid_train_lenet.py b/tests/st/fl/hybrid_lenet/test_hybrid_train_lenet.py index 744c36efa61..93b4d3cb9c4 100644 --- a/tests/st/fl/hybrid_lenet/test_hybrid_train_lenet.py +++ b/tests/st/fl/hybrid_lenet/test_hybrid_train_lenet.py @@ -125,7 +125,7 @@ ctx = { "enable_ssl": enable_ssl } -context.set_context(mode=context.GRAPH_MODE, device_target=device_target, save_graphs=False) +context.set_context(mode=context.GRAPH_MODE, device_target=device_target) context.set_fl_context(**ctx) if __name__ == "__main__": diff --git a/tests/st/fl/mobile/test_mobile_lenet.py b/tests/st/fl/mobile/test_mobile_lenet.py index 42c9a6dc010..9a84c60f080 100644 --- a/tests/st/fl/mobile/test_mobile_lenet.py +++ b/tests/st/fl/mobile/test_mobile_lenet.py @@ -120,7 +120,7 @@ ctx = { "enable_ssl": 
enable_ssl } -context.set_context(mode=context.GRAPH_MODE, device_target=device_target, save_graphs=False) +context.set_context(mode=context.GRAPH_MODE, device_target=device_target) context.set_fl_context(**ctx) if __name__ == "__main__": diff --git a/tests/st/fusion/test_ub_fusion_matmul_confusion_transpose.py b/tests/st/fusion/test_ub_fusion_matmul_confusion_transpose.py index 18211d4ddf1..4d5173d873a 100644 --- a/tests/st/fusion/test_ub_fusion_matmul_confusion_transpose.py +++ b/tests/st/fusion/test_ub_fusion_matmul_confusion_transpose.py @@ -21,7 +21,6 @@ from mindspore import Tensor from mindspore.ops import operations as P context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") -context.set_context(save_graphs=True) class Net(nn.Cell): diff --git a/tests/st/fusion/test_unsorted_segment_sum_fission.py b/tests/st/fusion/test_unsorted_segment_sum_fission.py index 628403b76a4..6f364f855a9 100644 --- a/tests/st/fusion/test_unsorted_segment_sum_fission.py +++ b/tests/st/fusion/test_unsorted_segment_sum_fission.py @@ -21,7 +21,6 @@ from mindspore import Tensor from mindspore.ops import operations as P context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") -context.set_context(save_graphs=True) class Net(nn.Cell): diff --git a/tests/st/gnn/gcn/test_gcn.py b/tests/st/gnn/gcn/test_gcn.py index 4bcbf087395..ae8b39d4b63 100644 --- a/tests/st/gnn/gcn/test_gcn.py +++ b/tests/st/gnn/gcn/test_gcn.py @@ -38,8 +38,7 @@ SEED = 20 def test_gcn(): print("test_gcn begin") np.random.seed(SEED) - context.set_context(mode=context.GRAPH_MODE, - device_target="Ascend", save_graphs=False) + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") config = ConfigGCN() config.dropout = 0.0 adj, feature, label_onehot, _ = get_adj_features_labels(DATA_DIR) diff --git a/tests/st/heterogeneous_excutor/test_control.py b/tests/st/heterogeneous_excutor/test_control.py index 6ab7a244957..d6bc2575ca5 100644 --- a/tests/st/heterogeneous_excutor/test_control.py +++ b/tests/st/heterogeneous_excutor/test_control.py @@ -60,7 +60,6 @@ def test_net(): net1 = Net1() output1 = net1(Tensor(x), Tensor(y)) - context.set_context(save_graphs=True) net2 = Net2() output2 = net2(Tensor(x), Tensor(y)) assert np.allclose(output1[0].asnumpy(), output2[0].asnumpy()) diff --git a/tests/st/mix_precision/test_mix_precision.py b/tests/st/mix_precision/test_mix_precision.py index 9b02cc2fa5e..2114145da8b 100644 --- a/tests/st/mix_precision/test_mix_precision.py +++ b/tests/st/mix_precision/test_mix_precision.py @@ -30,7 +30,7 @@ from utils import allclose_nparray from utils import FakeDataInitMode from utils import find_newest_validateir_file from utils import clean_all_ir_files - +from tests.security_utils import security_off_wrap def read_validateir_file(path_folder): filename = find_newest_validateir_file(path_folder) @@ -109,6 +109,7 @@ def test_sit_auto_mix_precision_train_o3(): @pytest.mark.platform_arm_ascend_training @pytest.mark.platform_x86_ascend_training @pytest.mark.env_onecard +@security_off_wrap def test_sit_auto_mix_precision_model_o0(): input_data = np.random.randn(32, 3, 224, 224).astype(np.float32) dataset1 = FakeData(size=32, @@ -142,6 +143,7 @@ @pytest.mark.platform_x86_ascend_training @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard +@security_off_wrap def test_sit_auto_mix_precision_model_o2(): input_data = np.random.randn(32, 3, 224, 224).astype(np.float32) dataset1 = FakeData(size=32,
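The mix_precision hunks above show the other half of the isolation: tests that genuinely read dumped IR (via find_newest_validateir_file) keep that dependency but gain the security_off_wrap decorator, so they are skipped when dumping is unavailable. A minimal sketch of the convention, with a hypothetical test name and assumed decorator semantics:

    from tests.security_utils import security_off_wrap

    @security_off_wrap  # assumed to skip the test on security-hardened builds
    def test_checks_dumped_ir():  # hypothetical test, for illustration
        pass  # a real body would run a network, then inspect the newest validate IR file

diff --git 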
a/tests/st/model_zoo_tests/wide_and_deep/python_file_for_ci/train_and_test_multinpu_ci.py b/tests/st/model_zoo_tests/wide_and_deep/python_file_for_ci/train_and_test_multinpu_ci.py index 1f46eed9f1a..202475fc80c 100644 --- a/tests/st/model_zoo_tests/wide_and_deep/python_file_for_ci/train_and_test_multinpu_ci.py +++ b/tests/st/model_zoo_tests/wide_and_deep/python_file_for_ci/train_and_test_multinpu_ci.py @@ -30,7 +30,7 @@ from src.metrics import AUCMetric from src.config import WideDeepConfig sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) -context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=True) +context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") context.set_auto_parallel_context(parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL, gradients_mean=True) init() diff --git a/tests/st/model_zoo_tests/wide_and_deep/train_and_test_multinpu_ci_data_parallel.py b/tests/st/model_zoo_tests/wide_and_deep/train_and_test_multinpu_ci_data_parallel.py index 77d301f556d..09e1b5ea092 100644 --- a/tests/st/model_zoo_tests/wide_and_deep/train_and_test_multinpu_ci_data_parallel.py +++ b/tests/st/model_zoo_tests/wide_and_deep/train_and_test_multinpu_ci_data_parallel.py @@ -29,7 +29,7 @@ from src.metrics import AUCMetric from src.config import WideDeepConfig sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) -context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=True) +context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True) init() diff --git a/tests/st/networks/models/bert/bert_performance/test_bert_thor.py b/tests/st/networks/models/bert/bert_performance/test_bert_thor.py index ebbf4692045..902154a7fea 100644 --- a/tests/st/networks/models/bert/bert_performance/test_bert_thor.py +++ b/tests/st/networks/models/bert/bert_performance/test_bert_thor.py @@ -143,7 +143,7 @@ def _set_bert_all_reduce_split(): def train_process_bert_thor(q, device_id, epoch_size, device_num): os.system("mkdir " + str(device_id)) os.chdir(str(device_id)) - context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=device_id, save_graphs=False) + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=device_id) context.set_context(reserve_class_name_in_scope=False) context.set_context(max_call_depth=3000) os.environ['MINDSPORE_HCCL_CONFIG_PATH'] = MINDSPORE_HCCL_CONFIG_PATH diff --git a/tests/st/networks/models/resnet50/test_resnet50_imagenet.py b/tests/st/networks/models/resnet50/test_resnet50_imagenet.py index ca5f8553b95..80bdf9a7ef5 100644 --- a/tests/st/networks/models/resnet50/test_resnet50_imagenet.py +++ b/tests/st/networks/models/resnet50/test_resnet50_imagenet.py @@ -132,7 +132,7 @@ class LossGet(Callback): def train_process(q, device_id, epoch_size, device_num, enable_hccl): os.system("mkdir " + str(device_id)) os.chdir(str(device_id)) - context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False) + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") context.set_context(device_id=device_id) os.environ['MINDSPORE_HCCL_CONFIG_PATH'] = MINDSPORE_HCCL_CONFIG_PATH os.environ['RANK_ID'] = str(device_id) @@ -230,7 +230,7 @@ def train_process(q, device_id, epoch_size, device_num, enable_hccl): def train_process_thor(q, device_id, epoch_size, device_num, enable_hccl): os.system("mkdir " + str(device_id)) 
os.chdir(str(device_id)) - context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False) + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") context.set_context(device_id=device_id) os.environ['MINDSPORE_HCCL_CONFIG_PATH'] = MINDSPORE_HCCL_CONFIG_PATH_2 os.environ['RANK_ID'] = str(device_id - 4) @@ -313,7 +313,7 @@ def train_process_thor(q, device_id, epoch_size, device_num, enable_hccl): @pytest.mark.env_single def test_resnet_and_resnet_thor_imagenet_4p(): # reset context - context.set_context(save_graphs=False, enable_graph_kernel=False, enable_sparse=False) + context.set_context(enable_graph_kernel=False, enable_sparse=False) context.reset_auto_parallel_context() context.reset_ps_context() diff --git a/tests/st/ops/ascend/test_ops_infer.py b/tests/st/ops/ascend/test_ops_infer.py index 48aa79e1096..2a7646ce942 100644 --- a/tests/st/ops/ascend/test_ops_infer.py +++ b/tests/st/ops/ascend/test_ops_infer.py @@ -21,7 +21,7 @@ from mindspore import Tensor from mindspore.ops import operations as P from mindspore import context -context.set_context(mode=context.GRAPH_MODE, save_graphs=True) +context.set_context(mode=context.GRAPH_MODE) def test_cast_op_attr(): diff --git a/tests/st/ops/cpu/test_cache_ops.py b/tests/st/ops/cpu/test_cache_ops.py index 82879fc9489..b81c86c2675 100644 --- a/tests/st/ops/cpu/test_cache_ops.py +++ b/tests/st/ops/cpu/test_cache_ops.py @@ -21,8 +21,7 @@ from mindspore import Tensor from mindspore import Parameter from mindspore.ops import operations as P -context.set_context(mode=context.GRAPH_MODE, - device_target='CPU', save_graphs=True) +context.set_context(mode=context.GRAPH_MODE, device_target='CPU') class UpdateCacheNet(nn.Cell): diff --git a/tests/st/ops/cpu/test_l1_regularizer_op.py b/tests/st/ops/cpu/test_l1_regularizer_op.py index 2345b28619e..477c537ad0a 100644 --- a/tests/st/ops/cpu/test_l1_regularizer_op.py +++ b/tests/st/ops/cpu/test_l1_regularizer_op.py @@ -19,7 +19,7 @@ import mindspore.nn as nn import mindspore.context as context from mindspore import Tensor, ms_function -context.set_context(mode=context.GRAPH_MODE, save_graphs=True) +context.set_context(mode=context.GRAPH_MODE) class Net_l1_regularizer(nn.Cell): diff --git a/tests/st/ops/cpu/test_masked_select_op.py b/tests/st/ops/cpu/test_masked_select_op.py index 7cf7e1851b4..16679835118 100644 --- a/tests/st/ops/cpu/test_masked_select_op.py +++ b/tests/st/ops/cpu/test_masked_select_op.py @@ -32,7 +32,7 @@ def maskedselect(): @pytest.mark.platform_x86_cpu @pytest.mark.env_onecard def test_maskedselect(): - context.set_context(mode=context.GRAPH_MODE, device_target="CPU", save_graphs=True) + context.set_context(mode=context.GRAPH_MODE, device_target="CPU") y = maskedselect() expect = [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4] assert (y.asnumpy() == expect).all() @@ -68,7 +68,7 @@ def masked_select_grad(): @pytest.mark.platform_x86_cpu @pytest.mark.env_onecard def test_masked_select_grad(): - context.set_context(mode=context.GRAPH_MODE, device_target="CPU", save_graphs=True) + context.set_context(mode=context.GRAPH_MODE, device_target="CPU") dx = masked_select_grad() expect = [4, 6, 8, 10] assert (dx.asnumpy() == expect).all() diff --git a/tests/st/ops/cpu/test_scatter_nd_update_op.py b/tests/st/ops/cpu/test_scatter_nd_update_op.py index 4158092abd9..588c40c1986 100644 --- a/tests/st/ops/cpu/test_scatter_nd_update_op.py +++ b/tests/st/ops/cpu/test_scatter_nd_update_op.py @@ -23,7 +23,7 @@ from mindspore import Parameter from mindspore.common import 
dtype as mstype from mindspore.ops import operations as P -context.set_context(mode=context.GRAPH_MODE, device_target='CPU', save_graphs=False) +context.set_context(mode=context.GRAPH_MODE, device_target='CPU') @pytest.mark.level0 diff --git a/tests/st/ops/cpu/test_smoothl1loss_op.py b/tests/st/ops/cpu/test_smoothl1loss_op.py index 3c6c0f70c35..43c3fbd696f 100644 --- a/tests/st/ops/cpu/test_smoothl1loss_op.py +++ b/tests/st/ops/cpu/test_smoothl1loss_op.py @@ -33,7 +33,7 @@ def smoothl1loss(beta): @pytest.mark.platform_x86_cpu @pytest.mark.env_onecard def test_smoothl1loss(): - context.set_context(mode=context.GRAPH_MODE, device_target="CPU", save_graphs=True) + context.set_context(mode=context.GRAPH_MODE, device_target="CPU") epsilon = 1e-6 @@ -81,7 +81,7 @@ def smoothl1loss_grad(beta): @pytest.mark.platform_x86_cpu @pytest.mark.env_onecard def test_smoothl1loss_grad(): - context.set_context(mode=context.GRAPH_MODE, device_target="CPU", save_graphs=True) + context.set_context(mode=context.GRAPH_MODE, device_target="CPU") epsilon = 1e-6 diff --git a/tests/st/ops/gpu/test_adam_fusion.py b/tests/st/ops/gpu/test_adam_fusion.py index 32154afba4b..f98465b8b00 100644 --- a/tests/st/ops/gpu/test_adam_fusion.py +++ b/tests/st/ops/gpu/test_adam_fusion.py @@ -25,7 +25,7 @@ from mindspore.ops import functional as F from mindspore.common import dtype as mstype from mindspore.common.parameter import Parameter -context.set_context(mode=context.GRAPH_MODE, device_target="GPU", save_graphs=True) +context.set_context(mode=context.GRAPH_MODE, device_target="GPU") class Net(nn.Cell): diff --git a/tests/st/ops/gpu/test_maximum_op.py b/tests/st/ops/gpu/test_maximum_op.py index 4e009dae430..55874b52b5c 100644 --- a/tests/st/ops/gpu/test_maximum_op.py +++ b/tests/st/ops/gpu/test_maximum_op.py @@ -71,7 +71,7 @@ def test_maximum(): @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_broadcast(): - context.set_context(mode=context.GRAPH_MODE, save_graphs=True, device_target='GPU') + context.set_context(mode=context.GRAPH_MODE, device_target='GPU') x1_np = np.array([[[[0.659578], [0.49113268], @@ -195,7 +195,7 @@ def test_broadcast(): @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_broadcast_diff_dims(): - context.set_context(mode=context.GRAPH_MODE, save_graphs=True, device_target='GPU') + context.set_context(mode=context.GRAPH_MODE, device_target='GPU') x1_np = np.array([[[0.275478, 0.48933202, 0.71846116], [0.9803821, 0.57205725, 0.28511533]], diff --git a/tests/st/ops/gpu/test_minimum_op.py b/tests/st/ops/gpu/test_minimum_op.py index 78198db45a9..17546cfc395 100644 --- a/tests/st/ops/gpu/test_minimum_op.py +++ b/tests/st/ops/gpu/test_minimum_op.py @@ -48,7 +48,7 @@ class Grad(Cell): @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_nobroadcast(): - context.set_context(mode=context.GRAPH_MODE, save_graphs=True, device_target='GPU') + context.set_context(mode=context.GRAPH_MODE, device_target='GPU') x1_np = np.random.rand(3, 4).astype(np.float32) x2_np = np.random.rand(3, 4).astype(np.float32) @@ -66,7 +66,7 @@ def test_nobroadcast(): @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_broadcast(): - context.set_context(mode=context.GRAPH_MODE, save_graphs=True, device_target='GPU') + context.set_context(mode=context.GRAPH_MODE, device_target='GPU') x1_np = np.array([[[[0.659578], [0.49113268], @@ -191,7 +191,7 @@ def test_broadcast(): @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def 
test_broadcast_diff_dims(): - context.set_context(mode=context.GRAPH_MODE, save_graphs=True, device_target='GPU') + context.set_context(mode=context.GRAPH_MODE, device_target='GPU') x1_np = np.array([[[0.275478, 0.48933202, 0.71846116], [0.9803821, 0.57205725, 0.28511533]], @@ -224,7 +224,7 @@ def test_broadcast_diff_dims(): @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_broadcast_int32(): - context.set_context(mode=context.GRAPH_MODE, save_graphs=True, device_target='GPU') + context.set_context(mode=context.GRAPH_MODE, device_target='GPU') x1_np = np.random.rand(3, 4).astype(np.int32) x2_np = np.random.rand(3, 4).astype(np.int32) diff --git a/tests/st/ops/gpu/test_relu_v2.py b/tests/st/ops/gpu/test_relu_v2.py index ac279dc2d23..f000531afa2 100644 --- a/tests/st/ops/gpu/test_relu_v2.py +++ b/tests/st/ops/gpu/test_relu_v2.py @@ -38,7 +38,7 @@ class ReluNet(nn.Cell): @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_ReluV2(): - context.set_context(mode=context.GRAPH_MODE, device_target="GPU", save_graphs=True) + context.set_context(mode=context.GRAPH_MODE, device_target="GPU") x = Tensor(np.array([[[[-1, 1, 10], [1, -1, 1], @@ -77,7 +77,7 @@ class AddReluNet(nn.Cell): @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_AddRelu(): - context.set_context(mode=context.GRAPH_MODE, device_target="GPU", save_graphs=True) + context.set_context(mode=context.GRAPH_MODE, device_target="GPU") x1 = Tensor(np.array([[[[-1, 1, 10], [1, -1, 1], @@ -118,7 +118,7 @@ class AddReluGradNet(nn.Cell): @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_AddReluGrad(): - context.set_context(mode=context.GRAPH_MODE, device_target="GPU", save_graphs=True) + context.set_context(mode=context.GRAPH_MODE, device_target="GPU") x = Tensor(np.array([[[[-1, 1, 10], [1, -1, 1], diff --git a/tests/st/ops/gpu/test_smoothl1loss_op.py b/tests/st/ops/gpu/test_smoothl1loss_op.py index 4145f5e971d..f4ef2851f5a 100644 --- a/tests/st/ops/gpu/test_smoothl1loss_op.py +++ b/tests/st/ops/gpu/test_smoothl1loss_op.py @@ -33,7 +33,7 @@ def smoothl1loss(beta): @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_smoothl1loss(): - context.set_context(mode=context.GRAPH_MODE, device_target="GPU", save_graphs=True) + context.set_context(mode=context.GRAPH_MODE, device_target="GPU") epsilon = 1e-6 @@ -81,7 +81,7 @@ def smoothl1loss_grad(beta): @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_smoothl1loss_grad(): - context.set_context(mode=context.GRAPH_MODE, device_target="GPU", save_graphs=True) + context.set_context(mode=context.GRAPH_MODE, device_target="GPU") epsilon = 1e-6 diff --git a/tests/st/ops/graph_kernel/test_cast_matmul_fusion.py b/tests/st/ops/graph_kernel/test_cast_matmul_fusion.py index 6612f2b094c..d4b24346b5f 100644 --- a/tests/st/ops/graph_kernel/test_cast_matmul_fusion.py +++ b/tests/st/ops/graph_kernel/test_cast_matmul_fusion.py @@ -36,7 +36,7 @@ class Net(Cell): def get_output(i0, i1, i2, enable_graph_kernel=False): if enable_graph_kernel: - context.set_context(enable_graph_kernel=True, save_graphs=False) + context.set_context(enable_graph_kernel=True) net = Net() output = net(i0, i1, i2) return output diff --git a/tests/st/probability/bnn_layers/test_bnn_layer.py b/tests/st/probability/bnn_layers/test_bnn_layer.py index 5be5100f881..bf4f4895ff4 100644 --- a/tests/st/probability/bnn_layers/test_bnn_layer.py +++ b/tests/st/probability/bnn_layers/test_bnn_layer.py @@ -24,7 +24,7 @@ import 
mindspore.ops as ops from mindspore import context from dataset import create_dataset -context.set_context(mode=context.GRAPH_MODE, save_graphs=False, device_target="GPU") +context.set_context(mode=context.GRAPH_MODE, device_target="GPU") def conv(in_channels, out_channels, kernel_size, stride=1, padding=0): diff --git a/tests/st/probability/dpn/test_gpu_svi_cvae.py b/tests/st/probability/dpn/test_gpu_svi_cvae.py index 1c08a26af96..de232269668 100644 --- a/tests/st/probability/dpn/test_gpu_svi_cvae.py +++ b/tests/st/probability/dpn/test_gpu_svi_cvae.py @@ -23,7 +23,7 @@ import mindspore.ops as ops from mindspore.nn.probability.dpn import ConditionalVAE from mindspore.nn.probability.infer import ELBO, SVI -context.set_context(mode=context.GRAPH_MODE, save_graphs=False, device_target="GPU") +context.set_context(mode=context.GRAPH_MODE, device_target="GPU") IMAGE_SHAPE = (-1, 1, 32, 32) image_path = os.path.join('/home/workspace/mindspore_dataset/mnist', "train") diff --git a/tests/st/probability/dpn/test_gpu_svi_vae.py b/tests/st/probability/dpn/test_gpu_svi_vae.py index 6a84bed90e6..47a9b7c8ad5 100644 --- a/tests/st/probability/dpn/test_gpu_svi_vae.py +++ b/tests/st/probability/dpn/test_gpu_svi_vae.py @@ -23,7 +23,7 @@ import mindspore.ops as ops from mindspore.nn.probability.dpn import VAE from mindspore.nn.probability.infer import ELBO, SVI -context.set_context(mode=context.GRAPH_MODE, save_graphs=False, device_target="GPU") +context.set_context(mode=context.GRAPH_MODE, device_target="GPU") IMAGE_SHAPE = (-1, 1, 32, 32) image_path = os.path.join('/home/workspace/mindspore_dataset/mnist', "train") diff --git a/tests/st/probability/dpn/test_gpu_vae_gan.py b/tests/st/probability/dpn/test_gpu_vae_gan.py index a606bd12ad7..ab65aefa1d1 100644 --- a/tests/st/probability/dpn/test_gpu_vae_gan.py +++ b/tests/st/probability/dpn/test_gpu_vae_gan.py @@ -25,7 +25,7 @@ import mindspore.ops as ops from mindspore.nn.probability.dpn import VAE from mindspore.nn.probability.infer import ELBO, SVI -context.set_context(mode=context.GRAPH_MODE, save_graphs=False, device_target="GPU") +context.set_context(mode=context.GRAPH_MODE, device_target="GPU") IMAGE_SHAPE = (-1, 1, 32, 32) image_path = os.path.join('/home/workspace/mindspore_dataset/mnist', "train") diff --git a/tests/st/probability/transforms/test_transform_bnn_layer.py b/tests/st/probability/transforms/test_transform_bnn_layer.py index 35c0e9d599e..b44e4760fad 100644 --- a/tests/st/probability/transforms/test_transform_bnn_layer.py +++ b/tests/st/probability/transforms/test_transform_bnn_layer.py @@ -25,7 +25,7 @@ from mindspore import context from dataset import create_dataset -context.set_context(mode=context.GRAPH_MODE, save_graphs=False, device_target="GPU") +context.set_context(mode=context.GRAPH_MODE, device_target="GPU") def conv(in_channels, out_channels, kernel_size, stride=1, padding=0): diff --git a/tests/st/probability/transforms/test_transform_bnn_model.py b/tests/st/probability/transforms/test_transform_bnn_model.py index b03cf173e0f..dadfdc49fcf 100644 --- a/tests/st/probability/transforms/test_transform_bnn_model.py +++ b/tests/st/probability/transforms/test_transform_bnn_model.py @@ -24,7 +24,7 @@ from mindspore import context from dataset import create_dataset -context.set_context(mode=context.GRAPH_MODE, save_graphs=False, device_target="GPU") +context.set_context(mode=context.GRAPH_MODE, device_target="GPU") def conv(in_channels, out_channels, kernel_size, stride=1, padding=0): diff --git 
a/tests/st/tbe_networks/test_resnet_cifar_8p.py b/tests/st/tbe_networks/test_resnet_cifar_8p.py index 410bda1571f..66298e5be31 100644 --- a/tests/st/tbe_networks/test_resnet_cifar_8p.py +++ b/tests/st/tbe_networks/test_resnet_cifar_8p.py @@ -145,8 +145,7 @@ class LossGet(Callback): def train_process(q, device_id, epoch_size, num_classes, device_num, batch_size, enable_hccl): os.system("mkdir " + str(device_id)) os.chdir(str(device_id)) - context.set_context(mode=context.GRAPH_MODE, - device_target="Ascend", save_graphs=False) + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") context.set_context(device_id=device_id) os.environ['MINDSPORE_HCCL_CONFIG_PATH'] = MINDSPORE_HCCL_CONFIG_PATH os.environ['RANK_ID'] = str(device_id) diff --git a/tests/syntax/simple_expression/test_assign.py b/tests/syntax/simple_expression/test_assign.py index 3eb21147729..a2c7164f4c4 100644 --- a/tests/syntax/simple_expression/test_assign.py +++ b/tests/syntax/simple_expression/test_assign.py @@ -19,7 +19,7 @@ import mindspore from mindspore import context from mindspore.common.tensor import Tensor -context.set_context(mode=context.GRAPH_MODE, save_graphs=True, save_graphs_path="graph_paths") +context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): diff --git a/tests/syntax/simple_expression/test_call.py b/tests/syntax/simple_expression/test_call.py index f84f37bb30a..59ae63f8b1f 100644 --- a/tests/syntax/simple_expression/test_call.py +++ b/tests/syntax/simple_expression/test_call.py @@ -22,7 +22,7 @@ from mindspore import context from mindspore.common.tensor import Tensor from mindspore.ops import operations as P -context.set_context(mode=context.GRAPH_MODE, save_graphs=True, save_graphs_path="graph_paths") +context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): diff --git a/tests/syntax/simple_expression/test_compare.py b/tests/syntax/simple_expression/test_compare.py index 3a957d6d9c3..8c18a431997 100644 --- a/tests/syntax/simple_expression/test_compare.py +++ b/tests/syntax/simple_expression/test_compare.py @@ -19,7 +19,7 @@ import mindspore from mindspore import context from mindspore.common.tensor import Tensor -context.set_context(mode=context.GRAPH_MODE, save_graphs=True, save_graphs_path="graph_paths") +context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): diff --git a/tests/syntax/simple_expression/test_invert.py b/tests/syntax/simple_expression/test_invert.py index 78166c9e49b..c9839124a61 100644 --- a/tests/syntax/simple_expression/test_invert.py +++ b/tests/syntax/simple_expression/test_invert.py @@ -17,7 +17,7 @@ import mindspore.nn as nn from mindspore import context -context.set_context(mode=context.GRAPH_MODE, save_graphs=True, save_graphs_path="graph_paths") +context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): diff --git a/tests/syntax/simple_expression/test_parse_exception.py b/tests/syntax/simple_expression/test_parse_exception.py index bdfd60c1538..71ab861714e 100644 --- a/tests/syntax/simple_expression/test_parse_exception.py +++ b/tests/syntax/simple_expression/test_parse_exception.py @@ -7,7 +7,7 @@ from mindspore.common.tensor import Tensor import mindspore.ops as ops import mindspore -context.set_context(mode=context.GRAPH_MODE, save_graphs=True, save_graphs_path="graph_path") +context.set_context(mode=context.GRAPH_MODE) class TestNoReturn(nn.Cell): diff --git a/tests/ut/cpp/ir/clone_test.cc b/tests/ut/cpp/ir/clone_test.cc index f2facabf6a2..269bb5abc89 100644 --- a/tests/ut/cpp/ir/clone_test.cc +++ 
b/tests/ut/cpp/ir/clone_test.cc @@ -113,7 +113,6 @@ TEST_F(TestCloner, test_clone_closure) { Cloner cl(gs, true); auto g_clone = cl[g]; - draw::Draw("test_clone_closure_g_clone.dot", g_clone); FuncGraphIndex idx2(g_clone, DeepLinkedGraphSearch); std::string name_list = "xy"; @@ -130,10 +129,8 @@ TEST_F(TestCloner, test_clone_lifting) { // parse ast to graph FuncGraphPtr parsed_f = getPyFun(py_code); - draw::Draw("test_clone_before_lifting.dot", parsed_f); auto g_lifting = LiftingClone(parsed_f); - draw::Draw("test_clone_after_lifting.dot", g_lifting); FuncGraphIndex idx(g_lifting); auto g = idx.GetFirstFuncGraph("j"); diff --git a/tests/ut/cpp/operator/grad_implementations_test.cc b/tests/ut/cpp/operator/grad_implementations_test.cc index 9d37d6c4744..39e999366d0 100644 --- a/tests/ut/cpp/operator/grad_implementations_test.cc +++ b/tests/ut/cpp/operator/grad_implementations_test.cc @@ -35,7 +35,6 @@ class TestGradImplementations : public UT::Common { TEST_F(TestGradImplementations, TestGetAugmentedGraph) { FuncGraphPtr fg = ad::g_k_prims.KPrimitive(nullptr, NewValueNode(kPrimScalarMul), nullptr); ASSERT_TRUE(fg != nullptr); - draw::Draw("gradImpl_TestGetAugmentedFuncGraph.dot", fg); auto fg1 = ad::g_k_prims.KPrimitive(nullptr, NewValueNode(kPrimScalarMul), nullptr); diff --git a/tests/ut/cpp/optimizer/ad/kpynative_test.cc b/tests/ut/cpp/optimizer/ad/kpynative_test.cc index 2ffb0ef5dea..21919db3a23 100644 --- a/tests/ut/cpp/optimizer/ad/kpynative_test.cc +++ b/tests/ut/cpp/optimizer/ad/kpynative_test.cc @@ -106,23 +106,17 @@ class TestKPynative : public UT::Common { TEST_F(TestKPynative, test_simple_add) { auto primal_fg = BuildPrimalFuncGraph("test_simple_add"); resource->manager()->KeepRoots({primal_fg}); - ExportIR(primal_fg->ToString() + ".dat", primal_fg); auto bprop_fg = BuildBpropFuncGraph(primal_fg); resource->manager()->KeepRoots({bprop_fg}); - - ExportIR(bprop_fg->ToString() + ".dat", bprop_fg); } TEST_F(TestKPynative, test_stop_gradient) { auto primal_fg = BuildStopGradient("test_stop_gradient"); resource->manager()->KeepRoots({primal_fg}); - ExportIR(primal_fg->ToString() + ".dat", primal_fg); auto bprop_fg = BuildBpropFuncGraph(primal_fg); resource->manager()->KeepRoots({bprop_fg}); - - ExportIR(bprop_fg->ToString() + ".dat", bprop_fg); } } // namespace ad } // namespace mindspore diff --git a/tests/ut/cpp/optimizer/clean_test.cc b/tests/ut/cpp/optimizer/clean_test.cc index c944cd09ba1..545d1e4d398 100644 --- a/tests/ut/cpp/optimizer/clean_test.cc +++ b/tests/ut/cpp/optimizer/clean_test.cc @@ -114,14 +114,8 @@ TEST_F(TestClean, TestEraseClassGetAttr) { ASSERT_EQ(dataclass_count, 1); - // draw func_graph before erase class - draw::Draw("opt_before_erase_class.dot", func_graph); - SimplifyDataStructures(func_graph, manager); - // draw func_graph after erase class - draw::Draw("opt_after_erase_class.dot", func_graph); - int tuple_getitem_count = 0; for (auto node : manager->all_nodes()) { @@ -166,11 +160,7 @@ TEST_F(TestClean, TestEraseClassMakeRecord) { auto manager = Manage(func_graph); - draw::Draw("opt_erase_class_record_before.dot", func_graph); - SimplifyDataStructures(func_graph, manager); - - draw::Draw("opt_erase_class_record_after.dot", func_graph); } TEST_F(TestClean, TestEraseClassPartial) { @@ -207,18 +197,13 @@ TEST_F(TestClean, TestEraseClassPartial) { func_graph->add_parameter(para1); auto manager = Manage(func_graph); - - draw::Draw("opt_erase_class_partial_before.dot", func_graph); SimplifyDataStructures(func_graph, manager); - 
draw::Draw("opt_erase_class_partial_after.dot", func_graph); } TEST_F(TestClean, TestEraseTuple) { ASSERT_TRUE(nullptr != me_graph); std::shared_ptr manager = Manage(me_graph); - draw::Draw("opt_before_erase_tuple.dot", me_graph); - int abstract_tuple_count = 0; for (auto node : manager->all_nodes()) { @@ -241,8 +226,6 @@ TEST_F(TestClean, TestEraseTuple) { } ASSERT_EQ(abstract_tuple_count, 3); - - draw::Draw("opt_after_erase_tuple.dot", me_graph); } } // namespace opt diff --git a/tests/ut/cpp/optimizer/lib_test.cc b/tests/ut/cpp/optimizer/lib_test.cc index 8fb86f3eacf..f963e496da5 100644 --- a/tests/ut/cpp/optimizer/lib_test.cc +++ b/tests/ut/cpp/optimizer/lib_test.cc @@ -65,11 +65,6 @@ class TestOptLib : public UT::Common { FuncGraphPtr gbefore_clone = BasicClone(gbefore); OptimizerPtr optimizer = std::make_shared("ut_test", std::make_shared()); transform(gbefore_clone, optimizer); - if (save_graphs) { - draw::Draw("before.dot", gbefore); - draw::Draw("after.dot", gbefore_clone); - draw::Draw("expected.dot", gafter); - } return Isomorphic(gbefore_clone, gafter, &equiv_graph, &equiv_node); } bool CheckOpt(FuncGraphPtr before, FuncGraphPtr after, std::vector opts = {}, @@ -563,6 +558,7 @@ TEST_F(TestOptLib, test_reducesum_one) { ASSERT_TRUE(CheckOpt(before4, after3, patterns)); } +#ifndef ENABLE_SECURITY TEST_F(TestOptLib, test_print_tuple_wrapper) { FuncGraphPtr before1 = getPyFun.CallAndParseRet("test_print_tuple_wrapper", "before1"); FuncGraphPtr before2 = getPyFun.CallAndParseRet("test_print_tuple_wrapper", "before2"); @@ -574,6 +570,7 @@ TEST_F(TestOptLib, test_print_tuple_wrapper) { ASSERT_TRUE(CheckOpt(before2, after2, patterns)); ASSERT_TRUE(CheckOpt(before3, before3, patterns)); } +#endif TEST_F(TestOptLib, test_constant_duplicate_mul) { FuncGraphPtr beforell = getPyFun.CallAndParseRet("test_constant_duplicate_mul", "beforell"); diff --git a/tests/ut/cpp/optimizer/opt_test.cc b/tests/ut/cpp/optimizer/opt_test.cc index 4ab829dbd86..c6414ac3c50 100644 --- a/tests/ut/cpp/optimizer/opt_test.cc +++ b/tests/ut/cpp/optimizer/opt_test.cc @@ -185,7 +185,6 @@ TEST_F(TestOptOpt, CSE) { // add func_graph the GraphManager FuncGraphManagerPtr manager1 = Manage(test_graph1); - draw::Draw("opt_cse_before_1.dot", test_graph1); ASSERT_EQ(manager1->all_nodes().size(), 9); @@ -196,20 +195,16 @@ TEST_F(TestOptOpt, CSE) { ASSERT_TRUE(is_changed); ASSERT_EQ(manager1->all_nodes().size(), 8); - draw::Draw("opt_cse_after_1.dot", test_graph1); - // test a more complicated case test_f2 FuncGraphPtr test_graph2 = getPyFun.CallAndParseRet("test_cse", "test_f2"); ASSERT_TRUE(nullptr != test_graph2); FuncGraphManagerPtr manager2 = Manage(test_graph2); - draw::Draw("opt_cse_before_2.dot", test_graph2); ASSERT_EQ(manager2->all_nodes().size(), 16); is_changed = cse->Cse(test_graph2, manager2); ASSERT_TRUE(is_changed); ASSERT_EQ(manager2->all_nodes().size(), 12); - draw::Draw("opt_cse_after_2.dot", test_graph2); } } // namespace opt diff --git a/tests/ut/cpp/optimizer/optimizer_test.cc b/tests/ut/cpp/optimizer/optimizer_test.cc index a5e119cc27b..ead50d58095 100644 --- a/tests/ut/cpp/optimizer/optimizer_test.cc +++ b/tests/ut/cpp/optimizer/optimizer_test.cc @@ -60,9 +60,6 @@ TEST_F(TestOptOptimizer, test_step_opt) { EXPECT_TRUE(optimizer.get() != nullptr); auto after = optimizer->step(before); - - draw::Draw("optimizer_test_expendJ_before.dot", before); - draw::Draw("optimizer_test_expendJ_after.dot", after); } } // namespace opt diff --git a/tests/ut/cpp/parallel/step_parallel_test.cc 
b/tests/ut/cpp/parallel/step_parallel_test.cc index 1e12153a8e4..73bf4e2ba37 100644 --- a/tests/ut/cpp/parallel/step_parallel_test.cc +++ b/tests/ut/cpp/parallel/step_parallel_test.cc @@ -416,7 +416,6 @@ TEST_F(TestStepParallel, ForwardCommunication1) { PrimitivePtr prim = cnode->input(0)->cast()->value()->cast(); if (prim->name() == "MatMul") { ForwardCommunication(op_list, cnode); - draw::Draw("forwardcommunication.dot", func_graph); } } AnfNodeSet after_nodes = manager->all_nodes(); diff --git a/tests/ut/cpp/pipeline/parse/parser_abnormal_test.cc b/tests/ut/cpp/pipeline/parse/parser_abnormal_test.cc index 2d21b591ea3..33f824ac7c9 100644 --- a/tests/ut/cpp/pipeline/parse/parser_abnormal_test.cc +++ b/tests/ut/cpp/pipeline/parse/parser_abnormal_test.cc @@ -50,14 +50,6 @@ TEST_F(TestParserAbnormal, TestParseRecursion) { bool ret_ = ResolveAll(manager); ASSERT_TRUE(ret_); - - // draw graph - int i = 0; - for (auto tmp : manager->func_graphs()) { - std::string name = "ut_parser_recursion_" + std::to_string(i) + ".dot"; - draw::Draw(name, tmp); - i++; - } } int test_performance(int x) { return x; } @@ -98,14 +90,6 @@ TEST_F(TestParserAbnormal, TestParseExprStatement) { ASSERT_TRUE(ret_); - // draw func graph - int i = 0; - for (auto tmp : manager->func_graphs()) { - std::string name = "ut_parser_ExprStatement_" + std::to_string(i) + ".dot"; - draw::Draw(name, tmp); - i++; - } - // check the 'append' node bool is_append_node = false; int count = 0; diff --git a/tests/ut/cpp/pipeline/parse/parser_class_test.cc b/tests/ut/cpp/pipeline/parse/parser_class_test.cc index 8d9cc8ebc8a..99fe9843526 100644 --- a/tests/ut/cpp/pipeline/parse/parser_class_test.cc +++ b/tests/ut/cpp/pipeline/parse/parser_class_test.cc @@ -74,14 +74,6 @@ TEST_F(TestParserClass, TestParseDataClassApi) { FuncGraphPtr graph_inf = ParsePythonCode(inf_method); ASSERT_TRUE(nullptr != graph_inf); manager->AddFuncGraph(graph_inf); - - // draw graph - int i = 0; - for (auto tmp : manager->func_graphs()) { - std::string name = "ut_parser_class_" + std::to_string(i) + ".dot"; - draw::Draw(name, tmp); - i++; - } } /* # skip ut test cases temporarily @@ -91,7 +83,6 @@ TEST_F(TestParserClass, TestParseMethod) { Parser::InitParserEnvironment(obj_); FuncGraphPtr func_graph = ParsePythonCode(obj_); ASSERT_TRUE(nullptr != func_graph); - draw::Draw("ut_parser_method_x.dot", func_graph); // save the func_graph to manager std::shared_ptr manager = Manage(func_graph); @@ -100,14 +91,6 @@ TEST_F(TestParserClass, TestParseMethod) { bool ret_ = ResolveAll(manager); ASSERT_TRUE(ret_); - - // draw graph - int i = 0; - for (auto tmp : manager->func_graphs()) { - std::string name = "ut_parser_method_" + std::to_string(i) + ".dot"; - draw::Draw(name, tmp); - i++; - } } // Test case 3: common test for debug ptest case diff --git a/tests/ut/cpp/pipeline/parse/parser_integrate_test.cc b/tests/ut/cpp/pipeline/parse/parser_integrate_test.cc index 1f54298a811..8ac9c8c46ff 100644 --- a/tests/ut/cpp/pipeline/parse/parser_integrate_test.cc +++ b/tests/ut/cpp/pipeline/parse/parser_integrate_test.cc @@ -82,7 +82,6 @@ TEST_F(TestParserIntegrate, TestParseGraphTestNone) { TEST_F(TestParserIntegrate, TestParseGraphResolveGetAttr) { getPyFun.SetDoResolve(true); auto func_graph = getPyFun("test_get_attr"); - draw::Draw("getattr.dot", func_graph); ASSERT_TRUE(func_graph != nullptr); } diff --git a/tests/ut/cpp/pipeline/parse/parser_primitive_test.cc b/tests/ut/cpp/pipeline/parse/parser_primitive_test.cc index 937ad1fe5eb..c8d3f887e9a 100644 --- 
a/tests/ut/cpp/pipeline/parse/parser_primitive_test.cc +++ b/tests/ut/cpp/pipeline/parse/parser_primitive_test.cc @@ -49,14 +49,6 @@ TEST_F(TestParserPrimitive, TestParserOpsMethod1) { bool ret_ = ResolveAll(manager); ASSERT_TRUE(ret_); - - // draw graph - int i = 0; - for (auto tmp : manager->func_graphs()) { - std::string name = "ut_parser_ops_1_" + std::to_string(i) + ".dot"; - draw::Draw(name, tmp); - i++; - } } TEST_F(TestParserPrimitive, TestParserOpsMethod2) { @@ -72,14 +64,6 @@ TEST_F(TestParserPrimitive, TestParserOpsMethod2) { bool ret_ = ResolveAll(manager); ASSERT_TRUE(ret_); - - // draw graph - int i = 0; - for (auto tmp : manager->func_graphs()) { - std::string name = "ut_parser_ops_2_" + std::to_string(i) + ".dot"; - draw::Draw(name, tmp); - i++; - } } // Test primitive class obj @@ -89,7 +73,6 @@ TEST_F(TestParserPrimitive, TestParsePrimitive) { Parser::InitParserEnvironment(obj_); FuncGraphPtr func_graph = ParsePythonCode(obj_); ASSERT_TRUE(nullptr != func_graph); - draw::Draw("ut_parser_primitive_x.dot", func_graph); // save the func_graph to manager std::shared_ptr manager = Manage(func_graph); @@ -98,14 +81,6 @@ TEST_F(TestParserPrimitive, TestParsePrimitive) { bool ret_ = ResolveAll(manager); ASSERT_TRUE(ret_); - - // draw graph - int i = 0; - for (auto tmp : manager->func_graphs()) { - std::string name = "ut_parser_ops_3_" + std::to_string(i) + ".dot"; - draw::Draw(name, tmp); - i++; - } #endif } @@ -116,7 +91,6 @@ TEST_F(TestParserPrimitive, TestParsePrimitiveParmeter) { Parser::InitParserEnvironment(obj_); FuncGraphPtr func_graph = ParsePythonCode(obj_); ASSERT_TRUE(nullptr != func_graph); - draw::Draw("ut_parser_primitive_x.dot", func_graph); // save the func_graph to manager std::shared_ptr manager = Manage(func_graph); @@ -125,14 +99,6 @@ TEST_F(TestParserPrimitive, TestParsePrimitiveParmeter) { bool ret_ = ResolveAll(manager); ASSERT_TRUE(ret_); - - // draw graph - int i = 0; - for (auto tmp : manager->func_graphs()) { - std::string name = "ut_parser_ops_4_" + std::to_string(i) + ".dot"; - draw::Draw(name, tmp); - i++; - } } TEST_F(TestParserPrimitive, TestParsePrimitiveParmeter2) { @@ -140,7 +106,6 @@ TEST_F(TestParserPrimitive, TestParsePrimitiveParmeter2) { Parser::InitParserEnvironment(obj_); FuncGraphPtr func_graph = ParsePythonCode(obj_); ASSERT_TRUE(nullptr != func_graph); - draw::Draw("ut_parser_primitive_x.dot", func_graph); // save the func_graph to manager std::shared_ptr manager = Manage(func_graph); @@ -149,14 +114,6 @@ TEST_F(TestParserPrimitive, TestParsePrimitiveParmeter2) { bool ret_ = ResolveAll(manager); ASSERT_TRUE(ret_); - - // draw graph - int i = 0; - for (auto tmp : manager->func_graphs()) { - std::string name = "ut_parser_ops_5_" + std::to_string(i) + ".dot"; - draw::Draw(name, tmp); - i++; - } } */ diff --git a/tests/ut/cpp/pipeline/parse/parser_test.cc b/tests/ut/cpp/pipeline/parse/parser_test.cc index c365affc9b5..7de484824af 100644 --- a/tests/ut/cpp/pipeline/parse/parser_test.cc +++ b/tests/ut/cpp/pipeline/parse/parser_test.cc @@ -164,14 +164,6 @@ TEST_F(TestParser, TestParseGraphForStatement) { bool ret_ = ResolveAll(manager); ASSERT_TRUE(ret_); - - // draw graph - int i = 0; - for (auto tmp : manager->func_graphs()) { - std::string name = "ut_parser_for_loop_" + std::to_string(i) + ".dot"; - draw::Draw(name, tmp); - i++; - } } TEST_F(TestParser, TestParseGraphCompareExprLt) { @@ -321,14 +313,6 @@ TEST_F(TestParser, TestParseGraphBoolNot) { bool ret_ = ResolveAll(manager); ASSERT_TRUE(ret_); - - // draw graph - int i = 0; - for 
(auto tmp : manager->func_graphs()) { - std::string name = "ut_parser_for_not_" + std::to_string(i) + ".dot"; - draw::Draw(name, tmp); - i++; - } } TEST_F(TestParser, TestCallPythonFnUseTupleParamete) { diff --git a/tests/ut/cpp/pipeline/parse/resolve_test.cc b/tests/ut/cpp/pipeline/parse/resolve_test.cc index 5f504a477f4..1eb8412d848 100644 --- a/tests/ut/cpp/pipeline/parse/resolve_test.cc +++ b/tests/ut/cpp/pipeline/parse/resolve_test.cc @@ -52,14 +52,6 @@ TEST_F(TestResolve, TestResolveApi) { ASSERT_TRUE(ret_); ASSERT_EQ(manager->func_graphs().size(), (size_t)2); - - // draw graph - int i = 0; - for (auto func_graph : manager->func_graphs()) { - std::string name = "ut_resolve_graph_" + std::to_string(i) + ".dot"; - draw::Draw(name, func_graph); - i++; - } } TEST_F(TestResolve, TestParseGraphTestClosureResolve) { @@ -67,7 +59,6 @@ TEST_F(TestResolve, TestParseGraphTestClosureResolve) { python_adapter::CallPyFn("gtest_input.pipeline.parse.parser_test", "test_reslove_closure", 123); FuncGraphPtr func_graph = ParsePythonCode(test_fn); ASSERT_TRUE(func_graph != nullptr); - draw::Draw("test_reslove_closure.dot", func_graph); // save the func_graph to manager std::shared_ptr manager = Manage(func_graph); @@ -77,14 +68,6 @@ TEST_F(TestResolve, TestParseGraphTestClosureResolve) { ASSERT_TRUE(ret_); ASSERT_EQ(manager->func_graphs().size(), (size_t)2); - - // draw graph - int i = 0; - for (auto func_graph : manager->func_graphs()) { - std::string name = "ut_test_reslove_closure_graph_" + std::to_string(i) + ".dot"; - draw::Draw(name, func_graph); - i++; - } } } // namespace parse } // namespace mindspore diff --git a/tests/ut/cpp/pipeline/static_analysis/evaluator_test.cc b/tests/ut/cpp/pipeline/static_analysis/evaluator_test.cc index 1b94a766ae1..785d8f3c2d4 100644 --- a/tests/ut/cpp/pipeline/static_analysis/evaluator_test.cc +++ b/tests/ut/cpp/pipeline/static_analysis/evaluator_test.cc @@ -136,7 +136,6 @@ TEST_F(TestPartialEvaluator, test_infer_dataclass_resolved) { getPyFun.SetDoResolve(true); FuncGraphPtr func_graph = getPyFun("test_dataclass_fun_sub"); ASSERT_TRUE(nullptr != func_graph); - draw::Draw("test_dataclass_fun_sub.dot", func_graph); AbstractBasePtrList args_spec_list; float x = 5.1; @@ -226,7 +225,6 @@ TEST_F(TestPartialEvaluator, test_infer_construct_sub_unresolved) { getPyFun.SetDoResolve(false); FuncGraphPtr func_graph = getPyFun.CallAndParseRet("test_net_construct_sub"); ASSERT_TRUE(nullptr != func_graph); - draw::Draw("test_infer_simple_net.dot", func_graph); AbstractBasePtrList args_spec_list; double x = 1.2; diff --git a/tests/ut/cpp/pipeline/static_analysis/prim_test.cc b/tests/ut/cpp/pipeline/static_analysis/prim_test.cc index 3212eac0600..71891ea5d8c 100644 --- a/tests/ut/cpp/pipeline/static_analysis/prim_test.cc +++ b/tests/ut/cpp/pipeline/static_analysis/prim_test.cc @@ -276,7 +276,6 @@ TEST_F(TestPrim, test_J_2) { inputs.push_back(jf_jx); CNodePtr cnode_return = func_graph->NewCNode(inputs); func_graph->set_return(cnode_return); - draw::Draw("test_J_2.dot", func_graph); int64_t v1 = 1; AbstractBasePtr abstract_v1 = FromValue(v1, false); @@ -468,7 +467,6 @@ TEST_F(TestPrim, test_relu) { TEST_F(TestPrim, test_relu2) { FuncGraphPtr func_graph = getPyFun("get_relu"); ASSERT_TRUE(func_graph != nullptr); - draw::Draw("test_relu.dot", func_graph); auto arr = ArrayOfTensor(UTPrimUtils::kF32, {3, 4, 5}); auto expected = ArrayOfTensor(UTPrimUtils::kF32, {3, 4, 5}); @@ -597,7 +595,6 @@ TEST_F(TestPrim, test_softmax_cross_entropy_with_logits) { TEST_F(TestPrim, 
test_tensor_to_scalar_prim) { FuncGraphPtr func_graph = getPyFun("get_tensor_to_scalar"); ASSERT_TRUE(func_graph != nullptr); - draw::Draw("get_tensor_to_scalar.dot", func_graph); auto logits = ArrayOfTensor(UTPrimUtils::kF64, {64, 10}); auto labels = ArrayOfTensor(UTPrimUtils::kF64, {64, 10}); diff --git a/tests/ut/cpp/pipeline/static_analysis/specialize_test.cc b/tests/ut/cpp/pipeline/static_analysis/specialize_test.cc index 0d5f180ccce..542c9826767 100644 --- a/tests/ut/cpp/pipeline/static_analysis/specialize_test.cc +++ b/tests/ut/cpp/pipeline/static_analysis/specialize_test.cc @@ -139,11 +139,7 @@ TEST_F(TestSpecializeGraph, test_specialize1) { args_spec_list.push_back(abstract_v1); args_spec_list.push_back(abstract_v2); AnalysisResult result = engine_->Run(graph_alpha_, args_spec_list); - draw::Draw("befor_graph_alpha.dot", graph_alpha_); FuncGraphPtr new_graph = special_->Run(graph_alpha_, result.context); - if (new_graph) { - draw::Draw("after_graph_alpha.dot", new_graph); - } } class TestSpecializeMetaFuncGraph : public UT::Common { @@ -220,12 +216,7 @@ TEST_F(TestSpecializeMetaFuncGraph, test_specialize) { args_spec_list.push_back(abstract_v1); args_spec_list.push_back(abstract_v2); AnalysisResult result = engine_->Run(graph_, args_spec_list); - - draw::Draw("befor_graph.dot", graph_); FuncGraphPtr new_graph = special_->Run(graph_, result.context); - if (new_graph) { - draw::Draw("after_graph.dot", new_graph); - } } } // namespace abstract diff --git a/tests/ut/cpp/transform/convert_test.cc b/tests/ut/cpp/transform/convert_test.cc index 3f01fdf5c48..c9ae0da9a92 100644 --- a/tests/ut/cpp/transform/convert_test.cc +++ b/tests/ut/cpp/transform/convert_test.cc @@ -72,12 +72,8 @@ bool MakeDfGraph(PrimitivePtr prim, unsigned int nparam) { std::shared_ptr anf_graph = MakeFuncGraph(prim, nparam); std::shared_ptr graph_manager = MakeManager({anf_graph}); - draw::Draw("ut_prim_" + prim->name() + ".dot", anf_graph); - DumpIR("ut_prim_" + prim->name() + ".ir", anf_graph); - DfGraphConvertor converter(anf_graph); auto df_graph = converter.ConvertAllNode().BuildGraph().GetComputeGraph(); - converter.DrawComputeGraph(prim->name() + ".dot"); if (converter.ErrCode() != 0) { MS_LOG(ERROR) << "DfGraphConvertor convert " << prim->name() << " error, error code is: " << converter.ErrCode(); return false; @@ -99,13 +95,8 @@ TEST_F(TestConvert, TestConvertConv2d) { FuncGraphPtr anf_graph = MakeFuncGraph(conv2d, 2); std::shared_ptr graph_manager = MakeManager({anf_graph}); - - draw::Draw("ut_prim_conv2d1.dot", anf_graph); - DumpIR("ut_prim_conv2d1.ir", anf_graph); - DfGraphConvertor converter(anf_graph); auto df_graph = converter.ConvertAllNode().BuildGraph().GetComputeGraph(); - converter.DrawComputeGraph("conv2d.dot"); ASSERT_EQ(converter.ErrCode(), 0); ASSERT_NE(df_graph, nullptr); } @@ -115,12 +106,8 @@ TEST_F(TestConvert, TestConvertMaxpooling) { FuncGraphPtr anf_graph = MakeFuncGraph(prim, 5); // ary, ksize, stride, padding, data_format std::shared_ptr graph_manager = MakeManager({anf_graph}); - draw::Draw("ut_prim_maxpooling.dot", anf_graph); - DumpIR("ut_prim_maxpooling.ir", anf_graph); - DfGraphConvertor converter(anf_graph); auto df_graph = converter.ConvertAllNode().BuildGraph().GetComputeGraph(); - converter.DrawComputeGraph("maxpooling.dot"); ASSERT_EQ(converter.ErrCode(), 0); ASSERT_NE(df_graph, nullptr); } @@ -177,12 +164,8 @@ TEST_F(TestConvert, TestConvertBatchNorm) { anf_graph->set_return(cnode_return); std::shared_ptr graph_manager = MakeManager({anf_graph}); - 
draw::Draw("ut_prim_batchnorm.dot", anf_graph); - DumpIR("ut_prim_batchnorm.ir", anf_graph); - DfGraphConvertor converter(anf_graph); auto df_graph = converter.ConvertAllNode().BuildGraph().GetComputeGraph(); - converter.DrawComputeGraph("batchnrom.dot"); ASSERT_EQ(converter.ErrCode(), 0); ASSERT_NE(df_graph, nullptr); } @@ -211,8 +194,6 @@ TEST_F(TestConvert, TestConvertConvBackpropInput) { auto anf_graph = *(manager->func_graphs().begin()); DfGraphConvertor converter(anf_graph); auto df_graph = converter.ConvertAllNode().BuildGraph().GetComputeGraph(); - - converter.DrawComputeGraph("Conv2DBackpropInput.dot"); ASSERT_EQ(converter.ErrCode(), 0); ASSERT_NE(df_graph, nullptr); } @@ -241,8 +222,6 @@ TEST_F(TestConvert, TestConvertConvBackpropFilter) { auto anf_graph = *(manager->func_graphs().begin()); DfGraphConvertor converter(anf_graph); auto df_graph = converter.ConvertAllNode().BuildGraph().GetComputeGraph(); - - converter.DrawComputeGraph("Conv2DBackpropFilter.dot"); ASSERT_EQ(converter.ErrCode(), 0); ASSERT_NE(df_graph, nullptr); } @@ -266,8 +245,6 @@ TEST_F(TestConvert, TestConvertReluGrad) { auto anf_graph = *(manager->func_graphs().begin()); DfGraphConvertor converter(anf_graph); auto df_graph = converter.ConvertAllNode().BuildGraph().GetComputeGraph(); - - converter.DrawComputeGraph("ReluGrad.dot"); ASSERT_EQ(converter.ErrCode(), 0); ASSERT_NE(df_graph, nullptr); } @@ -290,8 +267,6 @@ TEST_F(TestConvert, TestConvertBiasAdd) { auto anf_graph = *(manager->func_graphs().begin()); DfGraphConvertor converter(anf_graph); auto df_graph = converter.ConvertAllNode().BuildGraph().GetComputeGraph(); - - converter.DrawComputeGraph("BiasAdd.dot"); ASSERT_EQ(converter.ErrCode(), 0); ASSERT_NE(df_graph, nullptr); } @@ -314,8 +289,6 @@ TEST_F(TestConvert, TestConvertBiasAddGrad) { auto anf_graph = *(manager->func_graphs().begin()); DfGraphConvertor converter(anf_graph); auto df_graph = converter.ConvertAllNode().BuildGraph().GetComputeGraph(); - - converter.DrawComputeGraph("BiasAddGrad.dot"); ASSERT_EQ(converter.ErrCode(), 0); ASSERT_NE(df_graph, nullptr); } @@ -344,8 +317,6 @@ TEST_F(TestConvert, TestConvertMaxPoolGradWithArgmax) { auto anf_graph = *(manager->func_graphs().begin()); DfGraphConvertor converter(anf_graph); auto df_graph = converter.ConvertAllNode().BuildGraph().GetComputeGraph(); - - converter.DrawComputeGraph("MaxPoolGradWithArgmax.dot"); ASSERT_EQ(converter.ErrCode(), 0); ASSERT_NE(df_graph, nullptr); } @@ -355,13 +326,8 @@ TEST_F(TestConvert, TestConcat) { std::shared_ptr anf_graph = MakeFuncGraph(prim, 2); std::shared_ptr graph_manager = MakeManager({anf_graph}); - - draw::Draw("ut_prim_concat.dot", anf_graph); - DumpIR("ut_prim_concat.ir", anf_graph); - DfGraphConvertor converter(anf_graph); auto df_graph = converter.ConvertAllNode().BuildGraph().GetComputeGraph(); - converter.DrawComputeGraph("concat.dot"); ASSERT_EQ(converter.ErrCode(), 0); ASSERT_NE(df_graph, nullptr); } @@ -371,13 +337,8 @@ TEST_F(TestConvert, TestGatherV2) { std::shared_ptr anf_graph = MakeFuncGraph(prim, 3); std::shared_ptr graph_manager = MakeManager({anf_graph}); - - draw::Draw("ut_prim_gatherv2.dot", anf_graph); - DumpIR("ut_prim_gatherv2.ir", anf_graph); - DfGraphConvertor converter(anf_graph); auto df_graph = converter.ConvertAllNode().BuildGraph().GetComputeGraph(); - converter.DrawComputeGraph("gatherv2.dot"); ASSERT_EQ(converter.ErrCode(), 0); ASSERT_NE(df_graph, nullptr); } @@ -387,13 +348,8 @@ TEST_F(TestConvert, TestCast) { std::shared_ptr anf_graph = MakeFuncGraph(prim, 2); std::shared_ptr 
graph_manager = MakeManager({anf_graph}); - - draw::Draw("ut_prim_cast.dot", anf_graph); - DumpIR("ut_prim_cast.ir", anf_graph); - DfGraphConvertor converter(anf_graph); auto df_graph = converter.ConvertAllNode().BuildGraph().GetComputeGraph(); - converter.DrawComputeGraph("cast.dot"); ASSERT_EQ(converter.ErrCode(), 0); ASSERT_NE(df_graph, nullptr); } @@ -403,13 +359,8 @@ TEST_F(TestConvert, TestExp) { std::shared_ptr anf_graph = MakeFuncGraph(prim, 1); std::shared_ptr graph_manager = MakeManager({anf_graph}); - - draw::Draw("ut_prim_exp.dot", anf_graph); - DumpIR("ut_prim_exp.ir", anf_graph); - DfGraphConvertor converter(anf_graph); auto df_graph = converter.ConvertAllNode().BuildGraph().GetComputeGraph(); - converter.DrawComputeGraph("exp.dot"); ASSERT_EQ(converter.ErrCode(), 0); ASSERT_NE(df_graph, nullptr); } @@ -419,13 +370,8 @@ TEST_F(TestConvert, TestFloor) { std::shared_ptr anf_graph = MakeFuncGraph(prim, 1); std::shared_ptr graph_manager = MakeManager({anf_graph}); - - draw::Draw("ut_prim_floor.dot", anf_graph); - DumpIR("ut_prim_floor.ir", anf_graph); - DfGraphConvertor converter(anf_graph); auto df_graph = converter.ConvertAllNode().BuildGraph().GetComputeGraph(); - converter.DrawComputeGraph("floor.dot"); ASSERT_EQ(converter.ErrCode(), 0); ASSERT_NE(df_graph, nullptr); } @@ -436,12 +382,8 @@ TEST_F(TestConvert, TestGreaterEqual) { std::shared_ptr anf_graph = MakeFuncGraph(prim, 2); std::shared_ptr graph_manager = MakeManager({anf_graph}); - draw::Draw("ut_prim_greater_equal.dot", anf_graph); - DumpIR("ut_prim_greater_equal.ir", anf_graph); - DfGraphConvertor converter(anf_graph); auto df_graph = converter.ConvertAllNode().BuildGraph().GetComputeGraph(); - converter.DrawComputeGraph("greater_equal.dot"); ASSERT_EQ(converter.ErrCode(), 0); ASSERT_NE(df_graph, nullptr); } @@ -453,12 +395,8 @@ TEST_F(TestConvert, TestLess) { std::shared_ptr anf_graph = MakeFuncGraph(prim, 2); std::shared_ptr graph_manager = MakeManager({anf_graph}); - draw::Draw("ut_prim_less.dot", anf_graph); - DumpIR("ut_prim_less.ir", anf_graph); - DfGraphConvertor converter(anf_graph); auto df_graph = converter.ConvertAllNode().BuildGraph().GetComputeGraph(); - converter.DrawComputeGraph("less.dot"); ASSERT_EQ(converter.ErrCode(), 0); ASSERT_NE(df_graph, nullptr); } @@ -469,12 +407,8 @@ TEST_F(TestConvert, TestLessEqual) { std::shared_ptr anf_graph = MakeFuncGraph(prim, 2); std::shared_ptr graph_manager = MakeManager({anf_graph}); - draw::Draw("ut_prim_less_equal.dot", anf_graph); - DumpIR("ut_prim_less_equal.ir", anf_graph); - DfGraphConvertor converter(anf_graph); auto df_graph = converter.ConvertAllNode().BuildGraph().GetComputeGraph(); - converter.DrawComputeGraph("less_equal.dot"); ASSERT_EQ(converter.ErrCode(), 0); ASSERT_NE(df_graph, nullptr); } @@ -485,12 +419,8 @@ TEST_F(TestConvert, TestLogicalNot) { std::shared_ptr anf_graph = MakeFuncGraph(prim, 1); std::shared_ptr graph_manager = MakeManager({anf_graph}); - draw::Draw("ut_prim_logical_not.dot", anf_graph); - DumpIR("ut_prim_logical_not.ir", anf_graph); - DfGraphConvertor converter(anf_graph); auto df_graph = converter.ConvertAllNode().BuildGraph().GetComputeGraph(); - converter.DrawComputeGraph("logical_not.dot"); ASSERT_EQ(converter.ErrCode(), 0); ASSERT_NE(df_graph, nullptr); } @@ -502,12 +432,8 @@ TEST_F(TestConvert, TestAssignAdd) { std::shared_ptr anf_graph = MakeFuncGraph(prim, 2); std::shared_ptr graph_manager = MakeManager({anf_graph}); - draw::Draw("ut_prim_assign_add.dot", anf_graph); - DumpIR("ut_prim_assign_add.ir", anf_graph); - 
DfGraphConvertor converter(anf_graph); auto df_graph = converter.ConvertAllNode().BuildGraph().GetComputeGraph(); - converter.DrawComputeGraph("assign_add.dot"); ASSERT_EQ(converter.ErrCode(), 0); ASSERT_NE(df_graph, nullptr); } @@ -519,12 +445,8 @@ TEST_F(TestConvert, LogSoftmax) { std::shared_ptr anf_graph = MakeFuncGraph(prim, 1); std::shared_ptr graph_manager = MakeManager({anf_graph}); - draw::Draw("ut_prim_log_softmax.dot", anf_graph); - DumpIR("ut_prim_log_softmax.ir", anf_graph); - DfGraphConvertor converter(anf_graph); auto df_graph = converter.ConvertAllNode().BuildGraph().GetComputeGraph(); - converter.DrawComputeGraph("log_softmax.dot"); ASSERT_EQ(converter.ErrCode(), 0); ASSERT_NE(df_graph, nullptr); } diff --git a/tests/ut/cpp/transform/graph_runner_test.cc b/tests/ut/cpp/transform/graph_runner_test.cc index 54ad101a53a..d2951c5c3cb 100644 --- a/tests/ut/cpp/transform/graph_runner_test.cc +++ b/tests/ut/cpp/transform/graph_runner_test.cc @@ -142,9 +142,9 @@ TEST_F(TestGraphRunner, TestRunGraphException) { MeTensorPtr init_tensor_ptr = MakeTensor(kF32, list0); dict["x1"] = init_tensor_ptr; - std::shared_ptr convertor = MakeGeGraph(); - (*convertor).ConvertAllNode().InitParam(dict).BuildGraph(); - auto df_graph = (*convertor).GetComputeGraph(); + std::shared_ptr converter = MakeGeGraph(); + (*converter).ConvertAllNode().InitParam(dict).BuildGraph(); + auto df_graph = (*converter).GetComputeGraph(); graph_manager.AddGraph("test_graph", df_graph); std::initializer_list list1{1, 1, 2, 3}; @@ -176,13 +176,13 @@ TEST_F(TestGraphRunner, TestRunGraph) { DfGraphManager &graph_manager = DfGraphManager::GetInstance(); graph_manager.ClearGraph(); - std::shared_ptr convertor = MakeGeGraph(); + std::shared_ptr converter = MakeGeGraph(); std::map dict; std::initializer_list list0{2, 1, 2, 2}; dict.emplace("x1", MakeTensor(kF32, list0)); - (*convertor).ConvertAllNode().InitParam(dict).BuildGraph(); - graph_manager.AddGraph("test_graph", (*convertor).GetComputeGraph()); + (*converter).ConvertAllNode().InitParam(dict).BuildGraph(); + graph_manager.AddGraph("test_graph", (*converter).GetComputeGraph()); TypePtr type_id = kFloat32; @@ -214,14 +214,13 @@ TEST_F(TestGraphRunner, TestAPI) { DfGraphManager &graph_manager = DfGraphManager::GetInstance(); graph_manager.ClearGraph(); - std::shared_ptr convertor = MakeGeGraph(); + std::shared_ptr converter = MakeGeGraph(); std::map dict; std::initializer_list list0{2, 1, 2, 2}; dict.emplace("x1", MakeTensor(kF32, list0)); - (*convertor).ConvertAllNode().InitParam(dict).BuildGraph(); - (*convertor).DrawComputeGraph("TestGraphRunner_TestAPI_Training.dot"); - graph_manager.AddGraph("fp_bp_subgraph", (*convertor).GetComputeGraph()); + (*converter).ConvertAllNode().InitParam(dict).BuildGraph(); + graph_manager.AddGraph("fp_bp_subgraph", (*converter).GetComputeGraph()); std::initializer_list list1{1, 1, 4, 4}; std::initializer_list list2{2, 3, 4, 5}; diff --git a/tests/ut/python/automl/test_case.py b/tests/ut/python/automl/test_case.py index 39bcebca02e..99746c2b6c2 100644 --- a/tests/ut/python/automl/test_case.py +++ b/tests/ut/python/automl/test_case.py @@ -33,7 +33,7 @@ class Net(nn.Cell): def test_case(): - context.set_context(mode=context.GRAPH_MODE, save_graphs=True) + context.set_context(mode=context.GRAPH_MODE) net = Net() data = Tensor(np.ones((1, 1, 224, 224)), mindspore.float32) idx = Tensor(1, mindspore.int32) diff --git a/tests/ut/python/dtype/test_tuple.py b/tests/ut/python/dtype/test_tuple.py index 3b0a1693d13..0bef45333d1 100644 --- 
a/tests/ut/python/dtype/test_tuple.py +++ b/tests/ut/python/dtype/test_tuple.py @@ -24,7 +24,7 @@ from tests.mindspore_test_framework.mindspore_test import mindspore_test from tests.mindspore_test_framework.pipeline.forward.compile_forward \ import pipeline_for_compile_forward_ge_graph_for_case_by_case_config -context.set_context(mode=context.GRAPH_MODE, save_graphs=True) +context.set_context(mode=context.GRAPH_MODE) class TupleGraphNet(nn.Cell): diff --git a/tests/ut/python/nn/test_l1_regularizer.py b/tests/ut/python/nn/test_l1_regularizer.py index 6d93b3cbb6c..a9bd33cc27b 100644 --- a/tests/ut/python/nn/test_l1_regularizer.py +++ b/tests/ut/python/nn/test_l1_regularizer.py @@ -19,7 +19,7 @@ import mindspore.nn as nn import mindspore.context as context from mindspore import Tensor, ms_function -context.set_context(mode=context.GRAPH_MODE, save_graphs=True) +context.set_context(mode=context.GRAPH_MODE) class Net_l1_regularizer(nn.Cell): diff --git a/tests/ut/python/ops/test_control_ops.py b/tests/ut/python/ops/test_control_ops.py index 5d1ecbae560..5ba3a283237 100644 --- a/tests/ut/python/ops/test_control_ops.py +++ b/tests/ut/python/ops/test_control_ops.py @@ -1022,7 +1022,7 @@ def test_recursive_call(): out = self.fc(x) return out - context.set_context(mode=context.GRAPH_MODE, save_graphs=False) + context.set_context(mode=context.GRAPH_MODE) os.environ['ENV_RECURSIVE_EVAL'] = '1' old_max_call_depth = context.get_context('max_call_depth') context.set_context(max_call_depth=80) diff --git a/tests/ut/python/ops/test_dynamic_shape.py b/tests/ut/python/ops/test_dynamic_shape.py index 23c5f004af1..841a498549c 100755 --- a/tests/ut/python/ops/test_dynamic_shape.py +++ b/tests/ut/python/ops/test_dynamic_shape.py @@ -19,7 +19,7 @@ from mindspore import Tensor, context, nn, Parameter from mindspore import dtype as mstype from mindspore.ops import operations as P -context.set_context(mode=context.GRAPH_MODE, save_graphs=False) +context.set_context(mode=context.GRAPH_MODE) def test_sparse_apply_proximal_ada_grad(): diff --git a/tests/ut/python/ops/test_nn_ops.py b/tests/ut/python/ops/test_nn_ops.py index b0d9f3fe6a0..db53ab300b1 100644 --- a/tests/ut/python/ops/test_nn_ops.py +++ b/tests/ut/python/ops/test_nn_ops.py @@ -31,7 +31,7 @@ from ....mindspore_test_framework.pipeline.forward.compile_forward \ import pipeline_for_compile_forward_ge_graph_for_case_by_case_config from ....mindspore_test_framework.pipeline.forward.verify_exception \ import pipeline_for_verify_exception_for_case_by_case_config -context.set_context(mode=context.GRAPH_MODE, save_graphs=True) +context.set_context(mode=context.GRAPH_MODE) def conv3x3(in_channels, out_channels, stride=1, padding=1): """3x3 convolution """ @@ -408,7 +408,7 @@ def test_max_pool_with_arg_max(): x = Tensor(np.ones([1, 1, 3, 3], np.float32)) net = NetMaxPoolWithArgMax() - context.set_context(mode=context.GRAPH_MODE, save_graphs=True) + context.set_context(mode=context.GRAPH_MODE) ret = net(x) print(ret) diff --git a/tests/ut/python/ops/test_ops.py b/tests/ut/python/ops/test_ops.py index 9e19e47a8e4..a4a6be30027 100755 --- a/tests/ut/python/ops/test_ops.py +++ b/tests/ut/python/ops/test_ops.py @@ -963,7 +963,7 @@ def test_strided_slice_const(): return out net = StridedSLiceConstNet() - context.set_context(mode=context.GRAPH_MODE, save_graphs=True) + context.set_context(mode=context.GRAPH_MODE) x = Tensor(np.ones([6, 7, 8, 9, 10]), mstype.float32) ret = net(x) assert ret.shape == (0, 1, 7, 8, 9, 3, 1) diff --git a/tests/ut/python/ops/test_ops_attr_infer.py 
b/tests/ut/python/ops/test_ops_attr_infer.py index 70b104b6f4b..f1893628091 100644 --- a/tests/ut/python/ops/test_ops_attr_infer.py +++ b/tests/ut/python/ops/test_ops_attr_infer.py @@ -26,7 +26,7 @@ from mindspore import Tensor from mindspore.ops import functional as F from mindspore.ops import prim_attr_register, PrimitiveWithInfer -context.set_context(mode=context.GRAPH_MODE, save_graphs=True) +context.set_context(mode=context.GRAPH_MODE) class FakeOp(PrimitiveWithInfer): diff --git a/tests/ut/python/ops/test_tensor_getitem.py b/tests/ut/python/ops/test_tensor_getitem.py index b5dc13651cb..93f1d725151 100644 --- a/tests/ut/python/ops/test_tensor_getitem.py +++ b/tests/ut/python/ops/test_tensor_getitem.py @@ -48,7 +48,7 @@ class TensorItemByItem(Cell): def test_tensor_fancy_index_integer_list(): - context.set_context(mode=context.GRAPH_MODE, save_graphs=True) + context.set_context(mode=context.GRAPH_MODE) index = [0, 2, 1] net = NetWorkFancyIndex(index) input_np = np.arange(60).reshape(3, 4, 5) @@ -57,7 +57,7 @@ def test_tensor_fancy_index_integer_list(): def test_tensor_fancy_index_boolean_list(): - context.set_context(mode=context.GRAPH_MODE, save_graphs=True) + context.set_context(mode=context.GRAPH_MODE) index = [True, True, False] net = NetWorkFancyIndex(index) input_np = np.arange(60).reshape(3, 4, 5) @@ -66,7 +66,7 @@ def test_tensor_fancy_index_boolean_list(): def test_tensor_fancy_index_integer_boolean_list_graph(): - context.set_context(mode=context.GRAPH_MODE, save_graphs=True) + context.set_context(mode=context.GRAPH_MODE) index = [1, 2, True, False] net = NetWorkFancyIndex(index) input_np = np.arange(60).reshape(3, 4, 5) @@ -75,7 +75,7 @@ def test_tensor_fancy_index_integer_boolean_list_graph(): def test_tensor_fancy_index_integer_list_mixed(): - context.set_context(mode=context.GRAPH_MODE, save_graphs=True) + context.set_context(mode=context.GRAPH_MODE) index = (1, [2, 1, 3], slice(1, 3, 1), ..., 4) net = NetWorkFancyIndex(index) input_np = np.arange(3*4*5*6*7*8).reshape(3, 4, 5, 6, 7, 8) @@ -84,7 +84,7 @@ def test_tensor_fancy_index_integer_list_mixed(): def test_tensor_fancy_index_integer_tuple_mixed(): - context.set_context(mode=context.GRAPH_MODE, save_graphs=True) + context.set_context(mode=context.GRAPH_MODE) index = (1, (2, 1, 3), slice(1, 3, 1), ..., 4) net = NetWorkFancyIndex(index) input_np = np.arange(3*4*5*6*7*8).reshape(3, 4, 5, 6, 7, 8) @@ -93,7 +93,7 @@ def test_tensor_fancy_index_integer_tuple_mixed(): def test_tensor_fancy_index_integer_list_tuple_mixed(): - context.set_context(mode=context.GRAPH_MODE, save_graphs=True) + context.set_context(mode=context.GRAPH_MODE) index = (1, [2, 1, 3], (3, 2, 1), slice(1, 3, 1), ..., 4) net = NetWorkFancyIndex(index) input_np = np.arange(3*4*5*6*7*8).reshape(3, 4, 5, 6, 7, 8) @@ -102,7 +102,7 @@ def test_tensor_fancy_index_integer_list_tuple_mixed(): def test_tensor_fancy_index_integer_list_tuple_bool_mixed(): - context.set_context(mode=context.GRAPH_MODE, save_graphs=True) + context.set_context(mode=context.GRAPH_MODE) index = (1, [2, 1, 3], True, (3, 2, 1), slice(1, 3, 1), ..., True, 4) net = NetWorkFancyIndex(index) input_np = np.arange(3*4*5*6*7*8).reshape(3, 4, 5, 6, 7, 8) @@ -111,7 +111,7 @@ def test_tensor_fancy_index_integer_list_tuple_bool_mixed(): def test_tensor_fancy_index_integer_list_tuple_bool_mixed_error(): - context.set_context(mode=context.GRAPH_MODE, save_graphs=True) + context.set_context(mode=context.GRAPH_MODE) index = (1, [2, 1, 3], True, (3, 2, 1), slice(1, 3, 1), ..., False, 4) net = 
NetWorkFancyIndex(index) input_np = np.arange(3*4*5*6*7*8).reshape(3, 4, 5, 6, 7, 8) diff --git a/tests/ut/python/ops/test_tensor_slice.py b/tests/ut/python/ops/test_tensor_slice.py index e761c924dae..1436bc87e1b 100644 --- a/tests/ut/python/ops/test_tensor_slice.py +++ b/tests/ut/python/ops/test_tensor_slice.py @@ -445,7 +445,7 @@ class TensorSetItemByMixedTensors(Cell): def test_tensor_assign(): - context.set_context(mode=context.GRAPH_MODE, save_graphs=True) + context.set_context(mode=context.GRAPH_MODE) net = TensorAssignWithSlice() net2 = TensorAssignWithSlice2() # The test case is no longer appropriate since x[1:3:-1] = np.array(2) does diff --git a/tests/ut/python/optimizer/test_auto_grad.py b/tests/ut/python/optimizer/test_auto_grad.py index b51bfba1f7a..2d73e8b93e7 100644 --- a/tests/ut/python/optimizer/test_auto_grad.py +++ b/tests/ut/python/optimizer/test_auto_grad.py @@ -152,7 +152,7 @@ def test_second_grad_with_j_primitive(): # A CNode being used as FV is MapMorphism after MapMorphism of call-site CNode; def test_ad_fv_cnode_order(): - context.set_context(mode=context.GRAPH_MODE, save_graphs=True) + context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): def __init__(self): super(Net, self).__init__() @@ -180,7 +180,7 @@ def test_ad_fv_cnode_order(): # True and False branch of switch have different number of parameters. def test_if_branch_with_different_params(): - context.set_context(mode=context.GRAPH_MODE, save_graphs=False) + context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): def __init__(self): super(Net, self).__init__() @@ -217,7 +217,7 @@ def test_if_branch_with_different_params(): # Otherwise, "Illegal AnfNode for evaluating" may be reported # because weight1 in Net may use old_parameter other than replicated one. 
def test_limit_lift_fv_scope(): - context.set_context(mode=context.GRAPH_MODE, save_graphs=False) + context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): def __init__(self): super(Net, self).__init__() diff --git a/tests/ut/python/optimizer/test_while_ScatterNdUpdate.py b/tests/ut/python/optimizer/test_while_ScatterNdUpdate.py index 4bd21197aca..147efc374fc 100644 --- a/tests/ut/python/optimizer/test_while_ScatterNdUpdate.py +++ b/tests/ut/python/optimizer/test_while_ScatterNdUpdate.py @@ -18,7 +18,7 @@ from mindspore.common import dtype as mstype from mindspore.ops import operations as P -context.set_context(mode=context.GRAPH_MODE, save_graphs=False) +context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): def __init__(self, data): diff --git a/tests/ut/python/parallel/__init__.py b/tests/ut/python/parallel/__init__.py index 7f4e7e22ed1..a4096c253e3 100644 --- a/tests/ut/python/parallel/__init__.py +++ b/tests/ut/python/parallel/__init__.py @@ -21,7 +21,7 @@ from mindspore.communication._comm_helper import GlobalComm def setup_module(): auto_parallel_context().set_enable_all_reduce_fusion(enable_all_reduce_fusion=True) - context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False) + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") GlobalComm.INITED = True reset_cost_model_context() reset_algo_parameters() diff --git a/tests/ut/python/parallel/conftest.py b/tests/ut/python/parallel/conftest.py index 847d6216d5c..34a3a1f9e9f 100644 --- a/tests/ut/python/parallel/conftest.py +++ b/tests/ut/python/parallel/conftest.py @@ -24,7 +24,7 @@ from mindspore.parallel.algo_parameter_config import reset_algo_parameters def reset_test_context(): context.reset_auto_parallel_context() auto_parallel_context().set_enable_all_reduce_fusion(enable_all_reduce_fusion=True) - context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False) + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") reset_cost_model_context() reset_algo_parameters() _reset_op_id() diff --git a/tests/ut/python/parallel/test_alltoall.py b/tests/ut/python/parallel/test_alltoall.py index e9d19ceefbe..a24552e351e 100644 --- a/tests/ut/python/parallel/test_alltoall.py +++ b/tests/ut/python/parallel/test_alltoall.py @@ -103,7 +103,7 @@ def all_to_all_common(strategy1): def test_all_to_all(): strategy1 = ((8, 1),) - context.set_context(mode=context.GRAPH_MODE, save_graphs=False) + context.set_context(mode=context.GRAPH_MODE) _reset_op_id() strategys = all_to_all_common(strategy1) print(strategys) @@ -116,7 +116,6 @@ def test_all_to_all(): assert v == [[8, 1]] elif re.search('MatMul-op', k) is not None: assert v == [[1, 1], [1, 8]] - context.set_context(save_graphs=False) def test_all_to_all_success(): diff --git a/tests/ut/python/parallel/test_auto_parallel_double_subgraphs.py b/tests/ut/python/parallel/test_auto_parallel_double_subgraphs.py index d2242c39cad..1af57a624fd 100644 --- a/tests/ut/python/parallel/test_auto_parallel_double_subgraphs.py +++ b/tests/ut/python/parallel/test_auto_parallel_double_subgraphs.py @@ -105,7 +105,6 @@ class TrainStepWarp(nn.Cell): def test_double_subgraphs(): _set_multi_subgraphs() - context.set_context(save_graphs=False) context.set_auto_parallel_context(device_num=8, global_rank=0) context.set_auto_parallel_context(parallel_mode="auto_parallel") net = TrainStepWarp(NetWithLoss(Net())) @@ -156,7 +155,6 @@ class DatasetLenet(): return self def test_double_subgraphs_train(): - 
context.set_context(save_graphs=False) context.set_auto_parallel_context(device_num=1, global_rank=0) context.set_auto_parallel_context(parallel_mode="auto_parallel") net = TrainStepWarp(NetWithLoss(Net())) diff --git a/tests/ut/python/parallel/test_auto_parallel_for_loop.py b/tests/ut/python/parallel/test_auto_parallel_for_loop.py index eae68a3499d..b6ffbca9b92 100644 --- a/tests/ut/python/parallel/test_auto_parallel_for_loop.py +++ b/tests/ut/python/parallel/test_auto_parallel_for_loop.py @@ -118,7 +118,6 @@ _w1 = Tensor(np.ones([512, 128]), dtype=ms.float32) def test_auto_parallel(): - context.set_context(save_graphs=False) context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=16, global_rank=0) net = Full(_w1, 3) net.set_auto_parallel() diff --git a/tests/ut/python/parallel/test_auto_parallel_for_loop_multi_subgraph.py b/tests/ut/python/parallel/test_auto_parallel_for_loop_multi_subgraph.py index cb5c87d3ce5..be5a70f8681 100644 --- a/tests/ut/python/parallel/test_auto_parallel_for_loop_multi_subgraph.py +++ b/tests/ut/python/parallel/test_auto_parallel_for_loop_multi_subgraph.py @@ -121,7 +121,6 @@ class TrainStepWarp(nn.Cell): def test_double_subgraphs(): - context.set_context(save_graphs=False) context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=8, global_rank=0) net = TrainStepWarp(NetWithLoss(Net())) _set_multi_subgraphs() diff --git a/tests/ut/python/parallel/test_auto_parallel_for_loop_reshape.py b/tests/ut/python/parallel/test_auto_parallel_for_loop_reshape.py index 66d05693075..b483b4f35dd 100644 --- a/tests/ut/python/parallel/test_auto_parallel_for_loop_reshape.py +++ b/tests/ut/python/parallel/test_auto_parallel_for_loop_reshape.py @@ -125,7 +125,6 @@ _w1 = Tensor(np.ones([512, 128, 1]), dtype=ms.float32) def test_auto_parallel(): - context.set_context(save_graphs=False) context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=16, global_rank=0) net = Full(_w1, 3) net.set_auto_parallel() diff --git a/tests/ut/python/parallel/test_auto_parallel_for_loop_simplify.py b/tests/ut/python/parallel/test_auto_parallel_for_loop_simplify.py index 16bd8430b6f..a364add6497 100644 --- a/tests/ut/python/parallel/test_auto_parallel_for_loop_simplify.py +++ b/tests/ut/python/parallel/test_auto_parallel_for_loop_simplify.py @@ -83,7 +83,6 @@ _w1 = Tensor(np.ones([512, 128]), dtype=ms.float32) def compile_net(net): - context.set_context(save_graphs=False) learning_rate = 0.1 momentum = 0.9 epoch_size = 2 diff --git a/tests/ut/python/parallel/test_auto_parallel_two_bn.py b/tests/ut/python/parallel/test_auto_parallel_two_bn.py index c0d4c71ad8a..eb080e360c4 100644 --- a/tests/ut/python/parallel/test_auto_parallel_two_bn.py +++ b/tests/ut/python/parallel/test_auto_parallel_two_bn.py @@ -68,7 +68,6 @@ def test_two_bn(): out = self.block2(out) return out - context.set_context(save_graphs=False) context.set_auto_parallel_context(device_num=8, global_rank=0) context.set_auto_parallel_context(parallel_mode="auto_parallel") net = NetWithLoss(Net()) diff --git a/tests/ut/python/parallel/test_batchmm.py b/tests/ut/python/parallel/test_batchmm.py index 27cce89d52d..ff34596c807 100644 --- a/tests/ut/python/parallel/test_batchmm.py +++ b/tests/ut/python/parallel/test_batchmm.py @@ -54,7 +54,7 @@ _wo = Tensor(np.ones([48, 64, 16]), dtype=ms.float32) def compile_net(net): - context.set_context(mode=context.GRAPH_MODE, save_graphs=True) + context.set_context(mode=context.GRAPH_MODE) optimizer = Momentum(net.trainable_params(), learning_rate=0.1, 
momentum=0.9) train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() diff --git a/tests/ut/python/parallel/test_broadcast_to.py b/tests/ut/python/parallel/test_broadcast_to.py index 69f4f975b94..505ccf6efcf 100644 --- a/tests/ut/python/parallel/test_broadcast_to.py +++ b/tests/ut/python/parallel/test_broadcast_to.py @@ -64,7 +64,7 @@ _x2 = Tensor(np.ones([64, 64]), dtype=ms.float32) def compile_net(net): - context.set_context(mode=context.GRAPH_MODE, save_graphs=False) + context.set_context(mode=context.GRAPH_MODE) optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() @@ -74,7 +74,7 @@ def compile_net(net): def compile_net2(net): - context.set_context(mode=context.GRAPH_MODE, save_graphs=False) + context.set_context(mode=context.GRAPH_MODE) optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() diff --git a/tests/ut/python/parallel/test_concat.py b/tests/ut/python/parallel/test_concat.py index 4c44f2e7424..75d3c6b79ec 100644 --- a/tests/ut/python/parallel/test_concat.py +++ b/tests/ut/python/parallel/test_concat.py @@ -80,7 +80,6 @@ w3 = Tensor(np.ones([64, 64, 32]), dtype=ms.float32) def compile_net(net): - context.set_context(save_graphs=False) optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() diff --git a/tests/ut/python/parallel/test_embeddinglookup.py b/tests/ut/python/parallel/test_embeddinglookup.py index 959d2bef230..998450cf198 100644 --- a/tests/ut/python/parallel/test_embeddinglookup.py +++ b/tests/ut/python/parallel/test_embeddinglookup.py @@ -95,7 +95,6 @@ def test_embeddinglookup_reducescatter_false_grad(): def test_embeddinglookup_reducescatter_true_grad(): - context.set_context(save_graphs=False) shape = [8, 8] offset = 8 net = GradWrap(NetWithLoss(Net(shape, offset))) diff --git a/tests/ut/python/parallel/test_eval.py b/tests/ut/python/parallel/test_eval.py index 0d0ce6de273..764efd9a0e3 100644 --- a/tests/ut/python/parallel/test_eval.py +++ b/tests/ut/python/parallel/test_eval.py @@ -52,7 +52,7 @@ _b = Tensor(np.ones([64, 64]), dtype=ms.float32) def test_train_and_eval(): - context.set_context(save_graphs=False, mode=0) + context.set_context(mode=0) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16) strategy1 = ((4, 4), (4, 4)) strategy2 = ((4, 4),) @@ -69,7 +69,7 @@ def test_train_and_eval(): context.reset_auto_parallel_context() def test_train_and_eval_auto(): - context.set_context(save_graphs=False, mode=0) + context.set_context(mode=0) context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=16) strategy1 = ((4, 4), (4, 4)) strategy2 = ((4, 4),) diff --git a/tests/ut/python/parallel/test_full_batch.py b/tests/ut/python/parallel/test_full_batch.py index 6b5e3c65987..35a09c02758 100644 --- a/tests/ut/python/parallel/test_full_batch.py +++ b/tests/ut/python/parallel/test_full_batch.py @@ -69,7 +69,7 @@ def all_to_all_common(strategy1): momentum = 0.9 epoch_size = 2 - context.set_context(mode=context.GRAPH_MODE, save_graphs=False) + context.set_context(mode=context.GRAPH_MODE) context.reset_auto_parallel_context() context.set_auto_parallel_context(parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL, device_num=8, dataset_strategy="full_batch") @@ -96,7 +96,7 @@ def test_data_parallel_mode(): learning_rate = 0.1 
momentum = 0.9 epoch_size = 2 - context.set_context(mode=context.GRAPH_MODE, save_graphs=False) + context.set_context(mode=context.GRAPH_MODE) context.reset_auto_parallel_context() context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, full_batch=True) predict = Tensor(np.ones([256, 128]), dtype=ms.float32) diff --git a/tests/ut/python/parallel/test_gather_v2_primitive.py b/tests/ut/python/parallel/test_gather_v2_primitive.py index d307fb7a57e..567f6efc2fa 100644 --- a/tests/ut/python/parallel/test_gather_v2_primitive.py +++ b/tests/ut/python/parallel/test_gather_v2_primitive.py @@ -133,7 +133,7 @@ def net_trains(criterion, rank): max_epoch = 20 input_channels = 256 out_channels = 512 - context.set_context(mode=context.GRAPH_MODE, save_graphs=False) + context.set_context(mode=context.GRAPH_MODE) context.reset_auto_parallel_context() context.set_auto_parallel_context(parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL, device_num=device_number, global_rank=rank) diff --git a/tests/ut/python/parallel/test_gathernd.py b/tests/ut/python/parallel/test_gathernd.py index 2dd16c9ba2f..fa109eb37af 100644 --- a/tests/ut/python/parallel/test_gathernd.py +++ b/tests/ut/python/parallel/test_gathernd.py @@ -64,7 +64,6 @@ _w1 = Tensor(np.ones([128, 64]), dtype=ms.float32) def compile_net(net): - context.set_context(save_graphs=True) learning_rate = 0.1 momentum = 0.9 epoch_size = 2 diff --git a/tests/ut/python/parallel/test_gathernd_further.py b/tests/ut/python/parallel/test_gathernd_further.py index c3fb6a09b21..0182cc90cc9 100644 --- a/tests/ut/python/parallel/test_gathernd_further.py +++ b/tests/ut/python/parallel/test_gathernd_further.py @@ -97,7 +97,6 @@ _b = Tensor(np.ones([1, 16, 32]), dtype=ms.float32) def compile_net(net): - context.set_context(save_graphs=True) learning_rate = 0.1 momentum = 0.9 epoch_size = 2 diff --git a/tests/ut/python/parallel/test_loss_and_o2_level.py b/tests/ut/python/parallel/test_loss_and_o2_level.py index 73a44f46a9f..dc1402c0e43 100755 --- a/tests/ut/python/parallel/test_loss_and_o2_level.py +++ b/tests/ut/python/parallel/test_loss_and_o2_level.py @@ -63,7 +63,6 @@ _w1 = Tensor(np.ones([512, 128]), dtype=ms.float32) def compile_net(net): - context.set_context(save_graphs=False) learning_rate = 0.1 momentum = 0.9 epoch_size = 2 diff --git a/tests/ut/python/parallel/test_loss_scale.py b/tests/ut/python/parallel/test_loss_scale.py index ebf10b68141..bb71d8e553c 100644 --- a/tests/ut/python/parallel/test_loss_scale.py +++ b/tests/ut/python/parallel/test_loss_scale.py @@ -191,7 +191,7 @@ def test_loss_scale(): def test_loss_scale2(): - context.set_context(mode=context.GRAPH_MODE, save_graphs=False) + context.set_context(mode=context.GRAPH_MODE) context.set_auto_parallel_context(parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL, device_num=8) predict = Tensor(np.ones([64, 64]), dtype=ms.float32) label = Tensor(np.ones([64,]), dtype=ms.int32) diff --git a/tests/ut/python/parallel/test_manual_embedding_lookup.py b/tests/ut/python/parallel/test_manual_embedding_lookup.py index 1ee815ffca8..0132e5544e8 100644 --- a/tests/ut/python/parallel/test_manual_embedding_lookup.py +++ b/tests/ut/python/parallel/test_manual_embedding_lookup.py @@ -66,7 +66,6 @@ _b = Tensor(np.ones([8, 8, 8]), dtype=ms.float32) def compile_net(net): - context.set_context(save_graphs=False) optimizer = LazyAdam(net.trainable_params(), learning_rate=0.1) optimizer.sparse_opt.add_prim_attr("primitive_target", "CPU") train_net = TrainOneStepCell(net, optimizer) @@ -113,7 +112,6 @@ def 
test_normal_split_with_offset(): def test_auto_parallel_error(): - context.set_context(save_graphs=False) context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=2, global_rank=0) net = Net() with pytest.raises(RuntimeError): @@ -121,7 +119,6 @@ def test_auto_parallel_error(): def test_auto_parallel(): - context.set_context(save_graphs=False) context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=2, global_rank=0) net = Net(split_string="fake") compile_net(net) diff --git a/tests/ut/python/parallel/test_manual_gatherv2.py b/tests/ut/python/parallel/test_manual_gatherv2.py index a41bf0a53c1..fe4036ba021 100644 --- a/tests/ut/python/parallel/test_manual_gatherv2.py +++ b/tests/ut/python/parallel/test_manual_gatherv2.py @@ -60,7 +60,6 @@ _b = Tensor(np.ones([64, 8]), dtype=ms.float32) def compile_net(net): - context.set_context(save_graphs=False) optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() @@ -106,7 +105,6 @@ def test_normal_split_with_offset(): def test_auto_parallel_error(): - context.set_context(save_graphs=False) context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=2, global_rank=0) net = Net() with pytest.raises(RuntimeError): diff --git a/tests/ut/python/parallel/test_model_with_loss.py b/tests/ut/python/parallel/test_model_with_loss.py index c433ea48629..4a27890dd82 100644 --- a/tests/ut/python/parallel/test_model_with_loss.py +++ b/tests/ut/python/parallel/test_model_with_loss.py @@ -63,7 +63,6 @@ _w1 = Tensor(np.ones([512, 128]), dtype=ms.float32) def compile_net(net): - context.set_context(save_graphs=False) learning_rate = 0.1 momentum = 0.9 epoch_size = 2 diff --git a/tests/ut/python/parallel/test_model_without_loss.py b/tests/ut/python/parallel/test_model_without_loss.py index 140e7da4f8d..82a16b326b0 100644 --- a/tests/ut/python/parallel/test_model_without_loss.py +++ b/tests/ut/python/parallel/test_model_without_loss.py @@ -103,7 +103,6 @@ w3 = Tensor(np.ones([64, 64, 32]), dtype=ms.float32) def compile_net(net): - context.set_context(save_graphs=False) learning_rate = 0.1 momentum = 0.9 epoch_size = 2 diff --git a/tests/ut/python/parallel/test_neighborexchange.py b/tests/ut/python/parallel/test_neighborexchange.py index 472148511df..e3df0a7a8c5 100644 --- a/tests/ut/python/parallel/test_neighborexchange.py +++ b/tests/ut/python/parallel/test_neighborexchange.py @@ -29,7 +29,7 @@ _x2 = Tensor(np.ones([16, 32]), dtype=ms.float32) def compile_net(net): - context.set_context(mode=context.GRAPH_MODE, save_graphs=False) + context.set_context(mode=context.GRAPH_MODE) optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) train_net = TrainOneStepCell(net, optimizer) train_net.set_train() diff --git a/tests/ut/python/parallel/test_o2_level.py b/tests/ut/python/parallel/test_o2_level.py index 294e7c75411..3a17196aa9c 100644 --- a/tests/ut/python/parallel/test_o2_level.py +++ b/tests/ut/python/parallel/test_o2_level.py @@ -90,7 +90,6 @@ _w2 = Tensor(np.ones([128, 64, 1]), dtype=ms.float32) def compile_net(net): - context.set_context(save_graphs=False) learning_rate = 0.1 momentum = 0.9 epoch_size = 2 diff --git a/tests/ut/python/parallel/test_onehot_2dim.py b/tests/ut/python/parallel/test_onehot_2dim.py index 16b6c9cddb2..c9de95473c6 100644 --- a/tests/ut/python/parallel/test_onehot_2dim.py +++ b/tests/ut/python/parallel/test_onehot_2dim.py @@ -45,7 +45,7 @@ _wi = Tensor(np.ones([48, 16]), 
dtype=ms.float32) def compile_net(net): - context.set_context(mode=context.GRAPH_MODE, save_graphs=True) + context.set_context(mode=context.GRAPH_MODE) optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() diff --git a/tests/ut/python/parallel/test_pack.py b/tests/ut/python/parallel/test_pack.py index 431f80f9f83..d8ef71d9977 100644 --- a/tests/ut/python/parallel/test_pack.py +++ b/tests/ut/python/parallel/test_pack.py @@ -136,7 +136,7 @@ _x_c = Tensor(np.ones([8, 8, 8]), dtype=ms.float32) def compile_net(net): - context.set_context(mode=context.GRAPH_MODE, save_graphs=False) + context.set_context(mode=context.GRAPH_MODE) optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() @@ -146,7 +146,7 @@ def compile_net(net): def compile_net1(net): - context.set_context(mode=context.GRAPH_MODE, save_graphs=False) + context.set_context(mode=context.GRAPH_MODE) optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() @@ -156,7 +156,7 @@ def compile_net1(net): def compile_net2(net): - context.set_context(mode=context.GRAPH_MODE, save_graphs=False) + context.set_context(mode=context.GRAPH_MODE) optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() @@ -166,7 +166,7 @@ def compile_net2(net): def compile_net_con(net): - context.set_context(mode=context.GRAPH_MODE, save_graphs=False) + context.set_context(mode=context.GRAPH_MODE) optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() diff --git a/tests/ut/python/parallel/test_parallel_moe.py b/tests/ut/python/parallel/test_parallel_moe.py index 220ac512ab5..d48c11eeaaa 100644 --- a/tests/ut/python/parallel/test_parallel_moe.py +++ b/tests/ut/python/parallel/test_parallel_moe.py @@ -15,7 +15,7 @@ import numpy as np import mindspore.common.dtype as mstype import mindspore.nn as nn -from mindspore import Tensor, context +from mindspore import Tensor from mindspore.context import set_auto_parallel_context, ParallelMode from mindspore.ops import composite as C from mindspore.parallel.nn import Transformer, TransformerOpParallelConfig, MoEConfig @@ -64,7 +64,6 @@ class NetWithLossFiveInputs(nn.Cell): def test_transformer_model(): - context.set_context(save_graphs=True) set_auto_parallel_context(device_num=16, global_rank=0, full_batch=True, enable_alltoall=True, parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL) diff --git a/tests/ut/python/parallel/test_parameter_merge.py b/tests/ut/python/parallel/test_parameter_merge.py index e9e3577601a..0ab39684e2b 100644 --- a/tests/ut/python/parallel/test_parameter_merge.py +++ b/tests/ut/python/parallel/test_parameter_merge.py @@ -79,7 +79,6 @@ def clean_all_ckpt_files(folder_path): def compile_net(net): - context.set_context(save_graphs=False) learning_rate = 0.1 momentum = 0.9 epoch_size = 2 diff --git a/tests/ut/python/parallel/test_range.py b/tests/ut/python/parallel/test_range.py index 40078d4703e..293e8fa86bb 100644 --- a/tests/ut/python/parallel/test_range.py +++ b/tests/ut/python/parallel/test_range.py @@ -74,7 +74,6 @@ _w1 = Tensor(np.ones([64, 8]), dtype=ms.float32) def compile_net(net): - context.set_context(save_graphs=False) 
learning_rate = 0.1 momentum = 0.9 epoch_size = 2 diff --git a/tests/ut/python/parallel/test_reluv2.py b/tests/ut/python/parallel/test_reluv2.py index e02c950dcb9..209ef95f4f6 100644 --- a/tests/ut/python/parallel/test_reluv2.py +++ b/tests/ut/python/parallel/test_reluv2.py @@ -40,7 +40,7 @@ _x = Tensor(np.ones([32, 16, 48, 64]), dtype=ms.float32) def compile_net(net): - context.set_context(mode=context.GRAPH_MODE, save_graphs=False) + context.set_context(mode=context.GRAPH_MODE) optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() diff --git a/tests/ut/python/parallel/test_repeated_calc.py b/tests/ut/python/parallel/test_repeated_calc.py index 52104fbe1fd..1f528e5d8b6 100644 --- a/tests/ut/python/parallel/test_repeated_calc.py +++ b/tests/ut/python/parallel/test_repeated_calc.py @@ -73,7 +73,6 @@ def test_tensoradd_reshape_matmul(): strategy2 = ((8, 1), (1, 8)) net = GradWrap(NetWithLoss(Net(strategy1, strategy2))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") - context.set_context(save_graphs=False) x = Tensor(np.ones([32, 8, 16]), dtype=ms.float32) y = Tensor(np.ones([32, 8, 16]), dtype=ms.float32) @@ -99,7 +98,6 @@ def test_two_matmul(): strategy2 = ((8, 1), (1, 1)) net = GradWrap(NetWithLoss(Net(strategy1, strategy2))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") - context.set_context(save_graphs=False) x = Tensor(np.ones([128, 32]), dtype=ms.float32) y = Tensor(np.ones([32, 64]), dtype=ms.float32) diff --git a/tests/ut/python/parallel/test_reshape_optimized.py b/tests/ut/python/parallel/test_reshape_optimized.py index c6d7eb97158..e3830658b3b 100644 --- a/tests/ut/python/parallel/test_reshape_optimized.py +++ b/tests/ut/python/parallel/test_reshape_optimized.py @@ -40,7 +40,6 @@ _b = Tensor(np.ones([128, 64, 32]), dtype=ms.float32) def compile_net(net): - context.set_context(save_graphs=False) optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() diff --git a/tests/ut/python/parallel/test_reshape_skip_redistribution.py b/tests/ut/python/parallel/test_reshape_skip_redistribution.py index 872d985d3d6..97648577508 100644 --- a/tests/ut/python/parallel/test_reshape_skip_redistribution.py +++ b/tests/ut/python/parallel/test_reshape_skip_redistribution.py @@ -43,7 +43,6 @@ _x = Tensor(np.ones([64, 64]), dtype=ms.float32) _b = Tensor(np.ones([128, 64, 32]), dtype=ms.float32) def compile_net(net): - context.set_context(save_graphs=False) optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() diff --git a/tests/ut/python/parallel/test_scatter_update.py b/tests/ut/python/parallel/test_scatter_update.py index 45c2f58a2ed..1f56fa0cde1 100644 --- a/tests/ut/python/parallel/test_scatter_update.py +++ b/tests/ut/python/parallel/test_scatter_update.py @@ -40,7 +40,7 @@ class Net(nn.Cell): def test_distribute_predict(): - context.set_context(mode=context.GRAPH_MODE, save_graphs=True) + context.set_context(mode=context.GRAPH_MODE) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, full_batch=True) inputs = Tensor(np.ones([32, 64, 128]).astype(np.float32)) strategy1 = ((1, 2, 4), (1, 1), (1, 1, 2, 4)) @@ -54,7 +54,7 @@ def test_distribute_predict(): def test_scatter_update_wrong_strategy(): - 
context.set_context(mode=context.GRAPH_MODE, save_graphs=True) + context.set_context(mode=context.GRAPH_MODE) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, full_batch=True) inputs = Tensor(np.ones([32, 64, 128]).astype(np.float32)) strategy1 = ((1, 2, 4), (1, 1), (1, 1, 4, 2)) @@ -67,7 +67,7 @@ def test_scatter_update_wrong_strategy(): def test_distribute_predict_auto_parallel(): - context.set_context(mode=context.GRAPH_MODE, save_graphs=True) + context.set_context(mode=context.GRAPH_MODE) context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=8, full_batch=True) inputs = Tensor(np.ones([32, 64, 128]).astype(np.float32)) net = Net() diff --git a/tests/ut/python/parallel/test_select.py b/tests/ut/python/parallel/test_select.py index 2c05e53143a..e03053131b3 100644 --- a/tests/ut/python/parallel/test_select.py +++ b/tests/ut/python/parallel/test_select.py @@ -65,7 +65,6 @@ _w2 = Tensor(np.ones([128, 64, 32]), dtype=ms.float32) def compile_net(net): - context.set_context(save_graphs=True) learning_rate = 0.1 momentum = 0.9 epoch_size = 2 diff --git a/tests/ut/python/parallel/test_split.py b/tests/ut/python/parallel/test_split.py index 677d511765c..72525d99c23 100644 --- a/tests/ut/python/parallel/test_split.py +++ b/tests/ut/python/parallel/test_split.py @@ -75,7 +75,7 @@ _x1 = Tensor(np.ones([48, 64, 32]), dtype=ms.float32) _w2 = Tensor(np.ones([48, 64, 32]), dtype=ms.float32) def compile_net(net): - context.set_context(mode=context.GRAPH_MODE, save_graphs=False) + context.set_context(mode=context.GRAPH_MODE) optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() @@ -85,7 +85,7 @@ def compile_net(net): def compile_net1(net): - context.set_context(mode=context.GRAPH_MODE, save_graphs=False) + context.set_context(mode=context.GRAPH_MODE) optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() diff --git a/tests/ut/python/parallel/test_stridedslice.py b/tests/ut/python/parallel/test_stridedslice.py index 6532b5d351c..e229997bf47 100644 --- a/tests/ut/python/parallel/test_stridedslice.py +++ b/tests/ut/python/parallel/test_stridedslice.py @@ -67,7 +67,6 @@ _b = Tensor(np.ones([128, 64, 32]), dtype=ms.float32) def compile_net(net): - context.set_context(save_graphs=False) optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() diff --git a/tests/ut/python/parallel/test_tile.py b/tests/ut/python/parallel/test_tile.py index a53c4fbc100..f25219bcb0a 100644 --- a/tests/ut/python/parallel/test_tile.py +++ b/tests/ut/python/parallel/test_tile.py @@ -78,7 +78,6 @@ _b = Tensor(np.ones([128, 64, 32]), dtype=ms.float32) def compile_net(net, x=_b, b=_b): - context.set_context(save_graphs=True) optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() diff --git a/tests/ut/python/parallel/test_topk.py b/tests/ut/python/parallel/test_topk.py index 317da3aa04f..fc853c23b97 100644 --- a/tests/ut/python/parallel/test_topk.py +++ b/tests/ut/python/parallel/test_topk.py @@ -63,7 +63,6 @@ _w1 = Tensor(np.ones([128, 64]), dtype=ms.float32) def compile_net(net): - context.set_context(save_graphs=True) learning_rate = 0.1 momentum = 0.9 epoch_size = 2 diff --git 
a/tests/ut/python/parallel/test_train_and_eval.py b/tests/ut/python/parallel/test_train_and_eval.py index 1af8d2c27c2..d21bef1197a 100644 --- a/tests/ut/python/parallel/test_train_and_eval.py +++ b/tests/ut/python/parallel/test_train_and_eval.py @@ -52,7 +52,7 @@ _b = Tensor(np.ones([64, 64]), dtype=ms.float32) def test_train_and_eval(): - context.set_context(save_graphs=False, mode=0) + context.set_context(mode=0) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16) strategy1 = ((4, 4), (4, 4)) strategy2 = ((4, 4),) diff --git a/tests/ut/python/parallel/test_two_matmul.py b/tests/ut/python/parallel/test_two_matmul.py index 3505d406558..b2869912fbb 100644 --- a/tests/ut/python/parallel/test_two_matmul.py +++ b/tests/ut/python/parallel/test_two_matmul.py @@ -141,7 +141,6 @@ def test_matmul_forward_reduce_scatter(): return out context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0) - context.set_context(save_graphs=False) strategy1 = ((2, 2), (2, 2)) strategy2 = ((4, 2), (4, 2)) net = GradWrap(NetWithLoss(Net(strategy1, strategy2))) @@ -166,7 +165,6 @@ def test_matmul_forward_reduce_scatter_transpose(): return out context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) - context.set_context(save_graphs=False) strategy1 = ((2, 4), (2, 4)) strategy2 = ((8, 2), (8, 2)) net = GradWrap(NetWithLoss(Net(strategy1, strategy2))) diff --git a/tests/ut/python/parallel/test_uniform_candidate_sampler.py b/tests/ut/python/parallel/test_uniform_candidate_sampler.py index 405797ba2e8..c29e3d6e8c8 100644 --- a/tests/ut/python/parallel/test_uniform_candidate_sampler.py +++ b/tests/ut/python/parallel/test_uniform_candidate_sampler.py @@ -73,7 +73,7 @@ _x = Tensor(np.ones([48, 16]), dtype=ms.int32) def compile_net(net): - context.set_context(mode=context.GRAPH_MODE, save_graphs=False) + context.set_context(mode=context.GRAPH_MODE) optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) train_net = TrainOneStepCell(net, optimizer) train_net.set_auto_parallel() diff --git a/tests/ut/python/parallel/test_virtual_dataset_3_input.py b/tests/ut/python/parallel/test_virtual_dataset_3_input.py index 2fc5250ec51..2cdb99f3958 100644 --- a/tests/ut/python/parallel/test_virtual_dataset_3_input.py +++ b/tests/ut/python/parallel/test_virtual_dataset_3_input.py @@ -60,7 +60,6 @@ def test_virtualdataset_cell_3_inputs(): out = self.matmul2(out, b) return out - context.set_context(save_graphs=False) context.set_auto_parallel_context(parallel_mode="auto_parallel") context.set_auto_parallel_context(device_num=8, global_rank=0) net = GradWrap(VirtualDatasetCellTriple(NetWithLoss(Net(None, None, None)))) diff --git a/tests/ut/python/parameter_feature/test_parameter.py b/tests/ut/python/parameter_feature/test_parameter.py index 5577bd1395f..02a64ebd3d4 100644 --- a/tests/ut/python/parameter_feature/test_parameter.py +++ b/tests/ut/python/parameter_feature/test_parameter.py @@ -20,7 +20,7 @@ from mindspore import Tensor, Parameter from mindspore.nn import Cell from mindspore.ops import operations as P -context.set_context(mode=context.GRAPH_MODE, save_graphs=True) +context.set_context(mode=context.GRAPH_MODE) grad_all = C.GradOperation(get_all=True) grad_all_with_sens = C.GradOperation(sens_param=True) diff --git a/tests/ut/python/parameter_feature/test_var_grad.py b/tests/ut/python/parameter_feature/test_var_grad.py index 6b3d05a9781..498f40adb17 100644 --- 
a/tests/ut/python/parameter_feature/test_var_grad.py +++ b/tests/ut/python/parameter_feature/test_var_grad.py @@ -22,7 +22,7 @@ from mindspore.common.parameter import ParameterTuple from mindspore.nn import Cell from mindspore.ops import operations as P -context.set_context(mode=context.GRAPH_MODE, save_graphs=True) +context.set_context(mode=context.GRAPH_MODE) grad_by_list = C.GradOperation(get_by_list=True) diff --git a/tests/ut/python/pipeline/infer/test_auto_monad.py b/tests/ut/python/pipeline/infer/test_auto_monad.py index a71f340f36c..688b13003a1 100644 --- a/tests/ut/python/pipeline/infer/test_auto_monad.py +++ b/tests/ut/python/pipeline/infer/test_auto_monad.py @@ -13,7 +13,7 @@ from mindspore.common.parameter import Parameter, ParameterTuple grad_all_list = C.GradOperation(get_all=True, get_by_list=True) grad_by_list = C.GradOperation(get_by_list=True) -context.set_context(mode=context.GRAPH_MODE, save_graphs=False) +context.set_context(mode=context.GRAPH_MODE) def test_load_grad(): diff --git a/tests/ut/python/pipeline/parse/test_call_innetr_net_attr.py b/tests/ut/python/pipeline/parse/test_call_innetr_net_attr.py index 8d490a3266a..2f1948d1691 100644 --- a/tests/ut/python/pipeline/parse/test_call_innetr_net_attr.py +++ b/tests/ut/python/pipeline/parse/test_call_innetr_net_attr.py @@ -20,7 +20,7 @@ from mindspore import Tensor, Parameter from mindspore import context from mindspore.ops import composite as C -context.set_context(mode=context.GRAPH_MODE, save_graphs=True) +context.set_context(mode=context.GRAPH_MODE) class InnerInNet(nn.Cell): diff --git a/tests/ut/python/pipeline/parse/test_grammar_constraints.py b/tests/ut/python/pipeline/parse/test_grammar_constraints.py index 98f3503a05c..568a0d4bcc7 100644 --- a/tests/ut/python/pipeline/parse/test_grammar_constraints.py +++ b/tests/ut/python/pipeline/parse/test_grammar_constraints.py @@ -159,7 +159,6 @@ def test_nest_branch_with_return(): else: return 5 - context.set_context(save_graphs=True) net = NetBranchWithReturn() x = Tensor(0, mstype.int32) y = Tensor(5, mstype.int32) diff --git a/tests/ut/python/pipeline/parse/test_use_undefined_name_or_unsupported_builtin_function.py b/tests/ut/python/pipeline/parse/test_use_undefined_name_or_unsupported_builtin_function.py index a48072606ae..f3bb6422da7 100644 --- a/tests/ut/python/pipeline/parse/test_use_undefined_name_or_unsupported_builtin_function.py +++ b/tests/ut/python/pipeline/parse/test_use_undefined_name_or_unsupported_builtin_function.py @@ -20,7 +20,7 @@ from mindspore import Tensor from mindspore import context from mindspore import dtype as mstype -context.set_context(mode=context.GRAPH_MODE, save_graphs=True) +context.set_context(mode=context.GRAPH_MODE) def test_use_undefined_name(): diff --git a/tests/ut/python/pipeline/parse/test_while_param.py b/tests/ut/python/pipeline/parse/test_while_param.py index 7bd7ff96806..7853074b5d8 100644 --- a/tests/ut/python/pipeline/parse/test_while_param.py +++ b/tests/ut/python/pipeline/parse/test_while_param.py @@ -36,7 +36,7 @@ class WhileSubGraphParam(Cell): def test_while_loop_phi(): - context.set_context(mode=context.GRAPH_MODE, save_graphs=True) + context.set_context(mode=context.GRAPH_MODE) x = Tensor(0, ms.float32) y = Tensor(10, ms.float32) z = Tensor(100, ms.float32) @@ -60,7 +60,7 @@ class WhileSubGraphParam2(Cell): def test_while_loop_phi_2(): - context.set_context(mode=context.GRAPH_MODE, save_graphs=True) + context.set_context(mode=context.GRAPH_MODE) x = Tensor(0, ms.float32) y = Tensor(10, ms.float32) z = Tensor(100, 
ms.float32) @@ -85,7 +85,7 @@ class WhileSubGraphParam3(Cell): def test_while_loop_phi_3(): - context.set_context(mode=context.GRAPH_MODE, save_graphs=True) + context.set_context(mode=context.GRAPH_MODE) x = Tensor(0, ms.float32) net = WhileSubGraphParam3(x) diff --git a/tests/ut/python/pynative_mode/test_backend.py b/tests/ut/python/pynative_mode/test_backend.py index 537c58d07aa..a61072ea5ad 100644 --- a/tests/ut/python/pynative_mode/test_backend.py +++ b/tests/ut/python/pynative_mode/test_backend.py @@ -23,6 +23,7 @@ from mindspore._checkparam import args_type_check from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter from mindspore.ops import operations as P +from tests.security_utils import security_off_wrap def setup_module(): @@ -50,7 +51,7 @@ def test_vm_backend(): output = add() assert output.asnumpy().shape == (1, 3, 3, 4) - +@security_off_wrap def test_vm_set_context(): """ test_vm_set_context """ context.set_context(save_graphs=True, save_graphs_path="mindspore_ir_path", mode=context.GRAPH_MODE) diff --git a/tests/ut/python/pynative_mode/test_context.py b/tests/ut/python/pynative_mode/test_context.py index 0301385f9f7..d3171c33385 100644 --- a/tests/ut/python/pynative_mode/test_context.py +++ b/tests/ut/python/pynative_mode/test_context.py @@ -19,6 +19,8 @@ import json import pytest from mindspore import context +from mindspore._c_expression import security +from tests.security_utils import security_off_wrap # pylint: disable=W0212 @@ -38,6 +40,22 @@ def test_contex_create_context(): context._k_context = None +def test_set_save_graphs_in_security(): + """ test set save_graphs in the security mode""" + if security.enable_security(): + with pytest.raises(ValueError) as err: + context.set_context(save_graphs=True) + assert "not supported" in str(err.value) + + +def test_set_save_graphs_path_in_security(): + """ test set save_graphs_path in the security mode""" + if security.enable_security(): + with pytest.raises(ValueError) as err: + context.set_context(save_graphs_path="ir_files") + assert "not supported" in str(err.value) + + def test_switch_mode(): """ test_switch_mode """ context.set_context(mode=context.GRAPH_MODE) @@ -132,6 +150,7 @@ def test_print_file_path(): context.set_context(print_file_path="./") +@security_off_wrap def test_set_context(): """ test_set_context """ context.set_context.__wrapped__(device_id=0) diff --git a/tests/ut/python/pynative_mode/test_insert_grad_of.py b/tests/ut/python/pynative_mode/test_insert_grad_of.py index 0b9ad0b27d0..d49eab505a7 100644 --- a/tests/ut/python/pynative_mode/test_insert_grad_of.py +++ b/tests/ut/python/pynative_mode/test_insert_grad_of.py @@ -111,7 +111,7 @@ def test_print_shape_type(): def test_cell_assign(): - context.set_context(mode=context.GRAPH_MODE, save_graphs=True) + context.set_context(mode=context.GRAPH_MODE) class GradNetWrap(nn.Cell): """ GradNetWrap definition """ diff --git a/tests/ut/python/pynative_mode/test_multigraph_sink.py b/tests/ut/python/pynative_mode/test_multigraph_sink.py index cdf246e29dc..65c75b7a942 100644 --- a/tests/ut/python/pynative_mode/test_multigraph_sink.py +++ b/tests/ut/python/pynative_mode/test_multigraph_sink.py @@ -20,7 +20,7 @@ from mindspore.common.tensor import Tensor def setup_module(module): - context.set_context(mode=context.PYNATIVE_MODE, save_graphs=False, device_target="Ascend") + context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend") c1 = Tensor([2], mstype.int32) diff --git 
a/tests/ut/python/pynative_mode/test_tuple_parameter.py b/tests/ut/python/pynative_mode/test_tuple_parameter.py index 169ec67c218..d971dc2907a 100644 --- a/tests/ut/python/pynative_mode/test_tuple_parameter.py +++ b/tests/ut/python/pynative_mode/test_tuple_parameter.py @@ -12,7 +12,7 @@ def setup_module(module): class Block1(nn.Cell): - """ Define Cell with tuple input as paramter.""" + """ Define Cell with tuple input as parameter.""" def __init__(self): super(Block1, self).__init__() @@ -60,7 +60,6 @@ class Net2(nn.Cell): return res def test_net(): - context.set_context(save_graphs=True) x = Tensor(np.ones([1, 1, 3, 3]).astype(np.float32) * 2) y = Tensor(np.ones([1, 1, 3, 3]).astype(np.float32) * 3) net1 = Net1() diff --git a/tests/ut/python/utils/test_initializer.py b/tests/ut/python/utils/test_initializer.py index 427a035ce40..50a25d0c683 100644 --- a/tests/ut/python/utils/test_initializer.py +++ b/tests/ut/python/utils/test_initializer.py @@ -246,7 +246,7 @@ class Net(nn.Cell): def test_weight_shape(): - context.set_context(mode=context.GRAPH_MODE, save_graphs=True) + context.set_context(mode=context.GRAPH_MODE) a = np.arange(20).reshape(5, 4) t = Tensor(a, dtype=ms.float32) net = Net()
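
A quick sketch of the pattern the new tests rely on may help reviewers. Grounded in the diff itself: security.enable_security() is exposed from mindspore._c_expression, security_off_wrap is imported from tests.security_utils, and context.set_context(save_graphs=True) is expected to raise a ValueError whose message contains "not supported" when security is enabled. The decorator body and the _reject_graph_dump_args helper below are illustrative assumptions, not the shipped implementation.

from functools import wraps

from mindspore._c_expression import security


def security_off_wrap(func):
    """Run the wrapped test only when MindSpore is built without security.

    Assumption: approximates tests/security_utils.py; the real decorator
    may skip or xfail rather than silently return.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        if security.enable_security():
            return None  # graph/IR dumping is disabled in this build
        return func(*args, **kwargs)
    return wrapper


def _reject_graph_dump_args(**kwargs):
    """Hypothetical context-side guard matching what the new
    test_set_save_graphs_in_security and
    test_set_save_graphs_path_in_security cases expect."""
    if not security.enable_security():
        return
    if kwargs.get("save_graphs") or "save_graphs_path" in kwargs:
        raise ValueError(
            "save_graphs and save_graphs_path are not supported "
            "when the security mode is enabled.")

With a guard shaped like this, dropping save_graphs from ordinary context.set_context(mode=...) calls, as the bulk of this patch does, keeps the tests valid under both build flavors, while the few tests that genuinely exercise IR dumping (test_vm_set_context, test_set_context) are gated by the decorator instead.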