!20585 add security isolate for save_graphs
Merge pull request !20585 from huanghui/add-security-isolate-for-DumpIR
Commit bbdacd41f4
@@ -275,12 +275,14 @@ void AscendMixPrecision(const std::shared_ptr<session::KernelGraph> &kernel_graph) {
void AscendBackendIRFusionOptimization(const std::shared_ptr<session::KernelGraph> &kernel_graph) {
  auto context_ptr = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context_ptr);
#ifdef ENABLE_DUMP_IR
  bool save_graphs = context_ptr->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);
  if (save_graphs) {
    std::string file_name = "hwopt_d_ir_fusion_before_graph_" + std::to_string(kernel_graph->graph_id()) + ".ir";
    DumpIR(file_name, kernel_graph);
    DumpIRProto(kernel_graph, "before_hwopt_" + std::to_string(kernel_graph->graph_id()));
  }
#endif
  auto optimizer = std::make_shared<GraphOptimizer>();
  auto ir_fusion_pm = std::make_shared<PassManager>("ir_fusion_pm");
  ir_fusion_pm->AddPass(std::make_shared<BnSplit>());

@@ -307,10 +309,12 @@ void AscendBackendIRFusionOptimization(const std::shared_ptr<session::KernelGraph> &kernel_graph) {
  optimizer->AddPassManager(ir_fusion_pm);
  (void)optimizer->Optimize(kernel_graph);
  kernel_graph->SetExecOrderByDefault();
#ifdef ENABLE_DUMP_IR
  if (save_graphs) {
    std::string file_name = "hwopt_d_ir_fusion_after_graph_" + std::to_string(kernel_graph->graph_id()) + ".ir";
    DumpIR(file_name, kernel_graph);
  }
#endif
}

void RunOpAscendBackendIRFusionOptimization(const std::shared_ptr<session::KernelGraph> &kernel_graph) {

@@ -320,10 +324,12 @@ void RunOpAscendBackendIRFusionOptimization(const std::shared_ptr<session::KernelGraph> &kernel_graph) {
    MS_LOG(INFO) << "IRFusion is not enabled, skip";
    return;
  }
#ifdef ENABLE_DUMP_IR
  bool save_graphs = context_ptr->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);
  if (save_graphs) {
    DumpIR("hwopt_d_ir_fusion_before.ir", kernel_graph);
  }
#endif
  auto optimizer = std::make_shared<GraphOptimizer>();
  auto ir_fusion_pm = std::make_shared<PassManager>("ir_fusion_pm");
  ir_fusion_pm->AddPass(std::make_shared<InsertPlaceholderForDynamicRNN>());

@@ -354,9 +360,11 @@ void RunOpAscendBackendIRFusionOptimization(const std::shared_ptr<session::KernelGraph> &kernel_graph) {
  optimizer->AddPassManager(ir_fusion_pm);
  (void)optimizer->Optimize(kernel_graph);
  kernel_graph->SetExecOrderByDefault();
#ifdef ENABLE_DUMP_IR
  if (save_graphs) {
    DumpIR("hwopt_d_ir_fusion_after.ir", kernel_graph);
  }
#endif
}

void RunOpAscendBackendOptimization(const std::shared_ptr<session::KernelGraph> &kernel_graph) {

@@ -376,11 +384,13 @@ void RunOpAscendBackendOptimization(const std::shared_ptr<session::KernelGraph> &kernel_graph) {
void AscendBackendOptimization(const std::shared_ptr<session::KernelGraph> &kernel_graph) {
  auto context_ptr = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context_ptr);
#ifdef ENABLE_DUMP_IR
  bool save_graphs = context_ptr->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);
  if (save_graphs) {
    std::string file_name = "hwopt_d_before_graph_" + std::to_string(kernel_graph->graph_id()) + ".ir";
    DumpIR(file_name, kernel_graph);
  }
#endif
  // data layout optimization
  AscendDataLayout(kernel_graph);
  // mixed precision optimization

@@ -426,13 +436,13 @@ void AscendBackendOptimization(const std::shared_ptr<session::KernelGraph> &kernel_graph) {
  const std::vector<CNodePtr> &exec_order = kernel_graph->execution_order();
  std::string exec_order_name = "graph_exec_order." + std::to_string(kernel_graph->graph_id());
  (void)mindspore::RDR::RecordGraphExecOrder(SubModuleId::SM_OPTIMIZER, exec_order_name, exec_order);
#endif
  if (save_graphs) {
    std::string file_name = "hwopt_d_end_graph_" + std::to_string(kernel_graph->graph_id()) + ".ir";
    DumpIR(file_name, kernel_graph, true, kWholeStack);
    DumpIRProto(kernel_graph, "after_hwopt_" + std::to_string(kernel_graph->graph_id()));
    kernel_graph->DumpFuncGraph("hwopt_d_end");
  }
#endif
}

void AscendBackendUBFusionOptimization(const std::shared_ptr<session::KernelGraph> &kernel_graph) {

@@ -455,11 +465,13 @@ void AscendBackendUBFusionOptimization(const std::shared_ptr<session::KernelGraph> &kernel_graph) {
      build_manager.AscendPreBuild(kernel_graph);
    }
  }
#ifdef ENABLE_DUMP_IR
  bool save_graphs = context_ptr->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);
  if (save_graphs) {
    std::string file_name = "hwopt_d_ub_fusion_before_graph_" + std::to_string(kernel_graph->graph_id()) + ".ir";
    DumpIR(file_name, kernel_graph);
  }
#endif
  auto fusion_id_allocator = std::make_shared<FusionIdAllocator>();
  MS_EXCEPTION_IF_NULL(fusion_id_allocator);
  fusion_id_allocator->Init();

@@ -488,10 +500,12 @@ void AscendBackendUBFusionOptimization(const std::shared_ptr<session::KernelGraph> &kernel_graph) {
  optimizer->AddPassManager(ub_fusion_pm);
  (void)optimizer->Optimize(kernel_graph);
  kernel_graph->SetExecOrderByDefault();
#ifdef ENABLE_DUMP_IR
  if (save_graphs) {
    std::string file_name = "hwopt_d_ub_fusion_after_graph_" + std::to_string(kernel_graph->graph_id()) + ".ir";
    DumpIR(file_name, kernel_graph);
  }
#endif
}
}  // namespace opt
}  // namespace mindspore
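Note: the hunks above all repeat one idiom, an #ifdef ENABLE_DUMP_IR block that reads MS_CTX_SAVE_GRAPHS_FLAG and dumps the graph before and after a pass manager runs. As a minimal sketch of that idiom in one place (a hypothetical helper, not part of this patch, assuming only the MsContext and DumpIR calls already visible above):

#ifdef ENABLE_DUMP_IR
inline void DumpIfSaveGraphs(const std::string &file_name, const std::shared_ptr<session::KernelGraph> &graph) {
  auto context_ptr = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context_ptr);
  if (context_ptr->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG)) {
    DumpIR(file_name, graph);  // textual IR snapshot, e.g. hwopt_d_ir_fusion_before_graph_0.ir
  }
}
#else
// In builds without ENABLE_DUMP_IR the helper collapses to a no-op.
inline void DumpIfSaveGraphs(const std::string &, const std::shared_ptr<session::KernelGraph> &) {}
#endif

With such a helper each call site would shrink to one unguarded line; the patch instead keeps the guards inline, which leaves every dump site independently visible and grep-able.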
@@ -35,6 +35,7 @@ namespace opt {
void BackendCommonOptimization(const std::shared_ptr<session::KernelGraph> &kernel_graph) {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  MS_LOG(INFO) << "start common opt graph:" << kernel_graph->graph_id();
#ifdef ENABLE_DUMP_IR
  auto context_ptr = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context_ptr);
  bool save_graphs = context_ptr->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);

@@ -42,6 +43,7 @@ void BackendCommonOptimization(const std::shared_ptr<session::KernelGraph> &kernel_graph) {
    std::string file_name = "hwopt_common_before_graph_" + std::to_string(kernel_graph->graph_id()) + ".ir";
    DumpIR(file_name, kernel_graph);
  }
#endif
  auto optimizer = std::make_shared<GraphOptimizer>();
  auto common_pm = std::make_shared<PassManager>("common_pm");
  common_pm->AddPass(std::make_shared<ConvertConstInputToAttr>());

@@ -55,10 +57,12 @@ void BackendCommonOptimization(const std::shared_ptr<session::KernelGraph> &kernel_graph) {
  optimizer->AddPassManager(common_pm);
  (void)optimizer->Optimize(kernel_graph);
  kernel_graph->SetExecOrderByDefault();
#ifdef ENABLE_DUMP_IR
  if (save_graphs) {
    std::string file_name = "hwopt_common_after_graph_" + std::to_string(kernel_graph->graph_id()) + ".ir";
    DumpIR(file_name, kernel_graph);
  }
#endif
}

void CommonFinalOptimization(const std::shared_ptr<session::KernelGraph> &kernel_graph) {

@@ -70,6 +74,7 @@ void CommonFinalOptimization(const std::shared_ptr<session::KernelGraph> &kernel_graph) {
  optimizer->AddPassManager(pm);
  (void)optimizer->Optimize(kernel_graph);
  kernel_graph->SetExecOrderByDefault();
#ifdef ENABLE_DUMP_IR
  // Dump IR if save_graphs is set.
  auto context = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context);

@@ -78,11 +83,13 @@ void CommonFinalOptimization(const std::shared_ptr<session::KernelGraph> &kernel_graph) {
    std::string filename = "hwopt_common_final_graph_" + std::to_string(kernel_graph->graph_id()) + ".ir";
    DumpIR(filename, kernel_graph);
  }
#endif
}

void CommonUnifyMindIROptimization(const std::shared_ptr<session::KernelGraph> &kernel_graph) {
  MS_EXCEPTION_IF_NULL(kernel_graph);
  MS_LOG(INFO) << "start common unify mindir opt graph:" << kernel_graph->graph_id();
#ifdef ENABLE_DUMP_IR
  auto context_ptr = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context_ptr);
  bool save_graphs = context_ptr->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);

@@ -91,16 +98,19 @@ void CommonUnifyMindIROptimization(const std::shared_ptr<session::KernelGraph> &kernel_graph) {
      "hwopt_common_unify_mindir_before_graph_" + std::to_string(kernel_graph->graph_id()) + ".ir";
    DumpIR(file_name, kernel_graph);
  }
#endif
  auto opt = std::make_shared<GraphOptimizer>();
  auto pm = std::make_shared<PassManager>("common_unify_mindir_pm");
  pm->AddPass(std::make_shared<ConvTransposeToConvBackpropInputPass>());
  opt->AddPassManager(pm);
  (void)opt->Optimize(kernel_graph);
  kernel_graph->SetExecOrderByDefault();
#ifdef ENABLE_DUMP_IR
  if (save_graphs) {
    std::string file_name = "hwopt_common_unify_mindir_after_graph_" + std::to_string(kernel_graph->graph_id()) + ".ir";
    DumpIR(file_name, kernel_graph);
  }
#endif
}
}  // namespace opt
}  // namespace mindspore
@@ -126,6 +126,7 @@ std::string PassManager::GetPassFullname(size_t pass_id, const PassPtr &pass) const {
}

void PassManager::DumpPassIR(const FuncGraphPtr &func_graph, const std::string &pass_fullname) const {
#ifdef ENABLE_DUMP_IR
  auto context_ptr = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context_ptr);
  bool save_graphs = context_ptr->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);

@@ -137,6 +138,7 @@ void PassManager::DumpPassIR(const FuncGraphPtr &func_graph, const std::string &pass_fullname) const {
    oss << pass_fullname + ".ir";
    DumpIR(oss.str(), func_graph, true);
  }
#endif
}

bool PassManager::Run(const FuncGraphPtr &func_graph, const std::vector<PassPtr> &passes) const {

@@ -149,7 +151,9 @@ bool PassManager::Run(const FuncGraphPtr &func_graph, const std::vector<PassPtr> &passes) const {
    if (pass != nullptr) {
      pass->SetCacheManager(cache_manager_);
      changed = RunPass(func_graph, num, pass) || changed;
#ifdef ENABLE_DUMP_IR
      DumpPassIR(func_graph, GetPassFullname(num, pass));
#endif
      num++;
    }
  }
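The change layers two gates: ENABLE_DUMP_IR decides at compile time whether dump code exists at all, and save_graphs decides at run time whether it fires. A sketch of DumpPassIR with the elided middle filled in by assumption (the "if (save_graphs)" line is implied by the visible declaration and closing brace, it is not shown in the hunk):

void PassManager::DumpPassIR(const FuncGraphPtr &func_graph, const std::string &pass_fullname) const {
#ifdef ENABLE_DUMP_IR  // gate 1: compiled in?
  auto context_ptr = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context_ptr);
  bool save_graphs = context_ptr->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);
  if (save_graphs) {  // gate 2: requested at run time?
    std::ostringstream oss;
    oss << pass_fullname + ".ir";
    DumpIR(oss.str(), func_graph, true);
  }
#endif  // with ENABLE_DUMP_IR off, the function body is empty
}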
@@ -58,6 +58,7 @@ const char OUTPUT[] = "output";
// Attribute to indicate that the node is the last node in an iteration.
const char ITEREND[] = "PROFILING_ITER_END";

#ifdef ENABLE_DUMP_IR
bool IsSaveGraph() {
  auto context_ptr = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context_ptr);

@@ -85,8 +86,10 @@ void DumpGraphForDebug(NotNull<KernelGraphPtr> kg) {
    DumpAllGraphs(kg, &memo);
  }
}
#endif

void DumpExecuteOrder(NotNull<KernelGraphPtr> kg) {
#ifndef ENABLE_SECURITY
  if (!IsSaveGraph()) {
    return;
  }

@@ -135,6 +138,7 @@ void DumpExecuteOrder(NotNull<KernelGraphPtr> kg) {
    index++;
  }
  fout.close();
#endif
}

// Return kNoLabel when the label id attribute is not set for the graph.

@@ -1859,7 +1863,9 @@ void AscendAutoMonad::Run() {
  kernel_graph_->set_recursive_call(context.HasRecursiveCall());
  kernel_graph_->set_subgraph_multi_call(context.HasSubgraphMultiCall());
  MS_LOG(DEBUG) << "Ascend auto-monad finish.";
#ifdef ENABLE_DUMP_IR
  DumpGraphForDebug(kernel_graph_);
#endif
}

void AscendAutoMonad::GenerateExecuteOrder() {

@@ -1868,7 +1874,9 @@ void AscendAutoMonad::GenerateExecuteOrder() {
  ExecuteOrderGenerator generator(context, kernel_graph_.get());
  generator.Run();
  MS_LOG(DEBUG) << "Ascend generate execute order finish.";
#ifndef ENABLE_SECURITY
  DumpExecuteOrder(kernel_graph_);
#endif
}
}  // namespace session
}  // namespace mindspore
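Two different macros are doing two different jobs in this file. ENABLE_DUMP_IR gates optional debugging output that a build opts into, while ENABLE_SECURITY marks a hardened build ("-s on") from which debug artifacts must be stripped entirely. Schematically (my reading of the diff, not literal code from it):

#ifdef ENABLE_DUMP_IR
// Exists only when the build opts in to IR dumping.
void DebugDumpPath() { /* DumpIR / DumpIRProto / DumpAllGraphs */ }
#endif

#ifndef ENABLE_SECURITY
// Removed outright when the build opts in to security hardening.
void ExecuteOrderDumpPath() { /* write the execute-order text file */ }
#endif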
@@ -337,12 +337,14 @@ void AscendSession::UnifyMindIR(const KernelGraphPtr &graph) {
  SessionBasic::UnifyMindIR(graph);
  auto context_ptr = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context_ptr);
#ifdef ENABLE_DUMP_IR
  bool save_graphs = context_ptr->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);
  if (save_graphs) {
    std::string file_name = "hwopt_d_before_unify_mindir_graph_" + std::to_string(graph->graph_id()) + ".ir";
    DumpIR(file_name, graph);
    DumpIRProto(graph, "before_unify_mindir_hwopt_" + std::to_string(graph->graph_id()));
  }
#endif
  auto optimizer = std::make_shared<opt::GraphOptimizer>();
  auto unify_mindir_pm = std::make_shared<opt::PassManager>("unify_mindir_pm");
  unify_mindir_pm->AddPass(std::make_shared<opt::SpaceToBatchNDAttrUpdate>());

@@ -384,10 +386,12 @@ void AscendSession::UnifyMindIR(const KernelGraphPtr &graph) {
  optimizer->AddPassManager(unify_mindir_pm);
  (void)optimizer->Optimize(graph);
  graph->SetExecOrderByDefault();
#ifdef ENABLE_DUMP_IR
  if (save_graphs) {
    std::string file_name = "hwopt_d_after_unify_mindir_graph_" + std::to_string(graph->graph_id()) + ".ir";
    DumpIR(file_name, graph);
  }
#endif
}

void AscendSession::LoadInputData(const std::shared_ptr<KernelGraph> &kernel_graph,

@@ -533,6 +537,7 @@ GraphId AscendSession::CompileGraphImpl(NotNull<FuncGraphPtr> func_graph) {
  AnfAlgo::InsertMakeTupleForOutput(NOT_NULL(root_graph));
  // validate the root graph, including generating the execute order and so on
  RootGraphExecutorValidate(NOT_NULL(root_graph));
#ifdef ENABLE_DUMP_IR
  // dump graph before removing nop nodes
  auto context_ptr = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context_ptr);

@@ -540,6 +545,7 @@ GraphId AscendSession::CompileGraphImpl(NotNull<FuncGraphPtr> func_graph) {
  if (save_graphs) {
    DumpIRProto(root_graph, "before_removeNop_" + std::to_string(graph_sum_));
  }
#endif

  // adjust kernel
  AdjustKernel(root_graph);

@@ -659,6 +665,7 @@ void AscendSession::CompileChildGraph(const KernelGraphPtr &child_graph) {
  MS_LOG(INFO) << "CompileChildGraph " << child_graph->ToString();
  opt::AscendBackendIRFusionOptimization(child_graph);
  child_graph->SetExecOrderByDefault();
#ifdef ENABLE_DUMP_IR
  auto context_ptr = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context_ptr);
  bool save_graphs = context_ptr->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);

@@ -666,12 +673,15 @@ void AscendSession::CompileChildGraph(const KernelGraphPtr &child_graph) {
    std::string file_name = "select_kernel_before_graph_" + std::to_string(child_graph->graph_id()) + ".ir";
    DumpIR(file_name, child_graph);
  }
#endif
  // select kernel build info
  SelectKernel(*child_graph);
#ifdef ENABLE_DUMP_IR
  if (save_graphs) {
    std::string file_name = "select_kernel_after_graph_" + std::to_string(child_graph->graph_id()) + ".ir";
    DumpIR(file_name, child_graph);
  }
#endif
  // optimize graph
  HardwareOptimize(child_graph);
  // assign static memory of parameters

@@ -1200,12 +1210,14 @@ void AscendSession::AdjustKernel(const std::shared_ptr<KernelGraph> &kernel_graph) {
  BuildKernel(kernel_graph);
  device::ascend::KernelBuildPreprocess(kernel_graph.get());
  device::KernelAdjust::GetInstance().InsertSwitchLoop(kernel_graph);
#ifdef ENABLE_DUMP_IR
  auto context_ptr = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context_ptr);
  bool save_graphs = context_ptr->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);
  if (save_graphs) {
    DumpIR("after_adjust_kernel.ir", kernel_graph);
  }
#endif
  MS_LOG(INFO) << "Finish!";
}

@@ -1681,7 +1693,7 @@ void AscendSession::IrFusionPass(const NotNull<KernelGraphPtr> graph, NotNull<st
  memo->insert(graph.get());
  opt::AscendBackendIRFusionOptimization(graph);
  graph->SetExecOrderByDefault();

#ifdef ENABLE_DUMP_IR
  auto context_ptr = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context_ptr);
  bool save_graphs = context_ptr->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);

@@ -1689,6 +1701,7 @@ void AscendSession::IrFusionPass(const NotNull<KernelGraphPtr> graph, NotNull<st
    std::string file_name = "select_kernel_before_graph_" + std::to_string(graph->graph_id()) + ".ir";
    DumpIR(file_name, graph.get());
  }
#endif

  for (auto &child_graph : graph->child_graph_order()) {
    IrFusionPass(NOT_NULL(child_graph.lock()), memo);

@@ -1747,7 +1760,7 @@ void AscendSession::RecurseSelectKernelInfo(NotNull<KernelGraphPtr> graph,
      (*reduce_precision_count)++;
    }
  }

#ifdef ENABLE_DUMP_IR
  auto context_ptr = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context_ptr);
  bool save_graphs = context_ptr->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);

@@ -1755,6 +1768,7 @@ void AscendSession::RecurseSelectKernelInfo(NotNull<KernelGraphPtr> graph,
    std::string file_name = "select_kernel_after_graph_" + std::to_string(graph->graph_id()) + ".ir";
    DumpIR(file_name, graph.get());
  }
#endif
  MS_LOG(INFO) << "Finish selecting kernel info in graph: " << graph->graph_id();
}
@@ -401,17 +401,19 @@ GraphId GPUSession::CompileGraphImpl(KernelGraphPtr graph) {
  // Prepare ms context info for dump .pb graph
  auto context_ptr = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context_ptr);
  bool save_graphs = context_ptr->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);
  auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_);
  MS_EXCEPTION_IF_NULL(runtime_instance);
#ifndef ENABLE_SECURITY
  auto &json_parser = DumpJsonParser::GetInstance();
  json_parser.Parse();
#endif
#ifdef ENABLE_DUMP_IR
  bool save_graphs = context_ptr->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);
  // Dump .pb graph before graph optimization
  if (save_graphs) {
    DumpIRProto(graph, "before_opt_" + std::to_string(graph->graph_id()));
  }
#endif
  // Graph optimization irrelevant to device data format
  Optimize(graph);
  // Select kernel build info

@@ -429,10 +431,12 @@ GraphId GPUSession::CompileGraphImpl(KernelGraphPtr graph) {
#endif
  // Assign CUDA streams
  AssignStream(graph);
#ifdef ENABLE_DUMP_IR
  // Dump .pb graph before remove nop nodes
  if (save_graphs) {
    DumpIRProto(graph, "before_removeNop_" + std::to_string(graph->graph_id()));
  }
#endif
  // Update Graph Dynamic Shape Attr.
  UpdateGraphDynamicShapeAttr(NOT_NULL(graph));
  graph->UpdateGraphDynamicAttr();

@@ -454,9 +458,11 @@ GraphId GPUSession::CompileGraphImpl(KernelGraphPtr graph) {
  // Get summary nodes.
  SetSummaryNodes(graph.get());
  // Dump .pb graph after graph optimization
#ifdef ENABLE_DUMP_IR
  if (save_graphs) {
    DumpIRProto(graph, "after_opt_" + std::to_string(graph->graph_id()));
  }
#endif
#ifndef ENABLE_SECURITY
  if (json_parser.e2e_dump_enabled()) {
    graph->set_root_graph_id(graph->graph_id());
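One detail worth noticing in the first hunk above: the "bool save_graphs" declaration moves inside #ifdef ENABLE_DUMP_IR (the first copy shown is the removed line, the second the added one). When the macro is off, every reader of the variable is compiled out, so an unconditional declaration would be dead and, depending on warning flags, trip -Wunused-variable. A reduced illustration of the problem the move avoids; GetFlag and Dump are hypothetical stand-ins:

bool GetFlag();  // stand-in for context_ptr->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG)
void Dump();     // stand-in for the DumpIRProto calls

void Compile() {
  bool save_graphs = GetFlag();  // unused, and warned about, when ENABLE_DUMP_IR is off
#ifdef ENABLE_DUMP_IR
  if (save_graphs) {
    Dump();
  }
#endif
}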
@@ -1566,7 +1566,9 @@ std::shared_ptr<KernelGraph> SessionBasic::ConstructKernelGraph(const FuncGraphP
    }
    // Create cnode
    if (!CreateCNodeOfKernelGraph(node, graph.get())) {
#ifdef ENABLE_DUMP_IR
      DumpIR("construct_kernel_graph_fail.ir", func_graph);
#endif
      MS_LOG(EXCEPTION) << "Construct func graph " << func_graph->ToString() << " failed."
                        << trace::DumpSourceLines(node);
    }
@@ -429,12 +429,14 @@ void GetEvalStackInfo(std::ostringstream &oss) {
    MS_LOG(INFO) << "The analysis information stack is empty.";
    return;
  }
  oss << "\nThe function call stack";
#ifndef ENABLE_SECURITY
  std::string file_name = GetEvalFailDatPath();
  auto ret = OutputAnalyzedGraphWithType(file_name);
  oss << "\nThe function call stack";
  if (ret) {
    oss << " (See file '" << file_name << "' for more details)";
  }
#endif
  oss << ":\n";

  int index = 0;
@@ -761,7 +761,9 @@ CNodePtr GetJUser(const NodeUsersMap &node_user_map, const CNodePtr &cnode, int
  for (auto &user : j_users) {
    user_info << " user: " << user.first->DebugString() << ", index: " << user.second << "\n";
  }
#ifdef ENABLE_DUMP_IR
  DumpIR("J_User_Ex_" + cnode->func_graph()->ToString() + ".ir", cnode->func_graph());
#endif
  MS_LOG(EXCEPTION) << "Incorrect J CNode user size: " << size << ", of {" << cnode->DebugString(2) << "/" << index
                    << "}\nUser Info:\n"
                    << user_info.str();
@@ -42,22 +42,28 @@ FuncGraphPtr PartialEliminateOptPass(const ResourcePtr &resource, const FuncGrap
}

FuncGraphPtr LiftFv(const pipeline::ResourceBasePtr &resource, const FuncGraphPtr &func_graph) {
#ifdef ENABLE_DUMP_IR
  bool save_graphs_flag = MsContext::GetInstance()->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);
  if (save_graphs_flag) {
    DumpIR("before_lift_" + func_graph->ToString() + ".ir", func_graph);
  }
#endif
  FuncGraphPtr new_fg = LiftingClone(func_graph);
#ifdef ENABLE_DUMP_IR
  if (save_graphs_flag) {
    DumpIR("after_lift_" + new_fg->ToString() + ".ir", new_fg);
  }
#endif
  auto new_res = std::dynamic_pointer_cast<pipeline::Resource>(resource);
  if (new_res == nullptr) {
    MS_LOG(EXCEPTION) << "Parameter resources is not a pipeline::Resource";
  }
  auto opt_fg = PartialEliminateOptPass(new_res, new_fg);
#ifdef ENABLE_DUMP_IR
  if (save_graphs_flag) {
    DumpIR("after_opt_" + opt_fg->ToString() + ".ir", opt_fg);
  }
#endif
  return opt_fg;
}
}  // namespace
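LiftFv now carries three copies of the same guard block. If the repetition ever becomes a maintenance burden, a local lambda could collapse it; this is purely illustrative and not part of the patch:

auto dump_if_enabled = [](const std::string &name, const FuncGraphPtr &fg) {
#ifdef ENABLE_DUMP_IR
  if (MsContext::GetInstance()->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG)) {
    DumpIR(name, fg);
  }
#endif
};
dump_if_enabled("before_lift_" + func_graph->ToString() + ".ir", func_graph);
FuncGraphPtr new_fg = LiftingClone(func_graph);
dump_if_enabled("after_lift_" + new_fg->ToString() + ".ir", new_fg);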
@@ -365,10 +365,12 @@ FuncGraphPtr KPynativeCellImpl::Finish(const AnfNodePtrList &weights, bool grad_
  SetOutput(weights, grad_inputs, grad_weights);
  // Replace parameters of the primal funcgraph with parameters of tape_.
  ReplacePrimalParameter(weights, has_sens_arg);
#ifdef ENABLE_DUMP_IR
  auto save_graphs_flg = MsContext::GetInstance()->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);
  if (save_graphs_flg) {
    DumpIR("before_final_opt.ir", tape_);
  }
#endif
  return tape_;
}
@@ -299,7 +299,7 @@ bool SubstitutionList::ApplySubstitutionsToIR(const OptimizerPtr &optimizer, con
    bool change = ApplySubstitutionToIR(optimizer, func_graph, substitution);
    changes = changes || change;
    loop = loop || change;

#ifdef ENABLE_DUMP_IR
    static const auto enable_dump_pass_ir = (common::GetEnv("ENV_DUMP_PASS_IR") == "1");
    if (enable_dump_pass_ir && MsContext::GetInstance()->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG)) {
      auto fg_name = optimizer->name() + "_r" + std::to_string(optimizer->CurPass_.counter) + "_" +

@@ -310,6 +310,7 @@ bool SubstitutionList::ApplySubstitutionsToIR(const OptimizerPtr &optimizer, con
        ExportIR(fg_name + ".dat", func_graph);
      }
    }
#endif

    // Record the status of each substitution
    if (optimizer->is_on_debug_) {
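Per-substitution dumps are double-gated: the ENV_DUMP_PASS_IR environment variable is read once per process (the static const local), while the save_graphs context flag is consulted on every substitution. Condensed sketch of the combined predicate, using only the calls visible in the hunk:

static const auto enable_dump_pass_ir = (common::GetEnv("ENV_DUMP_PASS_IR") == "1");
bool dump_this_pass =
    enable_dump_pass_ir && MsContext::GetInstance()->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);

So a per-pass IR trace requires exporting ENV_DUMP_PASS_IR=1 in the environment and enabling save_graphs in the context, and it only exists at all in a build compiled with ENABLE_DUMP_IR.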
@@ -194,6 +194,7 @@ class Optimizer : public std::enable_shared_from_this<Optimizer> {
        }
      };
      use_profile ? (WITH(MsProfile::GetProfile()->Step(pass_names_[i])) opt_func) : opt_func();
#ifdef ENABLE_DUMP_IR
      static const auto enable_dump_pass_ir = (common::GetEnv("ENV_DUMP_PASS_IR") == "1");
      if (enable_dump_pass_ir && MsContext::GetInstance()->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG)) {
        auto fg_name =

@@ -206,6 +207,7 @@ class Optimizer : public std::enable_shared_from_this<Optimizer> {
        }
        MS_LOG(DEBUG) << "Dump " << pass_names_[i] << " func graph.";
      }
#endif
    }
  };
  use_profile ? (WITH(MsProfile::GetProfile()->Lap(counter)) run_runc) : run_runc();
@@ -46,11 +46,13 @@ std::vector<PrimitivePtr> FindPrimtive(const FuncGraphPtr &graph, const std::str
}

void DumpGraph(const FuncGraphPtr &root, const std::string &name) {
#ifdef ENABLE_DUMP_IR
  if (MsContext::GetInstance()->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG)) {
    draw::Draw(name + ".dot", root);
    DumpIR(name + ".ir", root);
    ExportIR(name + ".dat", root);
  }
#endif
}

// Return true if the cnode is in a for-loop and loop_index indicates the i-th loop;
@@ -83,10 +83,11 @@ bool StepAutoParallel(const FuncGraphPtr &root, const opt::OptimizerPtr &) {
    0
  }, end_time{0};
  (void)gettimeofday(&start_time, nullptr);

#ifdef ENABLE_DUMP_IR
  if (MsContext::GetInstance()->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG)) {
    draw::Draw(STEP_AUTO_PARALLEL_BEGIN, root);
  }
#endif
  MS_LOG(INFO) << "Now entering step auto parallel";
  TOTAL_OPS = 0;
  AnfNodePtr ret = root->get_return();
@@ -565,6 +565,7 @@ bool OptimizeAction(const ResourcePtr &res, const std::vector<PassItem> &passes)
    if (!result) {
      MS_LOG(EXCEPTION) << "Pass running to end, failed in pass:" << pass.first;
    }
#ifdef ENABLE_DUMP_IR
    if (MsContext::GetInstance()->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG) && res->func_graph() != nullptr) {
      auto fg_name = "opt_pass_" + std::to_string(counter) + "_" + pass.first;
      auto func_graph = res->func_graph();

@@ -574,6 +575,7 @@ bool OptimizeAction(const ResourcePtr &res, const std::vector<PassItem> &passes)
      ExportIR(fg_name + ".dat", func_graph);
      MS_LOG(DEBUG) << "Dump " << fg_name << " func graph.";
    }
#endif
    counter++;
    MS_LOG(DEBUG) << "Pass " << pass.first << " end.";
  };
@@ -34,6 +34,7 @@
#include "debug/anf_ir_dump.h"
#include "debug/dump_proto.h"
#include "debug/anf_ir_utils.h"
#include "debug/common.h"
#include "utils/config_manager.h"
#include "utils/convert_utils.h"
#include "utils/convert_utils_py.h"

@@ -45,7 +46,6 @@
#include "backend/session/executor_manager.h"
#include "debug/trace.h"
#include "debug/draw.h"
#include "debug/common.h"
#include "pipeline/pynative/pynative_execute.h"
#include "frontend/optimizer/py_pass_manager.h"
#include "pybind_api/pybind_patch.h"

@@ -909,7 +909,6 @@ void Pipeline::Run(const std::string &phase) {
    }
    MS_LOG(INFO) << "Recording FuncGraph in pipeline end.";
  }
#endif

  if (MsContext::GetInstance()->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG) && graph != nullptr) {
    user_graph = graph;

@@ -926,6 +925,7 @@ void Pipeline::Run(const std::string &phase) {
      // generate IR file in a heavily commented format, which can also be reloaded
      ExportIR(base_name + ".dat", graph);
    }
#endif
    i++;
#ifdef ENABLE_TIMELINE
    dump_time.Record(action.first, GetTime(), false);

@@ -937,9 +937,11 @@ void Pipeline::Run(const std::string &phase) {
  MsProfile::Reset();
#endif

#ifdef ENABLE_DUMP_IR
  if (MsContext::GetInstance()->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG) && (user_graph != nullptr)) {
    draw::DrawUserFuncGraph("ModelDigraph.dot", user_graph);
  }
#endif
  MS_LOG(INFO) << "End";
}

@@ -1345,12 +1347,14 @@ FuncGraphPtr LoadMindIR(const std::string &file_name, char *dec_key, const size_
                        const std::string &dec_mode) {
  auto func_graph =
    mindspore::LoadMindIR(file_name, false, reinterpret_cast<unsigned char *>(dec_key), key_len, dec_mode);
#ifdef ENABLE_DUMP_IR
  auto context_ptr = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context_ptr);
  bool save_graphs = context_ptr->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);
  if (save_graphs) {
    DumpIR("load.ir", func_graph);
  }
#endif
  return func_graph;
}
@@ -213,12 +213,13 @@ bool AddDFGraph(const std::map<std::string, ExecutorInfoPtr> &info, const py::di
    MS_LOG(ERROR) << "Convert df graph failed, err:" << converter.ErrCode();
    return false;
  }

#ifdef ENABLE_DUMP_IR
  if (MsContext::GetInstance()->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG)) {
    converter.DrawComputeGraph(GetSaveGraphsPathName("ge_graph.dot"));                      // for debug
    converter.DrawInitGraph(GetSaveGraphsPathName("init_graph.dot"));                       // for debug
    converter.DrawSaveCheckpointGraph(GetSaveGraphsPathName("save_checkpoint_graph.dot"));  // for debug
  }
#endif
  std::string init_graph = "init_subgraph." + net_id;
  std::string checkpoint_name = "save." + net_id;
  if (phase.find("train") != std::string::npos) {

@@ -243,11 +244,12 @@ FuncGraphPtr BuildDFGraph(const std::map<std::string, ExecutorInfoPtr> &info, co
    MS_LOG(EXCEPTION) << "No phase in executor:" << GetPhasePrefix(phase);
  }
  FuncGraphPtr anf_graph = info.at(phase)->func_graph;

#ifdef ENABLE_DUMP_IR
  if (MsContext::GetInstance()->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG)) {
    draw::Draw("anf_graph.dot", anf_graph);  // for debug
    DumpIR("anf_graph.ir", anf_graph, true);
  }
#endif

  if (!AddDFGraph(info, init_params, phase, broadcast_params)) {
    MS_LOG(ERROR) << "GenConvertor failed";
@@ -1985,9 +1985,11 @@ std::string GradExecutor::GetCellId(const py::object &cell, const py::args &args
}

void GradExecutor::DumpGraphIR(const std::string &filename, const FuncGraphPtr &graph) {
#ifdef ENABLE_DUMP_IR
  if (MsContext::GetInstance()->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG)) {
    DumpIR(filename, graph);
  }
#endif
}

inline bool GradExecutor::IsNestedGrad() const {

@@ -2323,7 +2325,9 @@ void GradExecutor::EndGraphInner(py::object *ret, const py::object &cell, const
  // Only dump the last forward graph
  if (MsContext::GetInstance()->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG) && is_top_cell_end) {
    curr_g_->set_output(GetObjNode(out, out_id));
#ifdef ENABLE_DUMP_IR
    DumpIR("fg.ir", curr_g_);
#endif
  }

  // Reset grad flag and update output node of top cell
@@ -44,7 +44,6 @@ bool TaskGenerator::GenTasks(const std::vector<CNodePtr> &anf_node_list, std::ve
#ifdef ENABLE_DUMP_IR
  string task_info_name = "task_info_graph." + std::to_string(graph_id);
  (void)mindspore::RDR::RecordTaskDebugInfo(SUBMODULE_ID, task_info_name, task_debug_info_list_);
#endif
  auto context_ptr = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(context_ptr);
  bool save_graphs = context_ptr->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);

@@ -54,6 +53,7 @@ bool TaskGenerator::GenTasks(const std::vector<CNodePtr> &anf_node_list, std::ve
    DumpTaskInfo(file_path);
#endif
  }
#endif
  return true;
}
@@ -316,12 +316,13 @@ GraphId GraphCompiler::CompileGraphImpl(const KernelGraphPtr &graph, const Devic
  MS_EXCEPTION_IF_NULL(device_context);
  const auto &ms_context = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(ms_context);

#ifdef ENABLE_DUMP_IR
  bool save_graphs = ms_context->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG);
  // Dump .pb graph before graph optimization.
  if (save_graphs) {
    DumpIRProto(graph, "before_opt_" + std::to_string(graph->graph_id()));
  }
#endif

  MS_LOG(INFO) << "Get graph outputs before optimizer, graph id: " << graph->graph_id();
  auto outputs_before_optimizer = AnfAlgo::GetAllOutputWithIndex(graph->output());

@@ -353,11 +354,12 @@

  session_->SetSummaryNodes(graph.get());
  SetSummaryNodesRefCount(graph.get());

#ifdef ENABLE_DUMP_IR
  // Dump .pb graph after graph optimization.
  if (save_graphs) {
    DumpIRProto(graph, "after_opt_" + std::to_string(graph->graph_id()));
  }
#endif

#ifdef ENABLE_DEBUGGER
  auto debugger = Debugger::GetInstance();
@@ -1370,10 +1370,12 @@ void DfGraphConvertor::ProcessSubgraph(AnfNodePtr node, const std::vector<AnfNod
  converter.use_inputs_ = true;
  converter.inputs_ = inputs;
  (void)converter.ConvertAllNode().BuildGraph();
#ifdef ENABLE_DUMP_IR
  std::string name = graph_node->ToString() + "_ge_graph.dot";
  if (MsContext::GetInstance()->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG)) {
    converter.DrawComputeGraph(name);
  }
#endif
  branches_map_[node.get()] = *(converter.df_graph_);
}
@@ -76,6 +76,7 @@ class DfGraphConvertor {
  static void RegisterAdapter(const std::string &name, OpAdapterPtr train_adpt, OpAdapterPtr infer_adpt);

  void DrawComputeGraph(const std::string &name) {
#ifndef ENABLE_SECURITY
    std::ofstream fout(name);
    if (!fout.is_open()) {
      MS_LOG(ERROR) << "Open file '" << name << "' failed!"

@@ -84,8 +85,11 @@ class DfGraphConvertor {
    }
    fout << compute_sout_.str();
    fout.close();
#endif
  }

  void DrawInitGraph(const std::string &name) {
#ifndef ENABLE_SECURITY
    std::ofstream fout(name);
    if (!fout.is_open()) {
      MS_LOG(ERROR) << "Open file '" << name << "' failed!"

@@ -94,6 +98,7 @@ class DfGraphConvertor {
    }
    fout << init_sout_.str();
    fout.close();
#endif
  }
  void DrawSaveCheckpointGraph(const std::string &name) {
    std::ofstream fout(name);
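In a security build the .dot writers keep their signatures but lose their bodies, so no caller has to change. The effective shape of DrawComputeGraph when ENABLE_SECURITY is defined would be roughly the following sketch; in practice the now-unused parameter may need a cast to stay warning-clean, which the hunk does not show:

void DrawComputeGraph(const std::string &name) {
  (void)name;  // entire file-writing body compiled out; the method becomes a no-op
}

Note that DrawSaveCheckpointGraph, visible at the bottom of the hunk, still opens its ofstream outside any guard in the lines shown here.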
@@ -32,8 +32,14 @@ std::map<std::string, MsBackendPolicy> MsContext::policy_map_ = {{"ge", kMsBacke
                                                                 {"vm_prior", kMsBackendVmPrior}};

MsContext::MsContext(const std::string &policy, const std::string &target) {
#ifndef ENABLE_SECURITY
  set_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG, false);
  set_param<std::string>(MS_CTX_SAVE_GRAPHS_PATH, ".");
#else
  // Need to set default values in the arrays even when running in security mode.
  bool_params_[MS_CTX_SAVE_GRAPHS_FLAG - MS_CTX_TYPE_BOOL_BEGIN] = false;
  string_params_[MS_CTX_SAVE_GRAPHS_PATH - MS_CTX_TYPE_STRING_BEGIN] = ".";
#endif
  set_param<std::string>(MS_CTX_PYTHON_EXE_PATH, "python");
  set_param<std::string>(MS_CTX_KERNEL_BUILD_SERVER_DIR, "");
  set_param<bool>(MS_CTX_ENABLE_DUMP, false);
@@ -199,6 +199,11 @@ class MsContext {
// set method implementation for type bool/int/uint32_t/float/std::string
template <>
inline void MsContext::set_param<bool>(MsCtxParam param, const bool &value) {
#ifdef ENABLE_SECURITY
  if (param == MS_CTX_SAVE_GRAPHS_FLAG) {
    MS_EXCEPTION(ValueError) << "The save_graphs is not supported, please recompile the source without '-s on'.";
  }
#endif
  bool_params_[param - MS_CTX_TYPE_BOOL_BEGIN] = value;
}

@@ -219,6 +224,11 @@ inline void MsContext::set_param<float>(MsCtxParam param, const float &value) {
template <>
inline void MsContext::set_param<std::string>(MsCtxParam param, const std::string &value) {
#ifdef ENABLE_SECURITY
  if (param == MS_CTX_SAVE_GRAPHS_PATH) {
    MS_EXCEPTION(ValueError) << "The save_graphs is not supported, please recompile the source without '-s on'.";
  }
#endif
  if (seter_ != nullptr && param == MS_CTX_DEVICE_TARGET) {
    MS_LOG(INFO) << "ms set context device target:" << value;
    seter_(value);
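Taken together with the constructor change above: set_param now rejects the two save_graphs parameters in security builds, which is exactly why the constructor writes bool_params_/string_params_ directly instead of going through set_param to seed the defaults. Condensed effect (a sketch, not patch code):

// In a build compiled with ENABLE_SECURITY:
MsContext::GetInstance()->set_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG, true);
// -> ValueError: The save_graphs is not supported, please recompile the source without '-s on'.

// The constructor seeds defaults by direct array write, bypassing the check:
bool_params_[MS_CTX_SAVE_GRAPHS_FLAG - MS_CTX_TYPE_BOOL_BEGIN] = false;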
@@ -1268,7 +1268,6 @@ def use_build_train_network_check_cast_num(network, level, inputs, label, cast_n
class AssignNet(Cell):
    def __init__(self):
        super().__init__()
        #self._save_graphs(save_graph_flag=True, save_graph_path=".")
        self.relu = ReLU()
        self.mean = P.ReduceMean(keep_dims=False)
        self.assign_sub = P.AssignSub()

@@ -1281,14 +1280,14 @@ class AssignNet(Cell):
        x = self.mean(x, (2, 3))
        return x


@security_off_wrap
def test_auto_mixed_precision_train_1(pynative_save_graphs):
    net = AssignNet()
    input32 = Tensor(np.ones([1, 3, 2, 2]).astype(np.float32))
    label32 = Tensor(np.zeros([1, 3]).astype(np.float32))
    use_build_train_network_check_cast_num(net, "O0", input32, label32, 0)


@security_off_wrap
def test_auto_mixed_precision_train_2(pynative_save_graphs):
    net = AssignNet()
    input32 = Tensor(np.ones([1, 3, 2, 2]).astype(np.float32))

@@ -1299,7 +1298,6 @@ def test_auto_mixed_precision_train_2(pynative_save_graphs):
class MixControlNet(Cell):
    def __init__(self, in_channel, x):
        super().__init__()
        #self._save_graphs(save_graph_flag=True, save_graph_path=".")
        self.biasadd = P.BiasAdd()
        self.equal = P.Equal()
        self.addn = P.AddN()

@@ -1366,7 +1364,7 @@ def use_build_train_network_controlflow_check_cast_num(network, level, input_x,
    assert len(castnum) == cast_num
    return out_me


@security_off_wrap
def test_auto_mixed_precision_controlflow_auto(pynative_save_graphs):
    net = MixControlNet(3, 5)
    input_x = Tensor(

@@ -1404,7 +1402,6 @@ def test_if_cast():

        return out

    context.set_context(save_graphs=False)
    net = Net(True)
    beta1 = Tensor(np.array([2]).astype(np.float32))
    beta2 = Tensor(np.array([10]).astype(np.float32))
@@ -28,6 +28,7 @@ from mindspore import context, Tensor
from mindspore.common import ParameterTuple
from mindspore.common.parameter import Parameter
from mindspore.ops.composite import GradOperation
from tests.security_utils import security_off_wrap

context.set_context(mode=context.GRAPH_MODE, device_target="GPU")

@@ -128,7 +129,7 @@ class SideEffectCastAll(Cell):
        out_b = self.cast(self.parameter_b, self.dtype)
        return out_a, out_b


@security_off_wrap
def test_side_effect_castall():
    clear_files()
    context.set_context(mode=context.GRAPH_MODE, save_graphs=True)

@@ -333,7 +334,7 @@ class InplaceNet(Cell):
        output = self.add(tmp_c1, tmp_c2)
        return output


@security_off_wrap
def test_ir_fusion_inplace_bn_conv_conv():
    clear_files()
    context.set_context(mode=context.GRAPH_MODE, save_graphs=True)

@@ -458,7 +459,7 @@ def use_build_train_network_controlflow_check_cast_num(network, level, input_x,
    assert len(castnum) == cast_num
    return out_me


@security_off_wrap
def test_auto_mixed_precision_controlflow_auto():
    context.set_context(mode=context.PYNATIVE_MODE, save_graphs=True)
    net = MixControlNet(3, 5)

@@ -472,7 +473,7 @@ def test_auto_mixed_precision_controlflow_auto():
    use_build_train_network_controlflow_check_cast_num(net, "auto", input_x,
                                                       label, cast_num)


@security_off_wrap
def test_updatestate_between_assigns():
    class UpdateState_Assigns(Cell):
        def __init__(self):

@@ -497,7 +498,7 @@ def test_updatestate_between_assigns():
    updatestate_num = re.findall('UpdateState', content)
    assert len(updatestate_num) == 1


@security_off_wrap
def test_updatestate_between_maketuple_assign():
    class UpdateState_MakeTuple_Assign(Cell):
        def __init__(self):

@@ -524,7 +525,7 @@ def test_updatestate_between_maketuple_assign():
    updatestate_num = re.findall('UpdateState', content)
    assert len(updatestate_num) == 1


@security_off_wrap
def test_updatestate_between_assign_maketuple():
    class UpdateState_Assign_MakeTuple(Cell):
        def __init__(self):
@@ -19,7 +19,7 @@ from mindspore import Tensor
from mindspore.ops import composite as C
from mindspore import context

context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
context.set_context(mode=context.GRAPH_MODE)


class ForwardNet(nn.Cell):

@@ -22,7 +22,7 @@ from mindspore import context
from mindspore.ops import functional as F
from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
context.set_context(mode=context.GRAPH_MODE)


class ForwardNet(nn.Cell):

@@ -20,7 +20,7 @@ from mindspore import Tensor
from mindspore.ops import composite as C
from mindspore import context

context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
context.set_context(mode=context.GRAPH_MODE)


class ForwardNet(nn.Cell):

@@ -22,7 +22,7 @@ from mindspore import context
from mindspore.common.parameter import Parameter
from mindspore.ops import functional as F

context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
context.set_context(mode=context.GRAPH_MODE)


class ForwardNet(nn.Cell):

@@ -20,7 +20,7 @@ from mindspore import Tensor
from mindspore.ops import composite as C
from mindspore import context

context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
context.set_context(mode=context.GRAPH_MODE)


class ForwardNet(nn.Cell):

@@ -23,7 +23,7 @@ from mindspore import context
from mindspore.common.parameter import Parameter
from mindspore.ops import functional as F

context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
context.set_context(mode=context.GRAPH_MODE)


class ForwardNet(nn.Cell):

@@ -21,7 +21,7 @@ from mindspore import Tensor
from mindspore.ops import composite as C
from mindspore import context

context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
context.set_context(mode=context.GRAPH_MODE)


class ForwardNet(nn.Cell):

@@ -21,7 +21,7 @@ from mindspore import Tensor
from mindspore.ops import composite as C
from mindspore import context

context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
context.set_context(mode=context.GRAPH_MODE)


class ForwardNet(nn.Cell):

@@ -22,7 +22,7 @@ from mindspore import context
from mindspore.common.parameter import Parameter
from mindspore.ops import functional as F

context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
context.set_context(mode=context.GRAPH_MODE)


class ForwardNet(nn.Cell):

@@ -21,7 +21,7 @@ from mindspore.ops import composite as C
from mindspore import context
from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
context.set_context(mode=context.GRAPH_MODE)


class ForwardNet(nn.Cell):

@@ -22,7 +22,7 @@ from mindspore.ops import composite as C
from mindspore import context
from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
context.set_context(mode=context.GRAPH_MODE)


class ForwardNet(nn.Cell):

@@ -23,7 +23,7 @@ from mindspore.ops import functional as F
from mindspore import context
from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
context.set_context(mode=context.GRAPH_MODE)


class ForwardNet(nn.Cell):

@@ -23,7 +23,7 @@ from mindspore.ops import functional as F
from mindspore import context
from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
context.set_context(mode=context.GRAPH_MODE)


class ForwardNet(nn.Cell):

@@ -21,7 +21,7 @@ from mindspore.ops import composite as C
from mindspore import context
from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
context.set_context(mode=context.GRAPH_MODE)


class ForwardNet(nn.Cell):

@@ -21,7 +21,7 @@ from mindspore import Tensor
from mindspore.ops import composite as C
from mindspore import context

context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
context.set_context(mode=context.GRAPH_MODE)


class ForwardNet(nn.Cell):

@@ -21,7 +21,7 @@ from mindspore import Tensor
from mindspore.ops import composite as C
from mindspore import context

context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
context.set_context(mode=context.GRAPH_MODE)


class ForwardNet(nn.Cell):

@@ -21,7 +21,7 @@ from mindspore.ops import composite as C
from mindspore import context
from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
context.set_context(mode=context.GRAPH_MODE)


class ForwardNet(nn.Cell):

@@ -21,7 +21,7 @@ from mindspore.ops import composite as C
from mindspore import context
from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
context.set_context(mode=context.GRAPH_MODE)


class ForwardNet(nn.Cell):

@@ -21,7 +21,7 @@ from mindspore.ops import composite as C
from mindspore import context
from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
context.set_context(mode=context.GRAPH_MODE)


class ForwardNet(nn.Cell):

@@ -22,7 +22,7 @@ from mindspore.ops import composite as C
from mindspore import context
from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
context.set_context(mode=context.GRAPH_MODE)


class ForwardNet(nn.Cell):

@@ -21,7 +21,7 @@ from mindspore import Tensor
from mindspore.ops import composite as C
from mindspore import context

context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
context.set_context(mode=context.GRAPH_MODE)


class ForwardNet(nn.Cell):

@@ -21,7 +21,7 @@ from mindspore import Tensor
from mindspore.ops import composite as C
from mindspore import context

context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
context.set_context(mode=context.GRAPH_MODE)


class ForwardNet(nn.Cell):

@@ -22,7 +22,7 @@ from mindspore.ops import composite as C
from mindspore import context
from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
context.set_context(mode=context.GRAPH_MODE)


class ForwardNet(nn.Cell):

@@ -21,7 +21,7 @@ from mindspore import Tensor
from mindspore.ops import composite as C
from mindspore import context

context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
context.set_context(mode=context.GRAPH_MODE)


class ForwardNet(nn.Cell):

@@ -21,7 +21,7 @@ from mindspore import Tensor
from mindspore.ops import composite as C
from mindspore import context

context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
context.set_context(mode=context.GRAPH_MODE)


class ForwardNet(nn.Cell):

@@ -21,7 +21,7 @@ from mindspore import Tensor
from mindspore.ops import composite as C
from mindspore import context

context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
context.set_context(mode=context.GRAPH_MODE)


class ForwardNet(nn.Cell):

@@ -21,7 +21,7 @@ from mindspore import Tensor
from mindspore.ops import composite as C
from mindspore import context

context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
context.set_context(mode=context.GRAPH_MODE)


class ForwardNet(nn.Cell):

@@ -21,7 +21,7 @@ from mindspore import Tensor
from mindspore.ops import composite as C
from mindspore import context

context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
context.set_context(mode=context.GRAPH_MODE)


class ForwardNet(nn.Cell):
@@ -24,11 +24,6 @@ from mindspore.common.parameter import Parameter, ParameterTuple
from mindspore.ops import composite as C
from mindspore.ops import operations as P

# from tests.vm_impl.math_ops_vm_impl import *
# from tests.vm_impl.vm_interface import *
# from tests.vm_impl import *
# context.set_context(save_graphs=True)


grad_by_list = C.GradOperation(get_by_list=True)
grad_all = C.GradOperation(get_all=True)
@@ -153,7 +153,7 @@ def test_load_mindir_and_run():
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard
def test_single_if():
    context.set_context(mode=context.GRAPH_MODE, save_graphs=True, save_graphs_path="./ifir")
    context.set_context(mode=context.GRAPH_MODE)
    network = SingleIfNet()

    x = Tensor(np.array([1]).astype(np.float32))
@@ -42,7 +42,7 @@ class GradNet(nn.Cell):
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_while_grad():
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=True)
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    x = Tensor([2.0], dtype=mstype.float32)
    y = Tensor([2.0], dtype=mstype.float32)
    GradNet(Net())(x, y)
@@ -145,7 +145,7 @@ def server_train(args):
        os.makedirs(output_dir)

    # mindspore context
    context.set_context(mode=context.GRAPH_MODE, device_target=device_target, save_graphs=True)
    context.set_context(mode=context.GRAPH_MODE, device_target=device_target)
    context.set_fl_context(**fl_ctx)
    print('Context setting is done! Time cost: {}'.format(time() - start))
    sys.stdout.flush()
@@ -17,7 +17,6 @@

import os
import functools
from mindspore import context
from mindspore.profiler import Profiler
from .config import config

@@ -93,7 +92,6 @@ def moxing_wrapper(pre_process=None, post_process=None):
            sync_data(config.train_url, config.output_path)
            print("Workspace downloaded: ", os.listdir(config.output_path))

            context.set_context(save_graphs_path=os.path.join(config.output_path, str(get_rank_id())))
            config.device_num = get_device_num()
            config.device_id = get_device_id()
            if not os.path.exists(config.output_path):
@@ -86,7 +86,7 @@ ctx = {
    "encrypt_type": encrypt_type
}

context.set_context(mode=context.GRAPH_MODE, device_target=device_target, save_graphs=False)
context.set_context(mode=context.GRAPH_MODE, device_target=device_target)
context.set_fl_context(**ctx)
# print(**ctx, flush=True)
# context.set_context(mode=context.GRAPH_MODE, device_target="GPU", device_id=get_device_id())

@@ -114,7 +114,7 @@ ctx = {
    "encrypt_type": encrypt_type
}

context.set_context(mode=context.GRAPH_MODE, device_target=device_target, save_graphs=False)
context.set_context(mode=context.GRAPH_MODE, device_target=device_target)
context.set_fl_context(**ctx)

@@ -95,7 +95,7 @@ ctx = {
    "encrypt_type": encrypt_type
}

context.set_context(mode=context.GRAPH_MODE, device_target=device_target, save_graphs=False)
context.set_context(mode=context.GRAPH_MODE, device_target=device_target)
context.set_fl_context(**ctx)

if __name__ == "__main__":

@@ -125,7 +125,7 @@ ctx = {
    "enable_ssl": enable_ssl
}

context.set_context(mode=context.GRAPH_MODE, device_target=device_target, save_graphs=False)
context.set_context(mode=context.GRAPH_MODE, device_target=device_target)
context.set_fl_context(**ctx)

if __name__ == "__main__":

@@ -120,7 +120,7 @@ ctx = {
    "enable_ssl": enable_ssl
}

context.set_context(mode=context.GRAPH_MODE, device_target=device_target, save_graphs=False)
context.set_context(mode=context.GRAPH_MODE, device_target=device_target)
context.set_fl_context(**ctx)

if __name__ == "__main__":
@@ -21,7 +21,6 @@ from mindspore import Tensor
from mindspore.ops import operations as P

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(save_graphs=True)


class Net(nn.Cell):

@@ -21,7 +21,6 @@ from mindspore import Tensor
from mindspore.ops import operations as P

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(save_graphs=True)


class Net(nn.Cell):
@@ -38,8 +38,7 @@ SEED = 20
def test_gcn():
    print("test_gcn begin")
    np.random.seed(SEED)
    context.set_context(mode=context.GRAPH_MODE,
                        device_target="Ascend", save_graphs=False)
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    config = ConfigGCN()
    config.dropout = 0.0
    adj, feature, label_onehot, _ = get_adj_features_labels(DATA_DIR)
@@ -60,7 +60,6 @@ def test_net():
    net1 = Net1()
    output1 = net1(Tensor(x), Tensor(y))

    context.set_context(save_graphs=True)
    net2 = Net2()
    output2 = net2(Tensor(x), Tensor(y))
    assert np.allclose(output1[0].asnumpy(), output2[0].asnumpy())
@@ -30,7 +30,7 @@ from utils import allclose_nparray
from utils import FakeDataInitMode
from utils import find_newest_validateir_file
from utils import clean_all_ir_files

from tests.security_utils import security_off_wrap

def read_validateir_file(path_folder):
    filename = find_newest_validateir_file(path_folder)

@@ -109,6 +109,7 @@ def test_sit_auto_mix_precision_train_o3():
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@security_off_wrap
def test_sit_auto_mix_precision_model_o0():
    input_data = np.random.randn(32, 3, 224, 224).astype(np.float32)
    dataset1 = FakeData(size=32,

@@ -142,6 +143,7 @@ def test_sit_auto_mix_precision_model_o0():
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@security_off_wrap
def test_sit_auto_mix_precision_model_o2():
    input_data = np.random.randn(32, 3, 224, 224).astype(np.float32)
    dataset1 = FakeData(size=32,
@@ -30,7 +30,7 @@ from src.metrics import AUCMetric
 from src.config import WideDeepConfig
 
 sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=True)
+context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
 context.set_auto_parallel_context(parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL, gradients_mean=True)
 init()
 
@@ -29,7 +29,7 @@ from src.metrics import AUCMetric
 from src.config import WideDeepConfig
 
 sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=True)
+context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
 context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True)
 init()
 
@@ -143,7 +143,7 @@ def _set_bert_all_reduce_split():
 def train_process_bert_thor(q, device_id, epoch_size, device_num):
     os.system("mkdir " + str(device_id))
     os.chdir(str(device_id))
-    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=device_id, save_graphs=False)
+    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=device_id)
     context.set_context(reserve_class_name_in_scope=False)
     context.set_context(max_call_depth=3000)
     os.environ['MINDSPORE_HCCL_CONFIG_PATH'] = MINDSPORE_HCCL_CONFIG_PATH
@@ -132,7 +132,7 @@ class LossGet(Callback):
 def train_process(q, device_id, epoch_size, device_num, enable_hccl):
     os.system("mkdir " + str(device_id))
     os.chdir(str(device_id))
-    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False)
+    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
     context.set_context(device_id=device_id)
     os.environ['MINDSPORE_HCCL_CONFIG_PATH'] = MINDSPORE_HCCL_CONFIG_PATH
     os.environ['RANK_ID'] = str(device_id)
@@ -230,7 +230,7 @@ def train_process(q, device_id, epoch_size, device_num, enable_hccl):
 def train_process_thor(q, device_id, epoch_size, device_num, enable_hccl):
     os.system("mkdir " + str(device_id))
     os.chdir(str(device_id))
-    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False)
+    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
     context.set_context(device_id=device_id)
     os.environ['MINDSPORE_HCCL_CONFIG_PATH'] = MINDSPORE_HCCL_CONFIG_PATH_2
     os.environ['RANK_ID'] = str(device_id - 4)
@@ -313,7 +313,7 @@ def train_process_thor(q, device_id, epoch_size, device_num, enable_hccl):
 @pytest.mark.env_single
 def test_resnet_and_resnet_thor_imagenet_4p():
     # reset context
-    context.set_context(save_graphs=False, enable_graph_kernel=False, enable_sparse=False)
+    context.set_context(enable_graph_kernel=False, enable_sparse=False)
     context.reset_auto_parallel_context()
     context.reset_ps_context()
 
@@ -21,7 +21,7 @@ from mindspore import Tensor
 from mindspore.ops import operations as P
 from mindspore import context
 
-context.set_context(mode=context.GRAPH_MODE, save_graphs=True)
+context.set_context(mode=context.GRAPH_MODE)
 
 
 def test_cast_op_attr():
@@ -21,8 +21,7 @@ from mindspore import Tensor
 from mindspore import Parameter
 from mindspore.ops import operations as P
 
-context.set_context(mode=context.GRAPH_MODE,
-                    device_target='CPU', save_graphs=True)
+context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
 
 
 class UpdateCacheNet(nn.Cell):
@@ -19,7 +19,7 @@ import mindspore.nn as nn
 import mindspore.context as context
 from mindspore import Tensor, ms_function
 
-context.set_context(mode=context.GRAPH_MODE, save_graphs=True)
+context.set_context(mode=context.GRAPH_MODE)
 
 
 class Net_l1_regularizer(nn.Cell):
@@ -32,7 +32,7 @@ def maskedselect():
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_maskedselect():
-    context.set_context(mode=context.GRAPH_MODE, device_target="CPU", save_graphs=True)
+    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
     y = maskedselect()
     expect = [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4]
     assert (y.asnumpy() == expect).all()
@@ -68,7 +68,7 @@ def masked_select_grad():
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_masked_select_grad():
-    context.set_context(mode=context.GRAPH_MODE, device_target="CPU", save_graphs=True)
+    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
     dx = masked_select_grad()
     expect = [4, 6, 8, 10]
     assert (dx.asnumpy() == expect).all()
@@ -23,7 +23,7 @@ from mindspore import Parameter
 from mindspore.common import dtype as mstype
 from mindspore.ops import operations as P
 
-context.set_context(mode=context.GRAPH_MODE, device_target='CPU', save_graphs=False)
+context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
 
 
 @pytest.mark.level0
@@ -33,7 +33,7 @@ def smoothl1loss(beta):
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_smoothl1loss():
-    context.set_context(mode=context.GRAPH_MODE, device_target="CPU", save_graphs=True)
+    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
 
     epsilon = 1e-6
 
@@ -81,7 +81,7 @@ def smoothl1loss_grad(beta):
 @pytest.mark.platform_x86_cpu
 @pytest.mark.env_onecard
 def test_smoothl1loss_grad():
-    context.set_context(mode=context.GRAPH_MODE, device_target="CPU", save_graphs=True)
+    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
 
     epsilon = 1e-6
 
@@ -25,7 +25,7 @@ from mindspore.ops import functional as F
 from mindspore.common import dtype as mstype
 from mindspore.common.parameter import Parameter
 
-context.set_context(mode=context.GRAPH_MODE, device_target="GPU", save_graphs=True)
+context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
 
 
 class Net(nn.Cell):
@@ -71,7 +71,7 @@ def test_maximum():
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
 def test_broadcast():
-    context.set_context(mode=context.GRAPH_MODE, save_graphs=True, device_target='GPU')
+    context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
 
     x1_np = np.array([[[[0.659578],
                         [0.49113268],
@@ -195,7 +195,7 @@ def test_broadcast():
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
 def test_broadcast_diff_dims():
-    context.set_context(mode=context.GRAPH_MODE, save_graphs=True, device_target='GPU')
+    context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
 
     x1_np = np.array([[[0.275478, 0.48933202, 0.71846116],
                        [0.9803821, 0.57205725, 0.28511533]],
@@ -48,7 +48,7 @@ class Grad(Cell):
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
 def test_nobroadcast():
-    context.set_context(mode=context.GRAPH_MODE, save_graphs=True, device_target='GPU')
+    context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
 
     x1_np = np.random.rand(3, 4).astype(np.float32)
     x2_np = np.random.rand(3, 4).astype(np.float32)
@@ -66,7 +66,7 @@ def test_nobroadcast():
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
 def test_broadcast():
-    context.set_context(mode=context.GRAPH_MODE, save_graphs=True, device_target='GPU')
+    context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
 
     x1_np = np.array([[[[0.659578],
                         [0.49113268],
@@ -191,7 +191,7 @@ def test_broadcast():
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
 def test_broadcast_diff_dims():
-    context.set_context(mode=context.GRAPH_MODE, save_graphs=True, device_target='GPU')
+    context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
 
     x1_np = np.array([[[0.275478, 0.48933202, 0.71846116],
                        [0.9803821, 0.57205725, 0.28511533]],
@@ -224,7 +224,7 @@ def test_broadcast_diff_dims():
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
 def test_broadcast_int32():
-    context.set_context(mode=context.GRAPH_MODE, save_graphs=True, device_target='GPU')
+    context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
 
     x1_np = np.random.rand(3, 4).astype(np.int32)
     x2_np = np.random.rand(3, 4).astype(np.int32)
@@ -38,7 +38,7 @@ class ReluNet(nn.Cell):
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
 def test_ReluV2():
-    context.set_context(mode=context.GRAPH_MODE, device_target="GPU", save_graphs=True)
+    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
 
     x = Tensor(np.array([[[[-1, 1, 10],
                            [1, -1, 1],
@@ -77,7 +77,7 @@ class AddReluNet(nn.Cell):
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
 def test_AddRelu():
-    context.set_context(mode=context.GRAPH_MODE, device_target="GPU", save_graphs=True)
+    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
 
     x1 = Tensor(np.array([[[[-1, 1, 10],
                             [1, -1, 1],
@@ -118,7 +118,7 @@ class AddReluGradNet(nn.Cell):
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
 def test_AddReluGrad():
-    context.set_context(mode=context.GRAPH_MODE, device_target="GPU", save_graphs=True)
+    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
 
     x = Tensor(np.array([[[[-1, 1, 10],
                            [1, -1, 1],
@@ -33,7 +33,7 @@ def smoothl1loss(beta):
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
 def test_smoothl1loss():
-    context.set_context(mode=context.GRAPH_MODE, device_target="GPU", save_graphs=True)
+    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
 
     epsilon = 1e-6
 
@@ -81,7 +81,7 @@ def smoothl1loss_grad(beta):
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
 def test_smoothl1loss_grad():
-    context.set_context(mode=context.GRAPH_MODE, device_target="GPU", save_graphs=True)
+    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
 
     epsilon = 1e-6
 
@@ -36,7 +36,7 @@ class Net(Cell):
 
 def get_output(i0, i1, i2, enable_graph_kernel=False):
     if enable_graph_kernel:
-        context.set_context(enable_graph_kernel=True, save_graphs=False)
+        context.set_context(enable_graph_kernel=True)
     net = Net()
     output = net(i0, i1, i2)
     return output
@@ -24,7 +24,7 @@ import mindspore.ops as ops
 from mindspore import context
 from dataset import create_dataset
 
-context.set_context(mode=context.GRAPH_MODE, save_graphs=False, device_target="GPU")
+context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
 
 
 def conv(in_channels, out_channels, kernel_size, stride=1, padding=0):
@@ -23,7 +23,7 @@ import mindspore.ops as ops
 from mindspore.nn.probability.dpn import ConditionalVAE
 from mindspore.nn.probability.infer import ELBO, SVI
 
-context.set_context(mode=context.GRAPH_MODE, save_graphs=False, device_target="GPU")
+context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
 IMAGE_SHAPE = (-1, 1, 32, 32)
 image_path = os.path.join('/home/workspace/mindspore_dataset/mnist', "train")
 
@@ -23,7 +23,7 @@ import mindspore.ops as ops
 from mindspore.nn.probability.dpn import VAE
 from mindspore.nn.probability.infer import ELBO, SVI
 
-context.set_context(mode=context.GRAPH_MODE, save_graphs=False, device_target="GPU")
+context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
 IMAGE_SHAPE = (-1, 1, 32, 32)
 image_path = os.path.join('/home/workspace/mindspore_dataset/mnist', "train")
 
@@ -25,7 +25,7 @@ import mindspore.ops as ops
 from mindspore.nn.probability.dpn import VAE
 from mindspore.nn.probability.infer import ELBO, SVI
 
-context.set_context(mode=context.GRAPH_MODE, save_graphs=False, device_target="GPU")
+context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
 IMAGE_SHAPE = (-1, 1, 32, 32)
 image_path = os.path.join('/home/workspace/mindspore_dataset/mnist', "train")
 
@@ -25,7 +25,7 @@ from mindspore import context
 from dataset import create_dataset
 
 
-context.set_context(mode=context.GRAPH_MODE, save_graphs=False, device_target="GPU")
+context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
 
 
 def conv(in_channels, out_channels, kernel_size, stride=1, padding=0):
@@ -24,7 +24,7 @@ from mindspore import context
 from dataset import create_dataset
 
 
-context.set_context(mode=context.GRAPH_MODE, save_graphs=False, device_target="GPU")
+context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
 
 
 def conv(in_channels, out_channels, kernel_size, stride=1, padding=0):
@@ -145,8 +145,7 @@ class LossGet(Callback):
 def train_process(q, device_id, epoch_size, num_classes, device_num, batch_size, enable_hccl):
     os.system("mkdir " + str(device_id))
     os.chdir(str(device_id))
-    context.set_context(mode=context.GRAPH_MODE,
-                        device_target="Ascend", save_graphs=False)
+    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
     context.set_context(device_id=device_id)
     os.environ['MINDSPORE_HCCL_CONFIG_PATH'] = MINDSPORE_HCCL_CONFIG_PATH
     os.environ['RANK_ID'] = str(device_id)
@@ -19,7 +19,7 @@ import mindspore
 from mindspore import context
 from mindspore.common.tensor import Tensor
 
-context.set_context(mode=context.GRAPH_MODE, save_graphs=True, save_graphs_path="graph_paths")
+context.set_context(mode=context.GRAPH_MODE)
 
 
 class Net(nn.Cell):
@@ -22,7 +22,7 @@ from mindspore import context
 from mindspore.common.tensor import Tensor
 from mindspore.ops import operations as P
 
-context.set_context(mode=context.GRAPH_MODE, save_graphs=True, save_graphs_path="graph_paths")
+context.set_context(mode=context.GRAPH_MODE)
 
 
 class Net(nn.Cell):
@@ -19,7 +19,7 @@ import mindspore
 from mindspore import context
 from mindspore.common.tensor import Tensor
 
-context.set_context(mode=context.GRAPH_MODE, save_graphs=True, save_graphs_path="graph_paths")
+context.set_context(mode=context.GRAPH_MODE)
 
 
 class Net(nn.Cell):
@@ -17,7 +17,7 @@
 import mindspore.nn as nn
 from mindspore import context
 
-context.set_context(mode=context.GRAPH_MODE, save_graphs=True, save_graphs_path="graph_paths")
+context.set_context(mode=context.GRAPH_MODE)
 
 
 class Net(nn.Cell):
@@ -7,7 +7,7 @@ from mindspore.common.tensor import Tensor
 import mindspore.ops as ops
 import mindspore
 
-context.set_context(mode=context.GRAPH_MODE, save_graphs=True, save_graphs_path="graph_path")
+context.set_context(mode=context.GRAPH_MODE)
 
 
 class TestNoReturn(nn.Cell):
@@ -113,7 +113,6 @@ TEST_F(TestCloner, test_clone_closure) {
   Cloner cl(gs, true);
 
   auto g_clone = cl[g];
-  draw::Draw("test_clone_closure_g_clone.dot", g_clone);
   FuncGraphIndex idx2(g_clone, DeepLinkedGraphSearch);
 
   std::string name_list = "xy";
@@ -130,10 +129,8 @@ TEST_F(TestCloner, test_clone_lifting) {
 
   // parse ast to graph
   FuncGraphPtr parsed_f = getPyFun(py_code);
-  draw::Draw("test_clone_before_lifting.dot", parsed_f);
 
   auto g_lifting = LiftingClone(parsed_f);
-  draw::Draw("test_clone_after_lifting.dot", g_lifting);
 
   FuncGraphIndex idx(g_lifting);
   auto g = idx.GetFirstFuncGraph("j");
@@ -35,7 +35,6 @@ class TestGradImplementations : public UT::Common {
 TEST_F(TestGradImplementations, TestGetAugmentedGraph) {
   FuncGraphPtr fg = ad::g_k_prims.KPrimitive(nullptr, NewValueNode(kPrimScalarMul), nullptr);
   ASSERT_TRUE(fg != nullptr);
-  draw::Draw("gradImpl_TestGetAugmentedFuncGraph.dot", fg);
 
   auto fg1 = ad::g_k_prims.KPrimitive(nullptr, NewValueNode(kPrimScalarMul), nullptr);
 