forked from mindspore-Ecosystem/mindspore
!29120 Debugger/dump code cleanup: delete dead code
Merge pull request !29120 from TinaMengtingZhang/del_dead_code
commit bd1b1f01f3
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2022 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -46,7 +46,6 @@ class DumpJsonParser {
   bool IsTensorDump() const;
   bool IsFullDump() const;
   bool IsDumpIter(uint32_t iteration) const;
-  bool DumpAllIter();
 
   bool async_dump_enabled() const { return async_dump_enabled_; }
   bool e2e_dump_enabled() const { return e2e_dump_enabled_; }

@@ -1,5 +1,5 @@
 /**
- * Copyright 2021 Huawei Technologies Co., Ltd
+ * Copyright 2021-2022 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -66,58 +66,6 @@ void GetFileKernelName(NotNull<std::string *> kernel_name) {
   }
 }
 
-void SetConstNodeId(const AnfNodePtr &node, std::map<std::string, size_t> *const_map) {
-  MS_EXCEPTION_IF_NULL(node);
-  if (!node->isa<ValueNode>()) {
-    return;
-  }
-  std::string node_name = GetKernelNodeName(node);
-  MS_EXCEPTION_IF_NULL(const_map);
-  auto iter = const_map->find(node_name);
-  if (iter == const_map->end()) {
-    auto const_idx = const_map->size() + 1;
-    (*const_map)[node_name] = const_idx;
-  }
-}
-
-void GetCNodeConstantId(const CNodePtr &node, std::map<std::string, size_t> *const_map) {
-  MS_EXCEPTION_IF_NULL(node);
-  auto &inputs = node->inputs();
-  if (inputs.empty()) {
-    MS_LOG(EXCEPTION) << "Inputs of apply node is empty";
-  }
-  AnfNodePtr op = inputs[0];
-
-  // CNode/ConstGraph/Const/Parameter
-  MS_EXCEPTION_IF_NULL(op);
-  if (op->isa<CNode>() || IsValueNode<FuncGraph>(op) || op->isa<Parameter>()) {
-    MS_LOG(WARNING) << "Operator must be a primitive.";
-  } else {
-    // process OP inputs
-    for (size_t i = 1; i < inputs.size(); ++i) {
-      SetConstNodeId(inputs[i], const_map);
-    }
-  }
-}
-
-void GetConstantId(const session::KernelGraph *graph, std::map<std::string, size_t> *const_map) {
-  MS_EXCEPTION_IF_NULL(graph);
-  std::vector<AnfNodePtr> nodes = TopoSort(graph->get_return(), SuccIncoming, AlwaysInclude);
-  for (const AnfNodePtr &node : nodes) {
-    MS_EXCEPTION_IF_NULL(node);
-    if (!node->isa<CNode>()) {
-      continue;
-    }
-    auto cnode = node->cast<CNodePtr>();
-    MS_EXCEPTION_IF_NULL(cnode);
-    if (cnode != graph->get_return()) {
-      GetCNodeConstantId(cnode, const_map);
-    } else {
-      SetConstNodeId(cnode->input(1), const_map);
-    }
-  }
-}
-
 void GetDumpIntShape(const AnfNodePtr &node, size_t index, NotNull<ShapeVector *> int_shapes, bool trans_flag) {
   if (trans_flag) {
     *int_shapes = trans::GetRuntimePaddingShape(node, index);

@@ -1,5 +1,5 @@
 /**
- * Copyright 2021 Huawei Technologies Co., Ltd
+ * Copyright 2021-2022 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -31,8 +31,6 @@ std::string GenerateDumpPath(uint32_t graph_id, uint32_t rank_id = 0, bool is_cs
 
 void GetFileKernelName(NotNull<std::string *> kernel_name);
 
-void GetConstantId(const session::KernelGraph *graph, std::map<std::string, size_t> *const_map);
-
 void GetDumpIntShape(const AnfNodePtr &node, size_t index, NotNull<ShapeVector *> int_shapes, bool trans_flag = false);
 
 void DumpMemToFile(const std::string &file_path, const device::DeviceAddress &addr, const ShapeVector &int_shapes,

@@ -1,5 +1,5 @@
 /**
- * Copyright 2020-2021 Huawei Technologies Co., Ltd
+ * Copyright 2020-2022 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -525,18 +525,6 @@ bool E2eDump::isDatasetGraph(const session::KernelGraph *graph) {
   return false;
 }
 
-bool E2eDump::DumpDirExists(const std::string &dump_path) {
-  DIR *dir = opendir(dump_path.c_str());
-  if (dir != nullptr) {
-    MS_LOG(INFO) << "Dump dir " << dump_path << " exists";
-    if (closedir(dir) == -1) {
-      MS_LOG(WARNING) << "Dump dir " << dump_path << " close failed!";
-    }
-    return true;
-  }
-  return false;
-}
-
 #ifdef ENABLE_D
 void E2eDump::DumpTensorToFile(const std::string &dump_path, const debugger::dump::DumpData &dump_data,
                                char *data_ptr) {

@@ -1,5 +1,5 @@
 /**
- * Copyright 2020-2021 Huawei Technologies Co., Ltd
+ * Copyright 2020-2022 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -65,8 +65,6 @@ class E2eDump {
   static void DumpOutputImpl(const CNodePtr &node, bool trans_flag, const std::string &dump_path,
                              std::string *kernel_name, const Debugger *debugger);
 
-  static bool DumpDirExists(const std::string &dump_path);
-
 #ifdef ENABLE_D
   static void DumpTensorToFile(const std::string &dump_path, const debugger::dump::DumpData &dump_data, char *data_ptr);
 

@@ -799,22 +799,6 @@ std::string GetNodeNameWithoutScope(const std::string &dump_style_name) {
   return dump_style_name.substr(last_scope_marker + delim.size());
 }
 
-void ReplaceSrcFileName(std::string *dump_style_name) {
-  if (dump_style_name == nullptr) {
-    return;
-  }
-  const std::string strsrc = "/";
-  std::string strdst = "_";
-  std::string::size_type pos = 0;
-  std::string::size_type srclen = strsrc.size();
-  std::string::size_type dstlen = strdst.size();
-
-  while ((pos = dump_style_name->find(strsrc, pos)) != std::string::npos) {
-    (void)dump_style_name->replace(pos, srclen, strdst);
-    pos += dstlen;
-  }
-}
-
 void DebugServices::ConvertReadTensors(std::vector<std::string> backend_name, std::vector<size_t> slot,
                                        std::vector<unsigned int> device_id, std::vector<unsigned int> iteration,
                                        std::vector<unsigned int> root_graph_id, AsyncFilePool *const result_list) {
@@ -1000,8 +984,9 @@ std::vector<uint32_t> DebugServices::GetDumpRankIdList() {
     }
     if (S_ISDIR(st.st_mode)) {
       std::string rank_dir_name = dir->d_name;
-      if (GetRankOrGraphId("rank", rank_dir_name) != UINT32_MAX) {
-        rank_id_list.push_back(GetRankOrGraphId("rank", rank_dir_name));
+      uint32_t rank_id = GetRankOrGraphId("rank", rank_dir_name);
+      if (rank_id != UINT32_MAX) {
+        rank_id_list.push_back(rank_id);
       }
     }
   }
@@ -1035,8 +1020,8 @@ void DebugServices::CheckDumpGraphIdList(std::vector<uint32_t> rank_id_list) {
     if (graph_dir == "." || graph_dir == "..") {
       continue;
     }
-    if (GetRankOrGraphId("graph", graph_dir) != UINT32_MAX) {
-      uint32_t graph_id = GetRankOrGraphId("graph", graph_dir);
+    uint32_t graph_id = GetRankOrGraphId("graph", graph_dir);
+    if (graph_id != UINT32_MAX) {
       ReadGraphsHistory(rank_id, graph_id);
     }
   }

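Note on the two DebugServices hunks above: the cleanup hoists the GetRankOrGraphId call out of the condition, so the directory name is parsed once and the cached value serves both the UINT32_MAX check and the later use. A minimal standalone sketch of the same pattern, assuming a hypothetical ParseRankId helper in place of DebugServices::GetRankOrGraphId:

#include <cstdint>
#include <string>
#include <vector>

// Hypothetical stand-in for DebugServices::GetRankOrGraphId("rank", dir_name):
// returns UINT32_MAX when the name does not look like "rank_<id>".
uint32_t ParseRankId(const std::string &dir_name) {
  const std::string prefix = "rank_";
  if (dir_name.compare(0, prefix.size(), prefix) != 0) {
    return UINT32_MAX;
  }
  try {
    return static_cast<uint32_t>(std::stoul(dir_name.substr(prefix.size())));
  } catch (...) {
    return UINT32_MAX;
  }
}

void CollectRankIds(const std::vector<std::string> &dir_names, std::vector<uint32_t> *rank_id_list) {
  for (const auto &dir_name : dir_names) {
    // Parse once; reuse the result for both the validity check and the push_back.
    uint32_t rank_id = ParseRankId(dir_name);
    if (rank_id != UINT32_MAX) {
      rank_id_list->push_back(rank_id);
    }
  }
}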
@@ -468,20 +468,13 @@ void Debugger::DumpSingleNode(const CNodePtr &node, uint32_t graph_id) {
   }
 }
 
-void Debugger::DumpSetup(const KernelGraphPtr &kernel_graph) const {
-  MS_LOG(INFO) << "Start!";
-  MS_EXCEPTION_IF_NULL(kernel_graph);
-  E2eDump::DumpSetup(kernel_graph.get());
-  MS_LOG(INFO) << "Finish!";
-}
-
 void Debugger::DumpInGraphCompiler(const KernelGraphPtr &kernel_graph) {
   // This function is used for new GPU runtime using MindRTBackend, on Ascend platform, graphs are saved in other way.
   if (device_target_ == kAscendDevice) {
     return;
   }
   auto &json_parser = DumpJsonParser::GetInstance();
-  if (json_parser.e2e_dump_enabled() || json_parser.async_dump_enabled()) {
+  if (json_parser.e2e_dump_enabled()) {
     uint32_t rank_id = GetRankID();
     kernel_graph->set_root_graph_id(kernel_graph->graph_id());
     std::string final_graph = "trace_code_graph_" + std::to_string(kernel_graph->graph_id());

@@ -103,8 +103,6 @@ class Debugger : public std::enable_shared_from_this<Debugger> {
 
   void DumpSingleNode(const CNodePtr &node, uint32_t graph_id);
 
-  void DumpSetup(const KernelGraphPtr &kernel_graph) const;
-
   void DumpInGraphCompiler(const KernelGraphPtr &kernel_graph);
 
   void PostExecuteGraphDebugger();

@@ -1,5 +1,5 @@
 /**
- * Copyright 2021 Huawei Technologies Co., Ltd
+ * Copyright 2021-2022 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -58,10 +58,10 @@ std::vector<size_t> CheckRealOutput(const std::string &node_name, const size_t &
   return real_outputs;
 }
 
-void LoadInputs(const CNodePtr &cnode, const KernelLaunchInfo *launch_info_, uint32_t exec_order_,
+void LoadInputs(const CNodePtr &cnode, const KernelLaunchInfo *launch_info, uint32_t exec_order,
                 uint32_t root_graph_id) {
   // get inputs
-  auto kernel_inputs = launch_info_->inputs_;
+  auto kernel_inputs = launch_info->inputs_;
   auto input_size = AnfAlgo::GetInputTensorNum(cnode);
   for (size_t j = 0; j < input_size; ++j) {
     auto input_kernel = cnode->input(j + 1);
@@ -77,8 +77,7 @@ void LoadInputs(const CNodePtr &cnode, const KernelLaunchInfo *launch_info_, uin
     auto gpu_addr = std::make_unique<device::gpu::GPUDeviceAddress>(addr->addr, addr->size, format, type);
     string input_tensor_name = input_kernel_name + ':' + "0";
     ShapeVector int_shapes = trans::GetRuntimePaddingShape(input_kernel, PARAMETER_OUTPUT_INDEX);
-    auto ret =
-      gpu_addr->LoadMemToHost(input_tensor_name, exec_order_, format, int_shapes, type, 0, true, root_graph_id);
+    auto ret = gpu_addr->LoadMemToHost(input_tensor_name, exec_order, format, int_shapes, type, 0, true, root_graph_id);
     if (!ret) {
       MS_LOG(ERROR) << "LoadMemToHost:"
                     << ", tensor_name:" << input_tensor_name << ", host_format:" << format << ".!";
@@ -87,10 +86,10 @@ void LoadInputs(const CNodePtr &cnode, const KernelLaunchInfo *launch_info_, uin
   }
 }
 
-void LoadOutputs(const CNodePtr &cnode, const KernelLaunchInfo *launch_info_, uint32_t exec_order_,
+void LoadOutputs(const CNodePtr &cnode, const KernelLaunchInfo *launch_info, uint32_t exec_order,
                  uint32_t root_graph_id) {
   // get outputs
-  auto kernel_outputs = launch_info_->outputs_;
+  auto kernel_outputs = launch_info->outputs_;
   auto output_size = AnfAlgo::GetOutputTensorNum(cnode);
   auto node_name = AnfAlgo::GetCNodeName(cnode);
   std::string kernel_name = GetKernelNodeName(cnode);
@@ -108,7 +107,7 @@ void LoadOutputs(const CNodePtr &cnode, const KernelLaunchInfo *launch_info_, ui
     auto gpu_addr = std::make_unique<device::gpu::GPUDeviceAddress>(addr->addr, addr->size, format, type);
     string tensor_name = kernel_name + ':' + std::to_string(j);
     ShapeVector int_shapes = trans::GetRuntimePaddingShape(cnode, j);
-    auto ret = gpu_addr->LoadMemToHost(tensor_name, exec_order_, format, int_shapes, type, j, false, root_graph_id);
+    auto ret = gpu_addr->LoadMemToHost(tensor_name, exec_order, format, int_shapes, type, j, false, root_graph_id);
     if (!ret) {
       MS_LOG(ERROR) << "LoadMemToHost:"
                     << ", tensor_name:" << tensor_name << ", host_format:" << format << ".!";
@@ -137,7 +136,7 @@ bool CheckReadData(const CNodePtr &cnode) {
   return read_data;
 }
 
-void ReadDataAndDump(const CNodePtr &cnode, const KernelLaunchInfo *launch_info_, uint32_t exec_order_) {
+void ReadDataAndDump(const CNodePtr &cnode, const KernelLaunchInfo *launch_info, uint32_t exec_order) {
   auto debugger = Debugger::GetInstance();
   if (!debugger) {
     return;
@@ -148,10 +147,10 @@ void ReadDataAndDump(const CNodePtr &cnode, const KernelLaunchInfo *launch_info_
   MS_EXCEPTION_IF_NULL(kernel_graph);
   auto root_graph_id = kernel_graph->root_graph_id();
   if (debugger->debugger_enabled() || dump_json_parser.InputNeedDump()) {
-    LoadInputs(cnode, launch_info_, exec_order_, root_graph_id);
+    LoadInputs(cnode, launch_info, exec_order, root_graph_id);
   }
   if (debugger->debugger_enabled() || dump_json_parser.OutputNeedDump()) {
-    LoadOutputs(cnode, launch_info_, exec_order_, root_graph_id);
+    LoadOutputs(cnode, launch_info, exec_order, root_graph_id);
   }
   // Dump kernel
   if (dump_enabled) {
@@ -168,7 +167,7 @@ void ReadDataAndDump(const CNodePtr &cnode, const KernelLaunchInfo *launch_info_
   debugger->PostExecuteNode(cnode, last_kernel);
 }
 
-void ReadDataAndDumpAscend(const CNodePtr &cnode, uint32_t exec_order_) {
+void ReadDataAndDumpAscend(const CNodePtr &cnode, uint32_t exec_order) {
   auto debugger = Debugger::GetInstance();
   if (!debugger) {
     return;
@@ -180,7 +179,7 @@ void ReadDataAndDumpAscend(const CNodePtr &cnode, uint32_t exec_order_) {
   MS_EXCEPTION_IF_NULL(kernel_graph);
   auto root_graph_id = kernel_graph->root_graph_id();
 
-  debugger->LoadNodeOutputs(cnode, exec_order_, root_graph_id);
+  debugger->LoadNodeOutputs(cnode, exec_order, root_graph_id);
   // Dump kernel
   if (dump_enabled) {
     MS_EXCEPTION_IF_NULL(kernel_graph);

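For context on the renames above (and in the header declarations that follow): trailing underscores such as launch_info_ and exec_order_ are, under the Google-style convention that MindSpore's C++ code generally follows, reserved for class data members, so plain function parameters drop the suffix. A small illustrative sketch of the convention, using hypothetical names:

#include <cstdint>

// Hypothetical class: parameters and locals use plain names, while
// private data members carry the trailing underscore.
class KernelTracer {
 public:
  void Record(uint32_t exec_order) { last_exec_order_ = exec_order; }  // parameter: no suffix
  uint32_t last_exec_order() const { return last_exec_order_; }

 private:
  uint32_t last_exec_order_ = 0;  // member: trailing underscore
};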
@@ -1,5 +1,5 @@
 /**
- * Copyright 2021 Huawei Technologies Co., Ltd
+ * Copyright 2021-2022 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -30,17 +30,17 @@ namespace mindspore {
 
 std::vector<size_t> CheckRealOutput(const std::string &node_name, const size_t &output_size);
 
-void LoadInputs(const CNodePtr &cnode, const KernelLaunchInfo *launch_info_, uint32_t exec_order_,
+void LoadInputs(const CNodePtr &cnode, const KernelLaunchInfo *launch_info, uint32_t exec_order,
                 uint32_t root_graph_id);
 
-void LoadOutputs(const CNodePtr &cnode, const KernelLaunchInfo *launch_info_, uint32_t exec_order_,
+void LoadOutputs(const CNodePtr &cnode, const KernelLaunchInfo *launch_info, uint32_t exec_order,
                  uint32_t root_graph_id);
 
 bool CheckReadData(const CNodePtr &cnode);
 
-void ReadDataAndDump(const CNodePtr &cnode, const KernelLaunchInfo *launch_info_, uint32_t exec_order_);
+void ReadDataAndDump(const CNodePtr &cnode, const KernelLaunchInfo *launch_info, uint32_t exec_order);
 
-void ReadDataAndDumpAscend(const CNodePtr &cnode, uint32_t exec_order_);
+void ReadDataAndDumpAscend(const CNodePtr &cnode, uint32_t exec_order);
 
 std::string CheckDatasetSinkMode(const KernelGraphPtr &graph_ptr);