!18909 modify log level for pynative ascend

Merge pull request !18909 from chujinjin/modify_log_level_for_pynative_ascend
i-robot 2021-06-28 01:13:22 +00:00 committed by Gitee
commit b2111fcdf5
8 changed files with 36 additions and 46 deletions
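
The changes below demote per-operator PyNative logs on Ascend from INFO to DEBUG, or delete them outright, so single-op execution no longer floods the log at INFO verbosity. As a rough illustration of why the demotion silences these messages, here is a minimal sketch of a severity-gated log macro in the spirit of MS_LOG; it is not MindSpore's actual implementation (MindSpore documents its threshold as coming from the GLOG_v environment variable: 0=DEBUG, 1=INFO, 2=WARNING, 3=ERROR, default WARNING):

#include <iostream>

// Hypothetical severity levels mirroring DEBUG < INFO < WARNING < ERROR.
enum LogLevel { kDebug = 0, kInfo = 1, kWarning = 2, kError = 3 };

// Assumed process-wide threshold; a stand-in for the GLOG_v-derived level.
static LogLevel g_log_threshold = kWarning;

// Sketch of a severity-gated log macro: anything below the threshold is
// dropped, so demoting a message from INFO to DEBUG hides it unless the
// user explicitly opts into DEBUG verbosity.
#define SKETCH_LOG(level, msg)                           \
  do {                                                   \
    if ((level) >= g_log_threshold) {                    \
      std::cerr << #level << ": " << (msg) << std::endl; \
    }                                                    \
  } while (0)

int main() {
  SKETCH_LOG(kInfo, "suppressed: default threshold is WARNING");
  g_log_threshold = kInfo;
  SKETCH_LOG(kInfo, "printed at INFO verbosity");
  SKETCH_LOG(kDebug, "still suppressed: demoted messages need DEBUG");
  g_log_threshold = kDebug;
  SKETCH_LOG(kDebug, "printed only once DEBUG is requested");
  return 0;
}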

@@ -1477,7 +1477,7 @@ size_t AnfRuntimeAlgorithm::GetRealInputIndex(const mindspore::AnfNodePtr &anf_n
     auto find = spec_node_list.find(node_name);
     if (find != spec_node_list.end()) {
       ret = find->second[cur_index];
-      MS_LOG(INFO) << "Real input index change to" << ret << ", node name:" << node_name;
+      MS_LOG(DEBUG) << "Real input index change to" << ret << ", node name:" << node_name;
     }
   }
   return ret;
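
For context, GetRealInputIndex remaps a node's logical input index to the backend's real input order through a per-op lookup table. A minimal sketch of that remapping follows; the table entry is invented for illustration and is not authoritative:

#include <iostream>
#include <map>
#include <string>
#include <vector>

int main() {
  // Hypothetical remap table: op name -> real input index for each logical
  // index, in the spirit of spec_node_list in the hunk above.
  const std::map<std::string, std::vector<size_t>> spec_node_list = {
      {"Conv2DBackpropInput", {1, 0}},  // illustrative entry only
  };
  const std::string node_name = "Conv2DBackpropInput";
  size_t cur_index = 0;
  size_t ret = cur_index;  // default: identity mapping
  auto find = spec_node_list.find(node_name);
  if (find != spec_node_list.end()) {
    ret = find->second[cur_index];  // logical index 0 -> real index 1
  }
  std::cout << "real input index: " << ret << std::endl;
  return 0;
}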

@@ -194,7 +194,7 @@ void GenOpOutputStubTensor(const KernelGraphPtr &single_op_graph, const CNodePtr
 size_t LoadCtrlInputTensor(const std::shared_ptr<KernelGraph> &graph, std::vector<tensor::TensorPtr> *inputs) {
   MS_EXCEPTION_IF_NULL(graph);
-  MS_LOG(INFO) << "Load kInputCtrlTensors";
+  MS_LOG(DEBUG) << "Load kInputCtrlTensors";
   auto inputs_params = graph->input_ctrl_tensors();
   if (inputs_params == nullptr) {
     return 0;
@@ -231,7 +231,7 @@ size_t LoadCtrlInputTensor(const std::shared_ptr<KernelGraph> &graph, std::vecto
   *epoch_val = graph->current_epoch();
   epoch_tensor->set_sync_status(kNeedSyncHostToDevice);
   inputs->push_back(epoch_tensor);
-  MS_LOG(INFO) << "Load epoch_val:" << *epoch_val;
+  MS_LOG(DEBUG) << "Load epoch_val:" << *epoch_val;
   graph->set_current_epoch(graph->current_epoch() + 1);
   return inputs_params->size();
 }
@@ -626,9 +626,7 @@ bool AscendSession::GraphCacheExist(const GraphInfo &graph_info) const {
 void AscendSession::BuildOpImpl(const OpRunInfo &op_run_info, const GraphInfo &graph_info,
                                 const std::vector<tensor::TensorPtr> &input_tensors,
                                 const std::vector<int64_t> &tensors_mask) {
-  MS_LOG(INFO) << "Build op " << op_run_info.op_name << " start !";
   if (GraphCacheExist(graph_info)) {
-    MS_LOG(INFO) << "Build op " << op_run_info.op_name << " graph cache has existed !";
     return;
   }
@@ -640,7 +638,6 @@ void AscendSession::BuildOpImpl(const OpRunInfo &op_run_info, const GraphInfo &g
   RunOpAdjustKernel(graph);
   BuildKernel(graph);
   run_op_graphs_[graph_info] = graph;
-  MS_LOG(INFO) << "Build op " << op_run_info.op_name << " finish !";
 }
 
 void AscendSession::RunOpImpl(const GraphInfo &graph_info, OpRunInfo *op_run_info,
@@ -660,7 +657,6 @@ void AscendSession::RunOpImpl(const GraphInfo &graph_info, OpRunInfo *op_run_inf
   // Run op
   auto graph = run_op_graphs_[graph_info];
   MS_EXCEPTION_IF_NULL(graph);
-  MS_LOG(INFO) << "Run op " << op_run_info->op_name << " start!";
   // malloc mem
   RunOpRemoveNopNode(graph);
   RunOpMemoryAlloc(*input_tensors, graph.get());
@@ -680,7 +676,6 @@ void AscendSession::RunOpImpl(const GraphInfo &graph_info, OpRunInfo *op_run_inf
     UpdateOutputAbstract(graph, op_run_info);
   }
   RunOpMemoryClear(graph.get());
-  MS_LOG(INFO) << "Run op " << op_run_info->op_name << " finish!";
 }
 
 KernelGraphPtr AscendSession::PreBuildOp(const OpRunInfo &op_run_info,
@@ -928,7 +923,7 @@ void AscendSession::BuildKernel(const std::vector<CNodePtr> &kernels) const {
 }
 
 void AscendSession::BuildDynamicKernel(const std::shared_ptr<KernelGraph> &kernel_graph) const {
-  MS_LOG(INFO) << "Start!";
+  MS_LOG(DEBUG) << "Start!";
   MS_EXCEPTION_IF_NULL(kernel_graph);
   const auto &kernels = kernel_graph->execution_order();
   auto iter = std::find_if(kernels.begin(), kernels.end(), [](const CNodePtr &kernel) {
@@ -942,7 +937,7 @@ void AscendSession::BuildDynamicKernel(const std::shared_ptr<KernelGraph> &kerne
   if (!runtime_instance->GenDynamicKernel(kernel_graph.get())) {
     MS_LOG(DEBUG) << "Graph:" << kernel_graph->graph_id() << " failed to generate dynamic kernel!";
   }
-  MS_LOG(INFO) << "Finish!";
+  MS_LOG(DEBUG) << "Finish!";
 }
 
 static CNodePtr GetNextLabelSet(const std::vector<CNodePtr> &kernel_nodes, uint32_t index) {
@@ -1086,12 +1081,10 @@ void AscendSession::MemoryAlloc(KernelGraph *kernel_graph) const {
 void AscendSession::RunOpMemoryAlloc(const std::vector<tensor::TensorPtr> &input_tensors,
                                      KernelGraph *kernel_graph) const {
-  MS_LOG(INFO) << "Start memory alloc!";
   MS_EXCEPTION_IF_NULL(kernel_graph);
   auto runtime_instance = device::KernelRuntimeManager::Instance().GetKernelRuntime(kAscendDevice, device_id_);
   MS_EXCEPTION_IF_NULL(runtime_instance);
   runtime_instance->RunOpAssignMemory(input_tensors, kernel_graph);
-  MS_LOG(INFO) << "Finish!";
 }
 
 void AscendSession::RunOpGenKernelEvent(const KernelGraph *graph) const {
@@ -1124,7 +1117,7 @@ void AscendSession::Load(const std::shared_ptr<KernelGraph> &kernel_graph) const
 }
 
 void AscendSession::Execute(const std::shared_ptr<KernelGraph> &kernel_graph, bool is_task) const {
-  MS_LOG(INFO) << "Start!";
+  MS_LOG(DEBUG) << "Start!";
   bool is_task_sink = false;
   if (is_task) {
     auto context_ptr = MsContext::GetInstance();
@@ -1142,21 +1135,21 @@ void AscendSession::Execute(const std::shared_ptr<KernelGraph> &kernel_graph, bo
 #endif
     MS_LOG(EXCEPTION) << "run task error!";
   }
-  MS_LOG(INFO) << "Finish!";
+  MS_LOG(DEBUG) << "Finish!";
 }
 
 void AscendSession::DumpSetup(const std::shared_ptr<KernelGraph> &kernel_graph) const {
-  MS_LOG(INFO) << "Start!";
+  MS_LOG(DEBUG) << "Start!";
   MS_EXCEPTION_IF_NULL(kernel_graph);
   E2eDump::DumpSetup(kernel_graph.get(), rank_id_);
-  MS_LOG(INFO) << "Finish!";
+  MS_LOG(DEBUG) << "Finish!";
 }
 
 void AscendSession::Dump(const std::shared_ptr<KernelGraph> &kernel_graph) const {
-  MS_LOG(INFO) << "Start!";
+  MS_LOG(DEBUG) << "Start!";
   MS_EXCEPTION_IF_NULL(kernel_graph);
   E2eDump::DumpData(kernel_graph.get(), rank_id_);
-  MS_LOG(INFO) << "Finish!";
+  MS_LOG(DEBUG) << "Finish!";
 }
 
 void AscendSession::DumpAllGraphs(const std::vector<KernelGraphPtr> &all_graphs) {

@@ -278,8 +278,6 @@ void CPUSession::RunOpImpl(const GraphInfo &graph_info, OpRunInfo *op_run_info,
   runtime_.CreateOutputTensors(kernel_graph.get(), *input_tensors, outputs, &tensor_to_node);
   runtime_.BindInputOutput(kernel_graph.get(), *input_tensors, outputs);
-  MS_LOG(INFO) << "Run Op start";
   bool ret = runtime_.Run(kernel_graph.get(), false);
   if (!ret) {
     MS_LOG(EXCEPTION) << "Run Op failed";
@@ -291,7 +289,6 @@ void CPUSession::RunOpImpl(const GraphInfo &graph_info, OpRunInfo *op_run_info,
   }
   SetOutputFlags(*outputs);
   runtime_.RunOpClearMemory(kernel_graph.get());
-  MS_LOG(INFO) << "Run Op end";
 }
 
 void CPUSession::SetKernelInfo(const KernelGraph *kernel_graph) {

@@ -185,7 +185,6 @@ BaseRef CreateNodeOutputTensor(const session::KernelWithIndex &node_output_pair,
   int output_index = SizeToInt(node_output_pair.second);
   MS_EXCEPTION_IF_NULL(node);
   MS_EXCEPTION_IF_NULL(graph);
-  MS_LOG(INFO) << "Create tensor for output[" << node->DebugString() << "] index[" << output_index << "]";
   auto tensor_from_input = GetNodeOutputTensorFromInputs(node_output_pair, graph, input_tensors);
   if (tensor_from_input != nullptr) {
     return tensor_from_input;
@@ -1220,7 +1219,6 @@ void SessionBasic::CreateOutputPlaceholder(
   size_t index = 0;
   for (auto &item : anf_outputs) {
     MS_EXCEPTION_IF_NULL(item);
-    MS_LOG(INFO) << "Create node output placeholder[" << item->DebugString() << "]";
     std::vector<size_t> indexes{index++};
     outputs->emplace_back(CreateNodeOutputPlaceholder(item, kernel_graph, input_tensors, indexes, output_indexes));
   }

@@ -277,12 +277,12 @@ nlohmann::json ConstructTransDataKernelJson(const std::vector<size_t> &host_shap
 }
 
 void AscendDeviceAddress::SyncStream() const {
-  MS_LOG(INFO) << "Start!";
+  MS_LOG(DEBUG) << "Start!";
   auto ms_context = MsContext::GetInstance();
   MS_EXCEPTION_IF_NULL(ms_context);
   if (ms_context->get_param<int>(MS_CTX_EXECUTION_MODE) != kPynativeMode &&
       !ms_context->get_param<bool>(MS_CTX_ENABLE_PYNATIVE_INFER)) {
-    MS_LOG(INFO) << "Finish!";
+    MS_LOG(DEBUG) << "Finish!";
     return;
   }
   auto device_id = ms_context->get_param<uint32_t>(MS_CTX_DEVICE_ID);
@@ -292,7 +292,7 @@ void AscendDeviceAddress::SyncStream() const {
   if (!ret) {
     MS_LOG(EXCEPTION) << "Sync stream error!";
   }
-  MS_LOG(INFO) << "Finish!";
+  MS_LOG(DEBUG) << "Finish!";
 }
 
 bool AscendDeviceAddress::SyncDeviceToHost(size_t size, void *host_ptr) const {

@@ -588,28 +588,30 @@ bool AscendKernelRuntime::Run(session::KernelGraph *const graph, bool is_task_si
   SignalGuard sg;
   MS_EXCEPTION_IF_NULL(graph);
   bool ret = false;
-#if defined(_WIN32) || defined(_WIN64)
-  auto start_time = std::chrono::steady_clock::now();
-#else
-  struct timeval start_time {};
-  struct timeval end_time {};
-  (void)gettimeofday(&start_time, nullptr);
-#endif
   if (is_task_sink) {
+#if defined(_WIN32) || defined(_WIN64)
+    auto start_time = std::chrono::steady_clock::now();
+#else
+    struct timeval start_time {};
+    struct timeval end_time {};
+    (void)gettimeofday(&start_time, nullptr);
+#endif
     ret = RunTask(graph);
+#if defined(_WIN32) || defined(_WIN64)
+    auto end_time = std::chrono::steady_clock::now();
+    std::chrono::duration<double, std::ratio<1, kUSecondInSecond>> cost = end_time - start_time;
+    MS_LOG(INFO) << "Call MS Run Success in " << cost.count() << " us";
+#else
+    (void)gettimeofday(&end_time, nullptr);
+    uint64_t cost = kUSecondInSecond * static_cast<uint64_t>(end_time.tv_sec - start_time.tv_sec);
+    cost += static_cast<uint64_t>(end_time.tv_usec - start_time.tv_usec);
+    MS_LOG(INFO) << "Call MS Run Success in " << cost << " us";
+#endif
   } else {
     ret = LaunchKernel(graph);
  }
-#if defined(_WIN32) || defined(_WIN64)
-  auto end_time = std::chrono::steady_clock::now();
-  std::chrono::duration<double, std::ratio<1, kUSecondInSecond>> cost = end_time - start_time;
-  MS_LOG(INFO) << "Call MS Run Success in " << cost.count() << " us";
-#else
-  (void)gettimeofday(&end_time, nullptr);
-  uint64_t cost = kUSecondInSecond * static_cast<uint64_t>(end_time.tv_sec - start_time.tv_sec);
-  cost += static_cast<uint64_t>(end_time.tv_usec - start_time.tv_usec);
-  MS_LOG(INFO) << "Call MS Run Success in " << cost << " us";
-#endif
   return ret;
 }
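
This hunk moves the run-timing block inside the is_task_sink branch, so the LaunchKernel path that PyNative takes for every single operator no longer prints "Call MS Run Success in ... us" at INFO on each call. Below is a self-contained sketch of the same cross-platform timing pattern (std::chrono on Windows, gettimeofday elsewhere); kUSecondInSecond is assumed to be 1,000,000, as the unit conversion implies:

#include <cstdint>
#include <iostream>
#if defined(_WIN32) || defined(_WIN64)
#include <chrono>
#else
#include <sys/time.h>
#endif

// Assumed to match the constant used in the hunk above.
constexpr uint64_t kUSecondInSecond = 1000000;

int main() {
  volatile uint64_t sink = 0;  // dummy work so the measured span is nonzero
#if defined(_WIN32) || defined(_WIN64)
  auto start_time = std::chrono::steady_clock::now();
  for (uint64_t i = 0; i < 1000000; ++i) sink += i;
  auto end_time = std::chrono::steady_clock::now();
  // duration with a microsecond ratio, as in the Windows branch above
  std::chrono::duration<double, std::ratio<1, kUSecondInSecond>> cost = end_time - start_time;
  std::cout << "cost " << cost.count() << " us" << std::endl;
#else
  struct timeval start_time {};
  struct timeval end_time {};
  (void)gettimeofday(&start_time, nullptr);
  for (uint64_t i = 0; i < 1000000; ++i) sink += i;
  (void)gettimeofday(&end_time, nullptr);
  // seconds-to-microseconds conversion plus the sub-second remainder,
  // exactly the arithmetic the non-Windows branch performs
  uint64_t cost = kUSecondInSecond * static_cast<uint64_t>(end_time.tv_sec - start_time.tv_sec);
  cost += static_cast<uint64_t>(end_time.tv_usec - start_time.tv_usec);
  std::cout << "cost " << cost << " us" << std::endl;
#endif
  return 0;
}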

@@ -36,7 +36,7 @@ void SignalGuard::RegisterHandlers() {
   struct sigaction old_int_action;
   (void)sigaction(SIGINT, nullptr, &old_int_action);
   if (old_int_action.sa_sigaction != nullptr) {
-    MS_LOG(INFO) << "The signal has been registered";
+    MS_LOG(DEBUG) << "The signal has been registered";
     old_handler = old_int_action.sa_sigaction;
   }
   int_action.sa_sigaction = &IntHandler;
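
The hunk above first probes the currently installed SIGINT disposition by passing a null new action to sigaction, and only treats a non-null sa_sigaction as an already-registered handler. Here is a standalone POSIX sketch of that probe-then-install pattern; the handler name mirrors the hunk, but the body and surrounding code are illustrative, not MindSpore's:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

// Illustrative three-argument handler installed via SA_SIGINFO.
static void IntHandler(int, siginfo_t *, void *) {
  // Only async-signal-safe calls belong in a handler.
  const char msg[] = "SIGINT caught\n";
  (void)write(STDERR_FILENO, msg, sizeof(msg) - 1);
}

int main() {
  // Passing nullptr as the new action queries the current SIGINT
  // disposition without changing it -- the probe used in the hunk above.
  struct sigaction old_int_action {};
  (void)sigaction(SIGINT, nullptr, &old_int_action);
  if (old_int_action.sa_sigaction != nullptr) {
    printf("a SIGINT handler is already registered\n");
  }

  // Install our own handler.
  struct sigaction int_action {};
  int_action.sa_sigaction = &IntHandler;
  int_action.sa_flags = SA_SIGINFO;
  (void)sigaction(SIGINT, &int_action, nullptr);

  pause();  // wait for Ctrl-C to exercise the handler
  return 0;
}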

@@ -414,7 +414,7 @@ void KernelRuntime::UpdateRefNodeOutputMem(const session::KernelGraph *graph) {
     auto output_sizes = kernel_mod->GetOutputSizeList();
     if (output_sizes.empty()) {
-      MS_LOG(INFO) << "This kernel has no output size.";
+      MS_LOG(DEBUG) << "This kernel has no output size.";
       continue;
     }
     for (size_t i = 0; i < output_sizes.size(); ++i) {
@@ -922,7 +922,7 @@ void KernelRuntime::GenAddrCleanLaunchArgs(const CNodePtr &cnode, AddressPtrList
       input->size = device_address->size_;
       kernel_inputs->emplace_back(input);
     }
-    MS_LOG(INFO) << "AtomicAddClean clean output size:" << clean_output_indexes.size();
+    MS_LOG(DEBUG) << "AtomicAddClean clean output size:" << clean_output_indexes.size();
   }
   // set clean workspace address
   if (AnfAlgo::HasNodeAttr(kAttrAtomicWorkspaceIndexs, pre_node)) {