From c09501aad0e3e46b55bba9bec25ba940be90f4e0 Mon Sep 17 00:00:00 2001
From: yuchaojie
Date: Wed, 24 Nov 2021 11:21:51 +0800
Subject: [PATCH] add trace for exception log

Append trace::DumpSourceLines(...) to MS_LOG(EXCEPTION) messages so that
exception logs carry the source-line trace of the offending node, and
normalize the wording of those messages along the way.
---
 .../backend/kernel_compiler/common_utils.cc   |  4 +-
 .../backend/kernel_compiler/hccl/hcom_util.cc |  4 +-
 .../dynamic_broadcast_gradient_args_kernel.cc | 10 ++--
 .../host/dynamic_reshape_kernel.cc            | 10 ++--
 .../host/dynamic_shape_kernel.cc              |  4 +-
 .../backend/kernel_compiler/rts/assign.cc     |  2 +-
 .../kernel_compiler/tbe/tbe_adapter.cc        |  4 +-
 .../tbe_kernel_broadcast_selecter.cc          |  7 ++-
 .../tbe_kernel_select/tbe_property_checker.cc |  9 ++--
 .../insert_tensor_move_for_cascade.cc         |  2 +-
 .../change_axis_of_reduce_kernel.cc           |  8 +--
 .../convert_unsupported_transnode_to_aicpu.cc |  4 +-
 ...eal_ref_and_split_unsupported_transdata.cc |  6 +--
 .../ascend/format_type/merge_cast_to_op.cc    |  4 +-
 .../ir_fission/batch_norm_grad_split.cc       | 13 ++---
 .../ascend/ir_fission/bn_grad_split.cc        | 20 ++++----
 .../optimizer/ascend/ir_fission/bn_split.cc   |  9 ++--
 .../ascend/ir_fission/gather_v2_ds_fission.cc | 13 +++--
 .../max_pool3d_grad_grad_fission.cc           |  4 +-
 .../ascend/ir_fission/seed_adapter.cc         | 13 +++--
 .../ascend/ir_fission/split_fission.cc        |  7 +--
 .../ascend/ir_fusion/avgpool_3d_fusion.cc     | 41 ++++++++-------
 .../ir_fusion/avgpool_3d_grad_fusion.cc       |  9 ++--
 ...duce_grad_conv2d_backprop_filter_fusion.cc |  5 +-
 .../ascend/mindir/all_to_all_unify_mindir.cc  | 21 +++++---
 .../mindir/avg_pool_grad_unify_mindir.cc      | 44 +++++++++-------
 .../ascend/mindir/conv2d_unify_mindir.cc      | 27 +++++-----
 .../ascend/mindir/dropout_unify_mindir.cc     |  4 +-
 ...e_learned_scale_quant_grad_unify_mindir.cc | 50 +++++++++++--------
 .../mindir/maxpool_to_maxpool_with_argmax.cc  | 11 ++--
 .../maxpool_with_argmax_unify_mindir.cc       |  9 +++-
 .../neighbor_exchange_v2_unify_mindir.cc      | 43 +++++++++++-----
 .../ascend/mindir/slice_grad_unify_mindir.cc  |  8 ++-
 ..._cross_entropy_with_logits_unify_mindir.cc | 22 +++++---
 .../ccsrc/backend/optimizer/common/helper.cc  | 25 +++++-----
 .../optimizer/mem_reuse/mem_reuse_checker.cc  |  4 +-
 .../optimizer/pass/communication_op_fusion.cc |  4 +-
 .../pass/conv_transpose_to_conv_bp.cc         |  8 +--
 .../pass/custom_op_reg_info_to_attr.cc        |  2 +-
 .../optimizer/pass/optimize_dependence.cc     |  4 +-
 .../optimizer/trt_pass/graph_partitioner.cc   |  3 +-
 .../backend/session/anf_runtime_algorithm.cc  | 15 +++---
 .../ccsrc/backend/session/session_basic.cc    | 14 ++++--
 .../backend/session/single_kernel_graph.cc    |  6 ++-
 44 files changed, 331 insertions(+), 205 deletions(-)

diff --git a/mindspore/ccsrc/backend/kernel_compiler/common_utils.cc b/mindspore/ccsrc/backend/kernel_compiler/common_utils.cc
index e6f72214c59..349c8371d9d 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/common_utils.cc
+++ b/mindspore/ccsrc/backend/kernel_compiler/common_utils.cc
@@ -31,6 +31,7 @@
 #include "base/core_ops.h"
 #include "ir/graph_utils.h"
 #include "utils/ms_context.h"
+#include "utils/trace_base.h"
 #include "mindspore/ccsrc/debug/common.h"
 
 namespace mindspore {
@@ -640,7 +641,8 @@ bool IsWeightBoundary(const AnfNodePtr &node) {
 
 std::vector<int64_t> GetReduceAttrAxis(const CNodePtr &cnode) {
   if (AnfAlgo::GetInputTensorNum(cnode) != 1 || AnfAlgo::GetOutputTensorNum(cnode) != 1) {
-    MS_LOG(EXCEPTION) << "The reduce node [" << cnode->DebugString() << "] is not single input or single output.";
+    MS_LOG(EXCEPTION) << "The reduce node [" << cnode->DebugString()
trace: " << trace::DumpSourceLines(cnode); } std::vector axis; auto input_shape = AnfAlgo::GetPrevNodeOutputInferShape(cnode, 0); diff --git a/mindspore/ccsrc/backend/kernel_compiler/hccl/hcom_util.cc b/mindspore/ccsrc/backend/kernel_compiler/hccl/hcom_util.cc index b5c8763b000..e6635358530 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/hccl/hcom_util.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/hccl/hcom_util.cc @@ -20,6 +20,7 @@ #include "backend/session/anf_runtime_algorithm.h" #include "utils/ms_context.h" #include "utils/utils.h" +#include "utils/trace_base.h" namespace mindspore { namespace { @@ -272,7 +273,8 @@ void HcomUtil::GetHcomGroup(NotNull anf_node, NotNull(attr); } else { - MS_LOG(EXCEPTION) << "Get Hcom Group Attr of Op:" << anf_node->fullname_with_scope() << " failed"; + MS_LOG(EXCEPTION) << "Get Hcom Group Attr of Op:" << anf_node->fullname_with_scope() + << " failed. trace: " << trace::DumpSourceLines(anf_node); } } } // namespace mindspore diff --git a/mindspore/ccsrc/backend/kernel_compiler/host/dynamic_broadcast_gradient_args_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/host/dynamic_broadcast_gradient_args_kernel.cc index c733a9b1fb2..07cb69cb955 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/host/dynamic_broadcast_gradient_args_kernel.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/host/dynamic_broadcast_gradient_args_kernel.cc @@ -16,6 +16,7 @@ #include "backend/kernel_compiler/host/dynamic_broadcast_gradient_args_kernel.h" #include "backend/session/anf_runtime_algorithm.h" +#include "utils/trace_base.h" namespace mindspore { namespace kernel { @@ -124,10 +125,12 @@ std::vector GetInputShape(const CNodePtr &cnode, size_t index) { auto shape_x = AnfAlgo::GetPrevNodeOutputInferShape(cnode, index); auto type_x = AnfAlgo::GetOutputInferDataType(cnode, index); if (type_x != TypeId::kNumberTypeInt64) { - MS_LOG(EXCEPTION) << "Input x type must be int64, but :" << type_x; + MS_LOG(EXCEPTION) << "Input x type must be int64, but got " << type_x + << ". trace: " << trace::DumpSourceLines(cnode); } if (shape_x.size() != 1) { - MS_LOG(EXCEPTION) << "Input" << index << " must be [1-D], but " << shape_x.size() << "-D."; + MS_LOG(EXCEPTION) << "Input" << index << " must be [1-D], but got " << shape_x.size() + << "-D. trace: " << trace::DumpSourceLines(cnode); } size_t x_num = shape_x[0]; @@ -184,7 +187,8 @@ void DynamicBroadcastGradientArgsKernel::Execute() { MS_EXCEPTION_IF_NULL(cnode); auto input_num = AnfAlgo::GetInputTensorNum(cnode); if (input_num != kInputNum) { - MS_LOG(EXCEPTION) << "Invalid Input Num:" << input_num; + MS_LOG(EXCEPTION) << "Invalid input num, should be " << kInputNum << ", but got " << input_num + << ". 
trace: " << trace::DumpSourceLines(cnode); } std::vector> input_shapes(kInputNum); diff --git a/mindspore/ccsrc/backend/kernel_compiler/host/dynamic_reshape_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/host/dynamic_reshape_kernel.cc index 9145846f446..74da05dd22a 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/host/dynamic_reshape_kernel.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/host/dynamic_reshape_kernel.cc @@ -19,6 +19,7 @@ #include #include "backend/session/anf_runtime_algorithm.h" #include "abstract/utils.h" +#include "utils/trace_base.h" namespace mindspore { namespace kernel { @@ -29,12 +30,14 @@ std::vector GetInputValue(const CNodePtr &cnode, size_t index) { auto address_x = AnfAlgo::GetPrevNodeMutableOutputAddr(cnode, index); auto shape_x = AnfAlgo::GetPrevNodeOutputInferShape(cnode, index); if (shape_x.size() != 1) { - MS_LOG(EXCEPTION) << "Input" << index << " must be [1-D], but " << shape_x.size() << "-D."; + MS_LOG(EXCEPTION) << "Input" << index << " must be [1-D], but got " << shape_x.size() + << "-D. trace: " << trace::DumpSourceLines(cnode); } session::KernelWithIndex kernel_with_index = AnfAlgo::GetPrevNodeOutput(cnode, index); auto type_x = AnfAlgo::GetOutputInferDataType(kernel_with_index.first, kernel_with_index.second); if (type_x != TypeId::kNumberTypeInt64 && type_x != TypeId::kNumberTypeInt32) { - MS_LOG(EXCEPTION) << "Input x type must be int64 or int32, but :" << TypeIdToType(type_x); + MS_LOG(EXCEPTION) << "Input x type must be int64 or int32, but got " << TypeIdToType(type_x) + << ". trace: " << trace::DumpSourceLines(cnode); } size_t x_num = shape_x[0]; @@ -67,7 +70,8 @@ void DynamicReshapeKernel::Execute() { MS_EXCEPTION_IF_NULL(cnode); auto input_num = AnfAlgo::GetInputTensorNum(cnode); if (input_num != kInputNum) { - MS_LOG(EXCEPTION) << "Invalid Input Num:" << input_num; + MS_LOG(EXCEPTION) << "Invalid input num, should be " << kInputNum << ", but got " << input_num + << ". trace: " << trace::DumpSourceLines(cnode); } auto address_x = AnfAlgo::GetPrevNodeMutableOutputAddr(cnode, 0); diff --git a/mindspore/ccsrc/backend/kernel_compiler/host/dynamic_shape_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/host/dynamic_shape_kernel.cc index 76d773f9cb1..c9006d92138 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/host/dynamic_shape_kernel.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/host/dynamic_shape_kernel.cc @@ -16,6 +16,7 @@ #include "backend/kernel_compiler/host/dynamic_shape_kernel.h" #include "backend/session/anf_runtime_algorithm.h" +#include "utils/trace_base.h" namespace mindspore { namespace kernel { @@ -25,7 +26,8 @@ void DynamicShapeKernel::Execute() { MS_EXCEPTION_IF_NULL(cnode); auto input_num = AnfAlgo::GetInputTensorNum(cnode); if (input_num != 1) { - MS_LOG(EXCEPTION) << "Invalid Input Num:" << input_num; + MS_LOG(EXCEPTION) << "Op [" << cnode->DebugString() << "] has invalid input num, should be 1, but got " << input_num + << ". 
trace: " << trace::DumpSourceLines(cnode); } auto prev_output_shape = AnfAlgo::GetPrevNodeOutputInferShape(cnode, 0); diff --git a/mindspore/ccsrc/backend/kernel_compiler/rts/assign.cc b/mindspore/ccsrc/backend/kernel_compiler/rts/assign.cc index 276b80d7ad3..1b7416aafce 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/rts/assign.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/rts/assign.cc @@ -52,7 +52,7 @@ bool AssignKernel::Launch(const std::vector &inputs, const std::vect std::vector AssignKernel::GenTask(const std::vector &inputs, const std::vector &, const std::vector &, uint32_t stream_id) { if (inputs.size() != 2) { - MS_LOG(EXCEPTION) << "inputs size is not two, got " << inputs.size(); + MS_LOG(EXCEPTION) << "Inputs size should be 2, but got " << inputs.size(); } stream_id_ = stream_id; diff --git a/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_adapter.cc b/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_adapter.cc index 1328b026113..6624b99b48e 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_adapter.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_adapter.cc @@ -194,7 +194,7 @@ bool TbeAdapter::IsPlaceHolderInput(const AnfNodePtr &node, const OpIOInfoPtr &i auto none_index = AnfAlgo::GetNodeAttr>(node, "placeholder_index"); return find(none_index.begin(), none_index.end(), input_ptr->index()) != none_index.end(); } else { - MS_LOG(EXCEPTION) << "Cnode: " << cnode_name << "doesn't has attribute placeholder_index."; + MS_LOG(EXCEPTION) << "Cnode: " << cnode_name << " doesn't has attribute placeholder_index."; } } void TbeAdapter::CastAttrJsonPrePass(const AnfNodePtr &anf_node, std::vector *op_info_attrs, @@ -235,7 +235,7 @@ void TbeAdapter::CastAttrJsonPost(const AnfNodePtr &anf_node, nlohmann::json *at if (iter != dst_type_map.end()) { attrs_json->at(0)[kJValue] = iter->second; } else { - MS_LOG(EXCEPTION) << "Invalid type:" << type_id; + MS_LOG(EXCEPTION) << "Invalid type: " << type_id; } } void TbeAdapter::LayerNormAttrJsonPost(const AnfNodePtr &anf_node, nlohmann::json *attrs_json) { diff --git a/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_kernel_select/tbe_kernel_broadcast_selecter.cc b/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_kernel_select/tbe_kernel_broadcast_selecter.cc index ad3e11f3c66..97a3f542a65 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_kernel_select/tbe_kernel_broadcast_selecter.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_kernel_select/tbe_kernel_broadcast_selecter.cc @@ -15,6 +15,7 @@ */ #include "backend/kernel_compiler/tbe/tbe_kernel_select/tbe_kernel_broadcast_selecter.h" #include "utils/utils.h" +#include "utils/trace_base.h" #include "backend/session/anf_runtime_algorithm.h" #include "backend/kernel_compiler/tbe/tbe_kernel_select/common_utils.h" @@ -37,11 +38,13 @@ bool TbeKernelBroadCastSelecter::GetShapeInfo(SupportFormat *support_format) { auto dynamic_size_vec = AnfAlgo::GetNodeAttr>(cnode_ptr_, kAttrDynInputSizes); constexpr int64_t DYNAMIC_INPUT_NUM = 2; if (dynamic_size_vec.empty()) { - MS_LOG(EXCEPTION) << "Node [" << AnfAlgo::GetCNodeName(cnode_ptr_) << "]'s attr [dyn_input_sizes] is empty."; + MS_LOG(EXCEPTION) << "Node [" << AnfAlgo::GetCNodeName(cnode_ptr_) + << "]'s attr [dyn_input_sizes] is empty. 
trace: " << trace::DumpSourceLines(cnode_ptr_); } if (dynamic_size_vec[0] < DYNAMIC_INPUT_NUM) { MS_LOG(EXCEPTION) << "Node [" << AnfAlgo::GetCNodeName(cnode_ptr_) - << "]'s attr [dyn_input_sizes] value less than " << DYNAMIC_INPUT_NUM; + << "]'s attr [dyn_input_sizes] value less than " << DYNAMIC_INPUT_NUM + << ". trace: " << trace::DumpSourceLines(cnode_ptr_); } auto dynamic_input_shape0_ = AnfAlgo::GetPrevNodeOutputInferShape(cnode_ptr_, kInputIndex_0); PadScalarShape(&dynamic_input_shape0_); diff --git a/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_kernel_select/tbe_property_checker.cc b/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_kernel_select/tbe_property_checker.cc index b321acff4af..e5820bef192 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_kernel_select/tbe_property_checker.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_kernel_select/tbe_property_checker.cc @@ -19,6 +19,7 @@ #include #include "backend/session/anf_runtime_algorithm.h" #include "frontend/parallel/ops_info/ops_utils.h" +#include "utils/trace_base.h" namespace mindspore { namespace kernel { @@ -51,7 +52,7 @@ static bool CheckStridedSlice(const CNodePtr &cnode) { MS_EXCEPTION_IF_NULL(input_value); if (!input_value->isa()) { MS_LOG(EXCEPTION) << "For 'StrideSlice', the first input value should be a tensor, but got " - << input_value->ToString(); + << input_value->ToString() << ". trace: " << trace::DumpSourceLines(cnode); } input_dims = SizeToInt(input_value->cast()->shape().size()); } else if (input->isa() || input->isa()) { @@ -59,12 +60,12 @@ static bool CheckStridedSlice(const CNodePtr &cnode) { MS_EXCEPTION_IF_NULL(input_abstract); if (!input_abstract->isa()) { MS_LOG(EXCEPTION) << "For 'StrideSlice', the first input value should be a tensor, but got " - << input_abstract->ToString(); + << input_abstract->ToString() << ". trace: " << trace::DumpSourceLines(cnode); } input_dims = SizeToInt(input_abstract->cast()->shape()->shape().size()); } else { MS_LOG(EXCEPTION) << "For 'StrideSlice', the first input node should be a 'ValueNode' or a 'CNode', but got " - << input->ToString(); + << input->ToString() << ". trace: " << trace::DumpSourceLines(cnode); } const int base_number = 2; if (shrink_axis_mask >= std::pow(base_number, input_dims - 1) && input_dims > 1) { @@ -79,7 +80,7 @@ static bool CheckTopK(const CNodePtr &cnode) { auto sorted = AnfAlgo::GetNodeAttr(cnode, kAttrSorted); return sorted; } - MS_LOG(EXCEPTION) << "For 'TopK', it should be have attribute 'sorted'."; + MS_LOG(EXCEPTION) << "For 'TopK', it should be have attribute 'sorted'. 
trace: " << trace::DumpSourceLines(cnode); } bool TbePropertyChecker::CheckTbeProperties(const mindspore::CNodePtr &cnode) { diff --git a/mindspore/ccsrc/backend/optimizer/ascend/enhancer/insert_tensor_move_for_cascade.cc b/mindspore/ccsrc/backend/optimizer/ascend/enhancer/insert_tensor_move_for_cascade.cc index 01103964570..5b069d23703 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/enhancer/insert_tensor_move_for_cascade.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/enhancer/insert_tensor_move_for_cascade.cc @@ -46,7 +46,7 @@ bool IsPartOutputsOfHcclOp(const AnfNodePtr &node, const CNodePtr &cur_hccl, con auto &node_users = manager->node_users(); auto iter = node_users.find(prev_hccl_op); if (iter == node_users.end()) { - MS_LOG(EXCEPTION) << "node has no output in manager" + MS_LOG(EXCEPTION) << "Node has no output in manager" << " trace: " << trace::DumpSourceLines(cur_hccl); } for (const auto &node_index : iter->second) { diff --git a/mindspore/ccsrc/backend/optimizer/ascend/format_type/change_axis_of_reduce_kernel.cc b/mindspore/ccsrc/backend/optimizer/ascend/format_type/change_axis_of_reduce_kernel.cc index 9347d581685..9bc93b18884 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/format_type/change_axis_of_reduce_kernel.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/format_type/change_axis_of_reduce_kernel.cc @@ -21,6 +21,7 @@ #include #include "utils/utils.h" +#include "utils/trace_base.h" #include "backend/session/anf_runtime_algorithm.h" #include "backend/kernel_compiler/common_utils.h" @@ -40,11 +41,12 @@ const std::map kReduceConvertMap = {{kOpFormat_FRA void SafeCheckFunction(const CNodePtr &cnode, const std::vector &reduce_axis) { MS_EXCEPTION_IF_NULL(cnode); if (reduce_axis.empty()) { - MS_LOG(EXCEPTION) << "The node " << cnode->DebugString() << "'s reduce axis got a empty vector"; + MS_LOG(EXCEPTION) << "The node " << cnode->DebugString() + << "'s reduce axis got a empty vector, trace: " << trace::DumpSourceLines(cnode); } if (AnfAlgo::GetInputTensorNum(cnode) != 1 || AnfAlgo::GetOutputTensorNum(cnode) != 1) { - MS_LOG(EXCEPTION) << "the kind of reduce node [" << cnode->DebugString() - << "] is not single input or single output."; + MS_LOG(EXCEPTION) << "The kind of reduce node [" << cnode->DebugString() + << "] is not single input or single output. trace: " << trace::DumpSourceLines(cnode); } for (auto elem : reduce_axis) { if (elem > kAxisDim) { diff --git a/mindspore/ccsrc/backend/optimizer/ascend/format_type/convert_unsupported_transnode_to_aicpu.cc b/mindspore/ccsrc/backend/optimizer/ascend/format_type/convert_unsupported_transnode_to_aicpu.cc index 1e9ad4d8815..34d89def27d 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/format_type/convert_unsupported_transnode_to_aicpu.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/format_type/convert_unsupported_transnode_to_aicpu.cc @@ -47,8 +47,8 @@ const AnfNodePtr ConvertUnSupportNodeToAICPU::Process(const mindspore::FuncGraph AnfAlgo::SetSelectKernelBuildInfo(builder->Build(), node.get()); AnfAlgo::SetNodeAttr(kAttrIsAICPUKernel, MakeValue(true), node); } else { - MS_LOG(EXCEPTION) << " kernel " << kernel_builder_info->ToString() << "is not supported in AiCPU & AiCore : node [" - << node->DebugString() << "]" << trace::DumpSourceLines(node); + MS_LOG(EXCEPTION) << "Kernel " << kernel_builder_info->ToString() << "is not supported in AiCPU & AiCore : node [" + << node->DebugString() << "]. 
trace:" << trace::DumpSourceLines(node); } return nullptr; } diff --git a/mindspore/ccsrc/backend/optimizer/ascend/format_type/deal_ref_and_split_unsupported_transdata.cc b/mindspore/ccsrc/backend/optimizer/ascend/format_type/deal_ref_and_split_unsupported_transdata.cc index eb8881b0722..380d4f5d93a 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/format_type/deal_ref_and_split_unsupported_transdata.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/format_type/deal_ref_and_split_unsupported_transdata.cc @@ -42,7 +42,7 @@ session::KernelWithIndex DealRefAndSpiltUnSupportedTransdata::FindRefOriginNode( if (ref_infos.count(cur_out_index) != 0) { auto in_index = ref_infos.at(cur_out_index); if (in_index > cnode->inputs().size()) { - MS_LOG(EXCEPTION) << "ref op has wrong inputs: op inputs num is " << cnode->inputs().size() + MS_LOG(EXCEPTION) << "Ref op has wrong inputs: op inputs num is " << cnode->inputs().size() << ", ref info is " << cur_out_index; } AnfNodePtr next_node = cnode->input(in_index + 1); @@ -84,7 +84,7 @@ void DealRefAndSpiltUnSupportedTransdata::AddRefPairToKernelGraph(const FuncGrap auto final_ref = (final_node == get_item ? cnode : final_node); session::AnfWithOutIndex final_pair = std::make_pair(final_ref, final_index); if (kernel_graph->IsInRefOutputMap(final_pair)) { - MS_LOG(EXCEPTION) << "ref_pair is already in ref map, node is " << final_ref->DebugString() << ", index is " + MS_LOG(EXCEPTION) << "Ref_pair is already in ref map, node is " << final_ref->DebugString() << ", index is " << final_index; } MS_LOG(DEBUG) << "Add Ref pair, final {node ptr " << final_pair.first.get() << " , info is " @@ -214,7 +214,7 @@ CNodePtr DealRefAndSpiltUnSupportedTransdata::DealRefSingleOutput( } auto ref_info = *(ref_infos.begin()); if (ref_info.second > cnode->inputs().size()) { - MS_LOG(EXCEPTION) << "ref op has wrong inputs: op inputs num is " << cnode->inputs().size() << ", ref info is " + MS_LOG(EXCEPTION) << "Ref op has wrong inputs: op inputs num is " << cnode->inputs().size() << ", ref info is " << ref_info.second; } return AddAdditionalToRefOutput(func_graph, cnode, ref_info.first, ref_info.second, nullptr); diff --git a/mindspore/ccsrc/backend/optimizer/ascend/format_type/merge_cast_to_op.cc b/mindspore/ccsrc/backend/optimizer/ascend/format_type/merge_cast_to_op.cc index f6c395be9ee..8ec2c264643 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/format_type/merge_cast_to_op.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/format_type/merge_cast_to_op.cc @@ -23,6 +23,7 @@ #include "backend/session/anf_runtime_algorithm.h" #include "utils/utils.h" +#include "utils/trace_base.h" #include "base/core_ops.h" namespace mindspore { @@ -180,7 +181,8 @@ AnfNodePtr MergeCastToNextOp(const FuncGraphPtr &graph, const CNodePtr &node, co AnfAlgo::SetSelectKernelBuildInfo(*alternative_kernel_info, next_cnode.get()); if (AnfAlgo::GetInputTensorNum(node) < kCastInputTensorNum) { MS_LOG(EXCEPTION) << "Op[" << node->DebugString() << "] has wrong input num:" << AnfAlgo::GetInputTensorNum(node) - << ", should be not less than " << kCastInputTensorNum; + << ", should be not less than " << kCastInputTensorNum + << ". 
trace: " << trace::DumpSourceLines(node); } return node->input(1); } diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/batch_norm_grad_split.cc b/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/batch_norm_grad_split.cc index ef655422843..046aa9a321a 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/batch_norm_grad_split.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/batch_norm_grad_split.cc @@ -58,8 +58,9 @@ void BatchNormGradSplit::CreateOutputsOfReduceGrad(const FuncGraphPtr &graph, co const auto &bn_grad_inputs = bn_grad_node->inputs(); CheckCNodeInputSize(bn_grad_node, kBNGradInputTensorNum); if (bn_update_grad_outputs.size() != kBNTrainingUpdateGradOutputNum) { - MS_LOG(EXCEPTION) << "BNTrainingReduceGrad_outputs has wrong size" - << " trace: " << trace::DumpSourceLines(bn_grad_node); + MS_LOG(EXCEPTION) << "Outputs of BNTrainingReduceGrad has wrong size, should be " << kBNTrainingUpdateGradOutputNum + << ", but got " << bn_update_grad_outputs.size() + << ". trace: " << trace::DumpSourceLines(bn_grad_node); } std::vector bn_reduce_grad_inputs = { NewValueNode(std::make_shared(kBNTrainingReduceGradOpName)), @@ -110,15 +111,15 @@ const AnfNodePtr BatchNormGradSplit::Process(const FuncGraphPtr &func_graph, con std::vector bn_update_grad_outputs; CreateOutputsOfUpdateGrad(func_graph, cnode, &bn_update_grad_outputs); if (bn_update_grad_outputs.size() != kBNTrainingUpdateGradOutputNum) { - MS_LOG(EXCEPTION) << "bn_update_grad_outputs has wrong size" - << " trace: " << trace::DumpSourceLines(node); + MS_LOG(EXCEPTION) << "Outputs of bn_update_grad has wrong size, should be " << kBNTrainingUpdateGradOutputNum + << ", but got " << bn_update_grad_outputs.size() << ". trace: " << trace::DumpSourceLines(node); } std::vector bn_reduce_grad_outputs; CreateOutputsOfReduceGrad(func_graph, cnode, bn_update_grad_outputs, &bn_reduce_grad_outputs); if (bn_reduce_grad_outputs.size() != kSingleOutputNum) { - MS_LOG(EXCEPTION) << "bn_reduce_grad_outputs has wrong size" - << " trace: " << trace::DumpSourceLines(node); + MS_LOG(EXCEPTION) << "Outputs of bn_reduce_grad has wrong size, should be " << kSingleOutputNum << ", but got " + << bn_reduce_grad_outputs.size() << ". trace: " << trace::DumpSourceLines(node); } std::vector make_tuple_inputs = {NewValueNode(prim::kPrimMakeTuple), bn_reduce_grad_outputs[0], diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/bn_grad_split.cc b/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/bn_grad_split.cc index 7ebfbc7a37c..88aa492d3bb 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/bn_grad_split.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/bn_grad_split.cc @@ -58,7 +58,9 @@ void BnGradSplit::CreateOutputsOfReduceGrad(const FuncGraphPtr &graph, const CNo auto bn_grad_inputs = bn_grad_node->inputs(); CheckCNodeInputSize(bn_grad_node, kBNGradInputTensorNum); if (bn_update_grad_outputs.size() != kBNTrainingUpdateGradOutputNum) { - MS_LOG(EXCEPTION) << "bn_update_grad_outputs has wrong size"; + MS_LOG(EXCEPTION) << "Outputs of bn_update_grad has wrong size, should be " << kBNTrainingUpdateGradOutputNum + << ", but got " << bn_update_grad_outputs.size() + << ". 
trace: " << trace::DumpSourceLines(bn_grad_node); } std::vector bn_reduce_grad_inputs = { NewValueNode(std::make_shared(kBNTrainingReduceGradOpName)), @@ -87,15 +89,15 @@ CNodePtr BnGradSplit::BNGradSplitForTBE(const FuncGraphPtr &func_graph, const CN std::vector bn_update_grad_outputs; CreateOutputsOfUpdateGrad(func_graph, cnode, &bn_update_grad_outputs); if (bn_update_grad_outputs.size() != kBNTrainingUpdateGradOutputNum) { - MS_LOG(EXCEPTION) << "bn_update_grad_outputs has wrong size" - << " trace: " << trace::DumpSourceLines(cnode); + MS_LOG(EXCEPTION) << "Outputs of bn_update_grad has wrong size, should be " << kBNTrainingUpdateGradOutputNum + << ", but got " << bn_update_grad_outputs.size() << ". trace: " << trace::DumpSourceLines(cnode); } std::vector bn_reduce_grad_outputs; CreateOutputsOfReduceGrad(func_graph, cnode, bn_update_grad_outputs, &bn_reduce_grad_outputs); if (bn_reduce_grad_outputs.size() != 1) { - MS_LOG(EXCEPTION) << "bn_reduce_grad_outputs has wrong size" - << " trace: " << trace::DumpSourceLines(cnode); + MS_LOG(EXCEPTION) << "Outputs of bn_reduce_grad has wrong size, should be " << 1 << ", but got " + << bn_reduce_grad_outputs.size() << ". trace: " << trace::DumpSourceLines(cnode); } std::vector make_tuple_inputs = {NewValueNode(prim::kPrimMakeTuple), bn_reduce_grad_outputs[0], @@ -112,8 +114,8 @@ CNodePtr SyncBnGradSplit::SyncBNGradSplitForTBE(const FuncGraphPtr &func_graph, CreateOutputsOfUpdateGrad(func_graph, cnode, &bn_update_grad_outputs); if (bn_update_grad_outputs.size() != kBNTrainingUpdateGradOutputNum) { - MS_LOG(EXCEPTION) << "bn_update_grad_outputs has wrong size" - << " trace: " << trace::DumpSourceLines(cnode); + MS_LOG(EXCEPTION) << "Outputs of bn_update_grad has wrong size, should be " << kBNTrainingUpdateGradOutputNum + << ", but got " << bn_update_grad_outputs.size() << ". trace: " << trace::DumpSourceLines(cnode); } std::vector allreduce_mul_outputs; @@ -125,8 +127,8 @@ CNodePtr SyncBnGradSplit::SyncBNGradSplitForTBE(const FuncGraphPtr &func_graph, std::vector bn_reduce_grad_outputs; CreateOutputsOfReduceGrad(func_graph, cnode, allreduce_mul_outputs, &bn_reduce_grad_outputs); if (bn_reduce_grad_outputs.size() != 1) { - MS_LOG(EXCEPTION) << "bn_reduce_grad_outputs has wrong size" - << " trace: " << trace::DumpSourceLines(cnode); + MS_LOG(EXCEPTION) << "Outputs of bn_reduce_grad has wrong size, should be " << 1 << ", but got " + << bn_reduce_grad_outputs.size() << ". trace: " << trace::DumpSourceLines(cnode); } std::vector make_tuple_inputs = {NewValueNode(prim::kPrimMakeTuple), bn_reduce_grad_outputs[0], diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/bn_split.cc b/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/bn_split.cc index e7c929bd10c..5a57a6e1f8a 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/bn_split.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/bn_split.cc @@ -161,11 +161,13 @@ AnfNodePtr CreateValueNodeOfDeviceNumReciprocal(const FuncGraphPtr &graph, const MS_EXCEPTION_IF_NULL(graph); MS_EXCEPTION_IF_NULL(sync_bn_cnode); if (!AnfAlgo::HasNodeAttr(kDeviceNum, sync_bn_cnode)) { - MS_LOG(EXCEPTION) << "The node [" << sync_bn_cnode->DebugString() << "] does not have attr device_num."; + MS_LOG(EXCEPTION) << "The node [" << sync_bn_cnode->DebugString() + << "] does not have attr device_num. 
trace: " << trace::DumpSourceLines(sync_bn_cnode); } auto device_num = AnfAlgo::GetNodeAttr(sync_bn_cnode, kDeviceNum); if (device_num == 0) { - MS_LOG(EXCEPTION) << "The device_num attr of node [" << sync_bn_cnode->DebugString() << "] should not be 0"; + MS_LOG(EXCEPTION) << "The device_num attr of node [" << sync_bn_cnode->DebugString() + << "] should not be 0. trace: " << trace::DumpSourceLines(sync_bn_cnode); } MS_LOG(INFO) << "device_num value: " << device_num; const float device_num_reciprocal = 1.0 / device_num; @@ -224,7 +226,8 @@ AnfNodePtr CreateAllReduceAndMul(const FuncGraphPtr &graph, const AnfNodePtr &al auto sync_bn_opname = sync_bn_cnode->fullname_with_scope(); auto opid_pos = sync_bn_opname.rfind("-op"); if (opid_pos == std::string::npos || opid_pos + kPositionOffset >= sync_bn_opname.size()) { - MS_LOG(EXCEPTION) << "op[" << sync_bn_cnode->DebugString() << "] has no opid."; + MS_LOG(EXCEPTION) << "Op[" << sync_bn_cnode->DebugString() + << "] has no opid. trace: " << trace::DumpSourceLines(sync_bn_cnode); return nullptr; } int64_t opid = std::stol(sync_bn_opname.substr(opid_pos + kPositionOffset)); diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/gather_v2_ds_fission.cc b/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/gather_v2_ds_fission.cc index ee7c0d5e636..bb0faf5bd5f 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/gather_v2_ds_fission.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/gather_v2_ds_fission.cc @@ -20,6 +20,7 @@ #include "backend/session/anf_runtime_algorithm.h" #include "ir/primitive.h" #include "utils/utils.h" +#include "utils/trace_base.h" namespace mindspore { namespace opt { @@ -64,16 +65,19 @@ CNodePtr GatherV2DsFission::CreatePad(const FuncGraphPtr &graph, const CNodePtr auto param_abstract_shape = origin_node->input(1)->Shape(); MS_EXCEPTION_IF_NULL(param_abstract_shape); if (!param_abstract_shape->isa()) { - MS_LOG(EXCEPTION) << "The node [" << origin_node->DebugString() << "]'s first input has wrong shape type."; + MS_LOG(EXCEPTION) << "The node [" << origin_node->DebugString() + << "]'s first input has wrong shape type. trace: " << trace::DumpSourceLines(origin_node); } auto param_dyn_shape = param_abstract_shape->cast(); ShapeVector shape(param_dyn_shape->shape()); if (shape.empty()) { - MS_LOG(EXCEPTION) << "The shape of node [" << origin_node->DebugString() << "]'s first input is empty."; + MS_LOG(EXCEPTION) << "The shape of node [" << origin_node->DebugString() + << "]'s first input is empty. trace: " << trace::DumpSourceLines(origin_node); } if (shape[shape.size() - 1] == -1) { MS_LOG(EXCEPTION) << "The node [" << origin_node->DebugString() - << "]'s first input should not be dynamic, shape:" << shape; + << "]'s first input should not be dynamic, but got shape:" << shape + << ". trace: " << trace::DumpSourceLines(origin_node); } shape[shape.size() - 1] = SizeToLong(pad_dim_size); auto type_id = AnfAlgo::GetPrevNodeOutputInferDataType(origin_node, 0); @@ -113,7 +117,8 @@ CNodePtr GatherV2DsFission::CreateGatherV2Ds(const FuncGraphPtr &graph, const CN MS_EXCEPTION_IF_NULL(origin_node); MS_EXCEPTION_IF_NULL(pad); if (origin_node->size() != kGatherInputNum) { - MS_LOG(EXCEPTION) << "In dynamic shape scene, gatherv2 should have 3 inputs"; + MS_LOG(EXCEPTION) << "In dynamic shape scene, gatherv2 should have 3 inputs, but got " << origin_node->size() + << ". 
trace: " << trace::DumpSourceLines(origin_node); } std::vector gatherv2_inputs = {NewValueNode(std::make_shared(prim::kPrimGather->name())), pad, origin_node->input(kGatherInputIndicesIndex), diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/max_pool3d_grad_grad_fission.cc b/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/max_pool3d_grad_grad_fission.cc index 9ae9496d0b3..86de6a8b8a1 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/max_pool3d_grad_grad_fission.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/max_pool3d_grad_grad_fission.cc @@ -20,6 +20,7 @@ #include "backend/session/anf_runtime_algorithm.h" #include "frontend/optimizer/opt.h" #include "backend/optimizer/common/helper.h" +#include "utils/trace_base.h" namespace mindspore { namespace opt { @@ -38,7 +39,8 @@ tensor::TensorPtr CreateTensor(const AnfNodePtr &node) { MS_LOG(ERROR) << "MaxPool3DGradGrad only support NCDHW format, but got " << data_format; } if (ksize.size() != kKernelSizeNum) { - MS_LOG(EXCEPTION) << "kernel_size of MaxPool3DGradGrad must be five, but got :" << ksize; + MS_LOG(EXCEPTION) << "kernel_size of MaxPool3DGradGrad must be five, but got " << ksize + << ". trace: " << trace::DumpSourceLines(node); } int64_t d = ksize[kDim2]; int64_t h = ksize[kDim3]; diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/seed_adapter.cc b/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/seed_adapter.cc index a1208c62567..acc9885ba75 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/seed_adapter.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/seed_adapter.cc @@ -21,6 +21,7 @@ #include "backend/optimizer/common/helper.h" #include "backend/kernel_compiler/kernel_build_info.h" #include "utils/utils.h" +#include "utils/trace_base.h" #include "backend/session/kernel_graph.h" #include "backend/session/anf_runtime_algorithm.h" #include "runtime/device/kernel_info.h" @@ -70,21 +71,25 @@ std::vector ConvertAttrToValueNode(const std::shared_ptr ret = {}; auto attrs = op_info->attrs_ptr(); if (attrs.empty()) { - MS_LOG(EXCEPTION) << "Node(" << cnode->DebugString() << ") doesn't have any attrs."; + MS_LOG(EXCEPTION) << "Node(" << cnode->DebugString() + << ") doesn't have any attrs. trace: " << trace::DumpSourceLines(cnode); } for (const auto &attr : attrs) { if (!AnfAlgo::HasNodeAttr(attr->name(), cnode)) { - MS_LOG(EXCEPTION) << "Node(" << cnode->DebugString() << ") doesn't have attr(" << attr->name() << ")"; + MS_LOG(EXCEPTION) << "Node(" << cnode->DebugString() << ") doesn't have attr(" << attr->name() + << "). trace: " << trace::DumpSourceLines(cnode); } auto attr_value = AnfAlgo::GetNodeAttr(cnode, attr->name()); auto value_node = CreateValueNode(attr_value); if (value_node == nullptr) { - MS_LOG(EXCEPTION) << "Create value node error, node: " << cnode->DebugString() << ", seed value: " << attr_value; + MS_LOG(EXCEPTION) << "Create value node error, node: " << cnode->DebugString() << ", seed value: " << attr_value + << ". trace: " << trace::DumpSourceLines(cnode); } ret.emplace_back(value_node); } if (ret.empty()) { - MS_LOG(EXCEPTION) << "Node(" << cnode->DebugString() << ") doesn't have any matched attrs."; + MS_LOG(EXCEPTION) << "Node(" << cnode->DebugString() + << ") doesn't have any matched attrs. 
trace: " << trace::DumpSourceLines(cnode); } return ret; } diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/split_fission.cc b/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/split_fission.cc index 4f7866e687d..d23500fa807 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/split_fission.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/split_fission.cc @@ -35,10 +35,11 @@ size_t GetSmallSplitSize(const AnfNodePtr &split_node, int64_t split_dim, int64_ split_dim += SizeToLong(input_shape.size()); } if (LongToSize(split_dim) >= input_shape.size()) { - MS_LOG(EXCEPTION) << "The split_dim value should be less than the shape size of input 0"; + MS_LOG(EXCEPTION) << "The split_dim value should be less than the shape size of input 0. trace: " + << trace::DumpSourceLines(split_node); } if (num_split == 0) { - MS_LOG(EXCEPTION) << "Divisor 'num_split' should not be 0."; + MS_LOG(EXCEPTION) << "Divisor 'num_split' should not be 0. trace: " << trace::DumpSourceLines(split_node); } return input_shape[LongToSize(split_dim)] / LongToSize(num_split); } @@ -92,7 +93,7 @@ void SetAttrAndAbstractForBaseSplitv(const CNodePtr &origin_cnode, const CNodePt split_dim += SizeToLong(output_shape.size()); } if (split_dim < 0) { - MS_LOG(EXCEPTION) << "Error split dim: " << split_dim; + MS_LOG(EXCEPTION) << "Error split dim: " << split_dim << ", trace: " << trace::DumpSourceLines(origin_cnode); } auto split_dim_l = LongToSize(split_dim); auto num_split_l = LongToSize(num_split); diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/avgpool_3d_fusion.cc b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/avgpool_3d_fusion.cc index 90e08b276f3..8ef380e46a9 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/avgpool_3d_fusion.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/avgpool_3d_fusion.cc @@ -23,6 +23,7 @@ #include "backend/optimizer/common/helper.h" #include "base/core_ops.h" #include "utils/utils.h" +#include "utils/trace_base.h" namespace mindspore { namespace opt { @@ -70,7 +71,7 @@ bool GetKernelSize(const AnfNodePtr &node, int64_t *kd, int64_t *kh, int64_t *kw *kh = kernel_size[kDim3]; *kw = kernel_size[kDim4]; } else { - MS_LOG(EXCEPTION) << "Unknown kernel size " << kernel_size.size(); + MS_LOG(EXCEPTION) << "Unknown kernel size " << kernel_size.size() << ", trace: " << trace::DumpSourceLines(node); } return true; } @@ -83,22 +84,22 @@ bool GetStrideSize(const AnfNodePtr &node, int64_t *sd, int64_t *sh, int64_t *sw MS_EXCEPTION_IF_NULL(sh); MS_EXCEPTION_IF_NULL(sw); if (AnfAlgo::HasNodeAttr("strides", node->cast())) { - auto kernel_size = AnfAlgo::GetNodeAttr>(node, "strides"); - if (kernel_size.size() == 1) { - *sd = kernel_size[kDim0]; - *sh = kernel_size[kDim0]; - *sw = kernel_size[kDim0]; - } else if (kernel_size.size() == kDHWDimNum) { - *sd = kernel_size[kDim0]; - *sh = kernel_size[kDim1]; - *sw = kernel_size[kDim2]; - } else if (kernel_size.size() == kNCDHWDimNum) { + auto stride_size = AnfAlgo::GetNodeAttr>(node, "strides"); + if (stride_size.size() == 1) { + *sd = stride_size[kDim0]; + *sh = stride_size[kDim0]; + *sw = stride_size[kDim0]; + } else if (stride_size.size() == kDHWDimNum) { + *sd = stride_size[kDim0]; + *sh = stride_size[kDim1]; + *sw = stride_size[kDim2]; + } else if (stride_size.size() == kNCDHWDimNum) { // NCDHW - *sd = kernel_size[kDim2]; - *sh = kernel_size[kDim3]; - *sw = kernel_size[kDim4]; + *sd = stride_size[kDim2]; + *sh = stride_size[kDim3]; + *sw = stride_size[kDim4]; } else { - 
MS_LOG(EXCEPTION) << "Unknown strides size " << kernel_size.size(); + MS_LOG(EXCEPTION) << "Unknown strides size " << stride_size.size() << ", trace: " << trace::DumpSourceLines(node); } return true; } @@ -109,7 +110,7 @@ void GetAttrs(const AnfNodePtr &node, std::vector *pad_list, bool *coun int64_t *divisor_override) { MS_EXCEPTION_IF_NULL(node); if (!AnfAlgo::HasNodeAttr("pad_list", node->cast())) { - MS_LOG(EXCEPTION) << "AvgPool3D should has attr pad_list"; + MS_LOG(EXCEPTION) << "AvgPool3D should has attr pad_list, trace: " << trace::DumpSourceLines(node); } *pad_list = AnfAlgo::GetNodeAttr>(node, "pad_list"); if (AnfAlgo::HasNodeAttr("count_include_pad", node->cast())) { @@ -259,7 +260,9 @@ const AnfNodePtr AvgPool3DFusion::Process(const FuncGraphPtr &func_graph, const auto dims_in = AnfAlgo::GetPrevNodeOutputInferShape(avg_pool_3d_node, 0); auto dims_out = AnfAlgo::GetOutputInferShape(avg_pool_3d_node, 0); if (dims_in.size() < k5DInferDims || dims_out.size() < k5DInferDims) { - MS_LOG(EXCEPTION) << "AvgPool3D's in_out infer shape dims can not be less " << k5DInferDims; + MS_LOG(EXCEPTION) << "AvgPool3D's in_out infer shape dims can not be less " << k5DInferDims + << ", but got in_shape is " << dims_in.size() << "-D, out_shape is " << dims_out.size() + << "-D. trace: " << trace::DumpSourceLines(node); } auto fn = SizeToLong(dims_in[kDim0]); auto fc = SizeToLong(dims_in[kDim1]); @@ -274,14 +277,14 @@ const AnfNodePtr AvgPool3DFusion::Process(const FuncGraphPtr &func_graph, const int64_t kh; int64_t kw; if (!GetKernelSize(avg_pool_3d_node, &kd, &kh, &kw)) { - MS_LOG(EXCEPTION) << "GetK kernel size failed"; + MS_LOG(EXCEPTION) << "Get kernel size failed, trace: " << trace::DumpSourceLines(node); } // strides int64_t sd; int64_t sh; int64_t sw; if (!GetStrideSize(avg_pool_3d_node, &sd, &sh, &sw)) { - MS_LOG(EXCEPTION) << "GetK stride size failed"; + MS_LOG(EXCEPTION) << "Get stride size failed, trace: " << trace::DumpSourceLines(node); } std::vector pad_list; bool count_include_pad = false; diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/avgpool_3d_grad_fusion.cc b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/avgpool_3d_grad_fusion.cc index 9bc44ed5b05..cde48420e44 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/avgpool_3d_grad_fusion.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/avgpool_3d_grad_fusion.cc @@ -24,6 +24,7 @@ #include "backend/optimizer/common/helper.h" #include "base/core_ops.h" #include "utils/utils.h" +#include "utils/trace_base.h" namespace mindspore { namespace opt { @@ -43,22 +44,22 @@ void GetAttrs(const AnfNodePtr &node, std::vector *kernel_size, std::ve MS_EXCEPTION_IF_NULL(node); // attr kernel size if (!AnfAlgo::HasNodeAttr("kernel_size", node->cast())) { - MS_LOG(EXCEPTION) << "AvgPool3D should has attr kernel_size"; + MS_LOG(EXCEPTION) << "AvgPool3D should has attr kernel_size, trace: " << trace::DumpSourceLines(node); } *kernel_size = AnfAlgo::GetNodeAttr>(node, "kernel_size"); // attr strides if (!AnfAlgo::HasNodeAttr("strides", node->cast())) { - MS_LOG(EXCEPTION) << "AvgPool3D should has attr strides"; + MS_LOG(EXCEPTION) << "AvgPool3D should has attr strides, trace: " << trace::DumpSourceLines(node); } *strides = AnfAlgo::GetNodeAttr>(node, "strides"); // sttr pad_list if (!AnfAlgo::HasNodeAttr("pad_list", node->cast())) { - MS_LOG(EXCEPTION) << "AvgPool3D should has attr pad_list"; + MS_LOG(EXCEPTION) << "AvgPool3D should has attr pad_list, trace: " << trace::DumpSourceLines(node); } *pad_list = 
AnfAlgo::GetNodeAttr>(node, "pad_list"); // attr origin input shape if (!AnfAlgo::HasNodeAttr("origin_input_shape", node->cast())) { - MS_LOG(EXCEPTION) << "AvgPool3D should has attr origin_input_shape"; + MS_LOG(EXCEPTION) << "AvgPool3D should has attr origin_input_shape, trace: " << trace::DumpSourceLines(node); } *origin_input_shape = AnfAlgo::GetNodeAttr>(node, "origin_input_shape"); // attr count include pad diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/bn_reduce_grad_conv2d_backprop_filter_fusion.cc b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/bn_reduce_grad_conv2d_backprop_filter_fusion.cc index 14c7d79f105..69f9dc462a0 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/bn_reduce_grad_conv2d_backprop_filter_fusion.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/bn_reduce_grad_conv2d_backprop_filter_fusion.cc @@ -20,6 +20,7 @@ #include "backend/session/anf_runtime_algorithm.h" #include "ir/primitive.h" #include "utils/utils.h" +#include "utils/trace_base.h" #include "base/core_ops.h" #include "abstract/abstract_value.h" #include "backend/optimizer/common/helper.h" @@ -38,7 +39,9 @@ bool CheckSupported(const CNodePtr &conv_back_filter) { auto x_shape = AnfAlgo::GetPrevNodeOutputInferShape(conv_back_filter, 1); auto out_shape = AnfAlgo::GetOutputInferShape(conv_back_filter, 0); if (y_shape.size() != kNCHWShapeSize || x_shape.size() != kNCHWShapeSize || out_shape.size() != kNCHWShapeSize) { - MS_LOG(EXCEPTION) << "The dim of Conv2dBackpropFilter's input and output should be 4"; + MS_LOG(EXCEPTION) << "The dim of Conv2dBackpropFilter's input and output should be 4, but got y_shape is " + << y_shape.size() << "-D, x_shape is " << x_shape.size() << "-D, out_shape is " + << out_shape.size() << "-D. trace: " << trace::DumpSourceLines(conv_back_filter); } const std::set kSupportedBatchSize = {32, 256}; if (kSupportedBatchSize.find(x_shape[0]) == kSupportedBatchSize.end()) { diff --git a/mindspore/ccsrc/backend/optimizer/ascend/mindir/all_to_all_unify_mindir.cc b/mindspore/ccsrc/backend/optimizer/ascend/mindir/all_to_all_unify_mindir.cc index 278670098c2..92cd0a5babd 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/mindir/all_to_all_unify_mindir.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/mindir/all_to_all_unify_mindir.cc @@ -17,6 +17,7 @@ #include "backend/optimizer/ascend/mindir/all_to_all_unify_mindir.h" #include #include +#include "utils/trace_base.h" #include "backend/session/anf_runtime_algorithm.h" #include "runtime/hccl_adapter/hccl_adapter.h" #include "backend/optimizer/common/helper.h" @@ -37,7 +38,8 @@ void ChangePrimitiveToAllToAllV(const AnfNodePtr &node) { MS_EXCEPTION_IF_NULL(neighbor_exchange); if (neighbor_exchange->size() == kCNodePrimitiveIdx) { - MS_LOG(EXCEPTION) << "Invalid cnode " << node->DebugString() << " input size " << neighbor_exchange->size(); + MS_LOG(EXCEPTION) << "Inputs should not be empty for cnode " << node->DebugString() + << ". trace: " << trace::DumpSourceLines(neighbor_exchange); } auto prim = GetValueNode(neighbor_exchange->input(kCNodePrimitiveIdx)); @@ -62,7 +64,8 @@ CNodePtr AllToAllUnifyMindIR::CreateSplitNode(const FuncGraphPtr &graph, const C int64_t split_dim = AnfAlgo::GetNodeAttr(all_to_all, kAttrSplitDim); if (all_to_all->size() <= kAllToAllInputIdx) { - MS_LOG(EXCEPTION) << "Invalid cnode " << all_to_all->DebugString() << " input size " << all_to_all->size(); + MS_LOG(EXCEPTION) << "Inputs should not be empty for cnode " << all_to_all->DebugString() + << ". 
trace: " << trace::DumpSourceLines(all_to_all); } auto all_to_all_input = all_to_all->input(kAllToAllInputIdx); std::vector split_input = {NewValueNode(std::make_shared(prim::kPrimSplitV->name())), @@ -73,11 +76,12 @@ CNodePtr AllToAllUnifyMindIR::CreateSplitNode(const FuncGraphPtr &graph, const C auto shape = AnfAlgo::GetOutputInferShape(all_to_all_input, 0); split_dim = NormalizeDim(shape, split_dim); if (SizeToLong(shape.size()) <= split_dim) { - MS_LOG(EXCEPTION) << "Invalid split dim " << split_dim << " is over the shape size " << shape.size(); + MS_LOG(EXCEPTION) << "Invalid split dim " << split_dim << " is over the shape size " << shape.size() + << ". trace: " << trace::DumpSourceLines(all_to_all); } if (split_count == 0 || shape[LongToSize(split_dim)] % static_cast(split_count) != 0) { MS_LOG(EXCEPTION) << "Invalid split count " << split_count << " cannot be divisible by shape[" << split_dim - << "] = " << shape[LongToSize(split_dim)]; + << "] = " << shape[LongToSize(split_dim)] << ". trace: " << trace::DumpSourceLines(all_to_all); } shape[LongToSize(split_dim)] /= static_cast(split_count); std::vector dtypes(split_count, dtype); @@ -101,7 +105,8 @@ CNodePtr AllToAllUnifyMindIR::CreateAllToAllvNode(const FuncGraphPtr &graph, con std::vector split_outputs; CreateMultipleOutputsOfAnfNode(graph, split, static_cast(split_count), &split_outputs); if (split_outputs.empty()) { - MS_LOG(EXCEPTION) << "The node " << split->DebugString() << " should have at least one output, but got 0."; + MS_LOG(EXCEPTION) << "The node " << split->DebugString() + << " should have at least one output, but got 0. trace: " << trace::DumpSourceLines(split); } std::vector all_to_all_v_input = {NewValueNode(std::make_shared(kAllToAllVOpName))}; (void)all_to_all_v_input.insert(all_to_all_v_input.end(), split_outputs.begin(), split_outputs.end()); @@ -135,7 +140,8 @@ CNodePtr AllToAllUnifyMindIR::CreateConcatNode(const FuncGraphPtr &graph, const std::vector all_to_all_v_outputs; CreateMultipleOutputsOfAnfNode(graph, all_to_all_v, static_cast(split_count), &all_to_all_v_outputs); if (all_to_all_v_outputs.empty()) { - MS_LOG(EXCEPTION) << "The node " << all_to_all_v->DebugString() << " should have at least one output, but got 0."; + MS_LOG(EXCEPTION) << "The node " << all_to_all_v->DebugString() + << " should have at least one output, but got 0. trace: " << trace::DumpSourceLines(all_to_all_v); } std::vector concat_input = {NewValueNode(std::make_shared(kConcatOpName))}; (void)concat_input.insert(concat_input.end(), all_to_all_v_outputs.begin(), all_to_all_v_outputs.end()); @@ -144,7 +150,8 @@ CNodePtr AllToAllUnifyMindIR::CreateConcatNode(const FuncGraphPtr &graph, const auto single_shape = AnfAlgo::GetOutputInferShape(all_to_all_v_outputs[0], 0); concat_dim = NormalizeDim(single_shape, concat_dim); if (LongToSize(concat_dim) >= single_shape.size()) { - MS_LOG(EXCEPTION) << "Invalid concat dim " << concat_dim << " is greater than shape size " << single_shape.size(); + MS_LOG(EXCEPTION) << "Invalid concat dim " << concat_dim << " is greater than shape size " << single_shape.size() + << ". 
trace: " << trace::DumpSourceLines(all_to_all); } single_shape[LongToSize(concat_dim)] *= static_cast(split_count); AnfAlgo::SetOutputInferTypeAndShape({AnfAlgo::GetOutputInferDataType(all_to_all_v_outputs[0], 0)}, {single_shape}, diff --git a/mindspore/ccsrc/backend/optimizer/ascend/mindir/avg_pool_grad_unify_mindir.cc b/mindspore/ccsrc/backend/optimizer/ascend/mindir/avg_pool_grad_unify_mindir.cc index 03be3155df3..38baa19d833 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/mindir/avg_pool_grad_unify_mindir.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/mindir/avg_pool_grad_unify_mindir.cc @@ -24,6 +24,7 @@ #include "utils/utils.h" #include "utils/check_convert_utils.h" #include "utils/convert_utils_base.h" +#include "utils/trace_base.h" #include "backend/optimizer/common/helper.h" #include "runtime/device/kernel_info.h" #include "backend/session/anf_runtime_algorithm.h" @@ -43,15 +44,15 @@ std::vector GetInputXShape(const AnfNodePtr &node) { return shapes; } -int64_t windowed_output_size(int64_t input_size, int64_t ksize, int64_t stride, PadMode pad_mode, int64_t *pad_before, - int64_t *pad_after) { +int64_t windowed_output_size(const AnfNodePtr &node, int64_t input_size, int64_t ksize, int64_t stride, + PadMode pad_mode, int64_t *pad_before, int64_t *pad_after) { MS_EXCEPTION_IF_NULL(pad_before); MS_EXCEPTION_IF_NULL(pad_after); int64_t output = 0; *pad_before = 0; *pad_after = 0; if (stride == 0) { - MS_LOG(EXCEPTION) << "The stride of AvgPoolGrad should not be 0."; + MS_LOG(EXCEPTION) << "The stride of AvgPoolGrad should not be 0. trace: " << trace::DumpSourceLines(node); return 0; } if (pad_mode == PadMode::VALID) { @@ -62,13 +63,15 @@ int64_t windowed_output_size(int64_t input_size, int64_t ksize, int64_t stride, *pad_before = pad_need / 2; *pad_after = pad_need - *pad_before; } else { - MS_LOG(EXCEPTION) << "The pad mode of AvgPoolGrad should be SAME or VALID, but got PAD"; + MS_LOG(EXCEPTION) << "The pad mode of AvgPoolGrad should be SAME or VALID, but got PAD. trace: " + << trace::DumpSourceLines(node); } return output; } -std::vector> GetAssistInputMatrix(const std::vector &x_shape, int64_t pad_top, - int64_t pad_bottom, int64_t pad_left, int64_t pad_right) { +std::vector> GetAssistInputMatrix(const AnfNodePtr &node, const std::vector &x_shape, + int64_t pad_top, int64_t pad_bottom, int64_t pad_left, + int64_t pad_right) { // `assist_input_matrix` is a 2d matrix with input_shape after padding, // the value of element which is padded is 0, else are 1. // For each element of output, it is mapped for slide window: `[h*h_stride : h*h_stride + h_ksize, @@ -76,7 +79,7 @@ std::vector> GetAssistInputMatrix(const std::vector // number of input that associate with output element. std::vector> assist_input_matrix; if (x_shape.size() < kShapeDimNum) { - MS_LOG(EXCEPTION) << "The dim of x_shape should not be less than 4."; + MS_LOG(EXCEPTION) << "The dim of x_shape should not be less than 4. 
trace: " << trace::DumpSourceLines(node); } std::vector in_shape_after_padding_2d = {x_shape[kDim2] + pad_top + pad_bottom, x_shape[kDim3] + pad_left + pad_right}; @@ -97,22 +100,24 @@ std::vector> GetAssistInputMatrix(const std::vector return assist_input_matrix; } -ValueNodePtr CreateMeanMatrixValueNode(const FuncGraphPtr &func_graph, const std::vector &x_shape, - const std::vector &k_size, const std::vector &stride, - const PadMode pad_mode, const TypeId x_dtype) { +ValueNodePtr CreateMeanMatrixValueNode(const FuncGraphPtr &func_graph, const AnfNodePtr &node, + const std::vector &x_shape, const std::vector &k_size, + const std::vector &stride, const PadMode pad_mode, + const TypeId x_dtype) { MS_EXCEPTION_IF_NULL(func_graph); auto kernel_graph = func_graph->cast(); MS_EXCEPTION_IF_NULL(kernel_graph); if (x_shape.size() != kShapeDimNum || k_size.size() != kShapeDimNum || stride.size() != kShapeDimNum) { MS_LOG(EXCEPTION) << "The dim of x_shape, kernel_size and strides of AvgPoolGrad should be 4, but got x_shape:" - << x_shape << ", kernel_size:" << k_size << ", strides:" << stride; + << x_shape << ", kernel_size:" << k_size << ", strides:" << stride + << ". trace: " << trace::DumpSourceLines(node); } int64_t pad_top, pad_bottom, pad_left, pad_right; int64_t h_output = - windowed_output_size(x_shape[kDim2], k_size[kDim2], stride[kDim2], pad_mode, &pad_top, &pad_bottom); + windowed_output_size(node, x_shape[kDim2], k_size[kDim2], stride[kDim2], pad_mode, &pad_top, &pad_bottom); int64_t w_output = - windowed_output_size(x_shape[kDim3], k_size[kDim3], stride[kDim3], pad_mode, &pad_left, &pad_right); - auto assist_input_matrix = GetAssistInputMatrix(x_shape, pad_top, pad_bottom, pad_left, pad_right); + windowed_output_size(node, x_shape[kDim3], k_size[kDim3], stride[kDim3], pad_mode, &pad_left, &pad_right); + auto assist_input_matrix = GetAssistInputMatrix(node, x_shape, pad_top, pad_bottom, pad_left, pad_right); // calculate output std::vector hw_output(h_output * w_output, 0.0); @@ -153,14 +158,15 @@ ValueNodePtr CreateMeanMatrixValueNode(const FuncGraphPtr &func_graph, const std return mean_matrix_vnode; } -ValueNodePtr CreateKernelMatrixValueNode(const FuncGraphPtr &func_graph, const std::vector &x_shape, - const std::vector &k_size, const TypeId x_dtype) { +ValueNodePtr CreateKernelMatrixValueNode(const FuncGraphPtr &func_graph, const AnfNodePtr &node, + const std::vector &x_shape, const std::vector &k_size, + const TypeId x_dtype) { MS_EXCEPTION_IF_NULL(func_graph); auto kernel_graph = func_graph->cast(); MS_EXCEPTION_IF_NULL(kernel_graph); if (x_shape.size() != kShapeDimNum || k_size.size() != kShapeDimNum) { MS_LOG(EXCEPTION) << "The dim of x_shape and kernel_size of AvgPoolGrad should be 4, but got x_shape:" << x_shape - << ", kernel_size:" << k_size; + << ", kernel_size:" << k_size << ". 
trace: " << trace::DumpSourceLines(node); } std::vector kernel_shape = {1, x_shape[kDim1], k_size[kDim2], k_size[kDim3]}; auto data_size = std::accumulate(kernel_shape.begin(), kernel_shape.end(), int64_t(1), std::multiplies()); @@ -197,8 +203,8 @@ const AnfNodePtr AvgPoolGradUnifyMindIR::Process(const FuncGraphPtr &graph, cons auto pad_mode = PadMode(AnfAlgo::GetNodeAttr(avgpool_grad, kAttrPadMode)); auto x_shape_vnode = CreateShapeValueNode(graph, x_shape); - auto mean_matrix_vnode = CreateMeanMatrixValueNode(graph, x_shape, k_size, stride, pad_mode, x_dtype); - auto kernel_matrix_vnode = CreateKernelMatrixValueNode(graph, x_shape, k_size, x_dtype); + auto mean_matrix_vnode = CreateMeanMatrixValueNode(graph, node, x_shape, k_size, stride, pad_mode, x_dtype); + auto kernel_matrix_vnode = CreateKernelMatrixValueNode(graph, node, x_shape, k_size, x_dtype); std::vector avgpool_grad_vm_inputs = {NewValueNode(std::make_shared(kAvgPoolGradVmOpName)), x_shape_vnode, avgpool_grad->input(3), mean_matrix_vnode, diff --git a/mindspore/ccsrc/backend/optimizer/ascend/mindir/conv2d_unify_mindir.cc b/mindspore/ccsrc/backend/optimizer/ascend/mindir/conv2d_unify_mindir.cc index 771d617617a..025408f80e9 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/mindir/conv2d_unify_mindir.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/mindir/conv2d_unify_mindir.cc @@ -24,6 +24,7 @@ #include "utils/utils.h" #include "utils/ms_context.h" #include "utils/check_convert_utils.h" +#include "utils/trace_base.h" #include "backend/optimizer/common/helper.h" #include "runtime/device/kernel_info.h" #include "backend/session/anf_runtime_algorithm.h" @@ -31,7 +32,7 @@ namespace mindspore { namespace opt { namespace { -constexpr size_t kConv2DBackpropInputNum = 4; +constexpr size_t kConv2DBackpropInputNum = 3; constexpr size_t kConv2DAxisNum = 4; constexpr auto kAttrOffsetA = "offset_a"; constexpr auto kAttrPadList = "pad_list"; @@ -56,11 +57,11 @@ bool NeedUpdate(const CNodePtr &conv2d, std::vector in_shape, std::vecto int64_t data_format; bool result = CheckAndConvertUtils::GetDataFormatEnumValue(data_format_ptr, &data_format); if (!result || data_format != Format::NCHW) { - MS_LOG(EXCEPTION) << "Conv2D only supports NCHW when group > 1"; + MS_LOG(EXCEPTION) << "Conv2D only supports NCHW when group > 1, trace: " << trace::DumpSourceLines(conv2d); } if (in_shape.size() != kConv2DAxisNum || out_shape.size() != kConv2DAxisNum) { MS_LOG(EXCEPTION) << "Conv2D's input and output should have 4 axis, but got input axis num: " << in_shape.size() - << "output axis num: " << out_shape.size(); + << "output axis num: " << out_shape.size() << ". trace: " << trace::DumpSourceLines(conv2d); } auto in_channel = in_shape[kDim1]; auto out_channel = out_shape[kDim1]; @@ -114,7 +115,7 @@ CNodePtr CreateTranspose(const FuncGraphPtr &graph, const CNodePtr &conv2d, cons auto out_shape = AnfAlgo::GetOutputInferShape(input_node, 0); if (out_shape.size() != kConv2DAxisNum) { MS_LOG(EXCEPTION) << "Conv2D's output axis number should be " << kConv2DAxisNum << ", but got " - << out_shape.size(); + << out_shape.size() << ". 
trace: " << trace::DumpSourceLines(conv2d); } std::swap(out_shape[kDim0], out_shape[kDim1]); auto shapes = {out_shape}; @@ -226,7 +227,7 @@ CNodePtr Conv2DBackpropInputUnifyMindIR::CreateDepthwiseConv2DBackpropInput(cons MS_EXCEPTION_IF_NULL(conv2d_backin); CNodePtr depth_conv_backin = nullptr; - if (conv2d_backin->inputs().size() == kConv2DBackpropInputNum) { + if (AnfUtils::GetInputTensorNum(conv2d_backin) == kConv2DBackpropInputNum) { std::vector depth_conv_backin_inputs = { NewValueNode(std::make_shared(kDepthwiseConv2dNativeBackpropInputOpName)), conv2d_backin->input(kIndex3), transpose, conv2d_backin->input(kIndex1)}; @@ -265,11 +266,12 @@ const AnfNodePtr Conv2DBackpropInputUnifyMindIR::Process(const FuncGraphPtr &gra return nullptr; } - auto input_size = conv2d_backin->inputs().size(); + auto input_size = AnfUtils::GetInputTensorNum(conv2d_backin); // In pynative mode, input_sizes input will be convert to attr if Conv2DBackpropInput is a forward op. if (input_size != kConv2DBackpropInputNum && input_size != kConv2DBackpropInputNum - 1) { - MS_LOG(EXCEPTION) << "Conv2DBackpropInput's input number should be " << (kConv2DBackpropInputNum - 1) << " or " - << (kConv2DBackpropInputNum - 2) << ", but got " << (input_size - 1); + MS_LOG(EXCEPTION) << "Conv2DBackpropInput's input number should be " << kConv2DBackpropInputNum << " or " + << (kConv2DBackpropInputNum - 1) << ", but got " << input_size + << ". trace: " << trace::DumpSourceLines(node); } auto transpose = CreateTranspose(graph, conv2d_backin, conv2d_backin->input(kIndex2), true, *this); auto depth_conv_backin = CreateDepthwiseConv2DBackpropInput(graph, conv2d_backin, transpose); @@ -281,9 +283,10 @@ CNodePtr Conv2DBackpropFilterUnifyMindIR::CreateDepthwiseConv2DBackpropFilter(co const CNodePtr &conv2d_backfil) const { MS_EXCEPTION_IF_NULL(graph); MS_EXCEPTION_IF_NULL(conv2d_backfil); - if (conv2d_backfil->inputs().size() != kConv2DBackpropInputNum) { - MS_LOG(EXCEPTION) << "Conv2DBackpropFilter's input number should be " << (kConv2DBackpropInputNum - 1) - << ", but got " << (conv2d_backfil->inputs().size() - 1); + if (AnfUtils::GetInputTensorNum(conv2d_backfil) != kConv2DBackpropInputNum) { + MS_LOG(EXCEPTION) << "Conv2DBackpropFilter's input number should be " << kConv2DBackpropInputNum << ", but got " + << AnfUtils::GetInputTensorNum(conv2d_backfil) + << ". trace: " << trace::DumpSourceLines(conv2d_backfil); } auto filter_size_node = conv2d_backfil->input(kIndex3); MS_EXCEPTION_IF_NULL(filter_size_node); @@ -307,7 +310,7 @@ CNodePtr Conv2DBackpropFilterUnifyMindIR::CreateDepthwiseConv2DBackpropFilter(co std::vector out_shape = AnfAlgo::GetOutputInferShape(conv2d_backfil, 0); if (out_shape.size() != kConv2DAxisNum) { MS_LOG(EXCEPTION) << "Conv2DBackpropFilter's output axis number should be " << kConv2DAxisNum << ", but got " - << out_shape.size(); + << out_shape.size() << ". 
trace: " << trace::DumpSourceLines(conv2d_backfil); } std::swap(out_shape[0], out_shape[1]); auto shapes = {out_shape}; diff --git a/mindspore/ccsrc/backend/optimizer/ascend/mindir/dropout_unify_mindir.cc b/mindspore/ccsrc/backend/optimizer/ascend/mindir/dropout_unify_mindir.cc index 379e79422e3..705e31c262c 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/mindir/dropout_unify_mindir.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/mindir/dropout_unify_mindir.cc @@ -69,11 +69,11 @@ ValueNodePtr CreateKeepPorbValueNode(const FuncGraphPtr &func_graph, const AnfNo MS_EXCEPTION_IF_NULL(cnode); // Step1: get keep_prob if (!AnfAlgo::HasNodeAttr(kKeepProb, cnode)) { - MS_LOG(EXCEPTION) << "Dropout node does not have attr: keep_prob."; + MS_LOG(EXCEPTION) << "Dropout node does not have attr: keep_prob. trace: " << trace::DumpSourceLines(node); } if (AnfAlgo::GetCNodeName(cnode) == kDropoutOpName) { if (!AnfAlgo::HasNodeAttr(kSeed0, cnode) || !AnfAlgo::HasNodeAttr(kSeed1, cnode)) { - MS_LOG(EXCEPTION) << "Dropout node does not have attr: seed0 or seed1."; + MS_LOG(EXCEPTION) << "Dropout node does not have attr: seed0 or seed1. trace: " << trace::DumpSourceLines(node); } } auto keep_prob = AnfAlgo::GetNodeAttr(node, kKeepProb); diff --git a/mindspore/ccsrc/backend/optimizer/ascend/mindir/fake_learned_scale_quant_grad_unify_mindir.cc b/mindspore/ccsrc/backend/optimizer/ascend/mindir/fake_learned_scale_quant_grad_unify_mindir.cc index 2a386909f6a..4f6b694e62d 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/mindir/fake_learned_scale_quant_grad_unify_mindir.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/mindir/fake_learned_scale_quant_grad_unify_mindir.cc @@ -34,8 +34,9 @@ void FakeLearnedScaleQuantPerLayerGradUnifyMindIR::CreateOutputsOfLSQPerLayerGra MS_EXCEPTION_IF_NULL(lsq_perlayer_grad_node); const auto &lsq_perlayer_grad_inputs = lsq_perlayer_grad_node->inputs(); if (lsq_perlayer_grad_inputs.size() < kFakeLearnedScaleQuantGradInputNum) { - MS_LOG(EXCEPTION) << "lsq_perlayer_grad_node has wrong inputs size." - << " trace: " << trace::DumpSourceLines(lsq_perlayer_grad_node); + MS_LOG(EXCEPTION) << "Lsq_perlayer_grad_node has wrong inputs size, should be not less than " + << kFakeLearnedScaleQuantGradInputNum << ", but got " << lsq_perlayer_grad_inputs.size() + << ". trace: " << trace::DumpSourceLines(lsq_perlayer_grad_node); } std::vector lsq_perlayer_grad_d_inputs = { NewValueNode(std::make_shared(kFakeLearnedScaleQuantPerLayerGradDOpName)), @@ -65,12 +66,14 @@ void FakeLearnedScaleQuantPerLayerGradUnifyMindIR::CreateOutputsOfLSQPerLayerRed MS_EXCEPTION_IF_NULL(lsq_perlayer_reduce_grad_outputs); const auto &lsq_perlayer_grad_inputs = lsq_perlayer_grad_node->inputs(); if (lsq_perlayer_grad_inputs.size() < kFakeLearnedScaleQuantGradInputNum) { - MS_LOG(EXCEPTION) << "lsq_perlayer_grad_node has wrong inputs size" - << " trace: " << trace::DumpSourceLines(lsq_perlayer_grad_node); + MS_LOG(EXCEPTION) << "Lsq_perlayer_grad_node has wrong inputs size, should be not less than " + << kFakeLearnedScaleQuantGradInputNum << ", but got " << lsq_perlayer_grad_inputs.size() + << ". 
trace: " << trace::DumpSourceLines(lsq_perlayer_grad_node); } if (lsq_perlayer_grad_d_outputs.size() != kFakeLearnedScaleQuantGradDOutputNum) { - MS_LOG(EXCEPTION) << "lsq_perlayer_grad_d_outputs has wrong size" - << " trace: " << trace::DumpSourceLines(lsq_perlayer_grad_node); + MS_LOG(EXCEPTION) << "Lsq_perlayer_grad_d_outputs has wrong inputs size, should be " + << kFakeLearnedScaleQuantGradDOutputNum << ", but got " << lsq_perlayer_grad_d_outputs.size() + << ". trace: " << trace::DumpSourceLines(lsq_perlayer_grad_node); } std::vector lsq_perlayer_reduce_grad_inputs = { NewValueNode(std::make_shared(kFakeLearnedScaleQuantPerLayerGradDReduceOpName)), @@ -93,8 +96,9 @@ void FakeLearnedScaleQuantPerChannelGradUnifyMindIR::CreateOutputsOfLSQPerChanne MS_EXCEPTION_IF_NULL(lsq_perchannel_grad_node); const auto &lsq_perchannel_grad_inputs = lsq_perchannel_grad_node->inputs(); if (lsq_perchannel_grad_inputs.size() < kFakeLearnedScaleQuantGradInputNum) { - MS_LOG(EXCEPTION) << "lsq_perchannel_grad_node has wrong inputs size." - << " trace: " << trace::DumpSourceLines(lsq_perchannel_grad_node); + MS_LOG(EXCEPTION) << "Lsq_perchannel_grad_node has wrong inputs size, should be not less than " + << kFakeLearnedScaleQuantGradInputNum << ", but got " << lsq_perchannel_grad_inputs.size() + << ". trace: " << trace::DumpSourceLines(lsq_perchannel_grad_node); } std::vector lsq_perchannel_grad_d_inputs = { NewValueNode(std::make_shared(kFakeLearnedScaleQuantPerChannelGradDOpName)), @@ -125,12 +129,14 @@ void FakeLearnedScaleQuantPerChannelGradUnifyMindIR::CreateOutputsOfLSQPerChanne MS_EXCEPTION_IF_NULL(lsq_perchannel_reduce_grad_outputs); const auto &lsq_perchannel_grad_inputs = lsq_perchannel_grad_node->inputs(); if (lsq_perchannel_grad_inputs.size() < kFakeLearnedScaleQuantGradInputNum) { - MS_LOG(EXCEPTION) << "lsq_perchannel_grad_node has wrong inputs size" - << " trace: " << trace::DumpSourceLines(lsq_perchannel_grad_node); + MS_LOG(EXCEPTION) << "Lsq_perchannel_grad_node has wrong inputs size, should be not less than " + << kFakeLearnedScaleQuantGradInputNum << ", but got " << lsq_perchannel_grad_inputs.size() + << ". trace: " << trace::DumpSourceLines(lsq_perchannel_grad_node); } if (lsq_perchannel_grad_d_outputs.size() != kFakeLearnedScaleQuantGradDOutputNum) { - MS_LOG(EXCEPTION) << "lsq_perchannel_grad_d_outputs has wrong size" - << " trace: " << trace::DumpSourceLines(lsq_perchannel_grad_node); + MS_LOG(EXCEPTION) << "Lsq_perchannel_grad_d_outputs has wrong inputs size, should be " + << kFakeLearnedScaleQuantGradDOutputNum << ", but got " << lsq_perchannel_grad_inputs.size() + << ". trace: " << trace::DumpSourceLines(lsq_perchannel_grad_node); } std::vector lsq_perchannel_reduce_grad_inputs = { NewValueNode(std::make_shared(kFakeLearnedScaleQuantPerChannelGradDReduceOpName)), @@ -164,16 +170,18 @@ const AnfNodePtr FakeLearnedScaleQuantPerLayerGradUnifyMindIR::Process(const Fun std::vector lsq_perlayer_grad_d_outputs; CreateOutputsOfLSQPerLayerGradD(func_graph, cnode, &lsq_perlayer_grad_d_outputs); if (lsq_perlayer_grad_d_outputs.size() != kFakeLearnedScaleQuantGradOutputNum) { - MS_LOG(EXCEPTION) << "fake_learned_scale_quant_perlayer_grad_d_outputs has wrong size" - << " trace: " << trace::DumpSourceLines(node); + MS_LOG(EXCEPTION) << "Fake_learned_scale_quant_perlayer_grad_d_outputs has wrong inputs size, should be " + << kFakeLearnedScaleQuantGradOutputNum << ", but got " << lsq_perlayer_grad_d_outputs.size() + << ". 
trace: " << trace::DumpSourceLines(node); } std::vector lsq_perlayer_reduce_grad_outputs; CreateOutputsOfLSQPerLayerReduceGrad(func_graph, cnode, lsq_perlayer_grad_d_outputs, &lsq_perlayer_reduce_grad_outputs); if (lsq_perlayer_reduce_grad_outputs.size() != kSingleOutputNum) { - MS_LOG(EXCEPTION) << "fake_learned_scale_quant_perlayer_reduce_grad_outputs has wrong size" - << " trace: " << trace::DumpSourceLines(node); + MS_LOG(EXCEPTION) << "Fake_learned_scale_quant_perlayer_reduce_grad_outputs has wrong inputs size, should be " + << kSingleOutputNum << ", but got " << lsq_perlayer_reduce_grad_outputs.size() + << ". trace: " << trace::DumpSourceLines(node); } std::vector make_tuple_inputs = {NewValueNode(prim::kPrimMakeTuple), lsq_perlayer_grad_d_outputs[0], @@ -201,16 +209,18 @@ const AnfNodePtr FakeLearnedScaleQuantPerChannelGradUnifyMindIR::Process(const F std::vector lsq_perchannel_grad_d_outputs; CreateOutputsOfLSQPerChannelGradD(func_graph, cnode, &lsq_perchannel_grad_d_outputs); if (lsq_perchannel_grad_d_outputs.size() != kFakeLearnedScaleQuantGradOutputNum) { - MS_LOG(EXCEPTION) << "fake_learned_scale_quant_perchannel_grad_d_outputs has wrong size" - << " trace: " << trace::DumpSourceLines(node); + MS_LOG(EXCEPTION) << "Fake_learned_scale_quant_perchannel_grad_d_outputs has wrong inputs size, should be " + << kFakeLearnedScaleQuantGradOutputNum << ", but got " << lsq_perchannel_grad_d_outputs.size() + << ". trace: " << trace::DumpSourceLines(node); } std::vector lsq_perchannel_reduce_grad_outputs; CreateOutputsOfLSQPerChannelReduceGrad(func_graph, cnode, lsq_perchannel_grad_d_outputs, &lsq_perchannel_reduce_grad_outputs); if (lsq_perchannel_reduce_grad_outputs.size() != kSingleOutputNum) { - MS_LOG(EXCEPTION) << "fake_learned_scale_quant_perchannel_reduce_grad_outputs has wrong size" - << " trace: " << trace::DumpSourceLines(node); + MS_LOG(EXCEPTION) << "Fake_learned_scale_quant_perchannel_reduce_grad_outputs has wrong inputs size, should be " + << kSingleOutputNum << ", but got " << lsq_perchannel_reduce_grad_outputs.size() + << ". trace: " << trace::DumpSourceLines(node); } std::vector make_tuple_inputs = {NewValueNode(prim::kPrimMakeTuple), lsq_perchannel_grad_d_outputs[0], diff --git a/mindspore/ccsrc/backend/optimizer/ascend/mindir/maxpool_to_maxpool_with_argmax.cc b/mindspore/ccsrc/backend/optimizer/ascend/mindir/maxpool_to_maxpool_with_argmax.cc index c5cbc533975..a29aeb54f78 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/mindir/maxpool_to_maxpool_with_argmax.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/mindir/maxpool_to_maxpool_with_argmax.cc @@ -21,6 +21,7 @@ #include "utils/utils.h" #include "utils/ms_context.h" +#include "utils/trace_base.h" #include "backend/optimizer/common/helper.h" #include "runtime/device/kernel_info.h" #include "backend/session/anf_runtime_algorithm.h" @@ -37,7 +38,7 @@ CNodePtr GetMaxPool(const CNodePtr &maxpool_grad) { MS_EXCEPTION_IF_NULL(maxpool_grad); if (maxpool_grad->inputs().size() != kMaxPoolGradInputNum) { MS_LOG(EXCEPTION) << "MaxPoolGrad's input number should be " << (kMaxPoolGradInputNum - 1) << ", but got " - << (maxpool_grad->inputs().size() - 1); + << (maxpool_grad->inputs().size() - 1) << ". 
trace: " << trace::DumpSourceLines(maxpool_grad); } auto maxpool_anf = maxpool_grad->input(kIndex2); MS_EXCEPTION_IF_NULL(maxpool_anf); @@ -50,7 +51,7 @@ CNodePtr MaxPool2MaxPoolWithArgmax::CreateMaxPoolWithArgmax(const FuncGraphPtr & MS_EXCEPTION_IF_NULL(maxpool); if (maxpool->inputs().size() != kMaxPoolInputNum) { MS_LOG(EXCEPTION) << "MaxPool's input number should be " << (kMaxPoolInputNum - 1) << ", but got " - << (maxpool->inputs().size() - 1); + << (maxpool->inputs().size() - 1) << ". trace: " << trace::DumpSourceLines(maxpool); } std::vector maxpool_argmax_inputs = {NewValueNode(std::make_shared(kMaxPoolWithArgmaxOpName)), maxpool->input(kIndex1)}; @@ -74,7 +75,7 @@ CNodePtr MaxPool2MaxPoolWithArgmax::CreateMaxPoolGradWithArgmax( MS_EXCEPTION_IF_NULL(maxpool_grad); if (maxpool_grad->inputs().size() != kMaxPoolGradInputNum) { MS_LOG(EXCEPTION) << "MaxPoolGrad's input number should be " << (kMaxPoolGradInputNum - 1) << ", but got " - << (maxpool_grad->inputs().size() - 1); + << (maxpool_grad->inputs().size() - 1) << ". trace: " << trace::DumpSourceLines(maxpool_grad); } // MaxPoolGrad's inputs are {input, output, grad_input}, MaxPoolGradWithArgmax's inputs are // {input, grad_input, argmax_output} @@ -95,11 +96,11 @@ void MaxPool2MaxPoolWithArgmax::SetNodeAttrs(const CNodePtr &maxpool, const CNod auto ksize = AnfAlgo::GetNodeAttr>(maxpool, kAttrKernelSize); if (strides.size() != kMaxPoolAttrAxisNum) { MS_LOG(EXCEPTION) << "MaxPool's attr strides has wrong axis number, should be " << kMaxPoolAttrAxisNum - << ", but got " << strides.size(); + << ", but got " << strides.size() << ". trace: " << trace::DumpSourceLines(maxpool); } if (ksize.size() != kMaxPoolAttrAxisNum) { MS_LOG(EXCEPTION) << "MaxPool's attr ksize has wrong axis number, should be " << kMaxPoolAttrAxisNum << ", but got " - << ksize.size(); + << ksize.size() << ". trace: " << trace::DumpSourceLines(maxpool); } // note that strides and ksize change from (1, 1, x, y) to (1, x, y, 1) strides[kIndex1] = strides[kIndex2]; diff --git a/mindspore/ccsrc/backend/optimizer/ascend/mindir/maxpool_with_argmax_unify_mindir.cc b/mindspore/ccsrc/backend/optimizer/ascend/mindir/maxpool_with_argmax_unify_mindir.cc index b73dcc353c6..2a55bbb3f5d 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/mindir/maxpool_with_argmax_unify_mindir.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/mindir/maxpool_with_argmax_unify_mindir.cc @@ -22,6 +22,7 @@ #include "backend/session/anf_runtime_algorithm.h" #include "base/core_ops.h" #include "utils/utils.h" +#include "utils/trace_base.h" namespace mindspore { namespace opt { @@ -66,7 +67,9 @@ const AnfNodePtr MaxPoolWithArgmaxUnifyMindIR::Process(const FuncGraphPtr &graph auto output_shape = AnfAlgo::GetOutputInferShape(maxpool_with_argmax, 0); auto argmax_shape = output_shape; if (argmax_shape.size() != kMaxPoolWithArgmaxShape || ksize.size() != kMaxPoolWithArgmaxShape) { - MS_LOG(EXCEPTION) << "argmax or kernel_size's shape size not equal to 4"; + MS_LOG(EXCEPTION) << "Argmax or kernel_size's shape dim should be equal to 4, but got argmax dim: " + << argmax_shape.size() << ", kernel_size dim: " << ksize.size() + << ". 
trace: " << trace::DumpSourceLines(node); } argmax_shape[kDim2] = LongToSize(ksize[kDim1] * ksize[kDim2]); argmax_shape[kDim3] = (output_shape[kDim2] * output_shape[kDim3] + kAlignBytes - 1) / kAlignBytes + 1; @@ -100,7 +103,9 @@ const AnfNodePtr MaxPoolGradWithArgmaxUnifyMindIR::Process(const FuncGraphPtr &g auto ksize = AnfAlgo::GetNodeAttr>(maxpool_grad_with_argmax, kAttrKernelSize); auto argmax_shape = AnfAlgo::GetOutputInferShape(tuple_getitem0_anf, 0); if (argmax_shape.size() != kMaxPoolWithArgmaxShape || ksize.size() != kMaxPoolWithArgmaxShape) { - MS_LOG(EXCEPTION) << "argmax or kernel_size's shape size not equal to 4"; + MS_LOG(EXCEPTION) << "Argmax or kernel_size's shape dim should be equal to 4, but got argmax dim: " + << argmax_shape.size() << ", kernel_size dim: " << ksize.size() + << ". trace: " << trace::DumpSourceLines(node); } argmax_shape[kDim3] = (argmax_shape[kDim2] * argmax_shape[kDim3] + kAlignBytes - 1) / kAlignBytes + 1; argmax_shape[kDim2] = LongToSize(ksize[kDim1] * ksize[kDim2]); diff --git a/mindspore/ccsrc/backend/optimizer/ascend/mindir/neighbor_exchange_v2_unify_mindir.cc b/mindspore/ccsrc/backend/optimizer/ascend/mindir/neighbor_exchange_v2_unify_mindir.cc index e21792ffd81..78a37127c92 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/mindir/neighbor_exchange_v2_unify_mindir.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/mindir/neighbor_exchange_v2_unify_mindir.cc @@ -21,6 +21,7 @@ #include "backend/session/anf_runtime_algorithm.h" #include "runtime/hccl_adapter/hccl_adapter.h" #include "backend/optimizer/common/helper.h" +#include "utils/trace_base.h" namespace mindspore { namespace opt { @@ -290,7 +291,8 @@ CNodePtr CreateAllToAllvNode(const FuncGraphPtr &graph, const CNodePtr &neighbor CreateMultipleOutputsOfAnfNode(graph, split_nodes[i], static_cast(split_num[i]), &output); if (output.empty()) { MS_LOG(EXCEPTION) << "The node " << split_nodes[i]->DebugString() - << " should have at least one output, but got 0."; + << " should have at least one output, but got 0. trace: " + << trace::DumpSourceLines(split_nodes[i]); } } split_outputs.emplace_back(output); @@ -365,7 +367,8 @@ std::vector NeighborExchangeV2UnifyMindIR::CreateSplitNodes(const Func if (neighbor_exchange_v2->size() <= kNeighborExchangeV2InputIdx) { MS_LOG(EXCEPTION) << "Invalid cnode " << neighbor_exchange_v2->DebugString() << " input size " - << neighbor_exchange_v2->size(); + << neighbor_exchange_v2->size() << ", should be " << kNeighborExchangeV2InputIdx + << ". trace: " << trace::DumpSourceLines(neighbor_exchange_v2); } std::vector split_nodes = {}; @@ -379,7 +382,8 @@ std::vector NeighborExchangeV2UnifyMindIR::CreateSplitNodes(const Func auto dtype = AnfAlgo::GetOutputInferDataType(neighbor_exchange_v2_input, 0); auto shape = AnfAlgo::GetOutputInferShape(neighbor_exchange_v2_input, 0); if (SizeToLong(shape.size()) != kShapeSize) { // only support NCHW now - MS_LOG(EXCEPTION) << "Invalid shape size " << shape.size() << ", only support NCHW input now!"; + MS_LOG(EXCEPTION) << "Invalid shape size " << shape.size() + << ", only support NCHW input now! trace: " << trace::DumpSourceLines(neighbor_exchange_v2); } // splitv for top & bottom @@ -416,7 +420,8 @@ std::vector NeighborExchangeV2UnifyMindIR::CreateSplitNodes(const Func &split_outputs_top_bottom); if (split_outputs_top_bottom.empty()) { MS_LOG(EXCEPTION) << "The node " << split_nodes[0]->DebugString() - << " should have at least one output, but got 0."; + << " should have at least one output, but got 0. 
trace: " + << trace::DumpSourceLines(split_nodes[0]); } // for top corner @@ -608,7 +613,8 @@ CNodePtr NeighborExchangeV2UnifyMindIR::CreateConcatNodes(const FuncGraphPtr &gr CreateMultipleOutputsOfAnfNode(graph, all_to_all_v, static_cast(all_to_all_output_num), &all_to_all_v_outputs); if (all_to_all_v_outputs.empty()) { - MS_LOG(EXCEPTION) << "The node " << all_to_all_v->DebugString() << " should have at least one output, but got 0."; + MS_LOG(EXCEPTION) << "The node " << all_to_all_v->DebugString() + << " should have at least one output, but got 0. trace: " << trace::DumpSourceLines(all_to_all_v); } if (recv_rank_ids[kRankIdZero] == kInvalidId && recv_rank_ids[kRankIdFour] == kInvalidId) { @@ -644,7 +650,9 @@ CNodePtr NeighborExchangeV2UnifyMindIR::CreateConcatNodes(const FuncGraphPtr &gr std::vector concat_left_outputs; CreateMultipleOutputsOfAnfNode(graph, concat_left, 1, &concat_left_outputs); if (concat_left_outputs.empty()) { - MS_LOG(EXCEPTION) << "The node " << concat_left->DebugString() << " should have at least one output, but got 0."; + MS_LOG(EXCEPTION) << "The node " << concat_left->DebugString() + << " should have at least one output, but got 0. trace: " + << trace::DumpSourceLines(concat_left); } concat_input_all.insert(concat_input_all.end(), concat_left_outputs.begin(), concat_left_outputs.end()); ++input_nums_all; @@ -655,7 +663,9 @@ CNodePtr NeighborExchangeV2UnifyMindIR::CreateConcatNodes(const FuncGraphPtr &gr std::vector concat_middle_outputs; CreateMultipleOutputsOfAnfNode(graph, concat_middle, 1, &concat_middle_outputs); if (concat_middle_outputs.empty()) { - MS_LOG(EXCEPTION) << "The node " << concat_middle->DebugString() << " should have at least one output, but got 0."; + MS_LOG(EXCEPTION) << "The node " << concat_middle->DebugString() + << " should have at least one output, but got 0. trace: " + << trace::DumpSourceLines(concat_middle); } concat_input_all.insert(concat_input_all.end(), concat_middle_outputs.begin(), concat_middle_outputs.end()); ++input_nums_all; @@ -667,7 +677,9 @@ CNodePtr NeighborExchangeV2UnifyMindIR::CreateConcatNodes(const FuncGraphPtr &gr std::vector concat_right_outputs; CreateMultipleOutputsOfAnfNode(graph, concat_right, 1, &concat_right_outputs); if (concat_right_outputs.empty()) { - MS_LOG(EXCEPTION) << "The node " << concat_right->DebugString() << " should have at least one output, but got 0."; + MS_LOG(EXCEPTION) << "The node " << concat_right->DebugString() + << " should have at least one output, but got 0. trace: " + << trace::DumpSourceLines(concat_right); } concat_input_all.insert(concat_input_all.end(), concat_right_outputs.begin(), concat_right_outputs.end()); ++input_nums_all; @@ -694,14 +706,16 @@ std::vector NeighborExchangeV2GradUnifyMindIR::CreateSplitNodesForGrad if (neighbor_exchange_v2_grad->size() <= kNeighborExchangeV2InputIdx) { MS_LOG(EXCEPTION) << "Invalid cnode " << neighbor_exchange_v2_grad->DebugString() << " input size " - << neighbor_exchange_v2_grad->size(); + << neighbor_exchange_v2_grad->size() << ", should be " << kNeighborExchangeV2InputIdx + << ". 
trace: " << trace::DumpSourceLines(neighbor_exchange_v2_grad); } auto neighbor_exchange_v2_grad_input = neighbor_exchange_v2_grad->input(kNeighborExchangeV2InputIdx); auto dtype = AnfAlgo::GetOutputInferDataType(neighbor_exchange_v2_grad_input, 0); auto shape = AnfAlgo::GetOutputInferShape(neighbor_exchange_v2_grad_input, 0); if (SizeToLong(shape.size()) != kShapeSize) { - MS_LOG(EXCEPTION) << "Invalid shape size " << shape.size() << ", only support NCHW input now!"; + MS_LOG(EXCEPTION) << "Invalid shape size " << shape.size() + << ", only support NCHW input now! trace: " << trace::DumpSourceLines(neighbor_exchange_v2_grad); } std::vector split_nodes = {}; @@ -727,7 +741,8 @@ std::vector NeighborExchangeV2GradUnifyMindIR::CreateSplitNodesForGrad CreateMultipleOutputsOfAnfNode(graph, split_nodes[0], static_cast(num_split_h), &split_outputs_top_bottom); if (split_outputs_top_bottom.empty()) { MS_LOG(EXCEPTION) << "The node " << split_nodes[0]->DebugString() - << " should have at least one output, but got 0."; + << " should have at least one output, but got 0. trace: " + << trace::DumpSourceLines(split_nodes[0]); } size_split_h = AnfAlgo::GetNodeAttr>(split_nodes[0], kAttrSizeSplits); } else { @@ -827,7 +842,8 @@ CNodePtr NeighborExchangeV2GradUnifyMindIR::CreateSplitGradNodes(const FuncGraph CreateMultipleOutputsOfAnfNode(graph, all_to_all_v, static_cast(all_to_all_output_num), &all_to_all_v_outputs); if (all_to_all_v_outputs.empty()) { - MS_LOG(EXCEPTION) << "The node " << all_to_all_v->DebugString() << " should have at least one output, but got 0."; + MS_LOG(EXCEPTION) << "The node " << all_to_all_v->DebugString() + << " should have at least one output, but got 0. trace: " << trace::DumpSourceLines(all_to_all_v); } // create pad nodes // slice begin & size @@ -871,7 +887,8 @@ CNodePtr NeighborExchangeV2GradUnifyMindIR::CreateSplitGradNodes(const FuncGraph std::vector pad_outputs; CreateMultipleOutputsOfAnfNode(graph, pad, 1, &pad_outputs); if (pad_outputs.empty()) { - MS_LOG(EXCEPTION) << "The node " << pad->DebugString() << " should have at least one output, but got 0."; + MS_LOG(EXCEPTION) << "The node " << pad->DebugString() + << " should have at least one output, but got 0. trace: " << trace::DumpSourceLines(pad); } addn_inputs.insert(addn_inputs.end(), pad_outputs.begin(), pad_outputs.end()); ++pad_num; diff --git a/mindspore/ccsrc/backend/optimizer/ascend/mindir/slice_grad_unify_mindir.cc b/mindspore/ccsrc/backend/optimizer/ascend/mindir/slice_grad_unify_mindir.cc index 51698acc21e..a7ef039c063 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/mindir/slice_grad_unify_mindir.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/mindir/slice_grad_unify_mindir.cc @@ -23,6 +23,7 @@ #include "utils/utils.h" #include "utils/ms_context.h" +#include "utils/trace_base.h" #include "backend/optimizer/common/helper.h" #include "runtime/device/kernel_info.h" #include "backend/session/anf_runtime_algorithm.h" @@ -67,7 +68,7 @@ const AnfNodePtr SliceGradUnifyMindIR::Process(const FuncGraphPtr &graph, const if (input_num != kSliceGradInputTensorNum && input_num != kSliceGradCangjieInputTensorNum) { MS_LOG(EXCEPTION) << "The input tensor size[" << input_num << "] of node " + slice_grad->DebugString() + " is not equal to " << kSliceGradInputTensorNum - << " or " << kSliceGradCangjieInputTensorNum; + << " or " << kSliceGradCangjieInputTensorNum << ". 
trace: " << trace::DumpSourceLines(node); } std::vector pad_inputs = {NewValueNode(std::make_shared(kPadOpName)), slice_grad->input(kIndex1)}; @@ -89,7 +90,10 @@ const AnfNodePtr SliceGradUnifyMindIR::Process(const FuncGraphPtr &graph, const sizes = AnfAlgo::GetNodeAttr>(slice_grad, kAttrSize); } if (x_shape.size() != begins.size() || begins.size() != sizes.size()) { - MS_LOG(EXCEPTION) << "For SliceGrad, x's shape dim number should be equal to len(begin) and len(size)."; + MS_LOG(EXCEPTION) + << "For SliceGrad, x_shape dim number should be equal to len(begin) and len(size), but got x_shape dim: " + << x_shape.size() << ", len(begin): " << begins.size() << ", len(size): " << sizes.size() + << ". trace: " << trace::DumpSourceLines(node); } std::vector> paddings; for (size_t i = 0; i < x_shape.size(); ++i) { diff --git a/mindspore/ccsrc/backend/optimizer/ascend/mindir/sparse_softmax_cross_entropy_with_logits_unify_mindir.cc b/mindspore/ccsrc/backend/optimizer/ascend/mindir/sparse_softmax_cross_entropy_with_logits_unify_mindir.cc index 0dae94940bd..94da58992db 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/mindir/sparse_softmax_cross_entropy_with_logits_unify_mindir.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/mindir/sparse_softmax_cross_entropy_with_logits_unify_mindir.cc @@ -63,7 +63,8 @@ CNodePtr CreateOneHot(const FuncGraphPtr &graph, const CNodePtr &sparse_softmax_ size_t index = logits_shape.size() - 1; depth = SizeToLong(logits_shape[index]); } else { - MS_LOG(EXCEPTION) << "logits's shape of sparse_softmax_cross_entropy_with_logits is empty."; + MS_LOG(EXCEPTION) << "Logits's shape of node [" << sparse_softmax_node->DebugString() + << "] is empty. trace: " << trace::DumpSourceLines(sparse_softmax_node); } auto value_on = std::make_shared(1.0, kFloat32); @@ -126,7 +127,7 @@ CNodePtr CreateSoftmaxCrossEntropyWithLogits(const FuncGraphPtr &graph, const CN if (!labels_shape.empty()) { loss_shape.emplace_back(labels_shape[0]); } else { - MS_LOG(EXCEPTION) << "one_hot output's shape is empty."; + MS_LOG(EXCEPTION) << "One_hot output's shape is empty. trace: " << trace::DumpSourceLines(one_hot_node); } auto shapes = {loss_shape, AnfAlgo::GetOutputInferShape(one_hot_node, 0)}; @@ -140,7 +141,8 @@ std::vector GetAxis(const AnfNodePtr &node) { MS_EXCEPTION_IF_NULL(node); std::vector output_shape = AnfAlgo::GetOutputInferShape(node, 0); if (output_shape.empty()) { - MS_LOG(EXCEPTION) << node->fullname_with_scope() << "'s output shape is empty"; + MS_LOG(EXCEPTION) << node->fullname_with_scope() + << "'s output shape is empty. trace: " << trace::DumpSourceLines(node); } std::vector range; for (size_t i = 0; i < output_shape.size(); i++) { @@ -308,7 +310,8 @@ CNodePtr CreateRealDiv(const FuncGraphPtr &graph, const CNodePtr &sparse_softmax CheckCNodeInputSize(sparse_softmax_node, kSparseSoftmaxCrossEntropyWithLogitsInputTensorNum); std::vector labels_shape = AnfAlgo::GetPrevNodeOutputInferShape(sparse_softmax_node, 1); if (labels_shape.size() != 1) { - MS_LOG(EXCEPTION) << "label's shape should be 1-D."; + MS_LOG(EXCEPTION) << "Label's shape should be 1-D, but got " << labels_shape.size() + << ". 
trace: " << trace::DumpSourceLines(sparse_softmax_node); } auto y_value = static_cast(labels_shape[0]); auto y = std::make_shared(y_value, kFloat32); @@ -356,13 +359,15 @@ CNodePtr CreateMul(const FuncGraphPtr &graph, const CNodePtr &sparse_softmax_nod auto softmax_output_shape = AnfAlgo::GetOutputInferShape(softmax_output_node, 0); if (softmax_output_shape.size() != softmax_output_shape_size) { MS_LOG(EXCEPTION) << "SoftmaxCrossEntropyWithLogits the second output shape size should be " - << softmax_output_shape_size << ", but got " << softmax_output_shape.size(); + << softmax_output_shape_size << ", but got " << softmax_output_shape.size() + << ". trace: " << trace::DumpSourceLines(softmax_output_node); } ShapeVector tensor_shape; tensor_shape.emplace_back(softmax_output_shape[0]); tensor_shape.emplace_back(1); if (softmax_output_shape[0] == 0) { - MS_LOG(EXCEPTION) << "output_shape[0] of softmax should not be 0"; + MS_LOG(EXCEPTION) << "output_shape[0] of softmax should not be 0. trace: " + << trace::DumpSourceLines(softmax_output_node); } std::vector tensor_value(softmax_output_shape[0], 1.0 / softmax_output_shape[0]); auto buf_size = sizeof(float) * tensor_value.size(); @@ -406,8 +411,9 @@ CNodePtr CreateCast(const FuncGraphPtr &graph, const CNodePtr &cast, const AnfNo bool IsSparseSoftmaxCrossEntropyWithLogitsGrad(const CNodePtr &sparse, string pass_name) { MS_EXCEPTION_IF_NULL(sparse); if (AnfAlgo::GetCNodeName(sparse) != kSparseSoftmaxCrossEntropyWithLogitsOpName) { - MS_LOG(EXCEPTION) << "The pass of " << pass_name << "'s input node is not " - << kSparseSoftmaxCrossEntropyWithLogitsOpName; + MS_LOG(EXCEPTION) << "The pass of " << pass_name << "'s input node should be " + << kSparseSoftmaxCrossEntropyWithLogitsOpName << ", but got " << AnfAlgo::GetCNodeName(sparse) + << ". trace: " << trace::DumpSourceLines(sparse); } if (AnfAlgo::HasNodeAttr(kAttrIsGrad, sparse)) { return AnfAlgo::GetNodeAttr(sparse, kAttrIsGrad); diff --git a/mindspore/ccsrc/backend/optimizer/common/helper.cc b/mindspore/ccsrc/backend/optimizer/common/helper.cc index 66a76124b2a..7c871fd3444 100644 --- a/mindspore/ccsrc/backend/optimizer/common/helper.cc +++ b/mindspore/ccsrc/backend/optimizer/common/helper.cc @@ -32,6 +32,7 @@ #include "utils/convert_utils.h" #include "runtime/device/kernel_info.h" #include "utils/ms_context.h" +#include "utils/trace_base.h" #include "backend/optimizer/common/const_input_to_attr_registry.h" #include "abstract/primitive_infer_map.h" @@ -151,7 +152,8 @@ void CheckCNodeInputSize(const CNodePtr &cnode, size_t input_tensor_size) { auto real_input_tensor_num = AnfAlgo::GetInputTensorNum(cnode); if (real_input_tensor_num != input_tensor_size) { MS_LOG(EXCEPTION) << "The input tensor size[" << real_input_tensor_num - << "] of node " + cnode->DebugString() + " is not equal to " << input_tensor_size; + << "] of node [" + cnode->DebugString() + "] is not equal to " << input_tensor_size + << ". 
trace: " << trace::DumpSourceLines(cnode); } } @@ -606,7 +608,7 @@ void ConstInputToAttr(const CNodePtr &cnode, const mindspore::HashSet &i MS_EXCEPTION_IF_NULL(value_node); MS_LOG(DEBUG) << "start erase input[" << i << "] of cnode[" + cnode->DebugString() + "]"; if (i >= input_names_vec.size()) { - MS_LOG(EXCEPTION) << "index " << i << " is larger than input names size [" << input_names_vec.size() << "]"; + MS_LOG(EXCEPTION) << "Index " << i << " is larger than input names size [" << input_names_vec.size() << "]"; } auto value = value_node->value(); if (value->isa()) { @@ -654,20 +656,20 @@ bool AnfEqual(const BaseRef &a, const BaseRef &b) { } else if (a_node->isa() && b_node->isa()) { auto a_value_node_ptr = a_node->cast(); if (a_value_node_ptr == nullptr) { - MS_LOG(EXCEPTION) << "cast value node ptr fail"; + MS_LOG(EXCEPTION) << "Cast value node ptr fail."; } auto a_value_ptr = a_value_node_ptr->value(); if (a_value_ptr == nullptr) { - MS_LOG(EXCEPTION) << "value ptr is nullptr"; + MS_LOG(EXCEPTION) << "Value ptr is nullptr."; } auto b_value_node_ptr = b_node->cast(); if (b_value_node_ptr == nullptr) { - MS_LOG(EXCEPTION) << "cast value node ptr fail"; + MS_LOG(EXCEPTION) << "Cast value node ptr fail."; } auto b_value_ptr = b_value_node_ptr->value(); if (b_value_ptr == nullptr) { - MS_LOG(EXCEPTION) << "value ptr is nullptr"; + MS_LOG(EXCEPTION) << "Value ptr is nullptr."; } return (*a_value_ptr) == (*b_value_ptr); @@ -808,8 +810,7 @@ AbstractBasePtrList RectifyAbstractFromRegAttr(const PrimitivePtr &primitive, continue; } if (ori_index > input_abstract.size()) { - MS_LOG(EXCEPTION) << "index is out of range input abstract size " << input_abstract.size() - << " get index :" << ori_index; + MS_LOG(EXCEPTION) << "Index " << ori_index << " is out of range in input abstract size " << input_abstract.size(); } rectify_abs_list[index] = input_abstract[ori_index++]; } @@ -829,18 +830,18 @@ AbstractBasePtrList RectifyAbstractFromDynamicInput(const PrimitivePtr &primitiv for (auto item : dynamic_inputs_index) { if (item == kNotDynamicFlag) { if (input_index >= input_abstract.size()) { - MS_LOG(EXCEPTION) << " index " << input_index << " is out of range in input abstract " << input_abstract.size(); + MS_LOG(EXCEPTION) << "Index " << input_index << " is out of range in input abstract " << input_abstract.size(); } (void)rectifyed_abs_list.emplace_back(input_abstract[input_index++]); } else { if (item < 0) { - MS_LOG(EXCEPTION) << " the dynamic input size check error the index should be -1 or positive number but got " + MS_LOG(EXCEPTION) << "The dynamic input size check error the index should be -1 or positive number but got " << item; } AbstractBasePtrList dynamic_inputs_abs; for (auto index = item; index > 0; --index) { if (input_index >= input_abstract.size()) { - MS_LOG(EXCEPTION) << " index " << input_index << " is out of range in input abstract " + MS_LOG(EXCEPTION) << "Index " << input_index << " is out of range in input abstract " << input_abstract.size(); } (void)dynamic_inputs_abs.emplace_back(input_abstract[input_index++]); @@ -877,7 +878,7 @@ AnfNodePtr SexpToNode(const BaseRef &sexp, const BaseRef &graph, PrimitiveVarMap } auto value_node = CreateValueNodeWithSexp(sexp, primitive_vars); if (value_node == nullptr) { - MS_LOG(EXCEPTION) << "sexp cannot converted. 
sexp: " + sexp.ToString(); + MS_LOG(EXCEPTION) << "Sexp cannot converted, sexp: " + sexp.ToString(); } return value_node; } diff --git a/mindspore/ccsrc/backend/optimizer/mem_reuse/mem_reuse_checker.cc b/mindspore/ccsrc/backend/optimizer/mem_reuse/mem_reuse_checker.cc index 3bc5525de67..4e62010e497 100644 --- a/mindspore/ccsrc/backend/optimizer/mem_reuse/mem_reuse_checker.cc +++ b/mindspore/ccsrc/backend/optimizer/mem_reuse/mem_reuse_checker.cc @@ -16,6 +16,7 @@ #include "backend/optimizer/mem_reuse/mem_reuse_checker.h" #include +#include "utils/trace_base.h" namespace mindspore { namespace memreuse { @@ -376,7 +377,8 @@ void MemReuseChecker::CheckNormalIR(const session::KernelGraph *graph) { for (size_t i = 0; i < input_num; ++i) { if (i + 1 >= node->inputs().size()) { MS_LOG(EXCEPTION) << "Input index: " << i - << " is larger than input number: " << AnfAlgo::GetInputTensorNum(node); + << " is larger than input number: " << AnfAlgo::GetInputTensorNum(node) + << ". trace: " << trace::DumpSourceLines(node); } auto real_input_index = AnfAlgo::GetRealInputIndex(node, i); auto input = node->input(real_input_index + 1); diff --git a/mindspore/ccsrc/backend/optimizer/pass/communication_op_fusion.cc b/mindspore/ccsrc/backend/optimizer/pass/communication_op_fusion.cc index 961bea027a4..135a25c758f 100644 --- a/mindspore/ccsrc/backend/optimizer/pass/communication_op_fusion.cc +++ b/mindspore/ccsrc/backend/optimizer/pass/communication_op_fusion.cc @@ -317,7 +317,7 @@ AnfNodePtr CommunicationOpFusion::CreateFusedCommunicationOp(const FuncGraphPtr std::vector fusion_inputs = {NewValueNode(prim)}; // get all inputs of current segment if (end_index >= communication_op_info.communication_op_nodes.size()) { - MS_LOG(EXCEPTION) << "end index out of communication_op_nodes size"; + MS_LOG(EXCEPTION) << "End index is out of communication_op_nodes size"; } std::vector orig_nodes; for (size_t idx = start_index; idx <= end_index; ++idx) { @@ -440,7 +440,7 @@ bool CommunicationOpFusion::DoFusion(const FuncGraphPtr &func_graph, const Commu kernel_graph->ReplaceInternalOutput(communication_op_node_item, new_communication_op, 0, LongToSize(offset)); } if (!manager->Replace(communication_op_node_item, tuple_getitem)) { - MS_LOG(EXCEPTION) << "manager replace node failed"; + MS_LOG(EXCEPTION) << "Manager replace node failed"; } } start_index = end_index + 1; diff --git a/mindspore/ccsrc/backend/optimizer/pass/conv_transpose_to_conv_bp.cc b/mindspore/ccsrc/backend/optimizer/pass/conv_transpose_to_conv_bp.cc index d215b9c144d..8588da20c5f 100644 --- a/mindspore/ccsrc/backend/optimizer/pass/conv_transpose_to_conv_bp.cc +++ b/mindspore/ccsrc/backend/optimizer/pass/conv_transpose_to_conv_bp.cc @@ -18,13 +18,14 @@ #include #include "ir/primitive.h" #include "utils/utils.h" +#include "utils/trace_base.h" #include "backend/optimizer/common/helper.h" namespace mindspore { namespace opt { namespace { constexpr size_t kCNodePrimitiveIdx = 0; -} // namespace +} const BaseRef ConvTransposeToConvBackpropInputPass::DefinePattern() const { VarPtr Xs = std::make_shared(); @@ -39,8 +40,9 @@ const AnfNodePtr ConvTransposeToConvBackpropInputPass::Process(const FuncGraphPt auto conv_transpose = node->cast(); MS_EXCEPTION_IF_NULL(conv_transpose); - if (conv_transpose->size() == kCNodePrimitiveIdx) { - MS_LOG(EXCEPTION) << "Invalid cnode " << node->DebugString() << " input size " << conv_transpose->size(); + if (conv_transpose->inputs().empty()) { + MS_LOG(EXCEPTION) << "Cnode inputs should not be empty, cnode: " << node->DebugString() + << 
", trace: " << trace::DumpSourceLines(conv_transpose); } auto prim = GetValueNode(conv_transpose->input(kCNodePrimitiveIdx)); diff --git a/mindspore/ccsrc/backend/optimizer/pass/custom_op_reg_info_to_attr.cc b/mindspore/ccsrc/backend/optimizer/pass/custom_op_reg_info_to_attr.cc index 10e8d542107..2e9641a93cc 100644 --- a/mindspore/ccsrc/backend/optimizer/pass/custom_op_reg_info_to_attr.cc +++ b/mindspore/ccsrc/backend/optimizer/pass/custom_op_reg_info_to_attr.cc @@ -103,7 +103,7 @@ void AddMissingAttrs(const CNodePtr &cnode, kernel::OpImplyType imply_type, auto default_value = attr->default_value(); if (default_value.empty()) { MS_LOG(EXCEPTION) << "attr [" << attr_name << "] in the registration information of op [" << op_name - << "] does not have a value."; + << "] does not have a value. trace: " << trace::DumpSourceLines(cnode); } ParseAttrDefaultValue(op_name, attr_name, default_value, attr->type(), primitive); need_update = true; diff --git a/mindspore/ccsrc/backend/optimizer/pass/optimize_dependence.cc b/mindspore/ccsrc/backend/optimizer/pass/optimize_dependence.cc index 5dad2e4fce2..50b933eea42 100644 --- a/mindspore/ccsrc/backend/optimizer/pass/optimize_dependence.cc +++ b/mindspore/ccsrc/backend/optimizer/pass/optimize_dependence.cc @@ -22,6 +22,7 @@ #include "backend/optimizer/common/helper.h" #include "base/core_ops.h" #include "utils/utils.h" +#include "utils/trace_base.h" #include "backend/session/kernel_graph.h" #include "backend/session/anf_runtime_algorithm.h" @@ -192,7 +193,8 @@ const AnfNodePtr OptimizeDependence::Process(const FuncGraphPtr &func_graph, con bool inputs_changed = false; for (auto index : candidate_inputs) { if (index >= new_inputs.size()) { - MS_LOG(EXCEPTION) << "Index is out of the size of " << cnode->DebugString() << " inputs."; + MS_LOG(EXCEPTION) << "Index is out of the size of " << cnode->DebugString() + << " inputs. trace: " << trace::DumpSourceLines(cnode); } auto replace_node = GetConvertNode(func_graph, cnode, index); if (replace_node != nullptr) { diff --git a/mindspore/ccsrc/backend/optimizer/trt_pass/graph_partitioner.cc b/mindspore/ccsrc/backend/optimizer/trt_pass/graph_partitioner.cc index a73c23d3bcf..cc131984694 100644 --- a/mindspore/ccsrc/backend/optimizer/trt_pass/graph_partitioner.cc +++ b/mindspore/ccsrc/backend/optimizer/trt_pass/graph_partitioner.cc @@ -25,6 +25,7 @@ #include #include "utils/hash_map.h" #include "utils/ms_context.h" +#include "utils/trace_base.h" #include "backend/session/anf_runtime_algorithm.h" #include "backend/optimizer/trt_pass/trt_op_factory.h" #include "vm/segment_runner.h" @@ -47,7 +48,7 @@ bool WeightCheck(const AnfNodePtr &node) { for (auto index : iter->second) { if (index >= real_inputs.size()) { MS_LOG(EXCEPTION) << "index out of range. node: " << node->DebugString() << ", index: " << index - << real_inputs.size(); + << real_inputs.size() << ". trace: " << trace::DumpSourceLines(node); } if (real_inputs[index].first->isa() && diff --git a/mindspore/ccsrc/backend/session/anf_runtime_algorithm.cc b/mindspore/ccsrc/backend/session/anf_runtime_algorithm.cc index 3bfe0a19dee..151d53c2d6d 100644 --- a/mindspore/ccsrc/backend/session/anf_runtime_algorithm.cc +++ b/mindspore/ccsrc/backend/session/anf_runtime_algorithm.cc @@ -558,7 +558,7 @@ size_t AnfRuntimeAlgorithm::GetInputNum(const CNodePtr &cnode) { MS_EXCEPTION_IF_NULL(cnode); size_t input_num = cnode->size(); if (input_num == 0) { - MS_LOG(EXCEPTION) << "Cnode inputs size can't be zero"; + MS_LOG(EXCEPTION) << "Cnode inputs size can't be zero. 
trace: " << trace::DumpSourceLines(cnode); } return input_num - 1; } @@ -2122,7 +2122,8 @@ void AnfRuntimeAlgorithm::GetAllFatherRealNode(const AnfNodePtr &anf_node, std:: auto cnode = anf_node->cast(); MS_EXCEPTION_IF_NULL(cnode); if (cnode->inputs().empty()) { - MS_LOG(EXCEPTION) << "Illegal null input of cnode(%s)" << anf_node->DebugString(); + MS_LOG(EXCEPTION) << "Illegal null input of cnode(%s)" << anf_node->DebugString() + << ". trace: " << trace::DumpSourceLines(cnode); } auto input0 = cnode->input(0); if (IsPrimitive(input0, prim::kPrimMakeTuple)) { @@ -2136,7 +2137,7 @@ void AnfRuntimeAlgorithm::GetAllFatherRealNode(const AnfNodePtr &anf_node, std:: GetAllFatherRealNode(cnode->input(kRealInputNodeIndexInTupleGetItem), result, visited); } else if (IsPrimitive(input0, prim::kPrimDepend)) { if (cnode->inputs().size() != kDependInputSize) { - MS_LOG(EXCEPTION) << "Depend node must have 2 inputs!"; + MS_LOG(EXCEPTION) << "Depend node must have 2 inputs! trace: " << trace::DumpSourceLines(cnode); } GetAllFatherRealNode(cnode->input(kRealInputIndexInDepend), result, visited); GetAllFatherRealNode(cnode->input(kDependAttachNodeIndex), result, visited); @@ -2148,7 +2149,8 @@ void AnfRuntimeAlgorithm::InferShape(const CNodePtr &node, std::mapDebugString(); auto inputs = node->inputs(); if (inputs.empty()) { - MS_LOG(EXCEPTION) << "Invalid inputs"; + MS_LOG(EXCEPTION) << "Inputs should not be empty! Cnode: " << node->DebugString() + << ". trace: " << trace::DumpSourceLines(node); } AbstractBasePtrList args_spec_list; auto primitive = GetValueNode(inputs[0]); @@ -2182,7 +2184,8 @@ void AnfRuntimeAlgorithm::InferShape(const CNodePtr &node, std::mapShape(); if (!base_shape->isa()) { MS_LOG(EXCEPTION) << "Node:" << node->DebugString() - << " input is a tuple_get_item but real input node shape is not a TupleShape"; + << " input is a tuple_get_item but real input node shape is not a TupleShape. trace: " + << trace::DumpSourceLines(real_input); } auto abs = real_input->abstract()->cast(); MS_EXCEPTION_IF_NULL(abs); @@ -2430,7 +2433,7 @@ bool AnfRuntimeAlgorithm::IsCallNode(const AnfNodePtr &node) { const auto &inputs = cnode->inputs(); if (inputs.empty() || inputs[0] == nullptr) { - MS_LOG(EXCEPTION) << "Invalid call node:" << node->DebugString(); + MS_LOG(EXCEPTION) << "Invalid call node:" << node->DebugString() << ". 
trace: " << trace::DumpSourceLines(cnode); } return inputs[0]->isa() || (inputs[0]->isa() && IsValueNode(inputs[0])); } diff --git a/mindspore/ccsrc/backend/session/session_basic.cc b/mindspore/ccsrc/backend/session/session_basic.cc index ed97c54d4a6..5f1c1730651 100644 --- a/mindspore/ccsrc/backend/session/session_basic.cc +++ b/mindspore/ccsrc/backend/session/session_basic.cc @@ -161,7 +161,7 @@ BaseRef GetNodeOutputTensorFromInputs(const session::KernelWithIndex &node_outpu } for (size_t input_idx = 0; input_idx < graph->inputs().size(); input_idx++) { if (input_idx >= input_tensors.size()) { - MS_LOG(EXCEPTION) << "Input idx:" << input_idx << "out of range:" << input_tensors.size(); + MS_LOG(EXCEPTION) << "Input idx:" << input_idx << " is out of range:" << input_tensors.size(); } if (graph->inputs()[input_idx] == node) { return input_tensors[input_idx]; @@ -373,7 +373,7 @@ BaseRef CreateNodeOutputPlaceholder(const session::KernelWithIndex &node_output_ if (node->isa()) { for (size_t input_idx = 0; input_idx < graph->inputs().size(); input_idx++) { if (input_idx >= input_tensors.size()) { - MS_LOG(EXCEPTION) << "Input idx:" << input_idx << "out of range:" << input_tensors.size(); + MS_LOG(EXCEPTION) << "Input idx:" << input_idx << " is out of range:" << input_tensors.size(); } if (graph->inputs()[input_idx] == node) { return input_tensors[input_idx]; @@ -424,13 +424,15 @@ void CheckInputTensorShape(const TensorPtr &tensor, const CNodePtr &kernel, size if (tensor_shape.size() != input_shape.size()) { MS_LOG(EXCEPTION) << "The input tensor's shape size: " << tensor_shape.size() << " is not equal to expected size: " << input_shape.size() << " for input[" << input_index - << "] of kernel: " << AnfAlgo::GetCNodeName(kernel); + << "] of kernel: " << AnfAlgo::GetCNodeName(kernel) + << ", trace: " << trace::DumpSourceLines(kernel); } for (size_t i = 0; i < tensor_shape.size(); i++) { if (tensor_shape[i] < 0 || static_cast(tensor_shape[i]) != input_shape[i]) { MS_LOG(EXCEPTION) << "The input tensor's shape: " << tensor_shape << " is not equal to expected shape: " << input_shape << " for input[" << input_index - << "] of kernel: " << AnfAlgo::GetCNodeName(kernel); + << "] of kernel: " << AnfAlgo::GetCNodeName(kernel) + << ", trace: " << trace::DumpSourceLines(kernel); } } } @@ -1377,6 +1379,7 @@ void SessionBasic::HandleOpOutputs(const AnfNodePtr &kernel, const VectorRef &op } } } + TensorPtr SessionBasic::GetValueNodeOutputTensor(const AnfNodePtr &node, size_t output_index) { MS_EXCEPTION_IF_NULL(node); if (!node->isa()) { @@ -1840,7 +1843,8 @@ void SessionBasic::SetSummaryNodes(KernelGraph *graph) { auto cnode = n->cast(); MS_EXCEPTION_IF_NULL(cnode); if (cnode->inputs().size() <= kSummaryGetItem) { - MS_LOG(EXCEPTION) << "The node Summary should have 2 inputs at least!"; + MS_LOG(EXCEPTION) << "The node Summary should have 2 inputs at least, but got " << cnode->inputs().size() - 1 + << ". 
trace: " << trace::DumpSourceLines(cnode); } auto node = cnode->input(kSummaryGetItem); MS_EXCEPTION_IF_NULL(node); diff --git a/mindspore/ccsrc/backend/session/single_kernel_graph.cc b/mindspore/ccsrc/backend/session/single_kernel_graph.cc index 6121e162e5a..c5d62873cf6 100644 --- a/mindspore/ccsrc/backend/session/single_kernel_graph.cc +++ b/mindspore/ccsrc/backend/session/single_kernel_graph.cc @@ -16,6 +16,7 @@ #include "backend/session/single_kernel_graph.h" #include "backend/session/anf_runtime_algorithm.h" +#include "utils/trace_base.h" namespace mindspore { namespace session { @@ -45,7 +46,10 @@ std::shared_ptr SingleKernelGraph::ConstructKernelGraphBas // get output dynamic shape info AnfAlgo::SetNodeAttr(kAttrOutputIsDynamicShape, MakeValue(false), cnode); if (output_dtypes.size() != output_shapes.size()) { - MS_LOG(EXCEPTION) << " output_dtypes size should equal to output_shapes size, the op name is: " << op_name; + MS_LOG(EXCEPTION) + << "The size of output_dtypes should be equal to size of output_shapes, but got output_dtypes size: " + << output_dtypes.size() << ", output_shapes size: " << output_shapes.size() << ". The op name is: " << op_name + << ", trace: " << trace::DumpSourceLines(cnode); } AnfAlgo::SetOutputInferTypeAndShape(output_dtypes, output_shapes, cnode.get()); // set execution order