!26763 add trace for exception log

Merge pull request !26763 from yuchaojie/ir_fusion3
i-robot 2021-11-26 07:02:06 +00:00 committed by Gitee
commit c87a50a174
44 changed files with 331 additions and 205 deletions
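
Every file in this change applies the same pattern: include "utils/trace_base.h", then append trace::DumpSourceLines(node) to the MS_LOG(EXCEPTION) message so the exception log points back to the source lines that produced the failing graph node. A minimal sketch of the recurring pattern (the check and helper name are illustrative, not from this diff):

#include "utils/trace_base.h"

void CheckInputNum(const CNodePtr &cnode, size_t expected) {
  MS_EXCEPTION_IF_NULL(cnode);
  auto input_num = AnfAlgo::GetInputTensorNum(cnode);
  if (input_num != expected) {
    // DumpSourceLines appends the source lines recorded on the node's debug
    // info, so the failure can be traced back to the originating user code.
    MS_LOG(EXCEPTION) << "Invalid input num, should be " << expected << ", but got " << input_num
                      << ". trace: " << trace::DumpSourceLines(cnode);
  }
}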

View File

@ -31,6 +31,7 @@
#include "base/core_ops.h"
#include "ir/graph_utils.h"
#include "utils/ms_context.h"
#include "utils/trace_base.h"
#include "mindspore/ccsrc/debug/common.h"
namespace mindspore {
@ -640,7 +641,8 @@ bool IsWeightBoundary(const AnfNodePtr &node) {
std::vector<int64_t> GetReduceAttrAxis(const CNodePtr &cnode) {
if (AnfAlgo::GetInputTensorNum(cnode) != 1 || AnfAlgo::GetOutputTensorNum(cnode) != 1) {
MS_LOG(EXCEPTION) << "The reduce node [" << cnode->DebugString() << "] is not single input or single output.";
MS_LOG(EXCEPTION) << "The reduce node [" << cnode->DebugString()
<< "] is not single input or single output. trace: " << trace::DumpSourceLines(cnode);
}
std::vector<int64_t> axis;
auto input_shape = AnfAlgo::GetPrevNodeOutputInferShape(cnode, 0);

View File

@ -20,6 +20,7 @@
#include "backend/session/anf_runtime_algorithm.h"
#include "utils/ms_context.h"
#include "utils/utils.h"
#include "utils/trace_base.h"
namespace mindspore {
namespace {
@ -272,7 +273,8 @@ void HcomUtil::GetHcomGroup(NotNull<const AnfNodePtr &> anf_node, NotNull<std::s
if (attr != nullptr) {
*group = GetValue<std::string>(attr);
} else {
MS_LOG(EXCEPTION) << "Get Hcom Group Attr of Op:" << anf_node->fullname_with_scope() << " failed";
MS_LOG(EXCEPTION) << "Get Hcom Group Attr of Op:" << anf_node->fullname_with_scope()
<< " failed. trace: " << trace::DumpSourceLines(anf_node);
}
}
} // namespace mindspore

View File

@ -16,6 +16,7 @@
#include "backend/kernel_compiler/host/dynamic_broadcast_gradient_args_kernel.h"
#include "backend/session/anf_runtime_algorithm.h"
#include "utils/trace_base.h"
namespace mindspore {
namespace kernel {
@ -124,10 +125,12 @@ std::vector<int64_t> GetInputShape(const CNodePtr &cnode, size_t index) {
auto shape_x = AnfAlgo::GetPrevNodeOutputInferShape(cnode, index);
auto type_x = AnfAlgo::GetOutputInferDataType(cnode, index);
if (type_x != TypeId::kNumberTypeInt64) {
MS_LOG(EXCEPTION) << "Input x type must be int64, but :" << type_x;
MS_LOG(EXCEPTION) << "Input x type must be int64, but got " << type_x
<< ". trace: " << trace::DumpSourceLines(cnode);
}
if (shape_x.size() != 1) {
MS_LOG(EXCEPTION) << "Input" << index << " must be [1-D], but " << shape_x.size() << "-D.";
MS_LOG(EXCEPTION) << "Input" << index << " must be [1-D], but got " << shape_x.size()
<< "-D. trace: " << trace::DumpSourceLines(cnode);
}
size_t x_num = shape_x[0];
@ -184,7 +187,8 @@ void DynamicBroadcastGradientArgsKernel::Execute() {
MS_EXCEPTION_IF_NULL(cnode);
auto input_num = AnfAlgo::GetInputTensorNum(cnode);
if (input_num != kInputNum) {
MS_LOG(EXCEPTION) << "Invalid Input Num:" << input_num;
MS_LOG(EXCEPTION) << "Invalid input num, should be " << kInputNum << ", but got " << input_num
<< ". trace: " << trace::DumpSourceLines(cnode);
}
std::vector<std::vector<int64_t>> input_shapes(kInputNum);

View File

@ -19,6 +19,7 @@
#include <functional>
#include "backend/session/anf_runtime_algorithm.h"
#include "abstract/utils.h"
#include "utils/trace_base.h"
namespace mindspore {
namespace kernel {
@ -29,12 +30,14 @@ std::vector<int64_t> GetInputValue(const CNodePtr &cnode, size_t index) {
auto address_x = AnfAlgo::GetPrevNodeMutableOutputAddr(cnode, index);
auto shape_x = AnfAlgo::GetPrevNodeOutputInferShape(cnode, index);
if (shape_x.size() != 1) {
MS_LOG(EXCEPTION) << "Input" << index << " must be [1-D], but " << shape_x.size() << "-D.";
MS_LOG(EXCEPTION) << "Input" << index << " must be [1-D], but got " << shape_x.size()
<< "-D. trace: " << trace::DumpSourceLines(cnode);
}
session::KernelWithIndex kernel_with_index = AnfAlgo::GetPrevNodeOutput(cnode, index);
auto type_x = AnfAlgo::GetOutputInferDataType(kernel_with_index.first, kernel_with_index.second);
if (type_x != TypeId::kNumberTypeInt64 && type_x != TypeId::kNumberTypeInt32) {
MS_LOG(EXCEPTION) << "Input x type must be int64 or int32, but :" << TypeIdToType(type_x);
MS_LOG(EXCEPTION) << "Input x type must be int64 or int32, but got " << TypeIdToType(type_x)
<< ". trace: " << trace::DumpSourceLines(cnode);
}
size_t x_num = shape_x[0];
@ -67,7 +70,8 @@ void DynamicReshapeKernel::Execute() {
MS_EXCEPTION_IF_NULL(cnode);
auto input_num = AnfAlgo::GetInputTensorNum(cnode);
if (input_num != kInputNum) {
MS_LOG(EXCEPTION) << "Invalid Input Num:" << input_num;
MS_LOG(EXCEPTION) << "Invalid input num, should be " << kInputNum << ", but got " << input_num
<< ". trace: " << trace::DumpSourceLines(cnode);
}
auto address_x = AnfAlgo::GetPrevNodeMutableOutputAddr(cnode, 0);

View File

@ -16,6 +16,7 @@
#include "backend/kernel_compiler/host/dynamic_shape_kernel.h"
#include "backend/session/anf_runtime_algorithm.h"
#include "utils/trace_base.h"
namespace mindspore {
namespace kernel {
@ -25,7 +26,8 @@ void DynamicShapeKernel::Execute() {
MS_EXCEPTION_IF_NULL(cnode);
auto input_num = AnfAlgo::GetInputTensorNum(cnode);
if (input_num != 1) {
MS_LOG(EXCEPTION) << "Invalid Input Num:" << input_num;
MS_LOG(EXCEPTION) << "Op [" << cnode->DebugString() << "] has invalid input num, should be 1, but got " << input_num
<< ". trace: " << trace::DumpSourceLines(cnode);
}
auto prev_output_shape = AnfAlgo::GetPrevNodeOutputInferShape(cnode, 0);

View File

@ -52,7 +52,7 @@ bool AssignKernel::Launch(const std::vector<AddressPtr> &inputs, const std::vect
std::vector<TaskInfoPtr> AssignKernel::GenTask(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
const std::vector<AddressPtr> &, uint32_t stream_id) {
if (inputs.size() != 2) {
MS_LOG(EXCEPTION) << "inputs size is not two, got " << inputs.size();
MS_LOG(EXCEPTION) << "Inputs size should be 2, but got " << inputs.size();
}
stream_id_ = stream_id;

View File

@ -194,7 +194,7 @@ bool TbeAdapter::IsPlaceHolderInput(const AnfNodePtr &node, const OpIOInfoPtr &i
auto none_index = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(node, "placeholder_index");
return find(none_index.begin(), none_index.end(), input_ptr->index()) != none_index.end();
} else {
MS_LOG(EXCEPTION) << "Cnode: " << cnode_name << "doesn't has attribute placeholder_index.";
MS_LOG(EXCEPTION) << "Cnode: " << cnode_name << " doesn't has attribute placeholder_index.";
}
}
void TbeAdapter::CastAttrJsonPrePass(const AnfNodePtr &anf_node, std::vector<OpAttrPtr> *op_info_attrs,
@ -235,7 +235,7 @@ void TbeAdapter::CastAttrJsonPost(const AnfNodePtr &anf_node, nlohmann::json *at
if (iter != dst_type_map.end()) {
attrs_json->at(0)[kJValue] = iter->second;
} else {
MS_LOG(EXCEPTION) << "Invalid type:" << type_id;
MS_LOG(EXCEPTION) << "Invalid type: " << type_id;
}
}
void TbeAdapter::LayerNormAttrJsonPost(const AnfNodePtr &anf_node, nlohmann::json *attrs_json) {

View File

@ -15,6 +15,7 @@
*/
#include "backend/kernel_compiler/tbe/tbe_kernel_select/tbe_kernel_broadcast_selecter.h"
#include "utils/utils.h"
#include "utils/trace_base.h"
#include "backend/session/anf_runtime_algorithm.h"
#include "backend/kernel_compiler/tbe/tbe_kernel_select/common_utils.h"
@ -37,11 +38,13 @@ bool TbeKernelBroadCastSelecter::GetShapeInfo(SupportFormat *support_format) {
auto dynamic_size_vec = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(cnode_ptr_, kAttrDynInputSizes);
constexpr int64_t DYNAMIC_INPUT_NUM = 2;
if (dynamic_size_vec.empty()) {
MS_LOG(EXCEPTION) << "Node [" << AnfAlgo::GetCNodeName(cnode_ptr_) << "]'s attr [dyn_input_sizes] is empty.";
MS_LOG(EXCEPTION) << "Node [" << AnfAlgo::GetCNodeName(cnode_ptr_)
<< "]'s attr [dyn_input_sizes] is empty. trace: " << trace::DumpSourceLines(cnode_ptr_);
}
if (dynamic_size_vec[0] < DYNAMIC_INPUT_NUM) {
MS_LOG(EXCEPTION) << "Node [" << AnfAlgo::GetCNodeName(cnode_ptr_)
<< "]'s attr [dyn_input_sizes] value less than " << DYNAMIC_INPUT_NUM;
<< "]'s attr [dyn_input_sizes] value less than " << DYNAMIC_INPUT_NUM
<< ". trace: " << trace::DumpSourceLines(cnode_ptr_);
}
auto dynamic_input_shape0_ = AnfAlgo::GetPrevNodeOutputInferShape(cnode_ptr_, kInputIndex_0);
PadScalarShape(&dynamic_input_shape0_);

View File

@ -19,6 +19,7 @@
#include <vector>
#include "backend/session/anf_runtime_algorithm.h"
#include "frontend/parallel/ops_info/ops_utils.h"
#include "utils/trace_base.h"
namespace mindspore {
namespace kernel {
@ -51,7 +52,7 @@ static bool CheckStridedSlice(const CNodePtr &cnode) {
MS_EXCEPTION_IF_NULL(input_value);
if (!input_value->isa<Tensor>()) {
MS_LOG(EXCEPTION) << "For 'StrideSlice', the first input value should be a tensor, but got "
<< input_value->ToString();
<< input_value->ToString() << ". trace: " << trace::DumpSourceLines(cnode);
}
input_dims = SizeToInt(input_value->cast<TensorPtr>()->shape().size());
} else if (input->isa<CNode>() || input->isa<Parameter>()) {
@ -59,12 +60,12 @@ static bool CheckStridedSlice(const CNodePtr &cnode) {
MS_EXCEPTION_IF_NULL(input_abstract);
if (!input_abstract->isa<AbstractTensor>()) {
MS_LOG(EXCEPTION) << "For 'StrideSlice', the first input value should be a tensor, but got "
<< input_abstract->ToString();
<< input_abstract->ToString() << ". trace: " << trace::DumpSourceLines(cnode);
}
input_dims = SizeToInt(input_abstract->cast<AbstractTensorPtr>()->shape()->shape().size());
} else {
MS_LOG(EXCEPTION) << "For 'StrideSlice', the first input node should be a 'ValueNode' or a 'CNode', but got "
<< input->ToString();
<< input->ToString() << ". trace: " << trace::DumpSourceLines(cnode);
}
const int base_number = 2;
if (shrink_axis_mask >= std::pow<int, int>(base_number, input_dims - 1) && input_dims > 1) {
@ -79,7 +80,7 @@ static bool CheckTopK(const CNodePtr &cnode) {
auto sorted = AnfAlgo::GetNodeAttr<bool>(cnode, kAttrSorted);
return sorted;
}
MS_LOG(EXCEPTION) << "For 'TopK', it should be have attribute 'sorted'.";
MS_LOG(EXCEPTION) << "For 'TopK', it should be have attribute 'sorted'. trace: " << trace::DumpSourceLines(cnode);
}
bool TbePropertyChecker::CheckTbeProperties(const mindspore::CNodePtr &cnode) {

View File

@ -46,7 +46,7 @@ bool IsPartOutputsOfHcclOp(const AnfNodePtr &node, const CNodePtr &cur_hccl, con
auto &node_users = manager->node_users();
auto iter = node_users.find(prev_hccl_op);
if (iter == node_users.end()) {
MS_LOG(EXCEPTION) << "node has no output in manager"
MS_LOG(EXCEPTION) << "Node has no output in manager"
<< " trace: " << trace::DumpSourceLines(cur_hccl);
}
for (const auto &node_index : iter->second) {

View File

@ -21,6 +21,7 @@
#include <map>
#include "utils/utils.h"
#include "utils/trace_base.h"
#include "backend/session/anf_runtime_algorithm.h"
#include "backend/kernel_compiler/common_utils.h"
@ -40,11 +41,12 @@ const std::map<std::string, ConvertFunction> kReduceConvertMap = {{kOpFormat_FRA
void SafeCheckFunction(const CNodePtr &cnode, const std::vector<int64_t> &reduce_axis) {
MS_EXCEPTION_IF_NULL(cnode);
if (reduce_axis.empty()) {
MS_LOG(EXCEPTION) << "The node " << cnode->DebugString() << "'s reduce axis got a empty vector";
MS_LOG(EXCEPTION) << "The node " << cnode->DebugString()
<< "'s reduce axis got a empty vector, trace: " << trace::DumpSourceLines(cnode);
}
if (AnfAlgo::GetInputTensorNum(cnode) != 1 || AnfAlgo::GetOutputTensorNum(cnode) != 1) {
MS_LOG(EXCEPTION) << "the kind of reduce node [" << cnode->DebugString()
<< "] is not single input or single output.";
MS_LOG(EXCEPTION) << "The kind of reduce node [" << cnode->DebugString()
<< "] is not single input or single output. trace: " << trace::DumpSourceLines(cnode);
}
for (auto elem : reduce_axis) {
if (elem > kAxisDim) {

View File

@ -47,8 +47,8 @@ const AnfNodePtr ConvertUnSupportNodeToAICPU::Process(const mindspore::FuncGraph
AnfAlgo::SetSelectKernelBuildInfo(builder->Build(), node.get());
AnfAlgo::SetNodeAttr(kAttrIsAICPUKernel, MakeValue(true), node);
} else {
MS_LOG(EXCEPTION) << " kernel " << kernel_builder_info->ToString() << "is not supported in AiCPU & AiCore : node ["
<< node->DebugString() << "]" << trace::DumpSourceLines(node);
MS_LOG(EXCEPTION) << "Kernel " << kernel_builder_info->ToString() << "is not supported in AiCPU & AiCore : node ["
<< node->DebugString() << "]. trace:" << trace::DumpSourceLines(node);
}
return nullptr;
}

View File

@ -42,7 +42,7 @@ session::KernelWithIndex DealRefAndSpiltUnSupportedTransdata::FindRefOriginNode(
if (ref_infos.count(cur_out_index) != 0) {
auto in_index = ref_infos.at(cur_out_index);
if (in_index > cnode->inputs().size()) {
MS_LOG(EXCEPTION) << "ref op has wrong inputs: op inputs num is " << cnode->inputs().size()
MS_LOG(EXCEPTION) << "Ref op has wrong inputs: op inputs num is " << cnode->inputs().size()
<< ", ref info is " << cur_out_index;
}
AnfNodePtr next_node = cnode->input(in_index + 1);
@ -84,7 +84,7 @@ void DealRefAndSpiltUnSupportedTransdata::AddRefPairToKernelGraph(const FuncGrap
auto final_ref = (final_node == get_item ? cnode : final_node);
session::AnfWithOutIndex final_pair = std::make_pair(final_ref, final_index);
if (kernel_graph->IsInRefOutputMap(final_pair)) {
MS_LOG(EXCEPTION) << "ref_pair is already in ref map, node is " << final_ref->DebugString() << ", index is "
MS_LOG(EXCEPTION) << "Ref_pair is already in ref map, node is " << final_ref->DebugString() << ", index is "
<< final_index;
}
MS_LOG(DEBUG) << "Add Ref pair, final {node ptr " << final_pair.first.get() << " , info is "
@ -214,7 +214,7 @@ CNodePtr DealRefAndSpiltUnSupportedTransdata::DealRefSingleOutput(
}
auto ref_info = *(ref_infos.begin());
if (ref_info.second > cnode->inputs().size()) {
MS_LOG(EXCEPTION) << "ref op has wrong inputs: op inputs num is " << cnode->inputs().size() << ", ref info is "
MS_LOG(EXCEPTION) << "Ref op has wrong inputs: op inputs num is " << cnode->inputs().size() << ", ref info is "
<< ref_info.second;
}
return AddAdditionalToRefOutput(func_graph, cnode, ref_info.first, ref_info.second, nullptr);

View File

@ -23,6 +23,7 @@
#include "backend/session/anf_runtime_algorithm.h"
#include "utils/utils.h"
#include "utils/trace_base.h"
#include "base/core_ops.h"
namespace mindspore {
@ -180,7 +181,8 @@ AnfNodePtr MergeCastToNextOp(const FuncGraphPtr &graph, const CNodePtr &node, co
AnfAlgo::SetSelectKernelBuildInfo(*alternative_kernel_info, next_cnode.get());
if (AnfAlgo::GetInputTensorNum(node) < kCastInputTensorNum) {
MS_LOG(EXCEPTION) << "Op[" << node->DebugString() << "] has wrong input num:" << AnfAlgo::GetInputTensorNum(node)
<< ", should be not less than " << kCastInputTensorNum;
<< ", should be not less than " << kCastInputTensorNum
<< ". trace: " << trace::DumpSourceLines(node);
}
return node->input(1);
}

View File

@ -58,8 +58,9 @@ void BatchNormGradSplit::CreateOutputsOfReduceGrad(const FuncGraphPtr &graph, co
const auto &bn_grad_inputs = bn_grad_node->inputs();
CheckCNodeInputSize(bn_grad_node, kBNGradInputTensorNum);
if (bn_update_grad_outputs.size() != kBNTrainingUpdateGradOutputNum) {
MS_LOG(EXCEPTION) << "BNTrainingReduceGrad_outputs has wrong size"
<< " trace: " << trace::DumpSourceLines(bn_grad_node);
MS_LOG(EXCEPTION) << "Outputs of BNTrainingReduceGrad has wrong size, should be " << kBNTrainingUpdateGradOutputNum
<< ", but got " << bn_update_grad_outputs.size()
<< ". trace: " << trace::DumpSourceLines(bn_grad_node);
}
std::vector<AnfNodePtr> bn_reduce_grad_inputs = {
NewValueNode(std::make_shared<Primitive>(kBNTrainingReduceGradOpName)),
@ -110,15 +111,15 @@ const AnfNodePtr BatchNormGradSplit::Process(const FuncGraphPtr &func_graph, con
std::vector<AnfNodePtr> bn_update_grad_outputs;
CreateOutputsOfUpdateGrad(func_graph, cnode, &bn_update_grad_outputs);
if (bn_update_grad_outputs.size() != kBNTrainingUpdateGradOutputNum) {
MS_LOG(EXCEPTION) << "bn_update_grad_outputs has wrong size"
<< " trace: " << trace::DumpSourceLines(node);
MS_LOG(EXCEPTION) << "Outputs of bn_update_grad has wrong size, should be " << kBNTrainingUpdateGradOutputNum
<< ", but got " << bn_update_grad_outputs.size() << ". trace: " << trace::DumpSourceLines(node);
}
std::vector<AnfNodePtr> bn_reduce_grad_outputs;
CreateOutputsOfReduceGrad(func_graph, cnode, bn_update_grad_outputs, &bn_reduce_grad_outputs);
if (bn_reduce_grad_outputs.size() != kSingleOutputNum) {
MS_LOG(EXCEPTION) << "bn_reduce_grad_outputs has wrong size"
<< " trace: " << trace::DumpSourceLines(node);
MS_LOG(EXCEPTION) << "Outputs of bn_reduce_grad has wrong size, should be " << kSingleOutputNum << ", but got "
<< bn_reduce_grad_outputs.size() << ". trace: " << trace::DumpSourceLines(node);
}
std::vector<AnfNodePtr> make_tuple_inputs = {NewValueNode(prim::kPrimMakeTuple), bn_reduce_grad_outputs[0],

View File

@ -58,7 +58,9 @@ void BnGradSplit::CreateOutputsOfReduceGrad(const FuncGraphPtr &graph, const CNo
auto bn_grad_inputs = bn_grad_node->inputs();
CheckCNodeInputSize(bn_grad_node, kBNGradInputTensorNum);
if (bn_update_grad_outputs.size() != kBNTrainingUpdateGradOutputNum) {
MS_LOG(EXCEPTION) << "bn_update_grad_outputs has wrong size";
MS_LOG(EXCEPTION) << "Outputs of bn_update_grad has wrong size, should be " << kBNTrainingUpdateGradOutputNum
<< ", but got " << bn_update_grad_outputs.size()
<< ". trace: " << trace::DumpSourceLines(bn_grad_node);
}
std::vector<AnfNodePtr> bn_reduce_grad_inputs = {
NewValueNode(std::make_shared<Primitive>(kBNTrainingReduceGradOpName)),
@ -87,15 +89,15 @@ CNodePtr BnGradSplit::BNGradSplitForTBE(const FuncGraphPtr &func_graph, const CN
std::vector<AnfNodePtr> bn_update_grad_outputs;
CreateOutputsOfUpdateGrad(func_graph, cnode, &bn_update_grad_outputs);
if (bn_update_grad_outputs.size() != kBNTrainingUpdateGradOutputNum) {
MS_LOG(EXCEPTION) << "bn_update_grad_outputs has wrong size"
<< " trace: " << trace::DumpSourceLines(cnode);
MS_LOG(EXCEPTION) << "Outputs of bn_update_grad has wrong size, should be " << kBNTrainingUpdateGradOutputNum
<< ", but got " << bn_update_grad_outputs.size() << ". trace: " << trace::DumpSourceLines(cnode);
}
std::vector<AnfNodePtr> bn_reduce_grad_outputs;
CreateOutputsOfReduceGrad(func_graph, cnode, bn_update_grad_outputs, &bn_reduce_grad_outputs);
if (bn_reduce_grad_outputs.size() != 1) {
MS_LOG(EXCEPTION) << "bn_reduce_grad_outputs has wrong size"
<< " trace: " << trace::DumpSourceLines(cnode);
MS_LOG(EXCEPTION) << "Outputs of bn_reduce_grad has wrong size, should be " << 1 << ", but got "
<< bn_reduce_grad_outputs.size() << ". trace: " << trace::DumpSourceLines(cnode);
}
std::vector<AnfNodePtr> make_tuple_inputs = {NewValueNode(prim::kPrimMakeTuple), bn_reduce_grad_outputs[0],
@ -112,8 +114,8 @@ CNodePtr SyncBnGradSplit::SyncBNGradSplitForTBE(const FuncGraphPtr &func_graph,
CreateOutputsOfUpdateGrad(func_graph, cnode, &bn_update_grad_outputs);
if (bn_update_grad_outputs.size() != kBNTrainingUpdateGradOutputNum) {
MS_LOG(EXCEPTION) << "bn_update_grad_outputs has wrong size"
<< " trace: " << trace::DumpSourceLines(cnode);
MS_LOG(EXCEPTION) << "Outputs of bn_update_grad has wrong size, should be " << kBNTrainingUpdateGradOutputNum
<< ", but got " << bn_update_grad_outputs.size() << ". trace: " << trace::DumpSourceLines(cnode);
}
std::vector<AnfNodePtr> allreduce_mul_outputs;
@ -125,8 +127,8 @@ CNodePtr SyncBnGradSplit::SyncBNGradSplitForTBE(const FuncGraphPtr &func_graph,
std::vector<AnfNodePtr> bn_reduce_grad_outputs;
CreateOutputsOfReduceGrad(func_graph, cnode, allreduce_mul_outputs, &bn_reduce_grad_outputs);
if (bn_reduce_grad_outputs.size() != 1) {
MS_LOG(EXCEPTION) << "bn_reduce_grad_outputs has wrong size"
<< " trace: " << trace::DumpSourceLines(cnode);
MS_LOG(EXCEPTION) << "Outputs of bn_reduce_grad has wrong size, should be " << 1 << ", but got "
<< bn_reduce_grad_outputs.size() << ". trace: " << trace::DumpSourceLines(cnode);
}
std::vector<AnfNodePtr> make_tuple_inputs = {NewValueNode(prim::kPrimMakeTuple), bn_reduce_grad_outputs[0],

View File

@ -161,11 +161,13 @@ AnfNodePtr CreateValueNodeOfDeviceNumReciprocal(const FuncGraphPtr &graph, const
MS_EXCEPTION_IF_NULL(graph);
MS_EXCEPTION_IF_NULL(sync_bn_cnode);
if (!AnfAlgo::HasNodeAttr(kDeviceNum, sync_bn_cnode)) {
MS_LOG(EXCEPTION) << "The node [" << sync_bn_cnode->DebugString() << "] does not have attr device_num.";
MS_LOG(EXCEPTION) << "The node [" << sync_bn_cnode->DebugString()
<< "] does not have attr device_num. trace: " << trace::DumpSourceLines(sync_bn_cnode);
}
auto device_num = AnfAlgo::GetNodeAttr<int64_t>(sync_bn_cnode, kDeviceNum);
if (device_num == 0) {
MS_LOG(EXCEPTION) << "The device_num attr of node [" << sync_bn_cnode->DebugString() << "] should not be 0";
MS_LOG(EXCEPTION) << "The device_num attr of node [" << sync_bn_cnode->DebugString()
<< "] should not be 0. trace: " << trace::DumpSourceLines(sync_bn_cnode);
}
MS_LOG(INFO) << "device_num value: " << device_num;
const float device_num_reciprocal = 1.0 / device_num;
@ -224,7 +226,8 @@ AnfNodePtr CreateAllReduceAndMul(const FuncGraphPtr &graph, const AnfNodePtr &al
auto sync_bn_opname = sync_bn_cnode->fullname_with_scope();
auto opid_pos = sync_bn_opname.rfind("-op");
if (opid_pos == std::string::npos || opid_pos + kPositionOffset >= sync_bn_opname.size()) {
MS_LOG(EXCEPTION) << "op[" << sync_bn_cnode->DebugString() << "] has no opid.";
MS_LOG(EXCEPTION) << "Op[" << sync_bn_cnode->DebugString()
<< "] has no opid. trace: " << trace::DumpSourceLines(sync_bn_cnode);
return nullptr;
}
int64_t opid = std::stol(sync_bn_opname.substr(opid_pos + kPositionOffset));

View File

@ -20,6 +20,7 @@
#include "backend/session/anf_runtime_algorithm.h"
#include "ir/primitive.h"
#include "utils/utils.h"
#include "utils/trace_base.h"
namespace mindspore {
namespace opt {
@ -64,16 +65,19 @@ CNodePtr GatherV2DsFission::CreatePad(const FuncGraphPtr &graph, const CNodePtr
auto param_abstract_shape = origin_node->input(1)->Shape();
MS_EXCEPTION_IF_NULL(param_abstract_shape);
if (!param_abstract_shape->isa<abstract::Shape>()) {
MS_LOG(EXCEPTION) << "The node [" << origin_node->DebugString() << "]'s first input has wrong shape type.";
MS_LOG(EXCEPTION) << "The node [" << origin_node->DebugString()
<< "]'s first input has wrong shape type. trace: " << trace::DumpSourceLines(origin_node);
}
auto param_dyn_shape = param_abstract_shape->cast<abstract::ShapePtr>();
ShapeVector shape(param_dyn_shape->shape());
if (shape.empty()) {
MS_LOG(EXCEPTION) << "The shape of node [" << origin_node->DebugString() << "]'s first input is empty.";
MS_LOG(EXCEPTION) << "The shape of node [" << origin_node->DebugString()
<< "]'s first input is empty. trace: " << trace::DumpSourceLines(origin_node);
}
if (shape[shape.size() - 1] == -1) {
MS_LOG(EXCEPTION) << "The node [" << origin_node->DebugString()
<< "]'s first input should not be dynamic, shape:" << shape;
<< "]'s first input should not be dynamic, but got shape:" << shape
<< ". trace: " << trace::DumpSourceLines(origin_node);
}
shape[shape.size() - 1] = SizeToLong(pad_dim_size);
auto type_id = AnfAlgo::GetPrevNodeOutputInferDataType(origin_node, 0);
@ -113,7 +117,8 @@ CNodePtr GatherV2DsFission::CreateGatherV2Ds(const FuncGraphPtr &graph, const CN
MS_EXCEPTION_IF_NULL(origin_node);
MS_EXCEPTION_IF_NULL(pad);
if (origin_node->size() != kGatherInputNum) {
MS_LOG(EXCEPTION) << "In dynamic shape scene, gatherv2 should have 3 inputs";
MS_LOG(EXCEPTION) << "In dynamic shape scene, gatherv2 should have 3 inputs, but got " << origin_node->size()
<< ". trace: " << trace::DumpSourceLines(origin_node);
}
std::vector<AnfNodePtr> gatherv2_inputs = {NewValueNode(std::make_shared<Primitive>(prim::kPrimGather->name())), pad,
origin_node->input(kGatherInputIndicesIndex),

View File

@ -20,6 +20,7 @@
#include "backend/session/anf_runtime_algorithm.h"
#include "frontend/optimizer/opt.h"
#include "backend/optimizer/common/helper.h"
#include "utils/trace_base.h"
namespace mindspore {
namespace opt {
@ -38,7 +39,8 @@ tensor::TensorPtr CreateTensor(const AnfNodePtr &node) {
MS_LOG(ERROR) << "MaxPool3DGradGrad only support NCDHW format, but got " << data_format;
}
if (ksize.size() != kKernelSizeNum) {
MS_LOG(EXCEPTION) << "kernel_size of MaxPool3DGradGrad must be five, but got :" << ksize;
MS_LOG(EXCEPTION) << "kernel_size of MaxPool3DGradGrad must be five, but got " << ksize
<< ". trace: " << trace::DumpSourceLines(node);
}
int64_t d = ksize[kDim2];
int64_t h = ksize[kDim3];

View File

@ -21,6 +21,7 @@
#include "backend/optimizer/common/helper.h"
#include "backend/kernel_compiler/kernel_build_info.h"
#include "utils/utils.h"
#include "utils/trace_base.h"
#include "backend/session/kernel_graph.h"
#include "backend/session/anf_runtime_algorithm.h"
#include "runtime/device/kernel_info.h"
@ -70,21 +71,25 @@ std::vector<ValueNodePtr> ConvertAttrToValueNode(const std::shared_ptr<kernel::O
std::vector<ValueNodePtr> ret = {};
auto attrs = op_info->attrs_ptr();
if (attrs.empty()) {
MS_LOG(EXCEPTION) << "Node(" << cnode->DebugString() << ") doesn't have any attrs.";
MS_LOG(EXCEPTION) << "Node(" << cnode->DebugString()
<< ") doesn't have any attrs. trace: " << trace::DumpSourceLines(cnode);
}
for (const auto &attr : attrs) {
if (!AnfAlgo::HasNodeAttr(attr->name(), cnode)) {
MS_LOG(EXCEPTION) << "Node(" << cnode->DebugString() << ") doesn't have attr(" << attr->name() << ")";
MS_LOG(EXCEPTION) << "Node(" << cnode->DebugString() << ") doesn't have attr(" << attr->name()
<< "). trace: " << trace::DumpSourceLines(cnode);
}
auto attr_value = AnfAlgo::GetNodeAttr<int64_t>(cnode, attr->name());
auto value_node = CreateValueNode(attr_value);
if (value_node == nullptr) {
MS_LOG(EXCEPTION) << "Create value node error, node: " << cnode->DebugString() << ", seed value: " << attr_value;
MS_LOG(EXCEPTION) << "Create value node error, node: " << cnode->DebugString() << ", seed value: " << attr_value
<< ". trace: " << trace::DumpSourceLines(cnode);
}
ret.emplace_back(value_node);
}
if (ret.empty()) {
MS_LOG(EXCEPTION) << "Node(" << cnode->DebugString() << ") doesn't have any matched attrs.";
MS_LOG(EXCEPTION) << "Node(" << cnode->DebugString()
<< ") doesn't have any matched attrs. trace: " << trace::DumpSourceLines(cnode);
}
return ret;
}

View File

@ -35,10 +35,11 @@ size_t GetSmallSplitSize(const AnfNodePtr &split_node, int64_t split_dim, int64_
split_dim += SizeToLong(input_shape.size());
}
if (LongToSize(split_dim) >= input_shape.size()) {
MS_LOG(EXCEPTION) << "The split_dim value should be less than the shape size of input 0";
MS_LOG(EXCEPTION) << "The split_dim value should be less than the shape size of input 0. trace: "
<< trace::DumpSourceLines(split_node);
}
if (num_split == 0) {
MS_LOG(EXCEPTION) << "Divisor 'num_split' should not be 0.";
MS_LOG(EXCEPTION) << "Divisor 'num_split' should not be 0. trace: " << trace::DumpSourceLines(split_node);
}
return input_shape[LongToSize(split_dim)] / LongToSize(num_split);
}
@ -92,7 +93,7 @@ void SetAttrAndAbstractForBaseSplitv(const CNodePtr &origin_cnode, const CNodePt
split_dim += SizeToLong(output_shape.size());
}
if (split_dim < 0) {
MS_LOG(EXCEPTION) << "Error split dim: " << split_dim;
MS_LOG(EXCEPTION) << "Error split dim: " << split_dim << ", trace: " << trace::DumpSourceLines(origin_cnode);
}
auto split_dim_l = LongToSize(split_dim);
auto num_split_l = LongToSize(num_split);

View File

@ -23,6 +23,7 @@
#include "backend/optimizer/common/helper.h"
#include "base/core_ops.h"
#include "utils/utils.h"
#include "utils/trace_base.h"
namespace mindspore {
namespace opt {
@ -70,7 +71,7 @@ bool GetKernelSize(const AnfNodePtr &node, int64_t *kd, int64_t *kh, int64_t *kw
*kh = kernel_size[kDim3];
*kw = kernel_size[kDim4];
} else {
MS_LOG(EXCEPTION) << "Unknown kernel size " << kernel_size.size();
MS_LOG(EXCEPTION) << "Unknown kernel size " << kernel_size.size() << ", trace: " << trace::DumpSourceLines(node);
}
return true;
}
@ -83,22 +84,22 @@ bool GetStrideSize(const AnfNodePtr &node, int64_t *sd, int64_t *sh, int64_t *sw
MS_EXCEPTION_IF_NULL(sh);
MS_EXCEPTION_IF_NULL(sw);
if (AnfAlgo::HasNodeAttr("strides", node->cast<CNodePtr>())) {
auto kernel_size = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(node, "strides");
if (kernel_size.size() == 1) {
*sd = kernel_size[kDim0];
*sh = kernel_size[kDim0];
*sw = kernel_size[kDim0];
} else if (kernel_size.size() == kDHWDimNum) {
*sd = kernel_size[kDim0];
*sh = kernel_size[kDim1];
*sw = kernel_size[kDim2];
} else if (kernel_size.size() == kNCDHWDimNum) {
auto stride_size = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(node, "strides");
if (stride_size.size() == 1) {
*sd = stride_size[kDim0];
*sh = stride_size[kDim0];
*sw = stride_size[kDim0];
} else if (stride_size.size() == kDHWDimNum) {
*sd = stride_size[kDim0];
*sh = stride_size[kDim1];
*sw = stride_size[kDim2];
} else if (stride_size.size() == kNCDHWDimNum) {
// NCDHW
*sd = kernel_size[kDim2];
*sh = kernel_size[kDim3];
*sw = kernel_size[kDim4];
*sd = stride_size[kDim2];
*sh = stride_size[kDim3];
*sw = stride_size[kDim4];
} else {
MS_LOG(EXCEPTION) << "Unknown strides size " << kernel_size.size();
MS_LOG(EXCEPTION) << "Unknown strides size " << stride_size.size() << ", trace: " << trace::DumpSourceLines(node);
}
return true;
}
@ -109,7 +110,7 @@ void GetAttrs(const AnfNodePtr &node, std::vector<int64_t> *pad_list, bool *coun
int64_t *divisor_override) {
MS_EXCEPTION_IF_NULL(node);
if (!AnfAlgo::HasNodeAttr("pad_list", node->cast<CNodePtr>())) {
MS_LOG(EXCEPTION) << "AvgPool3D should has attr pad_list";
MS_LOG(EXCEPTION) << "AvgPool3D should has attr pad_list, trace: " << trace::DumpSourceLines(node);
}
*pad_list = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(node, "pad_list");
if (AnfAlgo::HasNodeAttr("count_include_pad", node->cast<CNodePtr>())) {
@ -259,7 +260,9 @@ const AnfNodePtr AvgPool3DFusion::Process(const FuncGraphPtr &func_graph, const
auto dims_in = AnfAlgo::GetPrevNodeOutputInferShape(avg_pool_3d_node, 0);
auto dims_out = AnfAlgo::GetOutputInferShape(avg_pool_3d_node, 0);
if (dims_in.size() < k5DInferDims || dims_out.size() < k5DInferDims) {
MS_LOG(EXCEPTION) << "AvgPool3D's in_out infer shape dims can not be less " << k5DInferDims;
MS_LOG(EXCEPTION) << "AvgPool3D's in_out infer shape dims can not be less " << k5DInferDims
<< ", but got in_shape is " << dims_in.size() << "-D, out_shape is " << dims_out.size()
<< "-D. trace: " << trace::DumpSourceLines(node);
}
auto fn = SizeToLong(dims_in[kDim0]);
auto fc = SizeToLong(dims_in[kDim1]);
@ -274,14 +277,14 @@ const AnfNodePtr AvgPool3DFusion::Process(const FuncGraphPtr &func_graph, const
int64_t kh;
int64_t kw;
if (!GetKernelSize(avg_pool_3d_node, &kd, &kh, &kw)) {
MS_LOG(EXCEPTION) << "GetK kernel size failed";
MS_LOG(EXCEPTION) << "Get kernel size failed, trace: " << trace::DumpSourceLines(node);
}
// strides
int64_t sd;
int64_t sh;
int64_t sw;
if (!GetStrideSize(avg_pool_3d_node, &sd, &sh, &sw)) {
MS_LOG(EXCEPTION) << "GetK stride size failed";
MS_LOG(EXCEPTION) << "Get stride size failed, trace: " << trace::DumpSourceLines(node);
}
std::vector<int64_t> pad_list;
bool count_include_pad = false;

View File

@ -24,6 +24,7 @@
#include "backend/optimizer/common/helper.h"
#include "base/core_ops.h"
#include "utils/utils.h"
#include "utils/trace_base.h"
namespace mindspore {
namespace opt {
@ -43,22 +44,22 @@ void GetAttrs(const AnfNodePtr &node, std::vector<int64_t> *kernel_size, std::ve
MS_EXCEPTION_IF_NULL(node);
// attr kernel size
if (!AnfAlgo::HasNodeAttr("kernel_size", node->cast<CNodePtr>())) {
MS_LOG(EXCEPTION) << "AvgPool3D should has attr kernel_size";
MS_LOG(EXCEPTION) << "AvgPool3D should has attr kernel_size, trace: " << trace::DumpSourceLines(node);
}
*kernel_size = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(node, "kernel_size");
// attr strides
if (!AnfAlgo::HasNodeAttr("strides", node->cast<CNodePtr>())) {
MS_LOG(EXCEPTION) << "AvgPool3D should has attr strides";
MS_LOG(EXCEPTION) << "AvgPool3D should has attr strides, trace: " << trace::DumpSourceLines(node);
}
*strides = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(node, "strides");
// attr pad_list
if (!AnfAlgo::HasNodeAttr("pad_list", node->cast<CNodePtr>())) {
MS_LOG(EXCEPTION) << "AvgPool3D should has attr pad_list";
MS_LOG(EXCEPTION) << "AvgPool3D should has attr pad_list, trace: " << trace::DumpSourceLines(node);
}
*pad_list = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(node, "pad_list");
// attr origin input shape
if (!AnfAlgo::HasNodeAttr("origin_input_shape", node->cast<CNodePtr>())) {
MS_LOG(EXCEPTION) << "AvgPool3D should has attr origin_input_shape";
MS_LOG(EXCEPTION) << "AvgPool3D should has attr origin_input_shape, trace: " << trace::DumpSourceLines(node);
}
*origin_input_shape = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(node, "origin_input_shape");
// attr count include pad

View File

@ -20,6 +20,7 @@
#include "backend/session/anf_runtime_algorithm.h"
#include "ir/primitive.h"
#include "utils/utils.h"
#include "utils/trace_base.h"
#include "base/core_ops.h"
#include "abstract/abstract_value.h"
#include "backend/optimizer/common/helper.h"
@ -38,7 +39,9 @@ bool CheckSupported(const CNodePtr &conv_back_filter) {
auto x_shape = AnfAlgo::GetPrevNodeOutputInferShape(conv_back_filter, 1);
auto out_shape = AnfAlgo::GetOutputInferShape(conv_back_filter, 0);
if (y_shape.size() != kNCHWShapeSize || x_shape.size() != kNCHWShapeSize || out_shape.size() != kNCHWShapeSize) {
MS_LOG(EXCEPTION) << "The dim of Conv2dBackpropFilter's input and output should be 4";
MS_LOG(EXCEPTION) << "The dim of Conv2dBackpropFilter's input and output should be 4, but got y_shape is "
<< y_shape.size() << "-D, x_shape is " << x_shape.size() << "-D, out_shape is "
<< out_shape.size() << "-D. trace: " << trace::DumpSourceLines(conv_back_filter);
}
const std::set<size_t> kSupportedBatchSize = {32, 256};
if (kSupportedBatchSize.find(x_shape[0]) == kSupportedBatchSize.end()) {

View File

@ -17,6 +17,7 @@
#include "backend/optimizer/ascend/mindir/all_to_all_unify_mindir.h"
#include <vector>
#include <string>
#include "utils/trace_base.h"
#include "backend/session/anf_runtime_algorithm.h"
#include "runtime/hccl_adapter/hccl_adapter.h"
#include "backend/optimizer/common/helper.h"
@ -37,7 +38,8 @@ void ChangePrimitiveToAllToAllV(const AnfNodePtr &node) {
MS_EXCEPTION_IF_NULL(neighbor_exchange);
if (neighbor_exchange->size() == kCNodePrimitiveIdx) {
MS_LOG(EXCEPTION) << "Invalid cnode " << node->DebugString() << " input size " << neighbor_exchange->size();
MS_LOG(EXCEPTION) << "Inputs should not be empty for cnode " << node->DebugString()
<< ". trace: " << trace::DumpSourceLines(neighbor_exchange);
}
auto prim = GetValueNode<PrimitivePtr>(neighbor_exchange->input(kCNodePrimitiveIdx));
@ -62,7 +64,8 @@ CNodePtr AllToAllUnifyMindIR::CreateSplitNode(const FuncGraphPtr &graph, const C
int64_t split_dim = AnfAlgo::GetNodeAttr<int64_t>(all_to_all, kAttrSplitDim);
if (all_to_all->size() <= kAllToAllInputIdx) {
MS_LOG(EXCEPTION) << "Invalid cnode " << all_to_all->DebugString() << " input size " << all_to_all->size();
MS_LOG(EXCEPTION) << "Inputs should not be empty for cnode " << all_to_all->DebugString()
<< ". trace: " << trace::DumpSourceLines(all_to_all);
}
auto all_to_all_input = all_to_all->input(kAllToAllInputIdx);
std::vector<AnfNodePtr> split_input = {NewValueNode(std::make_shared<Primitive>(prim::kPrimSplitV->name())),
@ -73,11 +76,12 @@ CNodePtr AllToAllUnifyMindIR::CreateSplitNode(const FuncGraphPtr &graph, const C
auto shape = AnfAlgo::GetOutputInferShape(all_to_all_input, 0);
split_dim = NormalizeDim(shape, split_dim);
if (SizeToLong(shape.size()) <= split_dim) {
MS_LOG(EXCEPTION) << "Invalid split dim " << split_dim << " is over the shape size " << shape.size();
MS_LOG(EXCEPTION) << "Invalid split dim " << split_dim << " is over the shape size " << shape.size()
<< ". trace: " << trace::DumpSourceLines(all_to_all);
}
if (split_count == 0 || shape[LongToSize(split_dim)] % static_cast<size_t>(split_count) != 0) {
MS_LOG(EXCEPTION) << "Invalid split count " << split_count << " cannot be divisible by shape[" << split_dim
<< "] = " << shape[LongToSize(split_dim)];
<< "] = " << shape[LongToSize(split_dim)] << ". trace: " << trace::DumpSourceLines(all_to_all);
}
shape[LongToSize(split_dim)] /= static_cast<size_t>(split_count);
std::vector<TypeId> dtypes(split_count, dtype);
@ -101,7 +105,8 @@ CNodePtr AllToAllUnifyMindIR::CreateAllToAllvNode(const FuncGraphPtr &graph, con
std::vector<AnfNodePtr> split_outputs;
CreateMultipleOutputsOfAnfNode(graph, split, static_cast<size_t>(split_count), &split_outputs);
if (split_outputs.empty()) {
MS_LOG(EXCEPTION) << "The node " << split->DebugString() << " should have at least one output, but got 0.";
MS_LOG(EXCEPTION) << "The node " << split->DebugString()
<< " should have at least one output, but got 0. trace: " << trace::DumpSourceLines(split);
}
std::vector<AnfNodePtr> all_to_all_v_input = {NewValueNode(std::make_shared<Primitive>(kAllToAllVOpName))};
(void)all_to_all_v_input.insert(all_to_all_v_input.end(), split_outputs.begin(), split_outputs.end());
@ -135,7 +140,8 @@ CNodePtr AllToAllUnifyMindIR::CreateConcatNode(const FuncGraphPtr &graph, const
std::vector<AnfNodePtr> all_to_all_v_outputs;
CreateMultipleOutputsOfAnfNode(graph, all_to_all_v, static_cast<size_t>(split_count), &all_to_all_v_outputs);
if (all_to_all_v_outputs.empty()) {
MS_LOG(EXCEPTION) << "The node " << all_to_all_v->DebugString() << " should have at least one output, but got 0.";
MS_LOG(EXCEPTION) << "The node " << all_to_all_v->DebugString()
<< " should have at least one output, but got 0. trace: " << trace::DumpSourceLines(all_to_all_v);
}
std::vector<AnfNodePtr> concat_input = {NewValueNode(std::make_shared<Primitive>(kConcatOpName))};
(void)concat_input.insert(concat_input.end(), all_to_all_v_outputs.begin(), all_to_all_v_outputs.end());
@ -144,7 +150,8 @@ CNodePtr AllToAllUnifyMindIR::CreateConcatNode(const FuncGraphPtr &graph, const
auto single_shape = AnfAlgo::GetOutputInferShape(all_to_all_v_outputs[0], 0);
concat_dim = NormalizeDim(single_shape, concat_dim);
if (LongToSize(concat_dim) >= single_shape.size()) {
MS_LOG(EXCEPTION) << "Invalid concat dim " << concat_dim << " is greater than shape size " << single_shape.size();
MS_LOG(EXCEPTION) << "Invalid concat dim " << concat_dim << " is greater than shape size " << single_shape.size()
<< ". trace: " << trace::DumpSourceLines(all_to_all);
}
single_shape[LongToSize(concat_dim)] *= static_cast<size_t>(split_count);
AnfAlgo::SetOutputInferTypeAndShape({AnfAlgo::GetOutputInferDataType(all_to_all_v_outputs[0], 0)}, {single_shape},

View File

@ -24,6 +24,7 @@
#include "utils/utils.h"
#include "utils/check_convert_utils.h"
#include "utils/convert_utils_base.h"
#include "utils/trace_base.h"
#include "backend/optimizer/common/helper.h"
#include "runtime/device/kernel_info.h"
#include "backend/session/anf_runtime_algorithm.h"
@ -43,15 +44,15 @@ std::vector<int64_t> GetInputXShape(const AnfNodePtr &node) {
return shapes;
}
int64_t windowed_output_size(int64_t input_size, int64_t ksize, int64_t stride, PadMode pad_mode, int64_t *pad_before,
int64_t *pad_after) {
int64_t windowed_output_size(const AnfNodePtr &node, int64_t input_size, int64_t ksize, int64_t stride,
PadMode pad_mode, int64_t *pad_before, int64_t *pad_after) {
MS_EXCEPTION_IF_NULL(pad_before);
MS_EXCEPTION_IF_NULL(pad_after);
int64_t output = 0;
*pad_before = 0;
*pad_after = 0;
if (stride == 0) {
MS_LOG(EXCEPTION) << "The stride of AvgPoolGrad should not be 0.";
MS_LOG(EXCEPTION) << "The stride of AvgPoolGrad should not be 0. trace: " << trace::DumpSourceLines(node);
return 0;
}
if (pad_mode == PadMode::VALID) {
@ -62,13 +63,15 @@ int64_t windowed_output_size(int64_t input_size, int64_t ksize, int64_t stride,
*pad_before = pad_need / 2;
*pad_after = pad_need - *pad_before;
} else {
MS_LOG(EXCEPTION) << "The pad mode of AvgPoolGrad should be SAME or VALID, but got PAD";
MS_LOG(EXCEPTION) << "The pad mode of AvgPoolGrad should be SAME or VALID, but got PAD. trace: "
<< trace::DumpSourceLines(node);
}
return output;
}
std::vector<std::vector<float>> GetAssistInputMatrix(const std::vector<int64_t> &x_shape, int64_t pad_top,
int64_t pad_bottom, int64_t pad_left, int64_t pad_right) {
std::vector<std::vector<float>> GetAssistInputMatrix(const AnfNodePtr &node, const std::vector<int64_t> &x_shape,
int64_t pad_top, int64_t pad_bottom, int64_t pad_left,
int64_t pad_right) {
// `assist_input_matrix` is a 2d matrix with input_shape after padding,
// the value of element which is padded is 0, else are 1.
// For each element of output, it is mapped for slide window: `[h*h_stride : h*h_stride + h_ksize,
@ -76,7 +79,7 @@ std::vector<std::vector<float>> GetAssistInputMatrix(const std::vector<int64_t>
// number of input that associate with output element.
std::vector<std::vector<float>> assist_input_matrix;
if (x_shape.size() < kShapeDimNum) {
MS_LOG(EXCEPTION) << "The dim of x_shape should not be less than 4.";
MS_LOG(EXCEPTION) << "The dim of x_shape should not be less than 4. trace: " << trace::DumpSourceLines(node);
}
std::vector<int64_t> in_shape_after_padding_2d = {x_shape[kDim2] + pad_top + pad_bottom,
x_shape[kDim3] + pad_left + pad_right};
@ -97,22 +100,24 @@ std::vector<std::vector<float>> GetAssistInputMatrix(const std::vector<int64_t>
return assist_input_matrix;
}
ValueNodePtr CreateMeanMatrixValueNode(const FuncGraphPtr &func_graph, const std::vector<int64_t> &x_shape,
const std::vector<int64_t> &k_size, const std::vector<int64_t> &stride,
const PadMode pad_mode, const TypeId x_dtype) {
ValueNodePtr CreateMeanMatrixValueNode(const FuncGraphPtr &func_graph, const AnfNodePtr &node,
const std::vector<int64_t> &x_shape, const std::vector<int64_t> &k_size,
const std::vector<int64_t> &stride, const PadMode pad_mode,
const TypeId x_dtype) {
MS_EXCEPTION_IF_NULL(func_graph);
auto kernel_graph = func_graph->cast<KernelGraphPtr>();
MS_EXCEPTION_IF_NULL(kernel_graph);
if (x_shape.size() != kShapeDimNum || k_size.size() != kShapeDimNum || stride.size() != kShapeDimNum) {
MS_LOG(EXCEPTION) << "The dim of x_shape, kernel_size and strides of AvgPoolGrad should be 4, but got x_shape:"
<< x_shape << ", kernel_size:" << k_size << ", strides:" << stride;
<< x_shape << ", kernel_size:" << k_size << ", strides:" << stride
<< ". trace: " << trace::DumpSourceLines(node);
}
int64_t pad_top, pad_bottom, pad_left, pad_right;
int64_t h_output =
windowed_output_size(x_shape[kDim2], k_size[kDim2], stride[kDim2], pad_mode, &pad_top, &pad_bottom);
windowed_output_size(node, x_shape[kDim2], k_size[kDim2], stride[kDim2], pad_mode, &pad_top, &pad_bottom);
int64_t w_output =
windowed_output_size(x_shape[kDim3], k_size[kDim3], stride[kDim3], pad_mode, &pad_left, &pad_right);
auto assist_input_matrix = GetAssistInputMatrix(x_shape, pad_top, pad_bottom, pad_left, pad_right);
windowed_output_size(node, x_shape[kDim3], k_size[kDim3], stride[kDim3], pad_mode, &pad_left, &pad_right);
auto assist_input_matrix = GetAssistInputMatrix(node, x_shape, pad_top, pad_bottom, pad_left, pad_right);
// calculate output
std::vector<float> hw_output(h_output * w_output, 0.0);
@ -153,14 +158,15 @@ ValueNodePtr CreateMeanMatrixValueNode(const FuncGraphPtr &func_graph, const std
return mean_matrix_vnode;
}
ValueNodePtr CreateKernelMatrixValueNode(const FuncGraphPtr &func_graph, const std::vector<int64_t> &x_shape,
const std::vector<int64_t> &k_size, const TypeId x_dtype) {
ValueNodePtr CreateKernelMatrixValueNode(const FuncGraphPtr &func_graph, const AnfNodePtr &node,
const std::vector<int64_t> &x_shape, const std::vector<int64_t> &k_size,
const TypeId x_dtype) {
MS_EXCEPTION_IF_NULL(func_graph);
auto kernel_graph = func_graph->cast<KernelGraphPtr>();
MS_EXCEPTION_IF_NULL(kernel_graph);
if (x_shape.size() != kShapeDimNum || k_size.size() != kShapeDimNum) {
MS_LOG(EXCEPTION) << "The dim of x_shape and kernel_size of AvgPoolGrad should be 4, but got x_shape:" << x_shape
<< ", kernel_size:" << k_size;
<< ", kernel_size:" << k_size << ". trace: " << trace::DumpSourceLines(node);
}
std::vector<int64_t> kernel_shape = {1, x_shape[kDim1], k_size[kDim2], k_size[kDim3]};
auto data_size = std::accumulate(kernel_shape.begin(), kernel_shape.end(), int64_t(1), std::multiplies<int64_t>());
@ -197,8 +203,8 @@ const AnfNodePtr AvgPoolGradUnifyMindIR::Process(const FuncGraphPtr &graph, cons
auto pad_mode = PadMode(AnfAlgo::GetNodeAttr<int64_t>(avgpool_grad, kAttrPadMode));
auto x_shape_vnode = CreateShapeValueNode(graph, x_shape);
auto mean_matrix_vnode = CreateMeanMatrixValueNode(graph, x_shape, k_size, stride, pad_mode, x_dtype);
auto kernel_matrix_vnode = CreateKernelMatrixValueNode(graph, x_shape, k_size, x_dtype);
auto mean_matrix_vnode = CreateMeanMatrixValueNode(graph, node, x_shape, k_size, stride, pad_mode, x_dtype);
auto kernel_matrix_vnode = CreateKernelMatrixValueNode(graph, node, x_shape, k_size, x_dtype);
std::vector<AnfNodePtr> avgpool_grad_vm_inputs = {NewValueNode(std::make_shared<Primitive>(kAvgPoolGradVmOpName)),
x_shape_vnode, avgpool_grad->input(3), mean_matrix_vnode,
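
The AvgPoolGrad helpers above previously had no handle on the graph node, so this file also threads an AnfNodePtr parameter through windowed_output_size, GetAssistInputMatrix, CreateMeanMatrixValueNode, and CreateKernelMatrixValueNode purely so their exceptions can carry the trace. A hedged sketch of that refactor (simplified signature, not the real helper):

// Before: int64_t Helper(int64_t input_size, int64_t stride);
// After: the node is passed down only for error reporting.
int64_t Helper(const AnfNodePtr &node, int64_t input_size, int64_t stride) {
  if (stride == 0) {
    MS_LOG(EXCEPTION) << "The stride should not be 0. trace: " << trace::DumpSourceLines(node);
  }
  return input_size / stride;
}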

View File

@ -24,6 +24,7 @@
#include "utils/utils.h"
#include "utils/ms_context.h"
#include "utils/check_convert_utils.h"
#include "utils/trace_base.h"
#include "backend/optimizer/common/helper.h"
#include "runtime/device/kernel_info.h"
#include "backend/session/anf_runtime_algorithm.h"
@ -31,7 +32,7 @@
namespace mindspore {
namespace opt {
namespace {
constexpr size_t kConv2DBackpropInputNum = 4;
constexpr size_t kConv2DBackpropInputNum = 3;
constexpr size_t kConv2DAxisNum = 4;
constexpr auto kAttrOffsetA = "offset_a";
constexpr auto kAttrPadList = "pad_list";
@ -56,11 +57,11 @@ bool NeedUpdate(const CNodePtr &conv2d, std::vector<size_t> in_shape, std::vecto
int64_t data_format;
bool result = CheckAndConvertUtils::GetDataFormatEnumValue(data_format_ptr, &data_format);
if (!result || data_format != Format::NCHW) {
MS_LOG(EXCEPTION) << "Conv2D only supports NCHW when group > 1";
MS_LOG(EXCEPTION) << "Conv2D only supports NCHW when group > 1, trace: " << trace::DumpSourceLines(conv2d);
}
if (in_shape.size() != kConv2DAxisNum || out_shape.size() != kConv2DAxisNum) {
MS_LOG(EXCEPTION) << "Conv2D's input and output should have 4 axis, but got input axis num: " << in_shape.size()
<< "output axis num: " << out_shape.size();
<< "output axis num: " << out_shape.size() << ". trace: " << trace::DumpSourceLines(conv2d);
}
auto in_channel = in_shape[kDim1];
auto out_channel = out_shape[kDim1];
@ -114,7 +115,7 @@ CNodePtr CreateTranspose(const FuncGraphPtr &graph, const CNodePtr &conv2d, cons
auto out_shape = AnfAlgo::GetOutputInferShape(input_node, 0);
if (out_shape.size() != kConv2DAxisNum) {
MS_LOG(EXCEPTION) << "Conv2D's output axis number should be " << kConv2DAxisNum << ", but got "
<< out_shape.size();
<< out_shape.size() << ". trace: " << trace::DumpSourceLines(conv2d);
}
std::swap(out_shape[kDim0], out_shape[kDim1]);
auto shapes = {out_shape};
@ -226,7 +227,7 @@ CNodePtr Conv2DBackpropInputUnifyMindIR::CreateDepthwiseConv2DBackpropInput(cons
MS_EXCEPTION_IF_NULL(conv2d_backin);
CNodePtr depth_conv_backin = nullptr;
if (conv2d_backin->inputs().size() == kConv2DBackpropInputNum) {
if (AnfUtils::GetInputTensorNum(conv2d_backin) == kConv2DBackpropInputNum) {
std::vector<AnfNodePtr> depth_conv_backin_inputs = {
NewValueNode(std::make_shared<Primitive>(kDepthwiseConv2dNativeBackpropInputOpName)),
conv2d_backin->input(kIndex3), transpose, conv2d_backin->input(kIndex1)};
@ -265,11 +266,12 @@ const AnfNodePtr Conv2DBackpropInputUnifyMindIR::Process(const FuncGraphPtr &gra
return nullptr;
}
auto input_size = conv2d_backin->inputs().size();
auto input_size = AnfUtils::GetInputTensorNum(conv2d_backin);
// In pynative mode, input_sizes input will be convert to attr if Conv2DBackpropInput is a forward op.
if (input_size != kConv2DBackpropInputNum && input_size != kConv2DBackpropInputNum - 1) {
MS_LOG(EXCEPTION) << "Conv2DBackpropInput's input number should be " << (kConv2DBackpropInputNum - 1) << " or "
<< (kConv2DBackpropInputNum - 2) << ", but got " << (input_size - 1);
MS_LOG(EXCEPTION) << "Conv2DBackpropInput's input number should be " << kConv2DBackpropInputNum << " or "
<< (kConv2DBackpropInputNum - 1) << ", but got " << input_size
<< ". trace: " << trace::DumpSourceLines(node);
}
auto transpose = CreateTranspose(graph, conv2d_backin, conv2d_backin->input(kIndex2), true, *this);
auto depth_conv_backin = CreateDepthwiseConv2DBackpropInput(graph, conv2d_backin, transpose);
@ -281,9 +283,10 @@ CNodePtr Conv2DBackpropFilterUnifyMindIR::CreateDepthwiseConv2DBackpropFilter(co
const CNodePtr &conv2d_backfil) const {
MS_EXCEPTION_IF_NULL(graph);
MS_EXCEPTION_IF_NULL(conv2d_backfil);
if (conv2d_backfil->inputs().size() != kConv2DBackpropInputNum) {
MS_LOG(EXCEPTION) << "Conv2DBackpropFilter's input number should be " << (kConv2DBackpropInputNum - 1)
<< ", but got " << (conv2d_backfil->inputs().size() - 1);
if (AnfUtils::GetInputTensorNum(conv2d_backfil) != kConv2DBackpropInputNum) {
MS_LOG(EXCEPTION) << "Conv2DBackpropFilter's input number should be " << kConv2DBackpropInputNum << ", but got "
<< AnfUtils::GetInputTensorNum(conv2d_backfil)
<< ". trace: " << trace::DumpSourceLines(conv2d_backfil);
}
auto filter_size_node = conv2d_backfil->input(kIndex3);
MS_EXCEPTION_IF_NULL(filter_size_node);
@ -307,7 +310,7 @@ CNodePtr Conv2DBackpropFilterUnifyMindIR::CreateDepthwiseConv2DBackpropFilter(co
std::vector<size_t> out_shape = AnfAlgo::GetOutputInferShape(conv2d_backfil, 0);
if (out_shape.size() != kConv2DAxisNum) {
MS_LOG(EXCEPTION) << "Conv2DBackpropFilter's output axis number should be " << kConv2DAxisNum << ", but got "
<< out_shape.size();
<< out_shape.size() << ". trace: " << trace::DumpSourceLines(conv2d_backfil);
}
std::swap(out_shape[0], out_shape[1]);
auto shapes = {out_shape};
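
A note on the constant change in this file: cnode->inputs() includes the primitive value node at index 0, while AnfUtils::GetInputTensorNum counts only the tensor inputs. Switching the checks from inputs().size() to GetInputTensorNum is what lets kConv2DBackpropInputNum drop from 4 to 3. A comment sketch of the two counts (input order shown is illustrative):

// inputs():            {Primitive, dout, weight, input_sizes}  -> size() == 4
// GetInputTensorNum(): {dout, weight, input_sizes}             -> 3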

View File

@ -69,11 +69,11 @@ ValueNodePtr CreateKeepPorbValueNode(const FuncGraphPtr &func_graph, const AnfNo
MS_EXCEPTION_IF_NULL(cnode);
// Step1: get keep_prob
if (!AnfAlgo::HasNodeAttr(kKeepProb, cnode)) {
MS_LOG(EXCEPTION) << "Dropout node does not have attr: keep_prob.";
MS_LOG(EXCEPTION) << "Dropout node does not have attr: keep_prob. trace: " << trace::DumpSourceLines(node);
}
if (AnfAlgo::GetCNodeName(cnode) == kDropoutOpName) {
if (!AnfAlgo::HasNodeAttr(kSeed0, cnode) || !AnfAlgo::HasNodeAttr(kSeed1, cnode)) {
MS_LOG(EXCEPTION) << "Dropout node does not have attr: seed0 or seed1.";
MS_LOG(EXCEPTION) << "Dropout node does not have attr: seed0 or seed1. trace: " << trace::DumpSourceLines(node);
}
}
auto keep_prob = AnfAlgo::GetNodeAttr<float>(node, kKeepProb);

View File

@ -34,8 +34,9 @@ void FakeLearnedScaleQuantPerLayerGradUnifyMindIR::CreateOutputsOfLSQPerLayerGra
MS_EXCEPTION_IF_NULL(lsq_perlayer_grad_node);
const auto &lsq_perlayer_grad_inputs = lsq_perlayer_grad_node->inputs();
if (lsq_perlayer_grad_inputs.size() < kFakeLearnedScaleQuantGradInputNum) {
MS_LOG(EXCEPTION) << "lsq_perlayer_grad_node has wrong inputs size."
<< " trace: " << trace::DumpSourceLines(lsq_perlayer_grad_node);
MS_LOG(EXCEPTION) << "Lsq_perlayer_grad_node has wrong inputs size, should be not less than "
<< kFakeLearnedScaleQuantGradInputNum << ", but got " << lsq_perlayer_grad_inputs.size()
<< ". trace: " << trace::DumpSourceLines(lsq_perlayer_grad_node);
}
std::vector<AnfNodePtr> lsq_perlayer_grad_d_inputs = {
NewValueNode(std::make_shared<Primitive>(kFakeLearnedScaleQuantPerLayerGradDOpName)),
@ -65,12 +66,14 @@ void FakeLearnedScaleQuantPerLayerGradUnifyMindIR::CreateOutputsOfLSQPerLayerRed
MS_EXCEPTION_IF_NULL(lsq_perlayer_reduce_grad_outputs);
const auto &lsq_perlayer_grad_inputs = lsq_perlayer_grad_node->inputs();
if (lsq_perlayer_grad_inputs.size() < kFakeLearnedScaleQuantGradInputNum) {
MS_LOG(EXCEPTION) << "lsq_perlayer_grad_node has wrong inputs size"
<< " trace: " << trace::DumpSourceLines(lsq_perlayer_grad_node);
MS_LOG(EXCEPTION) << "Lsq_perlayer_grad_node has wrong inputs size, should be not less than "
<< kFakeLearnedScaleQuantGradInputNum << ", but got " << lsq_perlayer_grad_inputs.size()
<< ". trace: " << trace::DumpSourceLines(lsq_perlayer_grad_node);
}
if (lsq_perlayer_grad_d_outputs.size() != kFakeLearnedScaleQuantGradDOutputNum) {
MS_LOG(EXCEPTION) << "lsq_perlayer_grad_d_outputs has wrong size"
<< " trace: " << trace::DumpSourceLines(lsq_perlayer_grad_node);
MS_LOG(EXCEPTION) << "Lsq_perlayer_grad_d_outputs has wrong inputs size, should be "
<< kFakeLearnedScaleQuantGradDOutputNum << ", but got " << lsq_perlayer_grad_d_outputs.size()
<< ". trace: " << trace::DumpSourceLines(lsq_perlayer_grad_node);
}
std::vector<AnfNodePtr> lsq_perlayer_reduce_grad_inputs = {
NewValueNode(std::make_shared<Primitive>(kFakeLearnedScaleQuantPerLayerGradDReduceOpName)),
@ -93,8 +96,9 @@ void FakeLearnedScaleQuantPerChannelGradUnifyMindIR::CreateOutputsOfLSQPerChanne
MS_EXCEPTION_IF_NULL(lsq_perchannel_grad_node);
const auto &lsq_perchannel_grad_inputs = lsq_perchannel_grad_node->inputs();
if (lsq_perchannel_grad_inputs.size() < kFakeLearnedScaleQuantGradInputNum) {
MS_LOG(EXCEPTION) << "lsq_perchannel_grad_node has wrong inputs size."
<< " trace: " << trace::DumpSourceLines(lsq_perchannel_grad_node);
MS_LOG(EXCEPTION) << "Lsq_perchannel_grad_node has wrong inputs size, should be not less than "
<< kFakeLearnedScaleQuantGradInputNum << ", but got " << lsq_perchannel_grad_inputs.size()
<< ". trace: " << trace::DumpSourceLines(lsq_perchannel_grad_node);
}
std::vector<AnfNodePtr> lsq_perchannel_grad_d_inputs = {
NewValueNode(std::make_shared<Primitive>(kFakeLearnedScaleQuantPerChannelGradDOpName)),
@ -125,12 +129,14 @@ void FakeLearnedScaleQuantPerChannelGradUnifyMindIR::CreateOutputsOfLSQPerChanne
MS_EXCEPTION_IF_NULL(lsq_perchannel_reduce_grad_outputs);
const auto &lsq_perchannel_grad_inputs = lsq_perchannel_grad_node->inputs();
if (lsq_perchannel_grad_inputs.size() < kFakeLearnedScaleQuantGradInputNum) {
MS_LOG(EXCEPTION) << "lsq_perchannel_grad_node has wrong inputs size"
<< " trace: " << trace::DumpSourceLines(lsq_perchannel_grad_node);
MS_LOG(EXCEPTION) << "Lsq_perchannel_grad_node has wrong inputs size, should be not less than "
<< kFakeLearnedScaleQuantGradInputNum << ", but got " << lsq_perchannel_grad_inputs.size()
<< ". trace: " << trace::DumpSourceLines(lsq_perchannel_grad_node);
}
if (lsq_perchannel_grad_d_outputs.size() != kFakeLearnedScaleQuantGradDOutputNum) {
MS_LOG(EXCEPTION) << "lsq_perchannel_grad_d_outputs has wrong size"
<< " trace: " << trace::DumpSourceLines(lsq_perchannel_grad_node);
MS_LOG(EXCEPTION) << "Lsq_perchannel_grad_d_outputs has wrong inputs size, should be "
<< kFakeLearnedScaleQuantGradDOutputNum << ", but got " << lsq_perchannel_grad_inputs.size()
<< ". trace: " << trace::DumpSourceLines(lsq_perchannel_grad_node);
}
std::vector<AnfNodePtr> lsq_perchannel_reduce_grad_inputs = {
NewValueNode(std::make_shared<Primitive>(kFakeLearnedScaleQuantPerChannelGradDReduceOpName)),
@ -164,16 +170,18 @@ const AnfNodePtr FakeLearnedScaleQuantPerLayerGradUnifyMindIR::Process(const Fun
std::vector<AnfNodePtr> lsq_perlayer_grad_d_outputs;
CreateOutputsOfLSQPerLayerGradD(func_graph, cnode, &lsq_perlayer_grad_d_outputs);
if (lsq_perlayer_grad_d_outputs.size() != kFakeLearnedScaleQuantGradOutputNum) {
MS_LOG(EXCEPTION) << "fake_learned_scale_quant_perlayer_grad_d_outputs has wrong size"
<< " trace: " << trace::DumpSourceLines(node);
MS_LOG(EXCEPTION) << "Fake_learned_scale_quant_perlayer_grad_d_outputs has wrong inputs size, should be "
<< kFakeLearnedScaleQuantGradOutputNum << ", but got " << lsq_perlayer_grad_d_outputs.size()
<< ". trace: " << trace::DumpSourceLines(node);
}
std::vector<AnfNodePtr> lsq_perlayer_reduce_grad_outputs;
CreateOutputsOfLSQPerLayerReduceGrad(func_graph, cnode, lsq_perlayer_grad_d_outputs,
&lsq_perlayer_reduce_grad_outputs);
if (lsq_perlayer_reduce_grad_outputs.size() != kSingleOutputNum) {
MS_LOG(EXCEPTION) << "fake_learned_scale_quant_perlayer_reduce_grad_outputs has wrong size"
<< " trace: " << trace::DumpSourceLines(node);
MS_LOG(EXCEPTION) << "Fake_learned_scale_quant_perlayer_reduce_grad_outputs has wrong inputs size, should be "
<< kSingleOutputNum << ", but got " << lsq_perlayer_reduce_grad_outputs.size()
<< ". trace: " << trace::DumpSourceLines(node);
}
std::vector<AnfNodePtr> make_tuple_inputs = {NewValueNode(prim::kPrimMakeTuple), lsq_perlayer_grad_d_outputs[0],
@ -201,16 +209,18 @@ const AnfNodePtr FakeLearnedScaleQuantPerChannelGradUnifyMindIR::Process(const F
std::vector<AnfNodePtr> lsq_perchannel_grad_d_outputs;
CreateOutputsOfLSQPerChannelGradD(func_graph, cnode, &lsq_perchannel_grad_d_outputs);
if (lsq_perchannel_grad_d_outputs.size() != kFakeLearnedScaleQuantGradOutputNum) {
MS_LOG(EXCEPTION) << "fake_learned_scale_quant_perchannel_grad_d_outputs has wrong size"
<< " trace: " << trace::DumpSourceLines(node);
MS_LOG(EXCEPTION) << "Fake_learned_scale_quant_perchannel_grad_d_outputs has wrong inputs size, should be "
<< kFakeLearnedScaleQuantGradOutputNum << ", but got " << lsq_perchannel_grad_d_outputs.size()
<< ". trace: " << trace::DumpSourceLines(node);
}
std::vector<AnfNodePtr> lsq_perchannel_reduce_grad_outputs;
CreateOutputsOfLSQPerChannelReduceGrad(func_graph, cnode, lsq_perchannel_grad_d_outputs,
&lsq_perchannel_reduce_grad_outputs);
if (lsq_perchannel_reduce_grad_outputs.size() != kSingleOutputNum) {
MS_LOG(EXCEPTION) << "fake_learned_scale_quant_perchannel_reduce_grad_outputs has wrong size"
<< " trace: " << trace::DumpSourceLines(node);
MS_LOG(EXCEPTION) << "Fake_learned_scale_quant_perchannel_reduce_grad_outputs has wrong inputs size, should be "
<< kSingleOutputNum << ", but got " << lsq_perchannel_reduce_grad_outputs.size()
<< ". trace: " << trace::DumpSourceLines(node);
}
std::vector<AnfNodePtr> make_tuple_inputs = {NewValueNode(prim::kPrimMakeTuple), lsq_perchannel_grad_d_outputs[0],
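All of the rewritten messages in this change follow one pattern: state the expectation, print the actual value, then append the source trace. A minimal sketch of that pattern (the node, constant, and vector names here are illustrative, not from this commit):

if (outputs.size() != kExpectedOutputNum) {
  // DumpSourceLines is assumed to render the Python source lines recorded in the
  // node's debug info, so the exception points back at the user's script.
  MS_LOG(EXCEPTION) << "The node [" << node->DebugString() << "] has wrong size, should be "
                    << kExpectedOutputNum << ", but got " << outputs.size()
                    << ". trace: " << trace::DumpSourceLines(node);
}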

View File

@ -21,6 +21,7 @@
#include "utils/utils.h"
#include "utils/ms_context.h"
#include "utils/trace_base.h"
#include "backend/optimizer/common/helper.h"
#include "runtime/device/kernel_info.h"
#include "backend/session/anf_runtime_algorithm.h"
@ -37,7 +38,7 @@ CNodePtr GetMaxPool(const CNodePtr &maxpool_grad) {
MS_EXCEPTION_IF_NULL(maxpool_grad);
if (maxpool_grad->inputs().size() != kMaxPoolGradInputNum) {
MS_LOG(EXCEPTION) << "MaxPoolGrad's input number should be " << (kMaxPoolGradInputNum - 1) << ", but got "
<< (maxpool_grad->inputs().size() - 1);
<< (maxpool_grad->inputs().size() - 1) << ". trace: " << trace::DumpSourceLines(maxpool_grad);
}
auto maxpool_anf = maxpool_grad->input(kIndex2);
MS_EXCEPTION_IF_NULL(maxpool_anf);
@ -50,7 +51,7 @@ CNodePtr MaxPool2MaxPoolWithArgmax::CreateMaxPoolWithArgmax(const FuncGraphPtr &
MS_EXCEPTION_IF_NULL(maxpool);
if (maxpool->inputs().size() != kMaxPoolInputNum) {
MS_LOG(EXCEPTION) << "MaxPool's input number should be " << (kMaxPoolInputNum - 1) << ", but got "
<< (maxpool->inputs().size() - 1);
<< (maxpool->inputs().size() - 1) << ". trace: " << trace::DumpSourceLines(maxpool);
}
std::vector<AnfNodePtr> maxpool_argmax_inputs = {NewValueNode(std::make_shared<Primitive>(kMaxPoolWithArgmaxOpName)),
maxpool->input(kIndex1)};
@ -74,7 +75,7 @@ CNodePtr MaxPool2MaxPoolWithArgmax::CreateMaxPoolGradWithArgmax(
MS_EXCEPTION_IF_NULL(maxpool_grad);
if (maxpool_grad->inputs().size() != kMaxPoolGradInputNum) {
MS_LOG(EXCEPTION) << "MaxPoolGrad's input number should be " << (kMaxPoolGradInputNum - 1) << ", but got "
<< (maxpool_grad->inputs().size() - 1);
<< (maxpool_grad->inputs().size() - 1) << ". trace: " << trace::DumpSourceLines(maxpool_grad);
}
// MaxPoolGrad's inputs are {input, output, grad_input}, MaxPoolGradWithArgmax's inputs are
// {input, grad_input, argmax_output}
@ -95,11 +96,11 @@ void MaxPool2MaxPoolWithArgmax::SetNodeAttrs(const CNodePtr &maxpool, const CNod
auto ksize = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(maxpool, kAttrKernelSize);
if (strides.size() != kMaxPoolAttrAxisNum) {
MS_LOG(EXCEPTION) << "MaxPool's attr strides has wrong axis number, should be " << kMaxPoolAttrAxisNum
<< ", but got " << strides.size();
<< ", but got " << strides.size() << ". trace: " << trace::DumpSourceLines(maxpool);
}
if (ksize.size() != kMaxPoolAttrAxisNum) {
MS_LOG(EXCEPTION) << "MaxPool's attr ksize has wrong axis number, should be " << kMaxPoolAttrAxisNum << ", but got "
<< ksize.size();
<< ksize.size() << ". trace: " << trace::DumpSourceLines(maxpool);
}
// note that strides and ksize change from (1, 1, x, y) to (1, x, y, 1)
strides[kIndex1] = strides[kIndex2];
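The swap above is the first step of the (1, 1, x, y) -> (1, x, y, 1) reordering the comment describes; the remaining assignments are cut off in this hunk. A sketch of the full permutation with concrete values, assuming the usual kIndex3 constant and that ksize is handled the same way:

std::vector<int64_t> strides = {1, 1, 2, 3};  // NCHW order: (N, C, H, W)
strides[kIndex1] = strides[kIndex2];          // {1, 2, 2, 3}
strides[kIndex2] = strides[kIndex3];          // {1, 2, 3, 3}
strides[kIndex3] = 1;                         // {1, 2, 3, 1}, i.e. NHWC order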

View File

@ -22,6 +22,7 @@
#include "backend/session/anf_runtime_algorithm.h"
#include "base/core_ops.h"
#include "utils/utils.h"
#include "utils/trace_base.h"
namespace mindspore {
namespace opt {
@ -66,7 +67,9 @@ const AnfNodePtr MaxPoolWithArgmaxUnifyMindIR::Process(const FuncGraphPtr &graph
auto output_shape = AnfAlgo::GetOutputInferShape(maxpool_with_argmax, 0);
auto argmax_shape = output_shape;
if (argmax_shape.size() != kMaxPoolWithArgmaxShape || ksize.size() != kMaxPoolWithArgmaxShape) {
MS_LOG(EXCEPTION) << "argmax or kernel_size's shape size not equal to 4";
MS_LOG(EXCEPTION) << "Argmax or kernel_size's shape dim should be equal to 4, but got argmax dim: "
<< argmax_shape.size() << ", kernel_size dim: " << ksize.size()
<< ". trace: " << trace::DumpSourceLines(node);
}
argmax_shape[kDim2] = LongToSize(ksize[kDim1] * ksize[kDim2]);
argmax_shape[kDim3] = (output_shape[kDim2] * output_shape[kDim3] + kAlignBytes - 1) / kAlignBytes + 1;
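The second assignment is a ceiling division in disguise: in integer arithmetic, (a + k - 1) / k equals ceil(a / k), so the last argmax dimension becomes ceil(H_out * W_out / kAlignBytes) + 1. For example, with a 7x7 output and kAlignBytes assumed to be 16, this gives (49 + 15) / 16 + 1 = 4 + 1 = 5.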
@ -100,7 +103,9 @@ const AnfNodePtr MaxPoolGradWithArgmaxUnifyMindIR::Process(const FuncGraphPtr &g
auto ksize = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(maxpool_grad_with_argmax, kAttrKernelSize);
auto argmax_shape = AnfAlgo::GetOutputInferShape(tuple_getitem0_anf, 0);
if (argmax_shape.size() != kMaxPoolWithArgmaxShape || ksize.size() != kMaxPoolWithArgmaxShape) {
MS_LOG(EXCEPTION) << "argmax or kernel_size's shape size not equal to 4";
MS_LOG(EXCEPTION) << "Argmax or kernel_size's shape dim should be equal to 4, but got argmax dim: "
<< argmax_shape.size() << ", kernel_size dim: " << ksize.size()
<< ". trace: " << trace::DumpSourceLines(node);
}
argmax_shape[kDim3] = (argmax_shape[kDim2] * argmax_shape[kDim3] + kAlignBytes - 1) / kAlignBytes + 1;
argmax_shape[kDim2] = LongToSize(ksize[kDim1] * ksize[kDim2]);

View File

@ -21,6 +21,7 @@
#include "backend/session/anf_runtime_algorithm.h"
#include "runtime/hccl_adapter/hccl_adapter.h"
#include "backend/optimizer/common/helper.h"
#include "utils/trace_base.h"
namespace mindspore {
namespace opt {
@ -290,7 +291,8 @@ CNodePtr CreateAllToAllvNode(const FuncGraphPtr &graph, const CNodePtr &neighbor
CreateMultipleOutputsOfAnfNode(graph, split_nodes[i], static_cast<size_t>(split_num[i]), &output);
if (output.empty()) {
MS_LOG(EXCEPTION) << "The node " << split_nodes[i]->DebugString()
<< " should have at least one output, but got 0.";
<< " should have at least one output, but got 0. trace: "
<< trace::DumpSourceLines(split_nodes[i]);
}
}
split_outputs.emplace_back(output);
@ -365,7 +367,8 @@ std::vector<CNodePtr> NeighborExchangeV2UnifyMindIR::CreateSplitNodes(const Func
if (neighbor_exchange_v2->size() <= kNeighborExchangeV2InputIdx) {
MS_LOG(EXCEPTION) << "Invalid cnode " << neighbor_exchange_v2->DebugString() << " input size "
<< neighbor_exchange_v2->size();
<< neighbor_exchange_v2->size() << ", should be " << kNeighborExchangeV2InputIdx
<< ". trace: " << trace::DumpSourceLines(neighbor_exchange_v2);
}
std::vector<CNodePtr> split_nodes = {};
@ -379,7 +382,8 @@ std::vector<CNodePtr> NeighborExchangeV2UnifyMindIR::CreateSplitNodes(const Func
auto dtype = AnfAlgo::GetOutputInferDataType(neighbor_exchange_v2_input, 0);
auto shape = AnfAlgo::GetOutputInferShape(neighbor_exchange_v2_input, 0);
if (SizeToLong(shape.size()) != kShapeSize) { // only support NCHW now
MS_LOG(EXCEPTION) << "Invalid shape size " << shape.size() << ", only support NCHW input now!";
MS_LOG(EXCEPTION) << "Invalid shape size " << shape.size()
<< ", only support NCHW input now! trace: " << trace::DumpSourceLines(neighbor_exchange_v2);
}
// splitv for top & bottom
@ -416,7 +420,8 @@ std::vector<CNodePtr> NeighborExchangeV2UnifyMindIR::CreateSplitNodes(const Func
&split_outputs_top_bottom);
if (split_outputs_top_bottom.empty()) {
MS_LOG(EXCEPTION) << "The node " << split_nodes[0]->DebugString()
<< " should have at least one output, but got 0.";
<< " should have at least one output, but got 0. trace: "
<< trace::DumpSourceLines(split_nodes[0]);
}
// for top corner
@ -608,7 +613,8 @@ CNodePtr NeighborExchangeV2UnifyMindIR::CreateConcatNodes(const FuncGraphPtr &gr
CreateMultipleOutputsOfAnfNode(graph, all_to_all_v, static_cast<size_t>(all_to_all_output_num),
&all_to_all_v_outputs);
if (all_to_all_v_outputs.empty()) {
MS_LOG(EXCEPTION) << "The node " << all_to_all_v->DebugString() << " should have at least one output, but got 0.";
MS_LOG(EXCEPTION) << "The node " << all_to_all_v->DebugString()
<< " should have at least one output, but got 0. trace: " << trace::DumpSourceLines(all_to_all_v);
}
if (recv_rank_ids[kRankIdZero] == kInvalidId && recv_rank_ids[kRankIdFour] == kInvalidId) {
@ -644,7 +650,9 @@ CNodePtr NeighborExchangeV2UnifyMindIR::CreateConcatNodes(const FuncGraphPtr &gr
std::vector<AnfNodePtr> concat_left_outputs;
CreateMultipleOutputsOfAnfNode(graph, concat_left, 1, &concat_left_outputs);
if (concat_left_outputs.empty()) {
MS_LOG(EXCEPTION) << "The node " << concat_left->DebugString() << " should have at least one output, but got 0.";
MS_LOG(EXCEPTION) << "The node " << concat_left->DebugString()
<< " should have at least one output, but got 0. trace: "
<< trace::DumpSourceLines(concat_left);
}
concat_input_all.insert(concat_input_all.end(), concat_left_outputs.begin(), concat_left_outputs.end());
++input_nums_all;
@ -655,7 +663,9 @@ CNodePtr NeighborExchangeV2UnifyMindIR::CreateConcatNodes(const FuncGraphPtr &gr
std::vector<AnfNodePtr> concat_middle_outputs;
CreateMultipleOutputsOfAnfNode(graph, concat_middle, 1, &concat_middle_outputs);
if (concat_middle_outputs.empty()) {
MS_LOG(EXCEPTION) << "The node " << concat_middle->DebugString() << " should have at least one output, but got 0.";
MS_LOG(EXCEPTION) << "The node " << concat_middle->DebugString()
<< " should have at least one output, but got 0. trace: "
<< trace::DumpSourceLines(concat_middle);
}
concat_input_all.insert(concat_input_all.end(), concat_middle_outputs.begin(), concat_middle_outputs.end());
++input_nums_all;
@ -667,7 +677,9 @@ CNodePtr NeighborExchangeV2UnifyMindIR::CreateConcatNodes(const FuncGraphPtr &gr
std::vector<AnfNodePtr> concat_right_outputs;
CreateMultipleOutputsOfAnfNode(graph, concat_right, 1, &concat_right_outputs);
if (concat_right_outputs.empty()) {
MS_LOG(EXCEPTION) << "The node " << concat_right->DebugString() << " should have at least one output, but got 0.";
MS_LOG(EXCEPTION) << "The node " << concat_right->DebugString()
<< " should have at least one output, but got 0. trace: "
<< trace::DumpSourceLines(concat_right);
}
concat_input_all.insert(concat_input_all.end(), concat_right_outputs.begin(), concat_right_outputs.end());
++input_nums_all;
@ -694,14 +706,16 @@ std::vector<CNodePtr> NeighborExchangeV2GradUnifyMindIR::CreateSplitNodesForGrad
if (neighbor_exchange_v2_grad->size() <= kNeighborExchangeV2InputIdx) {
MS_LOG(EXCEPTION) << "Invalid cnode " << neighbor_exchange_v2_grad->DebugString() << " input size "
<< neighbor_exchange_v2_grad->size();
<< neighbor_exchange_v2_grad->size() << ", should be " << kNeighborExchangeV2InputIdx
<< ". trace: " << trace::DumpSourceLines(neighbor_exchange_v2_grad);
}
auto neighbor_exchange_v2_grad_input = neighbor_exchange_v2_grad->input(kNeighborExchangeV2InputIdx);
auto dtype = AnfAlgo::GetOutputInferDataType(neighbor_exchange_v2_grad_input, 0);
auto shape = AnfAlgo::GetOutputInferShape(neighbor_exchange_v2_grad_input, 0);
if (SizeToLong(shape.size()) != kShapeSize) {
MS_LOG(EXCEPTION) << "Invalid shape size " << shape.size() << ", only support NCHW input now!";
MS_LOG(EXCEPTION) << "Invalid shape size " << shape.size()
<< ", only support NCHW input now! trace: " << trace::DumpSourceLines(neighbor_exchange_v2_grad);
}
std::vector<CNodePtr> split_nodes = {};
@ -727,7 +741,8 @@ std::vector<CNodePtr> NeighborExchangeV2GradUnifyMindIR::CreateSplitNodesForGrad
CreateMultipleOutputsOfAnfNode(graph, split_nodes[0], static_cast<size_t>(num_split_h), &split_outputs_top_bottom);
if (split_outputs_top_bottom.empty()) {
MS_LOG(EXCEPTION) << "The node " << split_nodes[0]->DebugString()
<< " should have at least one output, but got 0.";
<< " should have at least one output, but got 0. trace: "
<< trace::DumpSourceLines(split_nodes[0]);
}
size_split_h = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(split_nodes[0], kAttrSizeSplits);
} else {
@ -827,7 +842,8 @@ CNodePtr NeighborExchangeV2GradUnifyMindIR::CreateSplitGradNodes(const FuncGraph
CreateMultipleOutputsOfAnfNode(graph, all_to_all_v, static_cast<size_t>(all_to_all_output_num),
&all_to_all_v_outputs);
if (all_to_all_v_outputs.empty()) {
MS_LOG(EXCEPTION) << "The node " << all_to_all_v->DebugString() << " should have at least one output, but got 0.";
MS_LOG(EXCEPTION) << "The node " << all_to_all_v->DebugString()
<< " should have at least one output, but got 0. trace: " << trace::DumpSourceLines(all_to_all_v);
}
// create pad nodes
// slice begin & size
@ -871,7 +887,8 @@ CNodePtr NeighborExchangeV2GradUnifyMindIR::CreateSplitGradNodes(const FuncGraph
std::vector<AnfNodePtr> pad_outputs;
CreateMultipleOutputsOfAnfNode(graph, pad, 1, &pad_outputs);
if (pad_outputs.empty()) {
MS_LOG(EXCEPTION) << "The node " << pad->DebugString() << " should have at least one output, but got 0.";
MS_LOG(EXCEPTION) << "The node " << pad->DebugString()
<< " should have at least one output, but got 0. trace: " << trace::DumpSourceLines(pad);
}
addn_inputs.insert(addn_inputs.end(), pad_outputs.begin(), pad_outputs.end());
++pad_num;

View File

@ -23,6 +23,7 @@
#include "utils/utils.h"
#include "utils/ms_context.h"
#include "utils/trace_base.h"
#include "backend/optimizer/common/helper.h"
#include "runtime/device/kernel_info.h"
#include "backend/session/anf_runtime_algorithm.h"
@ -67,7 +68,7 @@ const AnfNodePtr SliceGradUnifyMindIR::Process(const FuncGraphPtr &graph, const
if (input_num != kSliceGradInputTensorNum && input_num != kSliceGradCangjieInputTensorNum) {
MS_LOG(EXCEPTION) << "The input tensor size[" << input_num
<< "] of node " + slice_grad->DebugString() + " is not equal to " << kSliceGradInputTensorNum
<< " or " << kSliceGradCangjieInputTensorNum;
<< " or " << kSliceGradCangjieInputTensorNum << ". trace: " << trace::DumpSourceLines(node);
}
std::vector<AnfNodePtr> pad_inputs = {NewValueNode(std::make_shared<Primitive>(kPadOpName)),
slice_grad->input(kIndex1)};
@ -89,7 +90,10 @@ const AnfNodePtr SliceGradUnifyMindIR::Process(const FuncGraphPtr &graph, const
sizes = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(slice_grad, kAttrSize);
}
if (x_shape.size() != begins.size() || begins.size() != sizes.size()) {
MS_LOG(EXCEPTION) << "For SliceGrad, x's shape dim number should be equal to len(begin) and len(size).";
MS_LOG(EXCEPTION)
<< "For SliceGrad, x_shape dim number should be equal to len(begin) and len(size), but got x_shape dim: "
<< x_shape.size() << ", len(begin): " << begins.size() << ", len(size): " << sizes.size()
<< ". trace: " << trace::DumpSourceLines(node);
}
std::vector<std::vector<int64_t>> paddings;
for (size_t i = 0; i < x_shape.size(); ++i) {
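The loop body is truncated in this view. Under the usual SliceGrad-to-Pad rewrite, each dimension pads the gradient back to the input shape, which is why the three lengths checked above must agree; each iteration presumably appends something like the following (an assumption, not necessarily the commit's exact code):

// pad begins[i] zeros in front and (x_shape[i] - begins[i] - sizes[i]) zeros behind
(void)paddings.emplace_back(std::vector<int64_t>{begins[i], SizeToLong(x_shape[i]) - begins[i] - sizes[i]});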

View File

@ -63,7 +63,8 @@ CNodePtr CreateOneHot(const FuncGraphPtr &graph, const CNodePtr &sparse_softmax_
size_t index = logits_shape.size() - 1;
depth = SizeToLong(logits_shape[index]);
} else {
MS_LOG(EXCEPTION) << "logits's shape of sparse_softmax_cross_entropy_with_logits is empty.";
MS_LOG(EXCEPTION) << "Logits's shape of node [" << sparse_softmax_node->DebugString()
<< "] is empty. trace: " << trace::DumpSourceLines(sparse_softmax_node);
}
auto value_on = std::make_shared<tensor::Tensor>(1.0, kFloat32);
@ -126,7 +127,7 @@ CNodePtr CreateSoftmaxCrossEntropyWithLogits(const FuncGraphPtr &graph, const CN
if (!labels_shape.empty()) {
loss_shape.emplace_back(labels_shape[0]);
} else {
MS_LOG(EXCEPTION) << "one_hot output's shape is empty.";
MS_LOG(EXCEPTION) << "One_hot output's shape is empty. trace: " << trace::DumpSourceLines(one_hot_node);
}
auto shapes = {loss_shape, AnfAlgo::GetOutputInferShape(one_hot_node, 0)};
@ -140,7 +141,8 @@ std::vector<int64_t> GetAxis(const AnfNodePtr &node) {
MS_EXCEPTION_IF_NULL(node);
std::vector<size_t> output_shape = AnfAlgo::GetOutputInferShape(node, 0);
if (output_shape.empty()) {
MS_LOG(EXCEPTION) << node->fullname_with_scope() << "'s output shape is empty";
MS_LOG(EXCEPTION) << node->fullname_with_scope()
<< "'s output shape is empty. trace: " << trace::DumpSourceLines(node);
}
std::vector<int64_t> range;
for (size_t i = 0; i < output_shape.size(); i++) {
@ -308,7 +310,8 @@ CNodePtr CreateRealDiv(const FuncGraphPtr &graph, const CNodePtr &sparse_softmax
CheckCNodeInputSize(sparse_softmax_node, kSparseSoftmaxCrossEntropyWithLogitsInputTensorNum);
std::vector<size_t> labels_shape = AnfAlgo::GetPrevNodeOutputInferShape(sparse_softmax_node, 1);
if (labels_shape.size() != 1) {
MS_LOG(EXCEPTION) << "label's shape should be 1-D.";
MS_LOG(EXCEPTION) << "Label's shape should be 1-D, but got " << labels_shape.size()
<< ". trace: " << trace::DumpSourceLines(sparse_softmax_node);
}
auto y_value = static_cast<float>(labels_shape[0]);
auto y = std::make_shared<tensor::Tensor>(y_value, kFloat32);
@ -356,13 +359,15 @@ CNodePtr CreateMul(const FuncGraphPtr &graph, const CNodePtr &sparse_softmax_nod
auto softmax_output_shape = AnfAlgo::GetOutputInferShape(softmax_output_node, 0);
if (softmax_output_shape.size() != softmax_output_shape_size) {
MS_LOG(EXCEPTION) << "SoftmaxCrossEntropyWithLogits the second output shape size should be "
<< softmax_output_shape_size << ", but got " << softmax_output_shape.size();
<< softmax_output_shape_size << ", but got " << softmax_output_shape.size()
<< ". trace: " << trace::DumpSourceLines(softmax_output_node);
}
ShapeVector tensor_shape;
tensor_shape.emplace_back(softmax_output_shape[0]);
tensor_shape.emplace_back(1);
if (softmax_output_shape[0] == 0) {
MS_LOG(EXCEPTION) << "output_shape[0] of softmax should not be 0";
MS_LOG(EXCEPTION) << "output_shape[0] of softmax should not be 0. trace: "
<< trace::DumpSourceLines(softmax_output_node);
}
std::vector<float> tensor_value(softmax_output_shape[0], 1.0 / softmax_output_shape[0]);
auto buf_size = sizeof(float) * tensor_value.size();
@ -406,8 +411,9 @@ CNodePtr CreateCast(const FuncGraphPtr &graph, const CNodePtr &cast, const AnfNo
bool IsSparseSoftmaxCrossEntropyWithLogitsGrad(const CNodePtr &sparse, string pass_name) {
MS_EXCEPTION_IF_NULL(sparse);
if (AnfAlgo::GetCNodeName(sparse) != kSparseSoftmaxCrossEntropyWithLogitsOpName) {
MS_LOG(EXCEPTION) << "The pass of " << pass_name << "'s input node is not "
<< kSparseSoftmaxCrossEntropyWithLogitsOpName;
MS_LOG(EXCEPTION) << "The pass of " << pass_name << "'s input node should be "
<< kSparseSoftmaxCrossEntropyWithLogitsOpName << ", but got " << AnfAlgo::GetCNodeName(sparse)
<< ". trace: " << trace::DumpSourceLines(sparse);
}
if (AnfAlgo::HasNodeAttr(kAttrIsGrad, sparse)) {
return AnfAlgo::GetNodeAttr<bool>(sparse, kAttrIsGrad);

View File

@ -32,6 +32,7 @@
#include "utils/convert_utils.h"
#include "runtime/device/kernel_info.h"
#include "utils/ms_context.h"
#include "utils/trace_base.h"
#include "backend/optimizer/common/const_input_to_attr_registry.h"
#include "abstract/primitive_infer_map.h"
@ -151,7 +152,8 @@ void CheckCNodeInputSize(const CNodePtr &cnode, size_t input_tensor_size) {
auto real_input_tensor_num = AnfAlgo::GetInputTensorNum(cnode);
if (real_input_tensor_num != input_tensor_size) {
MS_LOG(EXCEPTION) << "The input tensor size[" << real_input_tensor_num
<< "] of node " + cnode->DebugString() + " is not equal to " << input_tensor_size;
<< "] of node [" + cnode->DebugString() + "] is not equal to " << input_tensor_size
<< ". trace: " << trace::DumpSourceLines(cnode);
}
}
@ -606,7 +608,7 @@ void ConstInputToAttr(const CNodePtr &cnode, const mindspore::HashSet<size_t> &i
MS_EXCEPTION_IF_NULL(value_node);
MS_LOG(DEBUG) << "start erase input[" << i << "] of cnode[" + cnode->DebugString() + "]";
if (i >= input_names_vec.size()) {
MS_LOG(EXCEPTION) << "index " << i << " is larger than input names size [" << input_names_vec.size() << "]";
MS_LOG(EXCEPTION) << "Index " << i << " is larger than input names size [" << input_names_vec.size() << "]";
}
auto value = value_node->value();
if (value->isa<tensor::Tensor>()) {
@ -654,20 +656,20 @@ bool AnfEqual(const BaseRef &a, const BaseRef &b) {
} else if (a_node->isa<ValueNode>() && b_node->isa<ValueNode>()) {
auto a_value_node_ptr = a_node->cast<ValueNodePtr>();
if (a_value_node_ptr == nullptr) {
MS_LOG(EXCEPTION) << "cast value node ptr fail";
MS_LOG(EXCEPTION) << "Cast value node ptr fail.";
}
auto a_value_ptr = a_value_node_ptr->value();
if (a_value_ptr == nullptr) {
MS_LOG(EXCEPTION) << "value ptr is nullptr";
MS_LOG(EXCEPTION) << "Value ptr is nullptr.";
}
auto b_value_node_ptr = b_node->cast<ValueNodePtr>();
if (b_value_node_ptr == nullptr) {
MS_LOG(EXCEPTION) << "cast value node ptr fail";
MS_LOG(EXCEPTION) << "Cast value node ptr fail.";
}
auto b_value_ptr = b_value_node_ptr->value();
if (b_value_ptr == nullptr) {
MS_LOG(EXCEPTION) << "value ptr is nullptr";
MS_LOG(EXCEPTION) << "Value ptr is nullptr.";
}
return (*a_value_ptr) == (*b_value_ptr);
@ -808,8 +810,7 @@ AbstractBasePtrList RectifyAbstractFromRegAttr(const PrimitivePtr &primitive,
continue;
}
if (ori_index > input_abstract.size()) {
MS_LOG(EXCEPTION) << "index is out of range input abstract size " << input_abstract.size()
<< " get index :" << ori_index;
MS_LOG(EXCEPTION) << "Index " << ori_index << " is out of range in input abstract size " << input_abstract.size();
}
rectify_abs_list[index] = input_abstract[ori_index++];
}
@ -829,18 +830,18 @@ AbstractBasePtrList RectifyAbstractFromDynamicInput(const PrimitivePtr &primitiv
for (auto item : dynamic_inputs_index) {
if (item == kNotDynamicFlag) {
if (input_index >= input_abstract.size()) {
MS_LOG(EXCEPTION) << " index " << input_index << " is out of range in input abstract " << input_abstract.size();
MS_LOG(EXCEPTION) << "Index " << input_index << " is out of range in input abstract " << input_abstract.size();
}
(void)rectifyed_abs_list.emplace_back(input_abstract[input_index++]);
} else {
if (item < 0) {
MS_LOG(EXCEPTION) << " the dynamic input size check error the index should be -1 or positive number but got "
MS_LOG(EXCEPTION) << "The dynamic input size check error the index should be -1 or positive number but got "
<< item;
}
AbstractBasePtrList dynamic_inputs_abs;
for (auto index = item; index > 0; --index) {
if (input_index >= input_abstract.size()) {
MS_LOG(EXCEPTION) << " index " << input_index << " is out of range in input abstract "
MS_LOG(EXCEPTION) << "Index " << input_index << " is out of range in input abstract "
<< input_abstract.size();
}
(void)dynamic_inputs_abs.emplace_back(input_abstract[input_index++]);
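A worked example of the encoding handled above, assuming kNotDynamicFlag is -1 (as the error message suggests): with dynamic_inputs_index = {-1, 3, -1} and input abstracts {a0, a1, a2, a3, a4}, the rectified list becomes {a0, (a1, a2, a3), a4}: each -1 forwards one abstract unchanged, and each positive n gathers the next n abstracts, presumably folded into a single tuple abstract after this loop.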
@ -877,7 +878,7 @@ AnfNodePtr SexpToNode(const BaseRef &sexp, const BaseRef &graph, PrimitiveVarMap
}
auto value_node = CreateValueNodeWithSexp(sexp, primitive_vars);
if (value_node == nullptr) {
MS_LOG(EXCEPTION) << "sexp cannot converted. sexp: " + sexp.ToString();
MS_LOG(EXCEPTION) << "Sexp cannot converted, sexp: " + sexp.ToString();
}
return value_node;
}

View File

@ -16,6 +16,7 @@
#include "backend/optimizer/mem_reuse/mem_reuse_checker.h"
#include <fstream>
#include "utils/trace_base.h"
namespace mindspore {
namespace memreuse {
@ -376,7 +377,8 @@ void MemReuseChecker::CheckNormalIR(const session::KernelGraph *graph) {
for (size_t i = 0; i < input_num; ++i) {
if (i + 1 >= node->inputs().size()) {
MS_LOG(EXCEPTION) << "Input index: " << i
<< " is larger than input number: " << AnfAlgo::GetInputTensorNum(node);
<< " is larger than input number: " << AnfAlgo::GetInputTensorNum(node)
<< ". trace: " << trace::DumpSourceLines(node);
}
auto real_input_index = AnfAlgo::GetRealInputIndex(node, i);
auto input = node->input(real_input_index + 1);

View File

@ -317,7 +317,7 @@ AnfNodePtr CommunicationOpFusion::CreateFusedCommunicationOp(const FuncGraphPtr
std::vector<AnfNodePtr> fusion_inputs = {NewValueNode(prim)};
// get all inputs of current segment
if (end_index >= communication_op_info.communication_op_nodes.size()) {
MS_LOG(EXCEPTION) << "end index out of communication_op_nodes size";
MS_LOG(EXCEPTION) << "End index is out of communication_op_nodes size";
}
std::vector<AnfNodePtr> orig_nodes;
for (size_t idx = start_index; idx <= end_index; ++idx) {
@ -440,7 +440,7 @@ bool CommunicationOpFusion::DoFusion(const FuncGraphPtr &func_graph, const Commu
kernel_graph->ReplaceInternalOutput(communication_op_node_item, new_communication_op, 0, LongToSize(offset));
}
if (!manager->Replace(communication_op_node_item, tuple_getitem)) {
MS_LOG(EXCEPTION) << "manager replace node failed";
MS_LOG(EXCEPTION) << "Manager replace node failed";
}
}
start_index = end_index + 1;

View File

@ -18,13 +18,14 @@
#include <vector>
#include "ir/primitive.h"
#include "utils/utils.h"
#include "utils/trace_base.h"
#include "backend/optimizer/common/helper.h"
namespace mindspore {
namespace opt {
namespace {
constexpr size_t kCNodePrimitiveIdx = 0;
} // namespace
}  // namespace
const BaseRef ConvTransposeToConvBackpropInputPass::DefinePattern() const {
VarPtr Xs = std::make_shared<SeqVar>();
@ -39,8 +40,9 @@ const AnfNodePtr ConvTransposeToConvBackpropInputPass::Process(const FuncGraphPt
auto conv_transpose = node->cast<CNodePtr>();
MS_EXCEPTION_IF_NULL(conv_transpose);
if (conv_transpose->size() == kCNodePrimitiveIdx) {
MS_LOG(EXCEPTION) << "Invalid cnode " << node->DebugString() << " input size " << conv_transpose->size();
if (conv_transpose->inputs().empty()) {
MS_LOG(EXCEPTION) << "Cnode inputs should not be empty, cnode: " << node->DebugString()
<< ", trace: " << trace::DumpSourceLines(conv_transpose);
}
auto prim = GetValueNode<PrimitivePtr>(conv_transpose->input(kCNodePrimitiveIdx));

View File

@ -103,7 +103,7 @@ void AddMissingAttrs(const CNodePtr &cnode, kernel::OpImplyType imply_type,
auto default_value = attr->default_value();
if (default_value.empty()) {
MS_LOG(EXCEPTION) << "attr [" << attr_name << "] in the registration information of op [" << op_name
<< "] does not have a value.";
<< "] does not have a value. trace: " << trace::DumpSourceLines(cnode);
}
ParseAttrDefaultValue(op_name, attr_name, default_value, attr->type(), primitive);
need_update = true;

View File

@ -22,6 +22,7 @@
#include "backend/optimizer/common/helper.h"
#include "base/core_ops.h"
#include "utils/utils.h"
#include "utils/trace_base.h"
#include "backend/session/kernel_graph.h"
#include "backend/session/anf_runtime_algorithm.h"
@ -192,7 +193,8 @@ const AnfNodePtr OptimizeDependence::Process(const FuncGraphPtr &func_graph, con
bool inputs_changed = false;
for (auto index : candidate_inputs) {
if (index >= new_inputs.size()) {
MS_LOG(EXCEPTION) << "Index is out of the size of " << cnode->DebugString() << " inputs.";
MS_LOG(EXCEPTION) << "Index is out of the size of " << cnode->DebugString()
<< " inputs. trace: " << trace::DumpSourceLines(cnode);
}
auto replace_node = GetConvertNode(func_graph, cnode, index);
if (replace_node != nullptr) {

View File

@ -25,6 +25,7 @@
#include <string>
#include "utils/hash_map.h"
#include "utils/ms_context.h"
#include "utils/trace_base.h"
#include "backend/session/anf_runtime_algorithm.h"
#include "backend/optimizer/trt_pass/trt_op_factory.h"
#include "vm/segment_runner.h"
@ -47,7 +48,7 @@ bool WeightCheck(const AnfNodePtr &node) {
for (auto index : iter->second) {
if (index >= real_inputs.size()) {
MS_LOG(EXCEPTION) << "index out of range. node: " << node->DebugString() << ", index: " << index
<< real_inputs.size();
<< real_inputs.size() << ". trace: " << trace::DumpSourceLines(node);
}
if (real_inputs[index].first->isa<Parameter>() &&

View File

@ -558,7 +558,7 @@ size_t AnfRuntimeAlgorithm::GetInputNum(const CNodePtr &cnode) {
MS_EXCEPTION_IF_NULL(cnode);
size_t input_num = cnode->size();
if (input_num == 0) {
MS_LOG(EXCEPTION) << "Cnode inputs size can't be zero";
MS_LOG(EXCEPTION) << "Cnode inputs size can't be zero. trace: " << trace::DumpSourceLines(cnode);
}
return input_num - 1;
}
@ -2122,7 +2122,8 @@ void AnfRuntimeAlgorithm::GetAllFatherRealNode(const AnfNodePtr &anf_node, std::
auto cnode = anf_node->cast<CNodePtr>();
MS_EXCEPTION_IF_NULL(cnode);
if (cnode->inputs().empty()) {
MS_LOG(EXCEPTION) << "Illegal null input of cnode(%s)" << anf_node->DebugString();
MS_LOG(EXCEPTION) << "Illegal null input of cnode(%s)" << anf_node->DebugString()
<< ". trace: " << trace::DumpSourceLines(cnode);
}
auto input0 = cnode->input(0);
if (IsPrimitive(input0, prim::kPrimMakeTuple)) {
@ -2136,7 +2137,7 @@ void AnfRuntimeAlgorithm::GetAllFatherRealNode(const AnfNodePtr &anf_node, std::
GetAllFatherRealNode(cnode->input(kRealInputNodeIndexInTupleGetItem), result, visited);
} else if (IsPrimitive(input0, prim::kPrimDepend)) {
if (cnode->inputs().size() != kDependInputSize) {
MS_LOG(EXCEPTION) << "Depend node must have 2 inputs!";
MS_LOG(EXCEPTION) << "Depend node must have 2 inputs! trace: " << trace::DumpSourceLines(cnode);
}
GetAllFatherRealNode(cnode->input(kRealInputIndexInDepend), result, visited);
GetAllFatherRealNode(cnode->input(kDependAttachNodeIndex), result, visited);
@ -2148,7 +2149,8 @@ void AnfRuntimeAlgorithm::InferShape(const CNodePtr &node, std::map<uint32_t, te
MS_LOG(INFO) << "InferShape start, node:" << node->DebugString();
auto inputs = node->inputs();
if (inputs.empty()) {
MS_LOG(EXCEPTION) << "Invalid inputs";
MS_LOG(EXCEPTION) << "Inputs should not be empty! Cnode: " << node->DebugString()
<< ". trace: " << trace::DumpSourceLines(node);
}
AbstractBasePtrList args_spec_list;
auto primitive = GetValueNode<PrimitivePtr>(inputs[0]);
@ -2182,7 +2184,8 @@ void AnfRuntimeAlgorithm::InferShape(const CNodePtr &node, std::map<uint32_t, te
auto base_shape = real_input->Shape();
if (!base_shape->isa<abstract::TupleShape>()) {
MS_LOG(EXCEPTION) << "Node:" << node->DebugString()
<< " input is a tuple_get_item but real input node shape is not a TupleShape";
<< " input is a tuple_get_item but real input node shape is not a TupleShape. trace: "
<< trace::DumpSourceLines(real_input);
}
auto abs = real_input->abstract()->cast<abstract::AbstractTuplePtr>();
MS_EXCEPTION_IF_NULL(abs);
@ -2430,7 +2433,7 @@ bool AnfRuntimeAlgorithm::IsCallNode(const AnfNodePtr &node) {
const auto &inputs = cnode->inputs();
if (inputs.empty() || inputs[0] == nullptr) {
MS_LOG(EXCEPTION) << "Invalid call node:" << node->DebugString();
MS_LOG(EXCEPTION) << "Invalid call node:" << node->DebugString() << ". trace: " << trace::DumpSourceLines(cnode);
}
return inputs[0]->isa<CNode>() || (inputs[0]->isa<ValueNode>() && IsValueNode<FuncGraph>(inputs[0]));
}

View File

@ -161,7 +161,7 @@ BaseRef GetNodeOutputTensorFromInputs(const session::KernelWithIndex &node_outpu
}
for (size_t input_idx = 0; input_idx < graph->inputs().size(); input_idx++) {
if (input_idx >= input_tensors.size()) {
MS_LOG(EXCEPTION) << "Input idx:" << input_idx << "out of range:" << input_tensors.size();
MS_LOG(EXCEPTION) << "Input idx:" << input_idx << " is out of range:" << input_tensors.size();
}
if (graph->inputs()[input_idx] == node) {
return input_tensors[input_idx];
@ -373,7 +373,7 @@ BaseRef CreateNodeOutputPlaceholder(const session::KernelWithIndex &node_output_
if (node->isa<Parameter>()) {
for (size_t input_idx = 0; input_idx < graph->inputs().size(); input_idx++) {
if (input_idx >= input_tensors.size()) {
MS_LOG(EXCEPTION) << "Input idx:" << input_idx << "out of range:" << input_tensors.size();
MS_LOG(EXCEPTION) << "Input idx:" << input_idx << " is out of range:" << input_tensors.size();
}
if (graph->inputs()[input_idx] == node) {
return input_tensors[input_idx];
@ -424,13 +424,15 @@ void CheckInputTensorShape(const TensorPtr &tensor, const CNodePtr &kernel, size
if (tensor_shape.size() != input_shape.size()) {
MS_LOG(EXCEPTION) << "The input tensor's shape size: " << tensor_shape.size()
<< " is not equal to expected size: " << input_shape.size() << " for input[" << input_index
<< "] of kernel: " << AnfAlgo::GetCNodeName(kernel);
<< "] of kernel: " << AnfAlgo::GetCNodeName(kernel)
<< ", trace: " << trace::DumpSourceLines(kernel);
}
for (size_t i = 0; i < tensor_shape.size(); i++) {
if (tensor_shape[i] < 0 || static_cast<size_t>(tensor_shape[i]) != input_shape[i]) {
MS_LOG(EXCEPTION) << "The input tensor's shape: " << tensor_shape
<< " is not equal to expected shape: " << input_shape << " for input[" << input_index
<< "] of kernel: " << AnfAlgo::GetCNodeName(kernel);
<< "] of kernel: " << AnfAlgo::GetCNodeName(kernel)
<< ", trace: " << trace::DumpSourceLines(kernel);
}
}
}
@ -1377,6 +1379,7 @@ void SessionBasic::HandleOpOutputs(const AnfNodePtr &kernel, const VectorRef &op
}
}
}
TensorPtr SessionBasic::GetValueNodeOutputTensor(const AnfNodePtr &node, size_t output_index) {
MS_EXCEPTION_IF_NULL(node);
if (!node->isa<ValueNode>()) {
@ -1840,7 +1843,8 @@ void SessionBasic::SetSummaryNodes(KernelGraph *graph) {
auto cnode = n->cast<CNodePtr>();
MS_EXCEPTION_IF_NULL(cnode);
if (cnode->inputs().size() <= kSummaryGetItem) {
MS_LOG(EXCEPTION) << "The node Summary should have 2 inputs at least!";
MS_LOG(EXCEPTION) << "The node Summary should have 2 inputs at least, but got " << cnode->inputs().size() - 1
<< ". trace: " << trace::DumpSourceLines(cnode);
}
auto node = cnode->input(kSummaryGetItem);
MS_EXCEPTION_IF_NULL(node);

View File

@ -16,6 +16,7 @@
#include "backend/session/single_kernel_graph.h"
#include "backend/session/anf_runtime_algorithm.h"
#include "utils/trace_base.h"
namespace mindspore {
namespace session {
@ -45,7 +46,10 @@ std::shared_ptr<session::KernelGraph> SingleKernelGraph::ConstructKernelGraphBas
// get output dynamic shape info
AnfAlgo::SetNodeAttr(kAttrOutputIsDynamicShape, MakeValue(false), cnode);
if (output_dtypes.size() != output_shapes.size()) {
MS_LOG(EXCEPTION) << " output_dtypes size should equal to output_shapes size, the op name is: " << op_name;
MS_LOG(EXCEPTION)
<< "The size of output_dtypes should be equal to size of output_shapes, but got output_dtypes size: "
<< output_dtypes.size() << ", output_shapes size: " << output_shapes.size() << ". The op name is: " << op_name
<< ", trace: " << trace::DumpSourceLines(cnode);
}
AnfAlgo::SetOutputInferTypeAndShape(output_dtypes, output_shapes, cnode.get());
// set execution order