forked from mindspore-Ecosystem/mindspore
move key 'trace' to interface DumpSourceLines
parent 8af53e9ae5
commit 618d4b7dfe

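Every hunk in this commit follows one mechanical pattern: the hand-written ". trace: " / " trace: " fragment is deleted from each MS_LOG message, and the call site now appends trace::DumpSourceLines(...) directly, on the assumption that the interface emits the trace label itself. A minimal self-contained C++ sketch of the call-site change (Node and DumpSourceLines below are illustrative stand-ins, not the actual MindSpore types or implementation):

// Illustrative stand-ins only; not the real MindSpore code.
#include <iostream>
#include <sstream>
#include <string>

struct Node {
  std::string name;  // stand-in for an AnfNode
};

// Assumed post-commit behavior: the interface emits the "trace:" label itself,
// so callers no longer spell out ". trace: " in every message.
std::string DumpSourceLines(const Node &node) {
  std::ostringstream oss;
  oss << "\ntrace: <source lines of " << node.name << ">";
  return oss.str();
}

int main() {
  Node n{"ReduceSum-op1"};
  // Before: log << "... single output. trace: " << DumpSourceLines(n);
  // After:  the separator comes from DumpSourceLines itself.
  std::cout << "The reduce node [" << n.name << "] is not single input or single output."
            << DumpSourceLines(n) << std::endl;
  return 0;
}

With the label owned by the interface, the dozens of edits below are purely mechanical.
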
@@ -641,8 +641,8 @@ bool IsWeightBoundary(const AnfNodePtr &node) {
 std::vector<int64_t> GetReduceAttrAxis(const CNodePtr &cnode) {
   if (AnfAlgo::GetInputTensorNum(cnode) != 1 || AnfAlgo::GetOutputTensorNum(cnode) != 1) {
-    MS_LOG(EXCEPTION) << "The reduce node [" << cnode->DebugString()
-                      << "] is not single input or single output. trace: " << trace::DumpSourceLines(cnode);
+    MS_LOG(EXCEPTION) << "The reduce node [" << cnode->DebugString() << "] is not single input or single output."
+                      << trace::DumpSourceLines(cnode);
   }
   std::vector<int64_t> axis;
   auto input_shape = AnfAlgo::GetPrevNodeOutputInferShape(cnode, 0);

@@ -273,8 +273,8 @@ void HcomUtil::GetHcomGroup(NotNull<const AnfNodePtr &> anf_node, NotNull<std::s
   if (attr != nullptr) {
     *group = GetValue<std::string>(attr);
   } else {
-    MS_LOG(EXCEPTION) << "Get Hcom Group Attr of Op:" << anf_node->fullname_with_scope()
-                      << " failed. trace: " << trace::DumpSourceLines(anf_node);
+    MS_LOG(EXCEPTION) << "Get Hcom Group Attr of Op:" << anf_node->fullname_with_scope() << " failed."
+                      << trace::DumpSourceLines(anf_node);
   }
 }
 }  // namespace mindspore

@@ -125,12 +125,11 @@ std::vector<int64_t> GetInputShape(const CNodePtr &cnode, size_t index) {
   auto shape_x = AnfAlgo::GetPrevNodeOutputInferShape(cnode, index);
   auto type_x = AnfAlgo::GetOutputInferDataType(cnode, index);
   if (type_x != TypeId::kNumberTypeInt64) {
-    MS_LOG(EXCEPTION) << "Input x type must be int64, but got " << type_x
-                      << ". trace: " << trace::DumpSourceLines(cnode);
+    MS_LOG(EXCEPTION) << "Input x type must be int64, but got " << type_x << trace::DumpSourceLines(cnode);
   }
   if (shape_x.size() != 1) {
     MS_LOG(EXCEPTION) << "Input" << index << " must be [1-D], but got " << shape_x.size()
-                      << "-D. trace: " << trace::DumpSourceLines(cnode);
+                      << trace::DumpSourceLines(cnode);
   }

   size_t x_num = shape_x[0];
@@ -188,7 +187,7 @@ void DynamicBroadcastGradientArgsKernel::Execute() {
   auto input_num = AnfAlgo::GetInputTensorNum(cnode);
   if (input_num != kInputNum) {
     MS_LOG(EXCEPTION) << "Invalid input num, should be " << kInputNum << ", but got " << input_num
-                      << ". trace: " << trace::DumpSourceLines(cnode);
+                      << trace::DumpSourceLines(cnode);
   }

   std::vector<std::vector<int64_t>> input_shapes(kInputNum);

@@ -30,14 +30,14 @@ std::vector<int64_t> GetInputValue(const CNodePtr &cnode, size_t index) {
   auto address_x = AnfAlgo::GetPrevNodeMutableOutputAddr(cnode, index);
   auto shape_x = AnfAlgo::GetPrevNodeOutputInferShape(cnode, index);
   if (shape_x.size() != 1) {
-    MS_LOG(EXCEPTION) << "Input" << index << " must be [1-D], but got " << shape_x.size()
-                      << "-D. trace: " << trace::DumpSourceLines(cnode);
+    MS_LOG(EXCEPTION) << "Input" << index << " must be [1-D], but got " << shape_x.size() << "-D."
+                      << trace::DumpSourceLines(cnode);
   }
   session::KernelWithIndex kernel_with_index = AnfAlgo::GetPrevNodeOutput(cnode, index);
   auto type_x = AnfAlgo::GetOutputInferDataType(kernel_with_index.first, kernel_with_index.second);
   if (type_x != TypeId::kNumberTypeInt64 && type_x != TypeId::kNumberTypeInt32) {
     MS_LOG(EXCEPTION) << "Input x type must be int64 or int32, but got " << TypeIdToType(type_x)
-                      << ". trace: " << trace::DumpSourceLines(cnode);
+                      << trace::DumpSourceLines(cnode);
   }

   size_t x_num = shape_x[0];
@@ -71,7 +71,7 @@ void DynamicReshapeKernel::Execute() {
   auto input_num = AnfAlgo::GetInputTensorNum(cnode);
   if (input_num != kInputNum) {
     MS_LOG(EXCEPTION) << "Invalid input num, should be " << kInputNum << ", but got " << input_num
-                      << ". trace: " << trace::DumpSourceLines(cnode);
+                      << trace::DumpSourceLines(cnode);
   }

   auto address_x = AnfAlgo::GetPrevNodeMutableOutputAddr(cnode, 0);

@@ -27,7 +27,7 @@ void DynamicShapeKernel::Execute() {
   auto input_num = AnfAlgo::GetInputTensorNum(cnode);
   if (input_num != 1) {
     MS_LOG(EXCEPTION) << "Op [" << cnode->DebugString() << "] has invalid input num, should be 1, but got " << input_num
-                      << ". trace: " << trace::DumpSourceLines(cnode);
+                      << trace::DumpSourceLines(cnode);
   }

   auto prev_output_shape = AnfAlgo::GetPrevNodeOutputInferShape(cnode, 0);

@@ -283,8 +283,8 @@ void TbeKernelCompileManager::PrintCompileResult(const nlohmann::json &json) {
       PrintProcessLog(json);
       auto task_id = GetJsonValue<int>(json, kJobId);
       auto target_node = job_id_to_node_[task_id];
-      MS_LOG(EXCEPTION) << json_name << " " << job_type
-                        << " running failed, trace: " << trace::DumpSourceLines(target_node);
+      MS_LOG(EXCEPTION) << json_name << " " << job_type << " running failed." << trace::DumpSourceLines(target_node);
     }
   }
 }
@@ -490,8 +489,7 @@ void TbeKernelCompileManager::QueryProcess(const std::string &type, const std::s
     auto target_node = job_id_to_node_[target_status.target_job_id];
     ClearOldTask();
     MS_LOG(EXCEPTION) << "Single op compile failed, op: " << kernel_name
-                      << "\n except_msg : " << target_status.except_msg
-                      << "\n Trace: " << trace::DumpSourceLines(target_node);
+                      << "\n except_msg: " << target_status.except_msg << trace::DumpSourceLines(target_node);
   } else {
     MS_LOG(INFO) << "Op " << kernel_name << " " << type << " failed,\n except_msg : " << target_status.except_msg;
     (void)success_job->emplace_back(target_status.target_job_id);
@@ -546,7 +544,7 @@ void TbeKernelCompileManager::GenKernelMod(const std::vector<CNodePtr> &node_lis
     kernel_pack = bin_map->SearchInFile(json_name);
     if (kernel_pack == nullptr) {
       MS_LOG(EXCEPTION) << "Can not find .json file or the .o file for op:" << json_name
-                        << ", trace: " << trace::DumpSourceLines(node);
+                        << trace::DumpSourceLines(node);
     }
   }
   auto kernel_info_json = kernel_pack->kernel_json_info();
@@ -555,8 +553,7 @@ void TbeKernelCompileManager::GenKernelMod(const std::vector<CNodePtr> &node_lis

   auto iter = kernel_io_size_info_.find(json_name);
   if (iter == kernel_io_size_info_.end() || iter->second.json_name != json_name) {
-    MS_LOG(EXCEPTION) << "Can not find node io size info for: " << full_name
-                      << ", trace: " << trace::DumpSourceLines(node);
+    MS_LOG(EXCEPTION) << "Can not find node io size info for: " << full_name << trace::DumpSourceLines(node);
   }
   kernel_mod_ptr->SetInputSizeList(iter->second.input_size_list);
   kernel_mod_ptr->SetOutputSizeList(iter->second.output_size_list);
@@ -612,7 +609,7 @@ std::string TbeKernelCompileManager::ParseSelectAndCheckResult(const nlohmann::j
       auto all_logs = GetJsonValue<std::vector<nlohmann::json>>(json, kProcessInfo);
       auto except_msg = FilterExceptionMessage(all_logs);
       MS_LOG(EXCEPTION) << job_type << " running failed, op: " << json_name << "\nexception message:" << except_msg
-                        << "\ntrace: " << trace::DumpSourceLines(node);
+                        << trace::DumpSourceLines(node);
     }
   }
   MS_LOG(DEBUG) << json_name << " " << job_type << " success, get: " << res;
@@ -653,8 +650,8 @@ void TbeKernelCompileManager::DistributeCompileTask(const std::vector<CNodePtr>
       continue;  // kernel mode exist, no need build
     }
     if (!json_creator->GenJson(node, &kernel_json)) {
-      MS_LOG(EXCEPTION) << "Generate compile json failed, [" << node->fullname_with_scope()
-                        << "], trace: " << trace::DumpSourceLines(node);
+      MS_LOG(EXCEPTION) << "Generate compile json failed, [" << node->fullname_with_scope() << "]"
+                        << trace::DumpSourceLines(node);
     }
     auto json_name = json_creator->GetJsonName();
     full_name_to_json_name_[full_name] = json_name;
@@ -770,7 +767,7 @@ std::string TbeKernelCompileManager::TbeOpSelectFormat(const CNodePtr &node) {
   nlohmann::json kernel_info;
   nlohmann::json select_json;
   if (!json_creator->GenJson(node, &kernel_info)) {
-    MS_LOG(EXCEPTION) << "Gen select json failed. [" << full_name << "], trace: " << trace::DumpSourceLines(node);
+    MS_LOG(EXCEPTION) << "Gen select json failed. [" << full_name << "]" << trace::DumpSourceLines(node);
   }
   JsonAssemble(kSelectFormat, kernel_info, &select_json);
   auto select_ret = DispatchCompileTask(select_json);
@@ -787,7 +784,7 @@ bool TbeKernelCompileManager::TbeOpCheckSupported(const CNodePtr &node) {
   nlohmann::json kernel_info;
   nlohmann::json check_json;
   if (!json_creator->GenJson(node, &kernel_info)) {
-    MS_LOG(EXCEPTION) << "Gen check json failed.[" << full_name << "], trace: " << trace::DumpSourceLines(node);
+    MS_LOG(EXCEPTION) << "Gen check json failed.[" << full_name << "]" << trace::DumpSourceLines(node);
   }
   JsonAssemble(kCheckSupport, kernel_info, &check_json);
   auto check_ret = DispatchCompileTask(check_json);

@@ -38,13 +38,13 @@ bool TbeKernelBroadCastSelecter::GetShapeInfo(SupportFormat *support_format) {
     auto dynamic_size_vec = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(cnode_ptr_, kAttrDynInputSizes);
     constexpr int64_t DYNAMIC_INPUT_NUM = 2;
     if (dynamic_size_vec.empty()) {
-      MS_LOG(EXCEPTION) << "Node [" << AnfAlgo::GetCNodeName(cnode_ptr_)
-                        << "]'s attr [dyn_input_sizes] is empty. trace: " << trace::DumpSourceLines(cnode_ptr_);
+      MS_LOG(EXCEPTION) << "Node [" << AnfAlgo::GetCNodeName(cnode_ptr_) << "]'s attr [dyn_input_sizes] is empty"
+                        << trace::DumpSourceLines(cnode_ptr_);
     }
     if (dynamic_size_vec[0] < DYNAMIC_INPUT_NUM) {
       MS_LOG(EXCEPTION) << "Node [" << AnfAlgo::GetCNodeName(cnode_ptr_)
                         << "]'s attr [dyn_input_sizes] value less than " << DYNAMIC_INPUT_NUM
-                        << ". trace: " << trace::DumpSourceLines(cnode_ptr_);
+                        << trace::DumpSourceLines(cnode_ptr_);
     }
     auto dynamic_input_shape0_ = AnfAlgo::GetPrevNodeOutputInferShape(cnode_ptr_, kInputIndex_0);
     PadScalarShape(&dynamic_input_shape0_);

@@ -52,7 +52,7 @@ static bool CheckStridedSlice(const CNodePtr &cnode) {
     MS_EXCEPTION_IF_NULL(input_value);
     if (!input_value->isa<Tensor>()) {
       MS_LOG(EXCEPTION) << "For 'StrideSlice', the first input value should be a tensor, but got "
-                        << input_value->ToString() << ". trace: " << trace::DumpSourceLines(cnode);
+                        << input_value->ToString() << trace::DumpSourceLines(cnode);
     }
     input_dims = SizeToInt(input_value->cast<TensorPtr>()->shape().size());
   } else if (input->isa<CNode>() || input->isa<Parameter>()) {
@@ -60,12 +60,12 @@ static bool CheckStridedSlice(const CNodePtr &cnode) {
     MS_EXCEPTION_IF_NULL(input_abstract);
     if (!input_abstract->isa<AbstractTensor>()) {
       MS_LOG(EXCEPTION) << "For 'StrideSlice', the first input value should be a tensor, but got "
-                        << input_abstract->ToString() << ". trace: " << trace::DumpSourceLines(cnode);
+                        << input_abstract->ToString() << trace::DumpSourceLines(cnode);
     }
     input_dims = SizeToInt(input_abstract->cast<AbstractTensorPtr>()->shape()->shape().size());
   } else {
     MS_LOG(EXCEPTION) << "For 'StrideSlice', the first input node should be a 'ValueNode' or a 'CNode', but got "
-                      << input->ToString() << ". trace: " << trace::DumpSourceLines(cnode);
+                      << input->ToString() << trace::DumpSourceLines(cnode);
   }
   const int base_number = 2;
   if (shrink_axis_mask >= std::pow<int, int>(base_number, input_dims - 1) && input_dims > 1) {
@@ -80,7 +80,7 @@ static bool CheckTopK(const CNodePtr &cnode) {
     auto sorted = AnfAlgo::GetNodeAttr<bool>(cnode, kAttrSorted);
     return sorted;
   }
-  MS_LOG(EXCEPTION) << "For 'TopK', it should be have attribute 'sorted'. trace: " << trace::DumpSourceLines(cnode);
+  MS_LOG(EXCEPTION) << "For 'TopK', it should be have attribute 'sorted'." << trace::DumpSourceLines(cnode);
 }

 bool TbePropertyChecker::CheckTbeProperties(const mindspore::CNodePtr &cnode) {

@@ -144,7 +144,7 @@ AnfNodePtr InsertTransOpForSingleOutput(const FuncGraphPtr &func_graph, const An
   std::vector<size_t> origin_shape = AnfAlgo::GetOutputInferShape(node, 0);
   if (output_format == kOpFormat_NC1KHKWHWC0) {
     MS_LOG(EXCEPTION) << "Got the hw format " << output_format << "when insert the transdata node "
-                      << node->DebugString() << " trace: " << trace::DumpSourceLines(node);
+                      << node->DebugString() << trace::DumpSourceLines(node);
   }
   if (NeedInsertTransData(origin_shape, output_format)) {
     MS_LOG(DEBUG) << "Inserted transdata " << output_format << " to default , index :0";
@@ -173,7 +173,7 @@ AnfNodePtr InsertTransOpForMultipleOutput(const FuncGraphPtr &func_graph, const
     std::string output_format = AnfAlgo::GetOutputFormat(node, output_idx);
     if (output_format == kOpFormat_NC1KHKWHWC0) {
       MS_LOG(EXCEPTION) << "Got the special format" << output_format << " when insert the transdata node "
-                        << node->DebugString() << " trace: " << trace::DumpSourceLines(node);
+                        << node->DebugString() << trace::DumpSourceLines(node);
     }
     auto tuple_getitem = CreatTupleGetItemNode(func_graph, node, output_idx);
     std::vector<size_t> origin_shape = AnfAlgo::GetOutputInferShape(node, output_idx);

@@ -46,8 +46,7 @@ bool IsPartOutputsOfHcclOp(const AnfNodePtr &node, const CNodePtr &cur_hccl, con
   auto &node_users = manager->node_users();
   auto iter = node_users.find(prev_hccl_op);
   if (iter == node_users.end()) {
-    MS_LOG(EXCEPTION) << "Node has no output in manager"
-                      << " trace: " << trace::DumpSourceLines(cur_hccl);
+    MS_LOG(EXCEPTION) << "Node has no output in manager" << trace::DumpSourceLines(cur_hccl);
   }
   for (const auto &node_index : iter->second) {
     AnfNodePtr output = node_index.first;
@@ -81,8 +80,7 @@ AnfNodePtr InsertTensorMoveForCascade::InsertTensorMove(const FuncGraphPtr &grap
     if (IsPartOutputsOfHcclOp(input, hccl_node, graph)) {
       auto tensor_move = CreateTensorMoveOp(graph, input);
       if (tensor_move == nullptr) {
-        MS_LOG(EXCEPTION) << "Create tensor_move op failed."
-                          << " trace: " << trace::DumpSourceLines(hccl_node);
+        MS_LOG(EXCEPTION) << "Create tensor_move op failed." << trace::DumpSourceLines(hccl_node);
       }
       if (AnfAlgo::IsNodeDynamicShape(input)) {
         AnfAlgo::SetNodeAttr(kAttrIsDynamicShape, MakeValue(true), tensor_move);

@@ -47,7 +47,7 @@ bool IsNodeOutPutUsedByOtherRealKernel(const FuncGraphPtr &graph, const AnfNodeP
   auto &node_users = manager->node_users();
   auto iter = node_users.find(input);
   if (iter == node_users.end()) {
-    MS_LOG(EXCEPTION) << "node has no output in manager, trace: " << trace::DumpSourceLines(input);
+    MS_LOG(EXCEPTION) << "node has no output in manager." << trace::DumpSourceLines(input);
   }
   auto user_items = iter->second;
   if (user_items.size() == 1) {

@@ -41,12 +41,12 @@ const std::map<std::string, ConvertFunction> kReduceConvertMap = {{kOpFormat_FRA
 void SafeCheckFunction(const CNodePtr &cnode, const std::vector<int64_t> &reduce_axis) {
   MS_EXCEPTION_IF_NULL(cnode);
   if (reduce_axis.empty()) {
-    MS_LOG(EXCEPTION) << "The node " << cnode->DebugString()
-                      << "'s reduce axis got a empty vector, trace: " << trace::DumpSourceLines(cnode);
+    MS_LOG(EXCEPTION) << "The node " << cnode->DebugString() << "'s reduce axis got a empty vector"
+                      << trace::DumpSourceLines(cnode);
   }
   if (AnfAlgo::GetInputTensorNum(cnode) != 1 || AnfAlgo::GetOutputTensorNum(cnode) != 1) {
     MS_LOG(EXCEPTION) << "The kind of reduce node [" << cnode->DebugString()
-                      << "] is not single input or single output. trace: " << trace::DumpSourceLines(cnode);
+                      << "] is not single input or single output." << trace::DumpSourceLines(cnode);
   }
   for (auto elem : reduce_axis) {
     if (elem > kAxisDim) {

@@ -48,7 +48,7 @@ const AnfNodePtr ConvertUnSupportNodeToAICPU::Process(const mindspore::FuncGraph
     AnfAlgo::SetNodeAttr(kAttrIsAICPUKernel, MakeValue(true), node);
   } else {
     MS_LOG(EXCEPTION) << "Kernel " << kernel_builder_info->ToString() << "is not supported in AiCPU & AiCore : node ["
-                      << node->DebugString() << "]. trace:" << trace::DumpSourceLines(node);
+                      << node->DebugString() << "]" << trace::DumpSourceLines(node);
   }
   return nullptr;
 }

@@ -181,8 +181,7 @@ AnfNodePtr MergeCastToNextOp(const FuncGraphPtr &graph, const CNodePtr &node, co
   AnfAlgo::SetSelectKernelBuildInfo(*alternative_kernel_info, next_cnode.get());
   if (AnfAlgo::GetInputTensorNum(node) < kCastInputTensorNum) {
     MS_LOG(EXCEPTION) << "Op[" << node->DebugString() << "] has wrong input num:" << AnfAlgo::GetInputTensorNum(node)
-                      << ", should be not less than " << kCastInputTensorNum
-                      << ". trace: " << trace::DumpSourceLines(node);
+                      << ", should be not less than " << kCastInputTensorNum << trace::DumpSourceLines(node);
   }
   return node->input(1);
 }

@@ -67,7 +67,7 @@ AnfNodePtr BatchNormBertFission::CreateBNTrainingReduce(const FuncGraphPtr &func
   MS_EXCEPTION_IF_NULL(bn_cnode);
   if (bn_cnode->inputs().size() < kBatchNormRealInputNum + 1) {
     MS_LOG(EXCEPTION) << "The input size of node " + bn_cnode->DebugString() + " is less than "
-                      << (kBatchNormRealInputNum + 1) << " trace: " << trace::DumpSourceLines(bn);
+                      << (kBatchNormRealInputNum + 1) << trace::DumpSourceLines(bn);
   }
   std::vector<AnfNodePtr> bn_training_reduce_inputs = {
     NewValueNode(std::make_shared<Primitive>(kBNTrainingReduceOpName)), bn_cnode->input(kIndex1)};
@@ -94,12 +94,11 @@ AnfNodePtr BatchNormBertFission::CreateBNTrainingUpdateV2(
   MS_EXCEPTION_IF_NULL(bn_cnode);
   if (bn_cnode->inputs().size() < kBatchNormRealInputNum + 1) {
     MS_LOG(EXCEPTION) << "The input size of node " + bn_cnode->DebugString() + " is less than "
-                      << (kBatchNormRealInputNum + 1) << " trace: " << trace::DumpSourceLines(bn);
+                      << (kBatchNormRealInputNum + 1) << trace::DumpSourceLines(bn);
   }
   if (bn_training_reduce_outputs.size() != kBNTrainingReduceOutputNum) {
     MS_LOG(EXCEPTION) << "The output size of node bn_training_reduce must be " << kBNTrainingReduceOutputNum
-                      << ", but it is " << bn_training_reduce_outputs.size()
-                      << " trace: " << trace::DumpSourceLines(bn);
+                      << ", but it is " << bn_training_reduce_outputs.size() << trace::DumpSourceLines(bn);
   }
   std::vector<AnfNodePtr> bn_training_update_v2_inputs = {
     NewValueNode(std::make_shared<Primitive>(kBNTrainingUpdateV2OpName)),
@@ -115,7 +114,7 @@ AnfNodePtr BatchNormBertFission::CreateBNTrainingUpdateV2(
   MS_EXCEPTION_IF_NULL(bn_abstract_tuple);
   if (bn_abstract_tuple->elements().size() != kBnOutputNum) {
     MS_LOG(EXCEPTION) << "The abstract size of node bn must be " << kBnOutputNum << ", but it is "
-                      << bn_abstract_tuple->elements().size() << " trace: " << trace::DumpSourceLines(bn);
+                      << bn_abstract_tuple->elements().size() << trace::DumpSourceLines(bn);
   }
   std::vector<AbstractBasePtr> abstract_list{bn_abstract_tuple->elements()[kIndex0],
                                              bn_abstract_tuple->elements()[kIndex3],
@@ -159,8 +158,7 @@ const AnfNodePtr BatchNormBertFission::Process(const FuncGraphPtr &func_graph, c
                                 &bn_training_update_v2_outputs);
   if (bn_training_update_v2_outputs.size() != kBNTrainingUpdateV2OutputNum) {
     MS_LOG(EXCEPTION) << "The output size of node bn_training_reduce must be " << kBNTrainingUpdateV2OutputNum
-                      << ", but it is " << bn_training_update_v2_outputs.size()
-                      << " trace: " << trace::DumpSourceLines(node);
+                      << ", but it is " << bn_training_update_v2_outputs.size() << trace::DumpSourceLines(node);
   }
   auto manager = func_graph->manager();
   MS_EXCEPTION_IF_NULL(manager);

@@ -64,17 +64,17 @@ AnfNodePtr BatchNormGradInferFission::CreateBNInferGrad(const FuncGraphPtr &func
   auto iter_input0 = (*equiv).find(input0_var_);
   if (iter_input0 == (*equiv).end()) {
     MS_LOG(EXCEPTION) << "The equiv map is expected to contains the input0 var after matched."
-                      << " trace: " << trace::DumpSourceLines(bn_grad);
+                      << trace::DumpSourceLines(bn_grad);
   }
   auto iter_input2 = (*equiv).find(input2_var_);
   if (iter_input2 == (*equiv).end()) {
     MS_LOG(EXCEPTION) << "The equiv map is expected to contains the input2 var after matched."
-                      << " trace: " << trace::DumpSourceLines(bn_grad);
+                      << trace::DumpSourceLines(bn_grad);
   }
   auto iter_input4 = (*equiv).find(input4_var_);
   if (iter_input4 == (*equiv).end()) {
     MS_LOG(EXCEPTION) << "The equiv map is expected to contains the input4 var after matched."
-                      << " trace: " << trace::DumpSourceLines(bn_grad);
+                      << trace::DumpSourceLines(bn_grad);
   }
   std::vector<AnfNodePtr> bn_infer_grad_inputs = {
     NewValueNode(std::make_shared<Primitive>(kBNInferGradOpName)), utils::cast<AnfNodePtr>(iter_input0->second),
@@ -86,7 +86,7 @@ AnfNodePtr BatchNormGradInferFission::CreateBNInferGrad(const FuncGraphPtr &func
   MS_EXCEPTION_IF_NULL(bn_grad_abstract_tuple);
   if (bn_grad_abstract_tuple->elements().empty()) {
     MS_LOG(EXCEPTION) << "The abstract tuple of node " << bn_grad->DebugString() << "should not be empty"
-                      << " trace: " << trace::DumpSourceLines(bn_grad);
+                      << trace::DumpSourceLines(bn_grad);
   }
   bn_infer_grad->set_abstract(bn_grad_abstract_tuple->elements()[0]);
   AnfAlgo::CopyNodeAttr(kAttrEpsilon, bn_grad, bn_infer_grad);
@@ -104,22 +104,22 @@ AnfNodePtr BatchNormGradInferFission::CreateBNTrainingUpdateGrad(const FuncGraph
   auto iter_input0 = (*equiv).find(input0_var_);
   if (iter_input0 == (*equiv).end()) {
     MS_LOG(EXCEPTION) << "The equiv map is expected to contains the input0 var after matched."
-                      << " trace: " << trace::DumpSourceLines(bn_grad);
+                      << trace::DumpSourceLines(bn_grad);
   }
   auto iter_input1 = (*equiv).find(input1_var_);
   if (iter_input1 == (*equiv).end()) {
     MS_LOG(EXCEPTION) << "The equiv map is expected to contains the input1 var after matched."
-                      << " trace: " << trace::DumpSourceLines(bn_grad);
+                      << trace::DumpSourceLines(bn_grad);
   }
   auto iter_input3 = (*equiv).find(input3_var_);
   if (iter_input3 == (*equiv).end()) {
     MS_LOG(EXCEPTION) << "The equiv map is expected to contains the input3 var after matched."
-                      << " trace: " << trace::DumpSourceLines(bn_grad);
+                      << trace::DumpSourceLines(bn_grad);
   }
   auto iter_input4 = (*equiv).find(input4_var_);
   if (iter_input4 == (*equiv).end()) {
     MS_LOG(EXCEPTION) << "The equiv map is expected to contains the input4 var after matched."
-                      << " trace: " << trace::DumpSourceLines(bn_grad);
+                      << trace::DumpSourceLines(bn_grad);
   }
   std::vector<AnfNodePtr> bn_training_update_grad_inputs = {
     NewValueNode(std::make_shared<Primitive>(kBNTrainingUpdateGradOpName)),

@@ -59,8 +59,7 @@ void BatchNormGradSplit::CreateOutputsOfReduceGrad(const FuncGraphPtr &graph, co
   CheckCNodeInputSize(bn_grad_node, kBNGradInputTensorNum);
   if (bn_update_grad_outputs.size() != kBNTrainingUpdateGradOutputNum) {
     MS_LOG(EXCEPTION) << "Outputs of BNTrainingReduceGrad has wrong size, should be " << kBNTrainingUpdateGradOutputNum
-                      << ", but got " << bn_update_grad_outputs.size()
-                      << ". trace: " << trace::DumpSourceLines(bn_grad_node);
+                      << ", but got " << bn_update_grad_outputs.size() << trace::DumpSourceLines(bn_grad_node);
   }
   std::vector<AnfNodePtr> bn_reduce_grad_inputs = {
     NewValueNode(std::make_shared<Primitive>(kBNTrainingReduceGradOpName)),
@@ -99,8 +98,7 @@ const AnfNodePtr BatchNormGradSplit::Process(const FuncGraphPtr &func_graph, con
   auto primitive = AnfAlgo::GetCNodePrimitive(cnode);
   MS_EXCEPTION_IF_NULL(primitive);
   if (!primitive->HasAttr(kAttrIsTraining)) {
-    MS_LOG(INFO) << "Op BatchNormGrad must have attrs of is_training"
-                 << " trace: " << trace::DumpSourceLines(node);
+    MS_LOG(INFO) << "Op BatchNormGrad must have attrs of is_training" << trace::DumpSourceLines(node);
     return nullptr;
   }
   if (!AnfAlgo::GetNodeAttr<bool>(cnode, kAttrIsTraining)) {
@@ -112,14 +110,14 @@ const AnfNodePtr BatchNormGradSplit::Process(const FuncGraphPtr &func_graph, con
   CreateOutputsOfUpdateGrad(func_graph, cnode, &bn_update_grad_outputs);
   if (bn_update_grad_outputs.size() != kBNTrainingUpdateGradOutputNum) {
     MS_LOG(EXCEPTION) << "Outputs of bn_update_grad has wrong size, should be " << kBNTrainingUpdateGradOutputNum
-                      << ", but got " << bn_update_grad_outputs.size() << ". trace: " << trace::DumpSourceLines(node);
+                      << ", but got " << bn_update_grad_outputs.size() << trace::DumpSourceLines(node);
   }

   std::vector<AnfNodePtr> bn_reduce_grad_outputs;
   CreateOutputsOfReduceGrad(func_graph, cnode, bn_update_grad_outputs, &bn_reduce_grad_outputs);
   if (bn_reduce_grad_outputs.size() != kSingleOutputNum) {
     MS_LOG(EXCEPTION) << "Outputs of bn_reduce_grad has wrong size, should be " << kSingleOutputNum << ", but got "
-                      << bn_reduce_grad_outputs.size() << ". trace: " << trace::DumpSourceLines(node);
+                      << bn_reduce_grad_outputs.size() << trace::DumpSourceLines(node);
   }

   std::vector<AnfNodePtr> make_tuple_inputs = {NewValueNode(prim::kPrimMakeTuple), bn_reduce_grad_outputs[0],

@@ -59,8 +59,7 @@ void BnGradSplit::CreateOutputsOfReduceGrad(const FuncGraphPtr &graph, const CNo
   CheckCNodeInputSize(bn_grad_node, kBNGradInputTensorNum);
   if (bn_update_grad_outputs.size() != kBNTrainingUpdateGradOutputNum) {
     MS_LOG(EXCEPTION) << "Outputs of bn_update_grad has wrong size, should be " << kBNTrainingUpdateGradOutputNum
-                      << ", but got " << bn_update_grad_outputs.size()
-                      << ". trace: " << trace::DumpSourceLines(bn_grad_node);
+                      << ", but got " << bn_update_grad_outputs.size() << trace::DumpSourceLines(bn_grad_node);
   }
   std::vector<AnfNodePtr> bn_reduce_grad_inputs = {
     NewValueNode(std::make_shared<Primitive>(kBNTrainingReduceGradOpName)),
@@ -90,14 +89,14 @@ CNodePtr BnGradSplit::BNGradSplitForTBE(const FuncGraphPtr &func_graph, const CN
   CreateOutputsOfUpdateGrad(func_graph, cnode, &bn_update_grad_outputs);
   if (bn_update_grad_outputs.size() != kBNTrainingUpdateGradOutputNum) {
     MS_LOG(EXCEPTION) << "Outputs of bn_update_grad has wrong size, should be " << kBNTrainingUpdateGradOutputNum
-                      << ", but got " << bn_update_grad_outputs.size() << ". trace: " << trace::DumpSourceLines(cnode);
+                      << ", but got " << bn_update_grad_outputs.size() << trace::DumpSourceLines(cnode);
   }

   std::vector<AnfNodePtr> bn_reduce_grad_outputs;
   CreateOutputsOfReduceGrad(func_graph, cnode, bn_update_grad_outputs, &bn_reduce_grad_outputs);
   if (bn_reduce_grad_outputs.size() != 1) {
     MS_LOG(EXCEPTION) << "Outputs of bn_reduce_grad has wrong size, should be " << 1 << ", but got "
-                      << bn_reduce_grad_outputs.size() << ". trace: " << trace::DumpSourceLines(cnode);
+                      << bn_reduce_grad_outputs.size() << trace::DumpSourceLines(cnode);
   }

   std::vector<AnfNodePtr> make_tuple_inputs = {NewValueNode(prim::kPrimMakeTuple), bn_reduce_grad_outputs[0],
@@ -115,7 +114,7 @@ CNodePtr SyncBnGradSplit::SyncBNGradSplitForTBE(const FuncGraphPtr &func_graph,
   CreateOutputsOfUpdateGrad(func_graph, cnode, &bn_update_grad_outputs);
   if (bn_update_grad_outputs.size() != kBNTrainingUpdateGradOutputNum) {
     MS_LOG(EXCEPTION) << "Outputs of bn_update_grad has wrong size, should be " << kBNTrainingUpdateGradOutputNum
-                      << ", but got " << bn_update_grad_outputs.size() << ". trace: " << trace::DumpSourceLines(cnode);
+                      << ", but got " << bn_update_grad_outputs.size() << trace::DumpSourceLines(cnode);
   }

   std::vector<AnfNodePtr> allreduce_mul_outputs;
@@ -128,7 +127,7 @@ CNodePtr SyncBnGradSplit::SyncBNGradSplitForTBE(const FuncGraphPtr &func_graph,
   CreateOutputsOfReduceGrad(func_graph, cnode, allreduce_mul_outputs, &bn_reduce_grad_outputs);
   if (bn_reduce_grad_outputs.size() != 1) {
     MS_LOG(EXCEPTION) << "Outputs of bn_reduce_grad has wrong size, should be " << 1 << ", but got "
-                      << bn_reduce_grad_outputs.size() << ". trace: " << trace::DumpSourceLines(cnode);
+                      << bn_reduce_grad_outputs.size() << trace::DumpSourceLines(cnode);
   }

   std::vector<AnfNodePtr> make_tuple_inputs = {NewValueNode(prim::kPrimMakeTuple), bn_reduce_grad_outputs[0],

@@ -74,8 +74,7 @@ AnfNodePtr BnSplit::CreateOutputsOfBNTrainingUpdate(const FuncGraphPtr &graph, c
   MS_EXCEPTION_IF_NULL(bn_cnode);
   CheckCNodeInputSize(bn_cnode, kBnInputTensorNum);
   if (bn_training_reduce_outputs.size() != kBNTrainingReduceOutputNum) {
-    MS_LOG(EXCEPTION) << "BN1 outputs has wrong input size"
-                      << " trace: " << trace::DumpSourceLines(bn_cnode);
+    MS_LOG(EXCEPTION) << "BN1 outputs has wrong input size" << trace::DumpSourceLines(bn_cnode);
   }
   // the inputs of BNTrainingUpdate are from the outputs of BNTrainingReduce and the inputs of BN
   std::vector<AnfNodePtr> bn_training_update_inputs = {
@@ -118,8 +117,7 @@ AnfNodePtr BnSplit::SplitBatchNormForTBE(const FuncGraphPtr &func_graph, const A
     return nullptr;
   }
   if (bn_training_reduce_outputs.size() != kBN1OutputNum) {
-    MS_LOG(EXCEPTION) << "make outputs of op BNTrainingReduce fail"
-                      << " trace: " << trace::DumpSourceLines(node);
+    MS_LOG(EXCEPTION) << "make outputs of op BNTrainingReduce fail" << trace::DumpSourceLines(node);
   }

   // Create BNTrainingUpdate node
@@ -143,8 +141,7 @@ AnfNodePtr SyncBnSplit::SyncBNSplitForTBE(const FuncGraphPtr &func_graph, const
     return nullptr;
   }
   if (bn_training_reduce_outputs.size() != kBN1OutputNum) {
-    MS_LOG(EXCEPTION) << "make outputs of op BNTrainingReduce fail"
-                      << " trace: " << trace::DumpSourceLines(node);
+    MS_LOG(EXCEPTION) << "make outputs of op BNTrainingReduce fail" << trace::DumpSourceLines(node);
   }

   std::vector<AnfNodePtr> allreduce_mul_outputs;
@@ -161,13 +158,13 @@ AnfNodePtr CreateValueNodeOfDeviceNumReciprocal(const FuncGraphPtr &graph, const
   MS_EXCEPTION_IF_NULL(graph);
   MS_EXCEPTION_IF_NULL(sync_bn_cnode);
   if (!AnfAlgo::HasNodeAttr(kDeviceNum, sync_bn_cnode)) {
-    MS_LOG(EXCEPTION) << "The node [" << sync_bn_cnode->DebugString()
-                      << "] does not have attr device_num. trace: " << trace::DumpSourceLines(sync_bn_cnode);
+    MS_LOG(EXCEPTION) << "The node [" << sync_bn_cnode->DebugString() << "] does not have attr device_num."
+                      << trace::DumpSourceLines(sync_bn_cnode);
   }
   auto device_num = AnfAlgo::GetNodeAttr<int64_t>(sync_bn_cnode, kDeviceNum);
   if (device_num == 0) {
-    MS_LOG(EXCEPTION) << "The device_num attr of node [" << sync_bn_cnode->DebugString()
-                      << "] should not be 0. trace: " << trace::DumpSourceLines(sync_bn_cnode);
+    MS_LOG(EXCEPTION) << "The device_num attr of node [" << sync_bn_cnode->DebugString() << "] should not be 0."
+                      << trace::DumpSourceLines(sync_bn_cnode);
   }
   MS_LOG(INFO) << "device_num value: " << device_num;
   const float device_num_reciprocal = 1.0 / device_num;
@@ -226,8 +223,8 @@ AnfNodePtr CreateAllReduceAndMul(const FuncGraphPtr &graph, const AnfNodePtr &al
   auto sync_bn_opname = sync_bn_cnode->fullname_with_scope();
   auto opid_pos = sync_bn_opname.rfind("-op");
   if (opid_pos == std::string::npos || opid_pos + kPositionOffset >= sync_bn_opname.size()) {
-    MS_LOG(EXCEPTION) << "Op[" << sync_bn_cnode->DebugString()
-                      << "] has no opid. trace: " << trace::DumpSourceLines(sync_bn_cnode);
+    MS_LOG(EXCEPTION) << "Op[" << sync_bn_cnode->DebugString() << "] has no opid."
+                      << trace::DumpSourceLines(sync_bn_cnode);
     return nullptr;
   }
   int64_t opid = std::stol(sync_bn_opname.substr(opid_pos + kPositionOffset));

@@ -53,7 +53,7 @@ AnfNodePtr ConcatFission::CreateNewConcat(const FuncGraphPtr &func_graph, const
   if (axis_from_attr < 0 || axis_from_attr >= SizeToLong(output_shape.size()) ||
       axis_from_attr >= SizeToLong(input_shape.size())) {
     MS_LOG(EXCEPTION) << "The concat_dim value " << axis_from_attr << "is out of range"
-                      << " trace: " << trace::DumpSourceLines(origin_concat_cnode);
+                      << trace::DumpSourceLines(origin_concat_cnode);
   }
   auto axis = LongToSize(axis_from_attr);
   output_shape[axis] = 0;

@@ -270,7 +270,7 @@ AnfNodePtr DynamicGRUV2GradFission::AddHConcatNode(const FuncGraphPtr &func_grap
   CreateMultipleOutputsOfAnfNode(func_graph, splitv, kSplitVOutputNum, &splitv_outputs);
   if (splitv_outputs.size() != kSplitVOutputNum) {
     MS_LOG(EXCEPTION) << "Create outputs of node " << splitv->DebugString() << " failed"
-                      << " trace: " << trace::DumpSourceLines(splitv);
+                      << trace::DumpSourceLines(splitv);
   }
   std::vector<AnfNodePtr> concat_inputs = {NewValueNode(std::make_shared<Primitive>(prim::kPrimConcat->name()))};
   auto init_h_reshape = CreateHReshape(func_graph, dynamic_gru_v2_grad_cnode->input(input_index["init_h"]));

@@ -398,7 +398,7 @@ AnfNodePtr DynamicRnnGradFissionV2::CreateHConcat(const FuncGraphPtr &func_graph
   CreateMultipleOutputsOfAnfNode(func_graph, splitv, kSplitVOutputNum, &splitv_outputs);
   if (splitv_outputs.size() != kSplitVOutputNum) {
     MS_LOG(EXCEPTION) << "Create outputs of node " << splitv->DebugString() << " failed"
-                      << " trace: " << trace::DumpSourceLines(dynamic_rnn_grad_cnode);
+                      << trace::DumpSourceLines(dynamic_rnn_grad_cnode);
   }
   auto origin_input4 = dynamic_rnn_grad_cnode->input(kIndex5);
   auto origin_input4_shape = AnfAlgo::GetOutputInferShape(origin_input4, 0);

@@ -65,19 +65,19 @@ CNodePtr GatherV2DsFission::CreatePad(const FuncGraphPtr &graph, const CNodePtr
   auto param_abstract_shape = origin_node->input(1)->Shape();
   MS_EXCEPTION_IF_NULL(param_abstract_shape);
   if (!param_abstract_shape->isa<abstract::Shape>()) {
-    MS_LOG(EXCEPTION) << "The node [" << origin_node->DebugString()
-                      << "]'s first input has wrong shape type. trace: " << trace::DumpSourceLines(origin_node);
+    MS_LOG(EXCEPTION) << "The node [" << origin_node->DebugString() << "]'s first input has wrong shape type."
+                      << trace::DumpSourceLines(origin_node);
   }
   auto param_dyn_shape = param_abstract_shape->cast<abstract::ShapePtr>();
   ShapeVector shape(param_dyn_shape->shape());
   if (shape.empty()) {
-    MS_LOG(EXCEPTION) << "The shape of node [" << origin_node->DebugString()
-                      << "]'s first input is empty. trace: " << trace::DumpSourceLines(origin_node);
+    MS_LOG(EXCEPTION) << "The shape of node [" << origin_node->DebugString() << "]'s first input is empty."
+                      << trace::DumpSourceLines(origin_node);
   }
   if (shape[shape.size() - 1] == -1) {
     MS_LOG(EXCEPTION) << "The node [" << origin_node->DebugString()
                       << "]'s first input should not be dynamic, but got shape:" << shape
-                      << ". trace: " << trace::DumpSourceLines(origin_node);
+                      << trace::DumpSourceLines(origin_node);
   }
   shape[shape.size() - 1] = SizeToLong(pad_dim_size);
   auto type_id = AnfAlgo::GetPrevNodeOutputInferDataType(origin_node, 0);
@@ -118,7 +118,7 @@ CNodePtr GatherV2DsFission::CreateGatherV2Ds(const FuncGraphPtr &graph, const CN
   MS_EXCEPTION_IF_NULL(pad);
   if (origin_node->size() != kGatherInputNum) {
     MS_LOG(EXCEPTION) << "In dynamic shape scene, gatherv2 should have 3 inputs, but got " << origin_node->size()
-                      << ". trace: " << trace::DumpSourceLines(origin_node);
+                      << trace::DumpSourceLines(origin_node);
   }
   std::vector<AnfNodePtr> gatherv2_inputs = {NewValueNode(std::make_shared<Primitive>(prim::kPrimGather->name())), pad,
                                              origin_node->input(kGatherInputIndicesIndex),

@@ -53,8 +53,7 @@ CNodePtr LarsV2Fission::CreateLarsV2Update(const FuncGraphPtr &graph, const CNod
   MS_EXCEPTION_IF_NULL(graph);
   MS_EXCEPTION_IF_NULL(lars_v2);
   if (square_sum_all_outputs.size() != kSquareSumOutputNum) {
-    MS_LOG(EXCEPTION) << "square_sum_all_outputs' size not equal 2"
-                      << " trace: " << trace::DumpSourceLines(lars_v2);
+    MS_LOG(EXCEPTION) << "square_sum_all_outputs' size not equal 2" << trace::DumpSourceLines(lars_v2);
   }
   CheckCNodeInputSize(lars_v2, kLarsV2InputTensorNum);
   std::vector<AnfNodePtr> inputs = {NewValueNode(std::make_shared<Primitive>(kLarsV2UpdateOpName)),

@@ -111,8 +111,7 @@ const AnfNodePtr LayerNormGradSplit::Process(const FuncGraphPtr &graph, const An
   std::vector<AnfNodePtr> layer_norm_x_backprop_outputs;
   CreateOutputsOfLayerNormXBackpropV2(graph, cnode, &layer_norm_x_backprop_outputs, is_dynamic_shape);
   if (layer_norm_x_backprop_outputs.size() != kLayerNormXBackpropV2OutputNum) {
-    MS_LOG(EXCEPTION) << "layer_norm_grad_outputs has wrong size"
-                      << " trace: " << trace::DumpSourceLines(node);
+    MS_LOG(EXCEPTION) << "layer_norm_grad_outputs has wrong size" << trace::DumpSourceLines(node);
   }

   // create layer_norm_beta_gamma_backprop
@@ -120,8 +119,7 @@ const AnfNodePtr LayerNormGradSplit::Process(const FuncGraphPtr &graph, const An
   CreateOutputsOfLayerNormBetaGammaBackpropV2(graph, cnode, layer_norm_x_backprop_outputs[1],
                                               &layer_norm_beta_gamma_backprop_outputs, is_dynamic_shape);
   if (layer_norm_beta_gamma_backprop_outputs.size() != kLayerNormBetaGammaBackpropOutputNum) {
-    MS_LOG(EXCEPTION) << "layer_norm_beta_gamma_outputs has wrong size"
-                      << " trace: " << trace::DumpSourceLines(node);
+    MS_LOG(EXCEPTION) << "layer_norm_beta_gamma_outputs has wrong size" << trace::DumpSourceLines(node);
   }

   std::vector<AnfNodePtr> make_tuple_inputs = {NewValueNode(prim::kPrimMakeTuple), layer_norm_x_backprop_outputs[0],

@@ -40,7 +40,7 @@ tensor::TensorPtr CreateTensor(const AnfNodePtr &node) {
   }
   if (ksize.size() != kKernelSizeNum) {
     MS_LOG(EXCEPTION) << "kernel_size of MaxPool3DGradGrad must be five, but got " << ksize
-                      << ". trace: " << trace::DumpSourceLines(node);
+                      << trace::DumpSourceLines(node);
   }
   int64_t d = ksize[kDim2];
   int64_t h = ksize[kDim3];

@@ -46,7 +46,7 @@ AnfNodePtr PackFission::CreateNewPack(const FuncGraphPtr &func_graph, const CNod
   }
   if (axis < 0) {
     MS_LOG(EXCEPTION) << "The concat_dim value " << axis << "is out of range"
-                      << " trace: " << trace::DumpSourceLines(origin_pack_cnode);
+                      << trace::DumpSourceLines(origin_pack_cnode);
   }
   std::vector<size_t> new_shape = output_shape;
   auto axis_l = LongToSize(axis);

@@ -71,25 +71,25 @@ std::vector<ValueNodePtr> ConvertAttrToValueNode(const std::shared_ptr<kernel::O
   std::vector<ValueNodePtr> ret = {};
   auto attrs = op_info->attrs_ptr();
   if (attrs.empty()) {
-    MS_LOG(EXCEPTION) << "Node(" << cnode->DebugString()
-                      << ") doesn't have any attrs. trace: " << trace::DumpSourceLines(cnode);
+    MS_LOG(EXCEPTION) << "Node(" << cnode->DebugString() << ") doesn't have any attrs."
+                      << trace::DumpSourceLines(cnode);
   }
   for (const auto &attr : attrs) {
     if (!AnfAlgo::HasNodeAttr(attr->name(), cnode)) {
-      MS_LOG(EXCEPTION) << "Node(" << cnode->DebugString() << ") doesn't have attr(" << attr->name()
-                        << "). trace: " << trace::DumpSourceLines(cnode);
+      MS_LOG(EXCEPTION) << "Node(" << cnode->DebugString() << ") doesn't have attr(" << attr->name() << ")."
+                        << trace::DumpSourceLines(cnode);
     }
     auto attr_value = AnfAlgo::GetNodeAttr<int64_t>(cnode, attr->name());
     auto value_node = CreateValueNode(attr_value);
     if (value_node == nullptr) {
       MS_LOG(EXCEPTION) << "Create value node error, node: " << cnode->DebugString() << ", seed value: " << attr_value
-                        << ". trace: " << trace::DumpSourceLines(cnode);
+                        << trace::DumpSourceLines(cnode);
     }
     ret.emplace_back(value_node);
   }
   if (ret.empty()) {
-    MS_LOG(EXCEPTION) << "Node(" << cnode->DebugString()
-                      << ") doesn't have any matched attrs. trace: " << trace::DumpSourceLines(cnode);
+    MS_LOG(EXCEPTION) << "Node(" << cnode->DebugString() << ") doesn't have any matched attrs."
+                      << trace::DumpSourceLines(cnode);
   }
   return ret;
 }

@@ -33,7 +33,7 @@ AnfNodePtr SingleBatchNormFission::CreateBNTrainingReduce(const FuncGraphPtr &fu
   MS_EXCEPTION_IF_NULL(bn_cnode);
   if (bn_cnode->inputs().size() < kBatchNormRealInputNum + 1) {
     MS_LOG(EXCEPTION) << "The input size of node " + bn_cnode->DebugString() + " is less than "
-                      << (kBatchNormRealInputNum + 1) << " trace: " << trace::DumpSourceLines(bn);
+                      << (kBatchNormRealInputNum + 1) << trace::DumpSourceLines(bn);
   }
   std::vector<AnfNodePtr> bn_training_reduce_inputs = {
     NewValueNode(std::make_shared<Primitive>(kBNTrainingReduceOpName)), bn_cnode->input(1)};
@@ -59,12 +59,11 @@ AnfNodePtr SingleBatchNormFission::CreateBNTrainingUpdateV3(
   MS_EXCEPTION_IF_NULL(bn_cnode);
   if (bn_cnode->inputs().size() < kBatchNormRealInputNum + 1) {
     MS_LOG(EXCEPTION) << "The input size of node " + bn_cnode->DebugString() + " is less than "
-                      << (kBatchNormRealInputNum + 1) << " trace: " << trace::DumpSourceLines(bn);
+                      << (kBatchNormRealInputNum + 1) << trace::DumpSourceLines(bn);
   }
   if (bn_training_reduce_outputs.size() != kBNTrainingReduceOutputNum) {
     MS_LOG(EXCEPTION) << "The output size of node bn_training_reduce must be " << kBNTrainingReduceOutputNum
-                      << ", but it is " << bn_training_reduce_outputs.size()
-                      << " trace: " << trace::DumpSourceLines(bn);
+                      << ", but it is " << bn_training_reduce_outputs.size() << trace::DumpSourceLines(bn);
   }
   std::vector<AnfNodePtr> bn_training_update_v3_inputs = {
     NewValueNode(std::make_shared<Primitive>(kBNTrainingUpdateV3OpName)),
@@ -80,7 +79,7 @@ AnfNodePtr SingleBatchNormFission::CreateBNTrainingUpdateV3(
   MS_EXCEPTION_IF_NULL(bn_abstract_tuple);
   if (bn_abstract_tuple->elements().size() != kBnOutputNum) {
     MS_LOG(EXCEPTION) << "The abstract size of node bn must be " << kBnOutputNum << ", but it is "
-                      << bn_abstract_tuple->elements().size() << " trace: " << trace::DumpSourceLines(bn);
+                      << bn_abstract_tuple->elements().size() << trace::DumpSourceLines(bn);
   }
   bn_training_update_v3->set_abstract(bn->abstract());
   bn_training_update_v3->set_scope(bn->scope());

@@ -35,11 +35,11 @@ size_t GetSmallSplitSize(const AnfNodePtr &split_node, int64_t split_dim, int64_
     split_dim += SizeToLong(input_shape.size());
   }
   if (LongToSize(split_dim) >= input_shape.size()) {
-    MS_LOG(EXCEPTION) << "The split_dim value should be less than the shape size of input 0. trace: "
+    MS_LOG(EXCEPTION) << "The split_dim value should be less than the shape size of input 0."
                       << trace::DumpSourceLines(split_node);
   }
   if (num_split == 0) {
-    MS_LOG(EXCEPTION) << "Divisor 'num_split' should not be 0. trace: " << trace::DumpSourceLines(split_node);
+    MS_LOG(EXCEPTION) << "Divisor 'num_split' should not be 0." << trace::DumpSourceLines(split_node);
   }
   return input_shape[LongToSize(split_dim)] / LongToSize(num_split);
 }
@@ -93,7 +93,7 @@ void SetAttrAndAbstractForBaseSplitv(const CNodePtr &origin_cnode, const CNodePt
     split_dim += SizeToLong(output_shape.size());
   }
   if (split_dim < 0) {
-    MS_LOG(EXCEPTION) << "Error split dim: " << split_dim << ", trace: " << trace::DumpSourceLines(origin_cnode);
+    MS_LOG(EXCEPTION) << "Error split dim: " << split_dim << trace::DumpSourceLines(origin_cnode);
   }
   auto split_dim_l = LongToSize(split_dim);
   auto num_split_l = LongToSize(num_split);

@@ -240,7 +240,7 @@ const AnfNodePtr AdamApplyOneFusion::Process(const FuncGraphPtr &func_graph, con
     auto iter_sub0 = (*equiv).find(sub0_var_);
     if (iter_sub0 == (*equiv).end()) {
       MS_LOG(EXCEPTION) << "The equiv map is expected to contains the sub0 var after matched."
-                        << " trace: " << trace::DumpSourceLines(node);
+                        << trace::DumpSourceLines(node);
     }
     sub0 = utils::cast<AnfNodePtr>(iter_sub0->second);
   }
@@ -256,12 +256,12 @@ const AnfNodePtr AdamApplyOneFusion::Process(const FuncGraphPtr &func_graph, con
   auto iter_add0 = (*equiv).find(add0_var_);
   if (iter_add0 == (*equiv).end()) {
     MS_LOG(EXCEPTION) << "The equiv map is expected to contains the add0 var after matched."
-                      << " trace: " << trace::DumpSourceLines(node);
+                      << trace::DumpSourceLines(node);
   }
   auto iter_add1 = (*equiv).find(add1_var_);
   if (iter_add1 == (*equiv).end()) {
     MS_LOG(EXCEPTION) << "The equiv map is expected to contains the add1 var after matched."
-                      << " trace: " << trace::DumpSourceLines(node);
+                      << trace::DumpSourceLines(node);
   }
   auto add0 = utils::cast<AnfNodePtr>(iter_add0->second);
   MS_EXCEPTION_IF_NULL(add0);
@@ -277,7 +277,7 @@ const AnfNodePtr AdamApplyOneFusion::Process(const FuncGraphPtr &func_graph, con
   CreateMultipleOutputsOfAnfNode(func_graph, new_node, kAdamApplyOneOutputNum, &new_node_outputs);
   if (new_node_outputs.size() != kAdamApplyOneOutputNum) {
     MS_LOG(EXCEPTION) << "The output size of node " << new_node->DebugString() << " should be "
-                      << kAdamApplyOneOutputNum << " trace: " << trace::DumpSourceLines(node);
+                      << kAdamApplyOneOutputNum << trace::DumpSourceLines(node);
   }
   auto manager = func_graph->manager();
   MS_EXCEPTION_IF_NULL(manager);

@@ -279,7 +279,7 @@ const AnfNodePtr AdamApplyOneWithDecayRule::Process(const FuncGraphPtr &graph, c
     auto iter_sub0 = (*equiv).find(sub0_var_);
     if (iter_sub0 == (*equiv).end()) {
       MS_LOG(EXCEPTION) << "The equiv map is expected to contains the sub0 var after matched."
-                        << " trace: " << trace::DumpSourceLines(node);
+                        << trace::DumpSourceLines(node);
     }
     sub0 = utils::cast<AnfNodePtr>(iter_sub0->second);
   }
@@ -295,12 +295,12 @@ const AnfNodePtr AdamApplyOneWithDecayRule::Process(const FuncGraphPtr &graph, c
   auto iter_add0 = (*equiv).find(add0_var_);
   if (iter_add0 == (*equiv).end()) {
     MS_LOG(EXCEPTION) << "The equiv map is expected to contains the add0 var after matched."
-                      << " trace: " << trace::DumpSourceLines(node);
+                      << trace::DumpSourceLines(node);
   }
   auto iter_add1 = (*equiv).find(add1_var_);
   if (iter_add1 == (*equiv).end()) {
     MS_LOG(EXCEPTION) << "The equiv map is expected to contains the add1 var after matched."
-                      << " trace: " << trace::DumpSourceLines(node);
+                      << trace::DumpSourceLines(node);
   }
   auto add0 = utils::cast<AnfNodePtr>(iter_add0->second);
   MS_EXCEPTION_IF_NULL(add0);

@@ -108,8 +108,7 @@ const AnfNodePtr AddInputToOutput::Process(const FuncGraphPtr &func_graph, const
   MS_EXCEPTION_IF_NULL(new_abstract_tuple);
   CreateMultipleOutputsOfAnfNode(func_graph, cnode, new_abstract_tuple->size(), &new_outputs);
   if (new_outputs.size() != new_abstract_tuple->size()) {
-    MS_LOG(EXCEPTION) << "Failed to create outputs of " << cnode->DebugString()
-                      << " trace: " << trace::DumpSourceLines(node);
+    MS_LOG(EXCEPTION) << "Failed to create outputs of " << cnode->DebugString() << trace::DumpSourceLines(node);
   }
   return new_outputs[0];
 }

@@ -71,7 +71,7 @@ bool GetKernelSize(const AnfNodePtr &node, int64_t *kd, int64_t *kh, int64_t *kw
     *kh = kernel_size[kDim3];
     *kw = kernel_size[kDim4];
   } else {
-    MS_LOG(EXCEPTION) << "Unknown kernel size " << kernel_size.size() << ", trace: " << trace::DumpSourceLines(node);
+    MS_LOG(EXCEPTION) << "Unknown kernel size " << kernel_size.size() << trace::DumpSourceLines(node);
   }
   return true;
 }
@@ -99,7 +99,7 @@ bool GetStrideSize(const AnfNodePtr &node, int64_t *sd, int64_t *sh, int64_t *sw
     *sh = stride_size[kDim3];
     *sw = stride_size[kDim4];
   } else {
-    MS_LOG(EXCEPTION) << "Unknown strides size " << stride_size.size() << ", trace: " << trace::DumpSourceLines(node);
+    MS_LOG(EXCEPTION) << "Unknown strides size " << stride_size.size() << trace::DumpSourceLines(node);
   }
   return true;
 }
@@ -110,7 +110,7 @@ void GetAttrs(const AnfNodePtr &node, std::vector<int64_t> *pad_list, bool *coun
               int64_t *divisor_override) {
   MS_EXCEPTION_IF_NULL(node);
   if (!AnfAlgo::HasNodeAttr("pad_list", node->cast<CNodePtr>())) {
-    MS_LOG(EXCEPTION) << "AvgPool3D should has attr pad_list, trace: " << trace::DumpSourceLines(node);
+    MS_LOG(EXCEPTION) << "AvgPool3D should has attr pad_list" << trace::DumpSourceLines(node);
   }
   *pad_list = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(node, "pad_list");
   if (AnfAlgo::HasNodeAttr("count_include_pad", node->cast<CNodePtr>())) {
@@ -262,7 +262,7 @@ const AnfNodePtr AvgPool3DFusion::Process(const FuncGraphPtr &func_graph, const
   if (dims_in.size() < k5DInferDims || dims_out.size() < k5DInferDims) {
     MS_LOG(EXCEPTION) << "AvgPool3D's in_out infer shape dims can not be less " << k5DInferDims
                       << ", but got in_shape is " << dims_in.size() << "-D, out_shape is " << dims_out.size()
-                      << "-D. trace: " << trace::DumpSourceLines(node);
+                      << trace::DumpSourceLines(node);
   }
   auto fn = SizeToLong(dims_in[kDim0]);
   auto fc = SizeToLong(dims_in[kDim1]);
@@ -277,14 +277,14 @@ const AnfNodePtr AvgPool3DFusion::Process(const FuncGraphPtr &func_graph, const
   int64_t kh;
   int64_t kw;
   if (!GetKernelSize(avg_pool_3d_node, &kd, &kh, &kw)) {
-    MS_LOG(EXCEPTION) << "Get kernel size failed, trace: " << trace::DumpSourceLines(node);
+    MS_LOG(EXCEPTION) << "Get kernel size failed" << trace::DumpSourceLines(node);
   }
   // strides
   int64_t sd;
   int64_t sh;
   int64_t sw;
   if (!GetStrideSize(avg_pool_3d_node, &sd, &sh, &sw)) {
-    MS_LOG(EXCEPTION) << "Get stride size failed, trace: " << trace::DumpSourceLines(node);
+    MS_LOG(EXCEPTION) << "Get stride size failed" << trace::DumpSourceLines(node);
   }
   std::vector<int64_t> pad_list;
   bool count_include_pad = false;

@@ -44,22 +44,22 @@ void GetAttrs(const AnfNodePtr &node, std::vector<int64_t> *kernel_size, std::ve
   MS_EXCEPTION_IF_NULL(node);
   // attr kernel size
   if (!AnfAlgo::HasNodeAttr("kernel_size", node->cast<CNodePtr>())) {
-    MS_LOG(EXCEPTION) << "AvgPool3D should has attr kernel_size, trace: " << trace::DumpSourceLines(node);
+    MS_LOG(EXCEPTION) << "AvgPool3D should has attr kernel_size" << trace::DumpSourceLines(node);
   }
   *kernel_size = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(node, "kernel_size");
   // attr strides
   if (!AnfAlgo::HasNodeAttr("strides", node->cast<CNodePtr>())) {
-    MS_LOG(EXCEPTION) << "AvgPool3D should has attr strides, trace: " << trace::DumpSourceLines(node);
+    MS_LOG(EXCEPTION) << "AvgPool3D should has attr strides" << trace::DumpSourceLines(node);
   }
   *strides = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(node, "strides");
   // sttr pad_list
   if (!AnfAlgo::HasNodeAttr("pad_list", node->cast<CNodePtr>())) {
-    MS_LOG(EXCEPTION) << "AvgPool3D should has attr pad_list, trace: " << trace::DumpSourceLines(node);
+    MS_LOG(EXCEPTION) << "AvgPool3D should has attr pad_list" << trace::DumpSourceLines(node);
   }
   *pad_list = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(node, "pad_list");
   // attr origin input shape
   if (!AnfAlgo::HasNodeAttr("origin_input_shape", node->cast<CNodePtr>())) {
-    MS_LOG(EXCEPTION) << "AvgPool3D should has attr origin_input_shape, trace: " << trace::DumpSourceLines(node);
+    MS_LOG(EXCEPTION) << "AvgPool3D should has attr origin_input_shape" << trace::DumpSourceLines(node);
   }
   *origin_input_shape = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(node, "origin_input_shape");
   // attr count include pad

@@ -41,7 +41,7 @@ bool CheckSupported(const CNodePtr &conv_back_filter) {
   if (y_shape.size() != kNCHWShapeSize || x_shape.size() != kNCHWShapeSize || out_shape.size() != kNCHWShapeSize) {
     MS_LOG(EXCEPTION) << "The dim of Conv2dBackpropFilter's input and output should be 4, but got y_shape is "
                       << y_shape.size() << "-D, x_shape is " << x_shape.size() << "-D, out_shape is "
-                      << out_shape.size() << "-D. trace: " << trace::DumpSourceLines(conv_back_filter);
+                      << out_shape.size() << trace::DumpSourceLines(conv_back_filter);
   }
   const std::set<size_t> kSupportedBatchSize = {32, 256};
   if (kSupportedBatchSize.find(x_shape[0]) == kSupportedBatchSize.end()) {

@@ -36,8 +36,7 @@ AnfNodePtr GetMul0(const FuncGraphPtr &graph, const AnfNodePtr &input2, const An
   auto manager = graph->manager();
   MS_EXCEPTION_IF_NULL(manager);
   if (manager->node_users().find(input2) == manager->node_users().end()) {
-    MS_LOG(EXCEPTION) << "node has no output in manager"
-                      << " trace: " << trace::DumpSourceLines(input2);
+    MS_LOG(EXCEPTION) << "node has no output in manager" << trace::DumpSourceLines(input2);
   }

   AnfNodePtr mul0 = nullptr;

@@ -43,7 +43,7 @@ void GetBNOutput(const FuncGraphPtr &func_graph, const AnfNodePtr &bn, std::vect
   MS_EXCEPTION_IF_NULL(manager);
   if (manager->node_users().find(bn) == manager->node_users().end()) {
     MS_LOG(EXCEPTION) << "The bn node " << bn->DebugString() << " should has some outputs"
-                      << " trace: " << trace::DumpSourceLines(bn);
+                      << trace::DumpSourceLines(bn);
   }
   for (const auto &node_index : manager->node_users()[bn]) {
     const AnfNodePtr &output = node_index.first;
@@ -132,7 +132,7 @@ void FusedBatchNormFusion::GetBNTrainingUpdateAbstractList(const EquivPtr &equiv
   MS_EXCEPTION_IF_NULL(bn_abstract_tuple);
   if (bn_abstract_tuple->elements().size() < kBnOutputNum) {
     MS_LOG(EXCEPTION) << "The abstract size of node bn must not be less than " << kBnOutputNum << ", but it is "
-                      << bn_abstract_tuple->elements().size() << " trace: " << trace::DumpSourceLines(bn);
+                      << bn_abstract_tuple->elements().size() << trace::DumpSourceLines(bn);
   }
   auto variable_input0 = GetAnfNodeByVar(equiv, variable_input0_var_);
   auto variable_input1 = GetAnfNodeByVar(equiv, variable_input1_var_);
@@ -208,7 +208,7 @@ const AnfNodePtr FusedBatchNormFusion::Process(const FuncGraphPtr &func_graph, c
                                  &bn_training_update_outputs);
   if (bn_training_update_outputs.size() < kBNTrainingUpdateOutputNum) {
     MS_LOG(EXCEPTION) << "The output size of node bn must be " << kBNTrainingUpdateOutputNum << ", but it is "
-                      << bn_training_update_outputs.size() << " trace: " << trace::DumpSourceLines(node);
+                      << bn_training_update_outputs.size() << trace::DumpSourceLines(node);
   }
   // Replace old bn outputs with new outputs
   std::vector<AnfNodePtr> bn_outputs;

@@ -96,8 +96,7 @@ const AnfNodePtr LambNextMVWithDecayRule::Process(const FuncGraphPtr &func_graph
 auto manager = func_graph->manager();
 MS_EXCEPTION_IF_NULL(manager);
 if (manager->node_users().find(mul4) == manager->node_users().end()) {
-MS_LOG(EXCEPTION) << "The Mul4 should be used by at least another node input."
-<< " trace: " << trace::DumpSourceLines(node);
+MS_LOG(EXCEPTION) << "The Mul4 should be used by at least another node input." << trace::DumpSourceLines(node);
 }
 AnfNodeIndexSet mul4_outputs = manager->node_users()[mul4];
 auto iter = std::find_if(mul4_outputs.begin(), mul4_outputs.end(),
@@ -70,7 +70,7 @@ const AnfNodePtr LambNextRightRule::Process(const FuncGraphPtr &func_graph, cons
 auto iter_add1 = (*equiv).find(add1_var_);
 if (iter_add1 == (*equiv).end()) {
 MS_LOG(EXCEPTION) << "The equiv map is expected to contains the add1 var after matched."
-<< " trace: " << trace::DumpSourceLines(node);
+<< trace::DumpSourceLines(node);
 }
 auto add1 = utils::cast<AnfNodePtr>(iter_add1->second);
 MS_EXCEPTION_IF_NULL(add1);
@@ -36,7 +36,7 @@ void GetOutputCastNodes(const FuncGraphPtr &func_graph, const AnfNodePtr &node,
 MS_EXCEPTION_IF_NULL(output_cnode);
 if (AnfAlgo::GetCNodeName(output_cnode) != prim::kPrimTupleGetItem->name()) {
 MS_LOG(EXCEPTION) << "The output of node " << node->DebugString() << " should be "
-<< prim::kPrimTupleGetItem->name() << " trace: " << trace::DumpSourceLines(node);
+<< prim::kPrimTupleGetItem->name() << trace::DumpSourceLines(node);
 }
 if (manager->node_users().find(output) == manager->node_users().end() ||
 manager->node_users()[output].size() != 1) {
@@ -26,8 +26,7 @@ AnfNodePtr MatmulBiasaddFusion::CreateMatmulWithBias(const FuncGraphPtr &graph,
 const EquivPtr &equiv) const {
 auto matmul = GetAnfNodeByVar(equiv, matmul_var_);
 if (matmul == nullptr || !matmul->isa<CNode>()) {
-MS_LOG(EXCEPTION) << "Get CNode MatMul failed!"
-<< " trace: " << trace::DumpSourceLines(node);
+MS_LOG(EXCEPTION) << "Get CNode MatMul failed!" << trace::DumpSourceLines(node);
 }

 // If there is a side-effect operator in the fusion, do not merge
@@ -110,8 +110,7 @@ const AnfNodePtr SquareSumFusion::Process(const FuncGraphPtr &graph, const AnfNo
 auto manager = graph->manager();
 MS_EXCEPTION_IF_NULL(manager);
 if (manager->node_users().find(square_anf) == manager->node_users().end()) {
-MS_LOG(EXCEPTION) << "Square node has no output in NodeUsersMap"
-<< " trace: " << trace::DumpSourceLines(node);
+MS_LOG(EXCEPTION) << "Square node has no output in NodeUsersMap" << trace::DumpSourceLines(node);
 }
 AnfNodePtr ret_node = nullptr;
 if (manager->node_users()[square_anf].size() == 1) {
@@ -122,8 +121,7 @@ const AnfNodePtr SquareSumFusion::Process(const FuncGraphPtr &graph, const AnfNo
 std::vector<AnfNodePtr> square_sumv2_outputs;
 CreateMultipleOutputsOfAnfNode(graph, square_sumv2, kSquareSumv2OutputNum, &square_sumv2_outputs);
 if (square_sumv2_outputs.size() != kSquareSumv2OutputNum) {
-MS_LOG(EXCEPTION) << "make SquareSumV2 outputs fail"
-<< " trace: " << trace::DumpSourceLines(square_sumv2);
+MS_LOG(EXCEPTION) << "make SquareSumV2 outputs fail" << trace::DumpSourceLines(square_sumv2);
 }
 (void)manager->Replace(square, square_sumv2_outputs[1]);
 ret_node = square_sumv2_outputs[0];
@@ -39,7 +39,7 @@ void ChangePrimitiveToAllToAllV(const AnfNodePtr &node) {

 if (neighbor_exchange->size() == kCNodePrimitiveIdx) {
 MS_LOG(EXCEPTION) << "Inputs should not be empty for cnode " << node->DebugString()
-<< ". trace: " << trace::DumpSourceLines(neighbor_exchange);
+<< trace::DumpSourceLines(neighbor_exchange);
 }

 auto prim = GetValueNode<PrimitivePtr>(neighbor_exchange->input(kCNodePrimitiveIdx));
@@ -65,7 +65,7 @@ CNodePtr AllToAllUnifyMindIR::CreateSplitNode(const FuncGraphPtr &graph, const C

 if (all_to_all->size() <= kAllToAllInputIdx) {
 MS_LOG(EXCEPTION) << "Inputs should not be empty for cnode " << all_to_all->DebugString()
-<< ". trace: " << trace::DumpSourceLines(all_to_all);
+<< trace::DumpSourceLines(all_to_all);
 }
 auto all_to_all_input = all_to_all->input(kAllToAllInputIdx);
 std::vector<AnfNodePtr> split_input = {NewValueNode(std::make_shared<Primitive>(prim::kPrimSplitV->name())),
@@ -77,11 +77,11 @@ CNodePtr AllToAllUnifyMindIR::CreateSplitNode(const FuncGraphPtr &graph, const C
 split_dim = NormalizeDim(shape, split_dim);
 if (SizeToLong(shape.size()) <= split_dim) {
 MS_LOG(EXCEPTION) << "Invalid split dim " << split_dim << " is over the shape size " << shape.size()
-<< ". trace: " << trace::DumpSourceLines(all_to_all);
+<< trace::DumpSourceLines(all_to_all);
 }
 if (split_count == 0 || shape[LongToSize(split_dim)] % static_cast<size_t>(split_count) != 0) {
 MS_LOG(EXCEPTION) << "Invalid split count " << split_count << " cannot be divisible by shape[" << split_dim
-<< "] = " << shape[LongToSize(split_dim)] << ". trace: " << trace::DumpSourceLines(all_to_all);
+<< "] = " << shape[LongToSize(split_dim)] << trace::DumpSourceLines(all_to_all);
 }
 shape[LongToSize(split_dim)] /= static_cast<size_t>(split_count);
 std::vector<TypeId> dtypes(split_count, dtype);
@@ -105,8 +105,8 @@ CNodePtr AllToAllUnifyMindIR::CreateAllToAllvNode(const FuncGraphPtr &graph, con
 std::vector<AnfNodePtr> split_outputs;
 CreateMultipleOutputsOfAnfNode(graph, split, static_cast<size_t>(split_count), &split_outputs);
 if (split_outputs.empty()) {
-MS_LOG(EXCEPTION) << "The node " << split->DebugString()
-<< " should have at least one output, but got 0. trace: " << trace::DumpSourceLines(split);
+MS_LOG(EXCEPTION) << "The node " << split->DebugString() << " should have at least one output, but got 0."
+<< trace::DumpSourceLines(split);
 }
 std::vector<AnfNodePtr> all_to_all_v_input = {NewValueNode(std::make_shared<Primitive>(kAllToAllVOpName))};
 (void)all_to_all_v_input.insert(all_to_all_v_input.end(), split_outputs.begin(), split_outputs.end());
@@ -140,8 +140,8 @@ CNodePtr AllToAllUnifyMindIR::CreateConcatNode(const FuncGraphPtr &graph, const
 std::vector<AnfNodePtr> all_to_all_v_outputs;
 CreateMultipleOutputsOfAnfNode(graph, all_to_all_v, static_cast<size_t>(split_count), &all_to_all_v_outputs);
 if (all_to_all_v_outputs.empty()) {
-MS_LOG(EXCEPTION) << "The node " << all_to_all_v->DebugString()
-<< " should have at least one output, but got 0. trace: " << trace::DumpSourceLines(all_to_all_v);
+MS_LOG(EXCEPTION) << "The node " << all_to_all_v->DebugString() << " should have at least one output, but got 0."
+<< trace::DumpSourceLines(all_to_all_v);
 }
 std::vector<AnfNodePtr> concat_input = {NewValueNode(std::make_shared<Primitive>(kConcatOpName))};
 (void)concat_input.insert(concat_input.end(), all_to_all_v_outputs.begin(), all_to_all_v_outputs.end());
@@ -151,7 +151,7 @@ CNodePtr AllToAllUnifyMindIR::CreateConcatNode(const FuncGraphPtr &graph, const
 concat_dim = NormalizeDim(single_shape, concat_dim);
 if (LongToSize(concat_dim) >= single_shape.size()) {
 MS_LOG(EXCEPTION) << "Invalid concat dim " << concat_dim << " is greater than shape size " << single_shape.size()
-<< ". trace: " << trace::DumpSourceLines(all_to_all);
+<< trace::DumpSourceLines(all_to_all);
 }
 single_shape[LongToSize(concat_dim)] *= static_cast<size_t>(split_count);
 AnfAlgo::SetOutputInferTypeAndShape({AnfAlgo::GetOutputInferDataType(all_to_all_v_outputs[0], 0)}, {single_shape},
@@ -52,7 +52,7 @@ int64_t windowed_output_size(const AnfNodePtr &node, int64_t input_size, int64_t
 *pad_before = 0;
 *pad_after = 0;
 if (stride == 0) {
-MS_LOG(EXCEPTION) << "The stride of AvgPoolGrad should not be 0. trace: " << trace::DumpSourceLines(node);
+MS_LOG(EXCEPTION) << "The stride of AvgPoolGrad should not be 0" << trace::DumpSourceLines(node);
 return 0;
 }
 if (pad_mode == PadMode::VALID) {
@@ -79,7 +79,7 @@ std::vector<std::vector<float>> GetAssistInputMatrix(const AnfNodePtr &node, con
 // number of input that associate with output element.
 std::vector<std::vector<float>> assist_input_matrix;
 if (x_shape.size() < kShapeDimNum) {
-MS_LOG(EXCEPTION) << "The dim of x_shape should not be less than 4. trace: " << trace::DumpSourceLines(node);
+MS_LOG(EXCEPTION) << "The dim of x_shape should not be less than 4" << trace::DumpSourceLines(node);
 }
 std::vector<int64_t> in_shape_after_padding_2d = {x_shape[kDim2] + pad_top + pad_bottom,
 x_shape[kDim3] + pad_left + pad_right};
@@ -110,7 +110,7 @@ ValueNodePtr CreateMeanMatrixValueNode(const FuncGraphPtr &func_graph, const Anf
 if (x_shape.size() != kShapeDimNum || k_size.size() != kShapeDimNum || stride.size() != kShapeDimNum) {
 MS_LOG(EXCEPTION) << "The dim of x_shape, kernel_size and strides of AvgPoolGrad should be 4, but got x_shape:"
 << x_shape << ", kernel_size:" << k_size << ", strides:" << stride
-<< ". trace: " << trace::DumpSourceLines(node);
+<< trace::DumpSourceLines(node);
 }
 int64_t pad_top, pad_bottom, pad_left, pad_right;
 int64_t h_output =
@@ -166,7 +166,7 @@ ValueNodePtr CreateKernelMatrixValueNode(const FuncGraphPtr &func_graph, const A
 MS_EXCEPTION_IF_NULL(kernel_graph);
 if (x_shape.size() != kShapeDimNum || k_size.size() != kShapeDimNum) {
 MS_LOG(EXCEPTION) << "The dim of x_shape and kernel_size of AvgPoolGrad should be 4, but got x_shape:" << x_shape
-<< ", kernel_size:" << k_size << ". trace: " << trace::DumpSourceLines(node);
+<< ", kernel_size:" << k_size << trace::DumpSourceLines(node);
 }
 std::vector<int64_t> kernel_shape = {1, x_shape[kDim1], k_size[kDim2], k_size[kDim3]};
 auto data_size = std::accumulate(kernel_shape.begin(), kernel_shape.end(), int64_t(1), std::multiplies<int64_t>());
@@ -57,11 +57,11 @@ bool NeedUpdate(const CNodePtr &conv2d, std::vector<size_t> in_shape, std::vecto
 int64_t data_format;
 bool result = CheckAndConvertUtils::GetDataFormatEnumValue(data_format_ptr, &data_format);
 if (!result || data_format != Format::NCHW) {
-MS_LOG(EXCEPTION) << "Conv2D only supports NCHW when group > 1, trace: " << trace::DumpSourceLines(conv2d);
+MS_LOG(EXCEPTION) << "Conv2D only supports NCHW when group > 1" << trace::DumpSourceLines(conv2d);
 }
 if (in_shape.size() != kConv2DAxisNum || out_shape.size() != kConv2DAxisNum) {
 MS_LOG(EXCEPTION) << "Conv2D's input and output should have 4 axis, but got input axis num: " << in_shape.size()
-<< "output axis num: " << out_shape.size() << ". trace: " << trace::DumpSourceLines(conv2d);
+<< "output axis num: " << out_shape.size() << trace::DumpSourceLines(conv2d);
 }
 auto in_channel = in_shape[kDim1];
 auto out_channel = out_shape[kDim1];
@@ -115,7 +115,7 @@ CNodePtr CreateTranspose(const FuncGraphPtr &graph, const CNodePtr &conv2d, cons
 auto out_shape = AnfAlgo::GetOutputInferShape(input_node, 0);
 if (out_shape.size() != kConv2DAxisNum) {
 MS_LOG(EXCEPTION) << "Conv2D's output axis number should be " << kConv2DAxisNum << ", but got "
-<< out_shape.size() << ". trace: " << trace::DumpSourceLines(conv2d);
+<< out_shape.size() << trace::DumpSourceLines(conv2d);
 }
 std::swap(out_shape[kDim0], out_shape[kDim1]);
 auto shapes = {out_shape};
@@ -270,8 +270,7 @@ const AnfNodePtr Conv2DBackpropInputUnifyMindIR::Process(const FuncGraphPtr &gra
 // In pynative mode, input_sizes input will be convert to attr if Conv2DBackpropInput is a forward op.
 if (input_size != kConv2DBackpropInputNum && input_size != kConv2DBackpropInputNum - 1) {
 MS_LOG(EXCEPTION) << "Conv2DBackpropInput's input number should be " << kConv2DBackpropInputNum << " or "
-<< (kConv2DBackpropInputNum - 1) << ", but got " << input_size
-<< ". trace: " << trace::DumpSourceLines(node);
+<< (kConv2DBackpropInputNum - 1) << ", but got " << input_size << trace::DumpSourceLines(node);
 }
 auto transpose = CreateTranspose(graph, conv2d_backin, conv2d_backin->input(kIndex2), true, *this);
 auto depth_conv_backin = CreateDepthwiseConv2DBackpropInput(graph, conv2d_backin, transpose);
@@ -285,8 +284,7 @@ CNodePtr Conv2DBackpropFilterUnifyMindIR::CreateDepthwiseConv2DBackpropFilter(co
 MS_EXCEPTION_IF_NULL(conv2d_backfil);
 if (AnfUtils::GetInputTensorNum(conv2d_backfil) != kConv2DBackpropInputNum) {
 MS_LOG(EXCEPTION) << "Conv2DBackpropFilter's input number should be " << kConv2DBackpropInputNum << ", but got "
-<< AnfUtils::GetInputTensorNum(conv2d_backfil)
-<< ". trace: " << trace::DumpSourceLines(conv2d_backfil);
+<< AnfUtils::GetInputTensorNum(conv2d_backfil) << trace::DumpSourceLines(conv2d_backfil);
 }
 auto filter_size_node = conv2d_backfil->input(kIndex3);
 MS_EXCEPTION_IF_NULL(filter_size_node);
@@ -310,7 +308,7 @@ CNodePtr Conv2DBackpropFilterUnifyMindIR::CreateDepthwiseConv2DBackpropFilter(co
 std::vector<size_t> out_shape = AnfAlgo::GetOutputInferShape(conv2d_backfil, 0);
 if (out_shape.size() != kConv2DAxisNum) {
 MS_LOG(EXCEPTION) << "Conv2DBackpropFilter's output axis number should be " << kConv2DAxisNum << ", but got "
-<< out_shape.size() << ". trace: " << trace::DumpSourceLines(conv2d_backfil);
+<< out_shape.size() << trace::DumpSourceLines(conv2d_backfil);
 }
 std::swap(out_shape[0], out_shape[1]);
 auto shapes = {out_shape};
@@ -69,11 +69,11 @@ ValueNodePtr CreateKeepPorbValueNode(const FuncGraphPtr &func_graph, const AnfNo
 MS_EXCEPTION_IF_NULL(cnode);
 // Step1: get keep_prob
 if (!AnfAlgo::HasNodeAttr(kKeepProb, cnode)) {
-MS_LOG(EXCEPTION) << "Dropout node does not have attr: keep_prob. trace: " << trace::DumpSourceLines(node);
+MS_LOG(EXCEPTION) << "Dropout node does not have attr: keep_prob." << trace::DumpSourceLines(node);
 }
 if (AnfAlgo::GetCNodeName(cnode) == kDropoutOpName) {
 if (!AnfAlgo::HasNodeAttr(kSeed0, cnode) || !AnfAlgo::HasNodeAttr(kSeed1, cnode)) {
-MS_LOG(EXCEPTION) << "Dropout node does not have attr: seed0 or seed1. trace: " << trace::DumpSourceLines(node);
+MS_LOG(EXCEPTION) << "Dropout node does not have attr: seed0 or seed1." << trace::DumpSourceLines(node);
 }
 }
 auto keep_prob = AnfAlgo::GetNodeAttr<float>(node, kKeepProb);
@@ -36,7 +36,7 @@ void FakeLearnedScaleQuantPerLayerGradUnifyMindIR::CreateOutputsOfLSQPerLayerGra
 if (lsq_perlayer_grad_inputs.size() < kFakeLearnedScaleQuantGradInputNum) {
 MS_LOG(EXCEPTION) << "Lsq_perlayer_grad_node has wrong inputs size, should be not less than "
 << kFakeLearnedScaleQuantGradInputNum << ", but got " << lsq_perlayer_grad_inputs.size()
-<< ". trace: " << trace::DumpSourceLines(lsq_perlayer_grad_node);
+<< trace::DumpSourceLines(lsq_perlayer_grad_node);
 }
 std::vector<AnfNodePtr> lsq_perlayer_grad_d_inputs = {
 NewValueNode(std::make_shared<Primitive>(kFakeLearnedScaleQuantPerLayerGradDOpName)),
@@ -68,12 +68,12 @@ void FakeLearnedScaleQuantPerLayerGradUnifyMindIR::CreateOutputsOfLSQPerLayerRed
 if (lsq_perlayer_grad_inputs.size() < kFakeLearnedScaleQuantGradInputNum) {
 MS_LOG(EXCEPTION) << "Lsq_perlayer_grad_node has wrong inputs size, should be not less than "
 << kFakeLearnedScaleQuantGradInputNum << ", but got " << lsq_perlayer_grad_inputs.size()
-<< ". trace: " << trace::DumpSourceLines(lsq_perlayer_grad_node);
+<< trace::DumpSourceLines(lsq_perlayer_grad_node);
 }
 if (lsq_perlayer_grad_d_outputs.size() != kFakeLearnedScaleQuantGradDOutputNum) {
 MS_LOG(EXCEPTION) << "Lsq_perlayer_grad_d_outputs has wrong inputs size, should be "
 << kFakeLearnedScaleQuantGradDOutputNum << ", but got " << lsq_perlayer_grad_d_outputs.size()
-<< ". trace: " << trace::DumpSourceLines(lsq_perlayer_grad_node);
+<< trace::DumpSourceLines(lsq_perlayer_grad_node);
 }
 std::vector<AnfNodePtr> lsq_perlayer_reduce_grad_inputs = {
 NewValueNode(std::make_shared<Primitive>(kFakeLearnedScaleQuantPerLayerGradDReduceOpName)),
@@ -98,7 +98,7 @@ void FakeLearnedScaleQuantPerChannelGradUnifyMindIR::CreateOutputsOfLSQPerChanne
 if (lsq_perchannel_grad_inputs.size() < kFakeLearnedScaleQuantGradInputNum) {
 MS_LOG(EXCEPTION) << "Lsq_perchannel_grad_node has wrong inputs size, should be not less than "
 << kFakeLearnedScaleQuantGradInputNum << ", but got " << lsq_perchannel_grad_inputs.size()
-<< ". trace: " << trace::DumpSourceLines(lsq_perchannel_grad_node);
+<< trace::DumpSourceLines(lsq_perchannel_grad_node);
 }
 std::vector<AnfNodePtr> lsq_perchannel_grad_d_inputs = {
 NewValueNode(std::make_shared<Primitive>(kFakeLearnedScaleQuantPerChannelGradDOpName)),
@@ -131,12 +131,12 @@ void FakeLearnedScaleQuantPerChannelGradUnifyMindIR::CreateOutputsOfLSQPerChanne
 if (lsq_perchannel_grad_inputs.size() < kFakeLearnedScaleQuantGradInputNum) {
 MS_LOG(EXCEPTION) << "Lsq_perchannel_grad_node has wrong inputs size, should be not less than "
 << kFakeLearnedScaleQuantGradInputNum << ", but got " << lsq_perchannel_grad_inputs.size()
-<< ". trace: " << trace::DumpSourceLines(lsq_perchannel_grad_node);
+<< trace::DumpSourceLines(lsq_perchannel_grad_node);
 }
 if (lsq_perchannel_grad_d_outputs.size() != kFakeLearnedScaleQuantGradDOutputNum) {
 MS_LOG(EXCEPTION) << "Lsq_perchannel_grad_d_outputs has wrong inputs size, should be "
 << kFakeLearnedScaleQuantGradDOutputNum << ", but got " << lsq_perchannel_grad_inputs.size()
-<< ". trace: " << trace::DumpSourceLines(lsq_perchannel_grad_node);
+<< trace::DumpSourceLines(lsq_perchannel_grad_node);
 }
 std::vector<AnfNodePtr> lsq_perchannel_reduce_grad_inputs = {
 NewValueNode(std::make_shared<Primitive>(kFakeLearnedScaleQuantPerChannelGradDReduceOpName)),
@@ -172,7 +172,7 @@ const AnfNodePtr FakeLearnedScaleQuantPerLayerGradUnifyMindIR::Process(const Fun
 if (lsq_perlayer_grad_d_outputs.size() != kFakeLearnedScaleQuantGradOutputNum) {
 MS_LOG(EXCEPTION) << "Fake_learned_scale_quant_perlayer_grad_d_outputs has wrong inputs size, should be "
 << kFakeLearnedScaleQuantGradOutputNum << ", but got " << lsq_perlayer_grad_d_outputs.size()
-<< ". trace: " << trace::DumpSourceLines(node);
+<< trace::DumpSourceLines(node);
 }

 std::vector<AnfNodePtr> lsq_perlayer_reduce_grad_outputs;
@@ -181,7 +181,7 @@ const AnfNodePtr FakeLearnedScaleQuantPerLayerGradUnifyMindIR::Process(const Fun
 if (lsq_perlayer_reduce_grad_outputs.size() != kSingleOutputNum) {
 MS_LOG(EXCEPTION) << "Fake_learned_scale_quant_perlayer_reduce_grad_outputs has wrong inputs size, should be "
 << kSingleOutputNum << ", but got " << lsq_perlayer_reduce_grad_outputs.size()
-<< ". trace: " << trace::DumpSourceLines(node);
+<< trace::DumpSourceLines(node);
 }

 std::vector<AnfNodePtr> make_tuple_inputs = {NewValueNode(prim::kPrimMakeTuple), lsq_perlayer_grad_d_outputs[0],
@@ -211,7 +211,7 @@ const AnfNodePtr FakeLearnedScaleQuantPerChannelGradUnifyMindIR::Process(const F
 if (lsq_perchannel_grad_d_outputs.size() != kFakeLearnedScaleQuantGradOutputNum) {
 MS_LOG(EXCEPTION) << "Fake_learned_scale_quant_perchannel_grad_d_outputs has wrong inputs size, should be "
 << kFakeLearnedScaleQuantGradOutputNum << ", but got " << lsq_perchannel_grad_d_outputs.size()
-<< ". trace: " << trace::DumpSourceLines(node);
+<< trace::DumpSourceLines(node);
 }

 std::vector<AnfNodePtr> lsq_perchannel_reduce_grad_outputs;
@@ -220,7 +220,7 @@ const AnfNodePtr FakeLearnedScaleQuantPerChannelGradUnifyMindIR::Process(const F
 if (lsq_perchannel_reduce_grad_outputs.size() != kSingleOutputNum) {
 MS_LOG(EXCEPTION) << "Fake_learned_scale_quant_perchannel_reduce_grad_outputs has wrong inputs size, should be "
 << kSingleOutputNum << ", but got " << lsq_perchannel_reduce_grad_outputs.size()
-<< ". trace: " << trace::DumpSourceLines(node);
+<< trace::DumpSourceLines(node);
 }

 std::vector<AnfNodePtr> make_tuple_inputs = {NewValueNode(prim::kPrimMakeTuple), lsq_perchannel_grad_d_outputs[0],
@@ -38,7 +38,7 @@ CNodePtr GetMaxPool(const CNodePtr &maxpool_grad) {
 MS_EXCEPTION_IF_NULL(maxpool_grad);
 if (maxpool_grad->inputs().size() != kMaxPoolGradInputNum) {
 MS_LOG(EXCEPTION) << "MaxPoolGrad's input number should be " << (kMaxPoolGradInputNum - 1) << ", but got "
-<< (maxpool_grad->inputs().size() - 1) << ". trace: " << trace::DumpSourceLines(maxpool_grad);
+<< (maxpool_grad->inputs().size() - 1) << trace::DumpSourceLines(maxpool_grad);
 }
 auto maxpool_anf = maxpool_grad->input(kIndex2);
 MS_EXCEPTION_IF_NULL(maxpool_anf);
@@ -51,7 +51,7 @@ CNodePtr MaxPool2MaxPoolWithArgmax::CreateMaxPoolWithArgmax(const FuncGraphPtr &
 MS_EXCEPTION_IF_NULL(maxpool);
 if (maxpool->inputs().size() != kMaxPoolInputNum) {
 MS_LOG(EXCEPTION) << "MaxPool's input number should be " << (kMaxPoolInputNum - 1) << ", but got "
-<< (maxpool->inputs().size() - 1) << ". trace: " << trace::DumpSourceLines(maxpool);
+<< (maxpool->inputs().size() - 1) << trace::DumpSourceLines(maxpool);
 }
 std::vector<AnfNodePtr> maxpool_argmax_inputs = {NewValueNode(std::make_shared<Primitive>(kMaxPoolWithArgmaxOpName)),
 maxpool->input(kIndex1)};
@@ -75,7 +75,7 @@ CNodePtr MaxPool2MaxPoolWithArgmax::CreateMaxPoolGradWithArgmax(
 MS_EXCEPTION_IF_NULL(maxpool_grad);
 if (maxpool_grad->inputs().size() != kMaxPoolGradInputNum) {
 MS_LOG(EXCEPTION) << "MaxPoolGrad's input number should be " << (kMaxPoolGradInputNum - 1) << ", but got "
-<< (maxpool_grad->inputs().size() - 1) << ". trace: " << trace::DumpSourceLines(maxpool_grad);
+<< (maxpool_grad->inputs().size() - 1) << trace::DumpSourceLines(maxpool_grad);
 }
 // MaxPoolGrad's inputs are {input, output, grad_input}, MaxPoolGradWithArgmax's inputs are
 // {input, grad_input, argmax_output}
@@ -96,11 +96,11 @@ void MaxPool2MaxPoolWithArgmax::SetNodeAttrs(const CNodePtr &maxpool, const CNod
 auto ksize = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(maxpool, kAttrKernelSize);
 if (strides.size() != kMaxPoolAttrAxisNum) {
 MS_LOG(EXCEPTION) << "MaxPool's attr strides has wrong axis number, should be " << kMaxPoolAttrAxisNum
-<< ", but got " << strides.size() << ". trace: " << trace::DumpSourceLines(maxpool);
+<< ", but got " << strides.size() << trace::DumpSourceLines(maxpool);
 }
 if (ksize.size() != kMaxPoolAttrAxisNum) {
 MS_LOG(EXCEPTION) << "MaxPool's attr ksize has wrong axis number, should be " << kMaxPoolAttrAxisNum << ", but got "
-<< ksize.size() << ". trace: " << trace::DumpSourceLines(maxpool);
+<< ksize.size() << trace::DumpSourceLines(maxpool);
 }
 // note that strides and ksize change from (1, 1, x, y) to (1, x, y, 1)
 strides[kIndex1] = strides[kIndex2];
@@ -68,8 +68,7 @@ const AnfNodePtr MaxPoolWithArgmaxUnifyMindIR::Process(const FuncGraphPtr &graph
 auto argmax_shape = output_shape;
 if (argmax_shape.size() != kMaxPoolWithArgmaxShape || ksize.size() != kMaxPoolWithArgmaxShape) {
 MS_LOG(EXCEPTION) << "Argmax or kernel_size's shape dim should be equal to 4, but got argmax dim: "
-<< argmax_shape.size() << ", kernel_size dim: " << ksize.size()
-<< ". trace: " << trace::DumpSourceLines(node);
+<< argmax_shape.size() << ", kernel_size dim: " << ksize.size() << trace::DumpSourceLines(node);
 }
 argmax_shape[kDim2] = LongToSize(ksize[kDim1] * ksize[kDim2]);
 argmax_shape[kDim3] = (output_shape[kDim2] * output_shape[kDim3] + kAlignBytes - 1) / kAlignBytes + 1;
@@ -104,8 +103,7 @@ const AnfNodePtr MaxPoolGradWithArgmaxUnifyMindIR::Process(const FuncGraphPtr &g
 auto argmax_shape = AnfAlgo::GetOutputInferShape(tuple_getitem0_anf, 0);
 if (argmax_shape.size() != kMaxPoolWithArgmaxShape || ksize.size() != kMaxPoolWithArgmaxShape) {
 MS_LOG(EXCEPTION) << "Argmax or kernel_size's shape dim should be equal to 4, but got argmax dim: "
-<< argmax_shape.size() << ", kernel_size dim: " << ksize.size()
-<< ". trace: " << trace::DumpSourceLines(node);
+<< argmax_shape.size() << ", kernel_size dim: " << ksize.size() << trace::DumpSourceLines(node);
 }
 argmax_shape[kDim3] = (argmax_shape[kDim2] * argmax_shape[kDim3] + kAlignBytes - 1) / kAlignBytes + 1;
 argmax_shape[kDim2] = LongToSize(ksize[kDim1] * ksize[kDim2]);
@@ -368,7 +368,7 @@ std::vector<CNodePtr> NeighborExchangeV2UnifyMindIR::CreateSplitNodes(const Func
 if (neighbor_exchange_v2->size() <= kNeighborExchangeV2InputIdx) {
 MS_LOG(EXCEPTION) << "Invalid cnode " << neighbor_exchange_v2->DebugString() << " input size "
 << neighbor_exchange_v2->size() << ", should be " << kNeighborExchangeV2InputIdx
-<< ". trace: " << trace::DumpSourceLines(neighbor_exchange_v2);
+<< trace::DumpSourceLines(neighbor_exchange_v2);
 }
 std::vector<CNodePtr> split_nodes = {};

@@ -382,8 +382,8 @@ std::vector<CNodePtr> NeighborExchangeV2UnifyMindIR::CreateSplitNodes(const Func
 auto dtype = AnfAlgo::GetOutputInferDataType(neighbor_exchange_v2_input, 0);
 auto shape = AnfAlgo::GetOutputInferShape(neighbor_exchange_v2_input, 0);
 if (SizeToLong(shape.size()) != kShapeSize) { // only support NCHW now
-MS_LOG(EXCEPTION) << "Invalid shape size " << shape.size()
-<< ", only support NCHW input now! trace: " << trace::DumpSourceLines(neighbor_exchange_v2);
+MS_LOG(EXCEPTION) << "Invalid shape size " << shape.size() << ", only support NCHW input now!"
+<< trace::DumpSourceLines(neighbor_exchange_v2);
 }

 // splitv for top & bottom
@@ -419,8 +419,7 @@ std::vector<CNodePtr> NeighborExchangeV2UnifyMindIR::CreateSplitNodes(const Func
 CreateMultipleOutputsOfAnfNode(graph, split_nodes[0], static_cast<size_t>((*split_num)[0]),
 &split_outputs_top_bottom);
 if (split_outputs_top_bottom.empty()) {
-MS_LOG(EXCEPTION) << "The node " << split_nodes[0]->DebugString()
-<< " should have at least one output, but got 0. trace: "
+MS_LOG(EXCEPTION) << "The node " << split_nodes[0]->DebugString() << " should have at least one output, but got 0"
 << trace::DumpSourceLines(split_nodes[0]);
 }

@@ -613,8 +612,8 @@ CNodePtr NeighborExchangeV2UnifyMindIR::CreateConcatNodes(const FuncGraphPtr &gr
 CreateMultipleOutputsOfAnfNode(graph, all_to_all_v, static_cast<size_t>(all_to_all_output_num),
 &all_to_all_v_outputs);
 if (all_to_all_v_outputs.empty()) {
-MS_LOG(EXCEPTION) << "The node " << all_to_all_v->DebugString()
-<< " should have at least one output, but got 0. trace: " << trace::DumpSourceLines(all_to_all_v);
+MS_LOG(EXCEPTION) << "The node " << all_to_all_v->DebugString() << " should have at least one output, but got 0."
+<< trace::DumpSourceLines(all_to_all_v);
 }

 if (recv_rank_ids[kRankIdZero] == kInvalidId && recv_rank_ids[kRankIdFour] == kInvalidId) {
@@ -650,8 +649,7 @@ CNodePtr NeighborExchangeV2UnifyMindIR::CreateConcatNodes(const FuncGraphPtr &gr
 std::vector<AnfNodePtr> concat_left_outputs;
 CreateMultipleOutputsOfAnfNode(graph, concat_left, 1, &concat_left_outputs);
 if (concat_left_outputs.empty()) {
-MS_LOG(EXCEPTION) << "The node " << concat_left->DebugString()
-<< " should have at least one output, but got 0. trace: "
+MS_LOG(EXCEPTION) << "The node " << concat_left->DebugString() << " should have at least one output, but got 0."
 << trace::DumpSourceLines(concat_left);
 }
 concat_input_all.insert(concat_input_all.end(), concat_left_outputs.begin(), concat_left_outputs.end());
@@ -663,8 +661,7 @@ CNodePtr NeighborExchangeV2UnifyMindIR::CreateConcatNodes(const FuncGraphPtr &gr
 std::vector<AnfNodePtr> concat_middle_outputs;
 CreateMultipleOutputsOfAnfNode(graph, concat_middle, 1, &concat_middle_outputs);
 if (concat_middle_outputs.empty()) {
-MS_LOG(EXCEPTION) << "The node " << concat_middle->DebugString()
-<< " should have at least one output, but got 0. trace: "
+MS_LOG(EXCEPTION) << "The node " << concat_middle->DebugString() << " should have at least one output, but got 0."
 << trace::DumpSourceLines(concat_middle);
 }
 concat_input_all.insert(concat_input_all.end(), concat_middle_outputs.begin(), concat_middle_outputs.end());
@@ -677,8 +674,7 @@ CNodePtr NeighborExchangeV2UnifyMindIR::CreateConcatNodes(const FuncGraphPtr &gr
 std::vector<AnfNodePtr> concat_right_outputs;
 CreateMultipleOutputsOfAnfNode(graph, concat_right, 1, &concat_right_outputs);
 if (concat_right_outputs.empty()) {
-MS_LOG(EXCEPTION) << "The node " << concat_right->DebugString()
-<< " should have at least one output, but got 0. trace: "
+MS_LOG(EXCEPTION) << "The node " << concat_right->DebugString() << " should have at least one output, but got 0."
 << trace::DumpSourceLines(concat_right);
 }
 concat_input_all.insert(concat_input_all.end(), concat_right_outputs.begin(), concat_right_outputs.end());
@@ -707,15 +703,15 @@ std::vector<CNodePtr> NeighborExchangeV2GradUnifyMindIR::CreateSplitNodesForGrad
 if (neighbor_exchange_v2_grad->size() <= kNeighborExchangeV2InputIdx) {
 MS_LOG(EXCEPTION) << "Invalid cnode " << neighbor_exchange_v2_grad->DebugString() << " input size "
 << neighbor_exchange_v2_grad->size() << ", should be " << kNeighborExchangeV2InputIdx
-<< ". trace: " << trace::DumpSourceLines(neighbor_exchange_v2_grad);
+<< trace::DumpSourceLines(neighbor_exchange_v2_grad);
 }

 auto neighbor_exchange_v2_grad_input = neighbor_exchange_v2_grad->input(kNeighborExchangeV2InputIdx);
 auto dtype = AnfAlgo::GetOutputInferDataType(neighbor_exchange_v2_grad_input, 0);
 auto shape = AnfAlgo::GetOutputInferShape(neighbor_exchange_v2_grad_input, 0);
 if (SizeToLong(shape.size()) != kShapeSize) {
-MS_LOG(EXCEPTION) << "Invalid shape size " << shape.size()
-<< ", only support NCHW input now! trace: " << trace::DumpSourceLines(neighbor_exchange_v2_grad);
+MS_LOG(EXCEPTION) << "Invalid shape size " << shape.size() << ", only support NCHW input now!"
+<< trace::DumpSourceLines(neighbor_exchange_v2_grad);
 }

 std::vector<CNodePtr> split_nodes = {};
@@ -741,8 +737,7 @@ std::vector<CNodePtr> NeighborExchangeV2GradUnifyMindIR::CreateSplitNodesForGrad
 CreateMultipleOutputsOfAnfNode(graph, split_nodes[0], static_cast<size_t>(num_split_h), &split_outputs_top_bottom);
 if (split_outputs_top_bottom.empty()) {
 MS_LOG(EXCEPTION) << "The node " << split_nodes[0]->DebugString()
-<< " should have at least one output, but got 0. trace: "
-<< trace::DumpSourceLines(split_nodes[0]);
+<< " should have at least one output, but got 0." << trace::DumpSourceLines(split_nodes[0]);
 }
 size_split_h = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(split_nodes[0], kAttrSizeSplits);
 } else {
@@ -842,8 +837,8 @@ CNodePtr NeighborExchangeV2GradUnifyMindIR::CreateSplitGradNodes(const FuncGraph
 CreateMultipleOutputsOfAnfNode(graph, all_to_all_v, static_cast<size_t>(all_to_all_output_num),
 &all_to_all_v_outputs);
 if (all_to_all_v_outputs.empty()) {
-MS_LOG(EXCEPTION) << "The node " << all_to_all_v->DebugString()
-<< " should have at least one output, but got 0. trace: " << trace::DumpSourceLines(all_to_all_v);
+MS_LOG(EXCEPTION) << "The node " << all_to_all_v->DebugString() << " should have at least one output, but got 0."
+<< trace::DumpSourceLines(all_to_all_v);
 }
 // create pad nodes
 // slice begin & size
@@ -887,8 +882,8 @@ CNodePtr NeighborExchangeV2GradUnifyMindIR::CreateSplitGradNodes(const FuncGraph
 std::vector<AnfNodePtr> pad_outputs;
 CreateMultipleOutputsOfAnfNode(graph, pad, 1, &pad_outputs);
 if (pad_outputs.empty()) {
-MS_LOG(EXCEPTION) << "The node " << pad->DebugString()
-<< " should have at least one output, but got 0. trace: " << trace::DumpSourceLines(pad);
+MS_LOG(EXCEPTION) << "The node " << pad->DebugString() << " should have at least one output, but got 0."
+<< trace::DumpSourceLines(pad);
 }
 addn_inputs.insert(addn_inputs.end(), pad_outputs.begin(), pad_outputs.end());
 ++pad_num;
@@ -68,7 +68,7 @@ const AnfNodePtr SliceGradUnifyMindIR::Process(const FuncGraphPtr &graph, const
 if (input_num != kSliceGradInputTensorNum && input_num != kSliceGradCangjieInputTensorNum) {
 MS_LOG(EXCEPTION) << "The input tensor size[" << input_num
 << "] of node " + slice_grad->DebugString() + " is not equal to " << kSliceGradInputTensorNum
-<< " or " << kSliceGradCangjieInputTensorNum << ". trace: " << trace::DumpSourceLines(node);
+<< " or " << kSliceGradCangjieInputTensorNum << trace::DumpSourceLines(node);
 }
 std::vector<AnfNodePtr> pad_inputs = {NewValueNode(std::make_shared<Primitive>(kPadOpName)),
 slice_grad->input(kIndex1)};
@@ -93,7 +93,7 @@ const AnfNodePtr SliceGradUnifyMindIR::Process(const FuncGraphPtr &graph, const
 MS_LOG(EXCEPTION)
 << "For SliceGrad, x_shape dim number should be equal to len(begin) and len(size), but got x_shape dim: "
 << x_shape.size() << ", len(begin): " << begins.size() << ", len(size): " << sizes.size()
-<< ". trace: " << trace::DumpSourceLines(node);
+<< trace::DumpSourceLines(node);
 }
 std::vector<std::vector<int64_t>> paddings;
 for (size_t i = 0; i < x_shape.size(); ++i) {
@@ -63,8 +63,8 @@ CNodePtr CreateOneHot(const FuncGraphPtr &graph, const CNodePtr &sparse_softmax_
 size_t index = logits_shape.size() - 1;
 depth = SizeToLong(logits_shape[index]);
 } else {
-MS_LOG(EXCEPTION) << "Logits's shape of node [" << sparse_softmax_node->DebugString()
-<< "] is empty. trace: " << trace::DumpSourceLines(sparse_softmax_node);
+MS_LOG(EXCEPTION) << "Logits's shape of node [" << sparse_softmax_node->DebugString() << "] is empty"
+<< trace::DumpSourceLines(sparse_softmax_node);
 }

 auto value_on = std::make_shared<tensor::Tensor>(1.0, kFloat32);
@@ -127,7 +127,7 @@ CNodePtr CreateSoftmaxCrossEntropyWithLogits(const FuncGraphPtr &graph, const CN
 if (!labels_shape.empty()) {
 loss_shape.emplace_back(labels_shape[0]);
 } else {
-MS_LOG(EXCEPTION) << "One_hot output's shape is empty. trace: " << trace::DumpSourceLines(one_hot_node);
+MS_LOG(EXCEPTION) << "One_hot output's shape is empty." << trace::DumpSourceLines(one_hot_node);
 }

 auto shapes = {loss_shape, AnfAlgo::GetOutputInferShape(one_hot_node, 0)};
@@ -141,8 +141,7 @@ std::vector<int64_t> GetAxis(const AnfNodePtr &node) {
 MS_EXCEPTION_IF_NULL(node);
 std::vector<size_t> output_shape = AnfAlgo::GetOutputInferShape(node, 0);
 if (output_shape.empty()) {
-MS_LOG(EXCEPTION) << node->fullname_with_scope()
-<< "'s output shape is empty. trace: " << trace::DumpSourceLines(node);
+MS_LOG(EXCEPTION) << node->fullname_with_scope() << "'s output shape is empty" << trace::DumpSourceLines(node);
 }
 std::vector<int64_t> range;
 for (size_t i = 0; i < output_shape.size(); i++) {
@@ -311,7 +310,7 @@ CNodePtr CreateRealDiv(const FuncGraphPtr &graph, const CNodePtr &sparse_softmax
 std::vector<size_t> labels_shape = AnfAlgo::GetPrevNodeOutputInferShape(sparse_softmax_node, 1);
 if (labels_shape.size() != 1) {
 MS_LOG(EXCEPTION) << "Label's shape should be 1-D, but got " << labels_shape.size()
-<< ". trace: " << trace::DumpSourceLines(sparse_softmax_node);
+<< trace::DumpSourceLines(sparse_softmax_node);
 }
 auto y_value = static_cast<float>(labels_shape[0]);
 auto y = std::make_shared<tensor::Tensor>(y_value, kFloat32);
@@ -360,14 +359,13 @@ CNodePtr CreateMul(const FuncGraphPtr &graph, const CNodePtr &sparse_softmax_nod
 if (softmax_output_shape.size() != softmax_output_shape_size) {
 MS_LOG(EXCEPTION) << "SoftmaxCrossEntropyWithLogits the second output shape size should be "
 << softmax_output_shape_size << ", but got " << softmax_output_shape.size()
-<< ". trace: " << trace::DumpSourceLines(softmax_output_node);
+<< trace::DumpSourceLines(softmax_output_node);
 }
 ShapeVector tensor_shape;
 tensor_shape.emplace_back(softmax_output_shape[0]);
 tensor_shape.emplace_back(1);
 if (softmax_output_shape[0] == 0) {
-MS_LOG(EXCEPTION) << "output_shape[0] of softmax should not be 0. trace: "
-<< trace::DumpSourceLines(softmax_output_node);
+MS_LOG(EXCEPTION) << "output_shape[0] of softmax should not be 0" << trace::DumpSourceLines(softmax_output_node);
 }
 std::vector<float> tensor_value(softmax_output_shape[0], 1.0 / softmax_output_shape[0]);
 auto buf_size = sizeof(float) * tensor_value.size();
@@ -413,13 +411,13 @@ bool IsSparseSoftmaxCrossEntropyWithLogitsGrad(const CNodePtr &sparse, string pa
 if (AnfAlgo::GetCNodeName(sparse) != kSparseSoftmaxCrossEntropyWithLogitsOpName) {
 MS_LOG(EXCEPTION) << "The pass of " << pass_name << "'s input node should be "
 << kSparseSoftmaxCrossEntropyWithLogitsOpName << ", but got " << AnfAlgo::GetCNodeName(sparse)
-<< ". trace: " << trace::DumpSourceLines(sparse);
+<< trace::DumpSourceLines(sparse);
 }
 if (AnfAlgo::HasNodeAttr(kAttrIsGrad, sparse)) {
 return AnfAlgo::GetNodeAttr<bool>(sparse, kAttrIsGrad);
 } else {
 MS_LOG(EXCEPTION) << "Node of " << sparse->fullname_with_scope() << " does not have the attr " << kAttrIsGrad
-<< ", related pass: " << pass_name << ", trace: " << trace::DumpSourceLines(sparse);
+<< ", related pass: " << pass_name << trace::DumpSourceLines(sparse);
 }
 }
 } // namespace
@@ -153,7 +153,7 @@ void CheckCNodeInputSize(const CNodePtr &cnode, size_t input_tensor_size) {
 if (real_input_tensor_num != input_tensor_size) {
 MS_LOG(EXCEPTION) << "The input tensor size[" << real_input_tensor_num
 << "] of node [" + cnode->DebugString() + "] is not equal to " << input_tensor_size
-<< ". trace: " << trace::DumpSourceLines(cnode);
+<< trace::DumpSourceLines(cnode);
 }
 }

@@ -72,7 +72,7 @@ AnfNodePtr AddCastOpNodeToGraph(const FuncGraphPtr &func_graph, const AnfNodePtr
 MS_EXCEPTION_IF_NULL(cpu_dynamic_kernel);
 cpu_dynamic_kernel->Initialize();
 } catch (std::exception &e) {
-MS_LOG(EXCEPTION) << e.what() << "\nTrace: " << trace::DumpSourceLines(cast);
+MS_LOG(EXCEPTION) << e.what() << trace::DumpSourceLines(cast);
 }
 AnfAlgo::SetKernelMod(cpu_kernel, cast.get());
 return cast;
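The same simplification also reaches catch-and-rethrow sites like the one above, where the old code appended a hand-written "\nTrace: " label after `e.what()`. Here is a sketch under the same assumptions as before (a mock dump function that carries its own label, and `std::cerr` standing in for `MS_LOG(EXCEPTION)`); it is illustrative only, not MindSpore's real macro machinery.

```cpp
#include <iostream>
#include <stdexcept>
#include <string>

// Same assumption as the earlier sketch: the label travels with the dump itself.
std::string DumpSourceLines(const std::string &node) { return "\ntrace:\n" + node; }

// Hypothetical failing initialization, standing in for cpu_dynamic_kernel->Initialize().
void InitKernel() { throw std::runtime_error("cpu kernel init failed"); }

int main() {
  const std::string node = "Default/Cast-op0";  // hypothetical node name
  try {
    InitKernel();
  } catch (const std::exception &e) {
    // Post-refactor call-site shape: e.what() is streamed directly before the
    // dump, with no hand-written "\nTrace: " separator at the call site.
    std::cerr << e.what() << DumpSourceLines(node) << std::endl;
  }
  return 0;
}
```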
@@ -378,7 +378,7 @@ void MemReuseChecker::CheckNormalIR(const session::KernelGraph *graph) {
 if (i + 1 >= node->inputs().size()) {
 MS_LOG(EXCEPTION) << "Input index: " << i
 << " is larger than input number: " << AnfAlgo::GetInputTensorNum(node)
-<< ". trace: " << trace::DumpSourceLines(node);
+<< trace::DumpSourceLines(node);
 }
 auto real_input_index = AnfAlgo::GetRealInputIndex(node, i);
 auto input = node->input(real_input_index + 1);
@@ -42,7 +42,7 @@ const AnfNodePtr ConvTransposeToConvBackpropInputPass::Process(const FuncGraphPt

 if (conv_transpose->inputs().empty()) {
 MS_LOG(EXCEPTION) << "Cnode inputs should not be empty, cnode: " << node->DebugString()
-<< ", trace: " << trace::DumpSourceLines(conv_transpose);
+<< trace::DumpSourceLines(conv_transpose);
 }

 auto prim = GetValueNode<PrimitivePtr>(conv_transpose->input(kCNodePrimitiveIdx));
@@ -103,7 +103,7 @@ void AddMissingAttrs(const CNodePtr &cnode, kernel::OpImplyType imply_type,
 auto default_value = attr->default_value();
 if (default_value.empty()) {
 MS_LOG(EXCEPTION) << "attr [" << attr_name << "] in the registration information of op [" << op_name
-<< "] does not have a value. trace: " << trace::DumpSourceLines(cnode);
+<< "] does not have a value." << trace::DumpSourceLines(cnode);
 }
 ParseAttrDefaultValue(op_name, attr_name, default_value, attr->type(), primitive);
 need_update = true;
@@ -193,8 +193,7 @@ const AnfNodePtr OptimizeDependence::Process(const FuncGraphPtr &func_graph, con
 bool inputs_changed = false;
 for (auto index : candidate_inputs) {
 if (index >= new_inputs.size()) {
-MS_LOG(EXCEPTION) << "Index is out of the size of " << cnode->DebugString()
-<< " inputs. trace: " << trace::DumpSourceLines(cnode);
+MS_LOG(EXCEPTION) << "Index is out of the size of " << cnode->DebugString() << trace::DumpSourceLines(cnode);
 }
 auto replace_node = GetConvertNode(func_graph, cnode, index);
 if (replace_node != nullptr) {
@@ -48,7 +48,7 @@ bool WeightCheck(const AnfNodePtr &node) {
 for (auto index : iter->second) {
 if (index >= real_inputs.size()) {
 MS_LOG(EXCEPTION) << "index out of range. node: " << node->DebugString() << ", index: " << index
-<< real_inputs.size() << ". trace: " << trace::DumpSourceLines(node);
+<< real_inputs.size() << trace::DumpSourceLines(node);
 }

 if (real_inputs[index].first->isa<Parameter>() &&
@@ -256,7 +256,7 @@ KernelWithIndex AnfRuntimeAlgorithm::VisitKernelWithReturnType(const AnfNodePtr
 }
 if (opt::IsNopNode(cnode) && skip_nop_node) {
 if (cnode->size() != kNopNodeInputSize) {
-MS_LOG(EXCEPTION) << "Invalid nop node " << cnode->DebugString() << " trace: " << trace::DumpSourceLines(cnode);
+MS_LOG(EXCEPTION) << "Invalid nop node " << cnode->DebugString() << trace::DumpSourceLines(cnode);
 }
 return VisitKernelWithReturnType(cnode->input(kNopNodeRealInputIndex), 0, skip_nop_node, return_types);
 }
@@ -471,8 +471,7 @@ std::string AnfRuntimeAlgorithm::GetNodeDebugString(const AnfNodePtr &node) {
 void AnfRuntimeAlgorithm::SetNodeAttr(const std::string &key, const ValuePtr &value, const AnfNodePtr &node) {
 MS_EXCEPTION_IF_NULL(node);
 if (!node->isa<CNode>()) {
-MS_LOG(EXCEPTION) << "Only cnode has attr, but this anf is " << node->DebugString()
-<< " trace: " << trace::DumpSourceLines(node);
+MS_LOG(EXCEPTION) << "Only cnode has attr, but this anf is " << node->DebugString() << trace::DumpSourceLines(node);
 }
 // single op cnode.
 auto primitive = AnfAlgo::GetCNodePrimitive(node);
@@ -496,7 +495,7 @@ void AnfRuntimeAlgorithm::CopyNodeAttr(const std::string &old_key, const std::st
 MS_EXCEPTION_IF_NULL(to);
 if (!from->isa<CNode>() || !to->isa<CNode>()) {
 MS_LOG(EXCEPTION) << "Only cnode has attr, but this from_anf is " << from->DebugString() << " ,to_node is "
-<< to->DebugString() << " trace: " << trace::DumpSourceLines(from);
+<< to->DebugString() << trace::DumpSourceLines(from);
 }
 auto from_primitive = AnfAlgo::GetCNodePrimitive(from);
 MS_EXCEPTION_IF_NULL(from_primitive);
@@ -510,7 +509,7 @@ void AnfRuntimeAlgorithm::CopyNodeAttrs(const AnfNodePtr &from, const AnfNodePtr
 MS_EXCEPTION_IF_NULL(to);
 if (!from->isa<CNode>() || !to->isa<CNode>()) {
 MS_LOG(EXCEPTION) << "Only cnode has attr, but this from_anf is " << from->DebugString() << ",to_node is "
-<< from->DebugString() << " trace: " << trace::DumpSourceLines(from);
+<< from->DebugString() << trace::DumpSourceLines(from);
 }
 auto from_primitive = AnfAlgo::GetCNodePrimitive(from);
 MS_EXCEPTION_IF_NULL(from_primitive);
@@ -522,8 +521,7 @@ void AnfRuntimeAlgorithm::CopyNodeAttrs(const AnfNodePtr &from, const AnfNodePtr
 void AnfRuntimeAlgorithm::EraseNodeAttr(const std::string &key, const AnfNodePtr node) {
 MS_EXCEPTION_IF_NULL(node);
 if (!node->isa<CNode>()) {
-MS_LOG(EXCEPTION) << "Only cnode has attr, but this anf is " << node->DebugString()
-<< " trace: " << trace::DumpSourceLines(node);
+MS_LOG(EXCEPTION) << "Only cnode has attr, but this anf is " << node->DebugString() << trace::DumpSourceLines(node);
 }
 // single op cnode.
 auto primitive = AnfAlgo::GetCNodePrimitive(node);
@@ -558,7 +556,7 @@ size_t AnfRuntimeAlgorithm::GetInputNum(const CNodePtr &cnode) {
 MS_EXCEPTION_IF_NULL(cnode);
 size_t input_num = cnode->size();
 if (input_num == 0) {
-MS_LOG(EXCEPTION) << "Cnode inputs size can't be zero. trace: " << trace::DumpSourceLines(cnode);
+MS_LOG(EXCEPTION) << "Cnode inputs size can't be zero." << trace::DumpSourceLines(cnode);
 }
 return input_num - 1;
 }
@@ -599,8 +597,7 @@ std::vector<std::string> AnfRuntimeAlgorithm::GetAllOutputFormats(const AnfNodeP
 MS_EXCEPTION_IF_NULL(node);
 if (!AnfUtils::IsRealKernel(node)) {
 MS_LOG(EXCEPTION) << "Not real kernel:"
-<< "#node [" << node->DebugString() << "]"
-<< " trace: " << trace::DumpSourceLines(node);
+<< "#node [" << node->DebugString() << "]" << trace::DumpSourceLines(node);
 }
 auto kernel_info = dynamic_cast<device::KernelInfo *>(node->kernel_info());
 MS_EXCEPTION_IF_NULL(kernel_info);
@@ -614,8 +611,7 @@ std::vector<std::string> AnfRuntimeAlgorithm::GetAllInputFormats(const AnfNodePt
 MS_EXCEPTION_IF_NULL(node);
 if (!AnfUtils::IsRealKernel(node)) {
 MS_LOG(EXCEPTION) << "Not real kernel:"
-<< "#node [" << node->DebugString() << "]"
-<< " trace: " << trace::DumpSourceLines(node);
+<< "#node [" << node->DebugString() << "]" << trace::DumpSourceLines(node);
 }
 auto kernel_info = dynamic_cast<device::KernelInfo *>(node->kernel_info());
 MS_EXCEPTION_IF_NULL(kernel_info);
@@ -629,8 +625,7 @@ std::vector<TypeId> AnfRuntimeAlgorithm::GetAllInputDeviceTypes(const AnfNodePtr
 MS_EXCEPTION_IF_NULL(node);
 if (!AnfUtils::IsRealKernel(node)) {
 MS_LOG(EXCEPTION) << "Not real kernel:"
-<< "#node [" << node->DebugString() << "]"
-<< " trace: " << trace::DumpSourceLines(node);
+<< "#node [" << node->DebugString() << "]" << trace::DumpSourceLines(node);
 }
 auto kernel_info = dynamic_cast<device::KernelInfo *>(node->kernel_info());
 MS_EXCEPTION_IF_NULL(kernel_info);
@@ -644,8 +639,7 @@ std::vector<TypeId> AnfRuntimeAlgorithm::GetAllOutputDeviceTypes(const AnfNodePt
 MS_EXCEPTION_IF_NULL(node);
 if (!AnfUtils::IsRealKernel(node)) {
 MS_LOG(EXCEPTION) << "Not real kernel:"
-<< "#node [" << node->DebugString() << "]"
-<< " trace: " << trace::DumpSourceLines(node);
+<< "#node [" << node->DebugString() << "]" << trace::DumpSourceLines(node);
 }
 auto kernel_info = dynamic_cast<device::KernelInfo *>(node->kernel_info());
 MS_EXCEPTION_IF_NULL(kernel_info);
@@ -659,8 +653,7 @@ std::string AnfRuntimeAlgorithm::GetOriginDataFormat(const AnfNodePtr &node) {
 MS_EXCEPTION_IF_NULL(node);
 if (!AnfUtils::IsRealKernel(node)) {
 MS_LOG(EXCEPTION) << "Not real kernel:"
-<< "#node [" << node->DebugString() << "]"
-<< " trace: " << trace::DumpSourceLines(node);
+<< "#node [" << node->DebugString() << "]" << trace::DumpSourceLines(node);
 }
 auto kernel_info = dynamic_cast<device::KernelInfo *>(node->kernel_info());
 MS_EXCEPTION_IF_NULL(kernel_info);
@@ -675,8 +668,7 @@ std::string AnfRuntimeAlgorithm::GetOutputFormat(const AnfNodePtr &node, size_t
 if (output_idx > GetOutputTensorNum(node)) {
 MS_LOG(EXCEPTION) << "Output index:" << output_idx
 << " is out of the node output range :" << GetOutputTensorNum(node) << " #node ["
-<< node->DebugString() << "]"
-<< " trace: " << trace::DumpSourceLines(node);
+<< node->DebugString() << "]" << trace::DumpSourceLines(node);
 }
 if (!AnfUtils::IsRealKernel(node)) {
 return AnfAlgo::GetPrevNodeOutputFormat(node, output_idx);
@@ -688,8 +680,7 @@ std::string AnfRuntimeAlgorithm::GetOutputFormat(const AnfNodePtr &node, size_t
 auto format = build_info->GetOutputFormat(output_idx);
 if (format == kernel::KernelBuildInfo::kInvalidFormat) {
 MS_LOG(EXCEPTION) << "Node [" << node->DebugString() << "]"
-<< " has a invalid output format"
-<< " trace: " << trace::DumpSourceLines(node);
+<< " has a invalid output format" << trace::DumpSourceLines(node);
 }
 return format;
 }
@@ -699,8 +690,7 @@ std::string AnfRuntimeAlgorithm::GetInputFormat(const AnfNodePtr &node, size_t i
 if (input_idx > GetInputTensorNum(node)) {
 MS_LOG(EXCEPTION) << "Input index :" << input_idx
 << " is out of the number node Input range :" << GetInputTensorNum(node) << "#node ["
-<< node->DebugString() << "]"
-<< " trace: " << trace::DumpSourceLines(node);
+<< node->DebugString() << "]" << trace::DumpSourceLines(node);
 }
 if (!AnfUtils::IsRealKernel(node)) {
 return GetPrevNodeOutputFormat(node, input_idx);
@@ -712,8 +702,7 @@ std::string AnfRuntimeAlgorithm::GetInputFormat(const AnfNodePtr &node, size_t i
 auto format = build_info->GetInputFormat(input_idx);
 if (format == kernel::KernelBuildInfo::kInvalidFormat) {
 MS_LOG(EXCEPTION) << "Node [" << node->DebugString() << "]"
-<< " has a invalid input format"
-<< " trace: " << trace::DumpSourceLines(node);
+<< " has a invalid input format" << trace::DumpSourceLines(node);
 }
 return format;
 }
@@ -722,8 +711,7 @@ KernelWithIndex AnfRuntimeAlgorithm::GetPrevNodeOutput(const AnfNodePtr &anf_nod
 bool skip_nop_node) {
 MS_EXCEPTION_IF_NULL(anf_node);
 if (!anf_node->isa<CNode>()) {
-MS_LOG(EXCEPTION) << anf_node->DebugString() << "anf_node is not CNode."
-<< " trace: " << trace::DumpSourceLines(anf_node);
+MS_LOG(EXCEPTION) << anf_node->DebugString() << "anf_node is not CNode." << trace::DumpSourceLines(anf_node);
 }
 if (CheckPrimitiveType(anf_node, prim::kPrimTupleGetItem)) {
 return VisitKernelWithReturnType(anf_node, 0, skip_nop_node);
@@ -753,15 +741,13 @@ std::vector<size_t> AnfRuntimeAlgorithm::GetOutputInferShape(const AnfNodePtr &n
 return TransShapeToSizet(base_shape->cast<abstract::ShapePtr>());
 }
 MS_LOG(EXCEPTION) << "The node " << node->DebugString() << "is a single output node but got index [" << output_idx
-<< "."
-<< " trace: " << trace::DumpSourceLines(node);
+<< trace::DumpSourceLines(node);
 } else if (base_shape->isa<abstract::TupleShape>()) {
 auto tuple_shape = base_shape->cast<abstract::TupleShapePtr>();
 MS_EXCEPTION_IF_NULL(tuple_shape);
 if (output_idx >= tuple_shape->size()) {
 MS_LOG(EXCEPTION) << "Output index " << output_idx << "is larger than output number " << tuple_shape->size()
-<< " node:" << node->DebugString() << "."
-<< " trace: " << trace::DumpSourceLines(node);
+<< node->DebugString() << trace::DumpSourceLines(node);
 }
 auto b_shp = (*tuple_shape)[output_idx];
 if (b_shp->isa<abstract::Shape>()) {
@@ -771,15 +757,13 @@ std::vector<size_t> AnfRuntimeAlgorithm::GetOutputInferShape(const AnfNodePtr &n
 } else {
 MS_LOG(EXCEPTION) << "The output type of ApplyKernel index:" << output_idx
 << " should be a NoShape , ArrayShape or a TupleShape, but it is " << base_shape->ToString()
-<< "node :" << node->DebugString() << "."
-<< " trace: " << trace::DumpSourceLines(node);
+<< "node :" << node->DebugString() << "." << trace::DumpSourceLines(node);
 }
 } else if (base_shape->isa<abstract::NoShape>()) {
 return std::vector<size_t>();
 }
 MS_LOG(EXCEPTION) << "The output type of ApplyKernel should be a NoShape , ArrayShape or a TupleShape, but it is "
-<< base_shape->ToString() << " node : " << node->DebugString()
-<< " trace: " << trace::DumpSourceLines(node);
+<< base_shape->ToString() << " node : " << node->DebugString() << trace::DumpSourceLines(node);
 }

 std::vector<size_t> AnfRuntimeAlgorithm::GetOutputInferShape(const AnfNodePtr &node, size_t output_idx) {
@@ -844,8 +828,7 @@ std::string AnfRuntimeAlgorithm::GetInputReshapeType(const AnfNodePtr &node, siz
 if (input_idx > GetInputTensorNum(node)) {
 MS_LOG(EXCEPTION) << "The index:" << input_idx
 << " is out of range of the node's input size : " << GetInputTensorNum(node) << "#node["
-<< node->DebugString() << "]"
-<< " trace: " << trace::DumpSourceLines(node);
+<< node->DebugString() << "]" << trace::DumpSourceLines(node);
 }
 if (!AnfUtils::IsRealKernel(node)) {
 return GetPrevNodeOutputReshapeType(node, input_idx);
@@ -865,7 +848,7 @@ std::string AnfRuntimeAlgorithm::GetOutputReshapeType(const AnfNodePtr &node, si
 if (output_idx > GetOutputTensorNum(node)) {
 MS_LOG(EXCEPTION) << "The index [" << output_idx << "] is out of range of the node's output size [ "
 << GetOutputTensorNum(node) << "#node[ " << node->DebugString() << "]"
-<< " trace: " << trace::DumpSourceLines(node);
+<< trace::DumpSourceLines(node);
 }
 if (!AnfUtils::IsRealKernel(node)) {
 return GetPrevNodeOutputReshapeType(node, output_idx);
@@ -919,7 +902,7 @@ TypeId AnfRuntimeAlgorithm::GetOutputDeviceDataType(const AnfNodePtr &node, size
 if (output_idx > GetOutputTensorNum(node)) {
 MS_LOG(EXCEPTION) << "The index [" << output_idx << "] is out of range of the node's output size [ "
 << GetOutputTensorNum(node) << "#node [ " << node->DebugString() << "]"
-<< " trace: " << trace::DumpSourceLines(node);
+<< trace::DumpSourceLines(node);
 }
 if (!AnfUtils::IsRealKernel(node)) {
 return GetPrevNodeOutputDeviceDataType(node, output_idx);
@@ -930,9 +913,7 @@ TypeId AnfRuntimeAlgorithm::GetOutputDeviceDataType(const AnfNodePtr &node, size
 MS_EXCEPTION_IF_NULL(build_info);
 auto dtype = build_info->GetOutputDeviceType(output_idx);
 if (dtype == TypeId::kNumberTypeEnd) {
-MS_LOG(EXCEPTION) << "Node [" << node->DebugString() << "]"
-<< " has a invalid dtype"
-<< " trace: " << trace::DumpSourceLines(node);
+MS_LOG(EXCEPTION) << "Node [" << node->DebugString() << "] has a invalid dtype" << trace::DumpSourceLines(node);
 }
 return dtype;
 }
@@ -942,7 +923,7 @@ TypeId AnfRuntimeAlgorithm::GetInputDeviceDataType(const AnfNodePtr &node, size_
 if (input_idx > GetInputTensorNum(node)) {
 MS_LOG(EXCEPTION) << "The index [" << input_idx << "] is out of range of the node's input size [ "
 << GetInputTensorNum(node) << "#node [ " << node->DebugString() << "]"
-<< " trace: " << trace::DumpSourceLines(node);
+<< trace::DumpSourceLines(node);
 }
 if (!AnfUtils::IsRealKernel(node)) {
 return GetPrevNodeOutputDeviceDataType(node, 0);
@ -360,7 +360,7 @@ void KernelNotSupportException(const AnfNodePtr &kernel_node) {
operator_info << ") ";
}
operator_info << "is not support.";
MS_LOG(EXCEPTION) << operator_info.str() << " Trace: " << trace::DumpSourceLines(kernel_node);
MS_LOG(EXCEPTION) << operator_info.str() << trace::DumpSourceLines(kernel_node);
}
} // namespace
@ -389,7 +389,7 @@ void CPUSession::BuildKernel(const KernelGraph *kernel_graph) {
try {
cpu_kernel->Init(kernel_node);
} catch (std::exception &e) {
MS_LOG(EXCEPTION) << e.what() << "\nTrace: " << trace::DumpSourceLines(kernel_node);
MS_LOG(EXCEPTION) << e.what() << trace::DumpSourceLines(kernel_node);
}
AnfAlgo::SetKernelMod(cpu_kernel, kernel_node.get());
MS_LOG(INFO) << "Cpu build success operator[" << kernel_name << "].";
@ -424,15 +424,13 @@ void CheckInputTensorShape(const TensorPtr &tensor, const CNodePtr &kernel, size
if (tensor_shape.size() != input_shape.size()) {
MS_LOG(EXCEPTION) << "The input tensor's shape size: " << tensor_shape.size()
<< " is not equal to expected size: " << input_shape.size() << " for input[" << input_index
<< "] of kernel: " << AnfAlgo::GetCNodeName(kernel)
<< ", trace: " << trace::DumpSourceLines(kernel);
<< "] of kernel: " << AnfAlgo::GetCNodeName(kernel) << trace::DumpSourceLines(kernel);
}
for (size_t i = 0; i < tensor_shape.size(); i++) {
if (tensor_shape[i] < 0 || static_cast<size_t>(tensor_shape[i]) != input_shape[i]) {
MS_LOG(EXCEPTION) << "The input tensor's shape: " << tensor_shape
<< " is not equal to expected shape: " << input_shape << " for input[" << input_index
<< "] of kernel: " << AnfAlgo::GetCNodeName(kernel)
<< ", trace: " << trace::DumpSourceLines(kernel);
<< "] of kernel: " << AnfAlgo::GetCNodeName(kernel) << trace::DumpSourceLines(kernel);
}
}
}
@ -49,7 +49,7 @@ std::shared_ptr<session::KernelGraph> SingleKernelGraph::ConstructKernelGraphBas
MS_LOG(EXCEPTION)
<< "The size of output_dtypes should be equal to size of output_shapes, but got output_dtypes size: "
<< output_dtypes.size() << ", output_shapes size: " << output_shapes.size() << ". The op name is: " << op_name
<< ", trace: " << trace::DumpSourceLines(cnode);
<< trace::DumpSourceLines(cnode);
}
AnfAlgo::SetOutputInferTypeAndShape(output_dtypes, output_shapes, cnode.get());
// set execution order
@ -853,7 +853,7 @@ CNodePtr GetPrimalUser(const CNodePtr &j_user, const std::map<FuncGraphPtr, std:
<< ", J operation: " << j_user->DebugString() << ", Primal call: ";
size_t count = 0;
for (const auto &user : primal_users) {
MS_LOG(INFO) << "[ " << ++count << " ] : " << user->DebugString(2) << ", trace: " << trace::DumpSourceLines(user);
MS_LOG(INFO) << "[ " << ++count << " ] : " << user->DebugString(2) << trace::DumpSourceLines(user);
}
return nullptr;
}
@ -1966,8 +1966,7 @@ static void ExtractStrategyAndInit(const CNodePtr &cnode, const PrimitivePtr &pr

MS_EXCEPTION_IF_NULL(in_strategy);
if (op_info->Init(in_strategy, out_strategy) == FAILED) {
MS_LOG(EXCEPTION) << "Failure:operator " << prim->name() << " init failed"
<< " trace: " << trace::DumpSourceLines(cnode);
MS_LOG(EXCEPTION) << "Failure:operator " << prim->name() << " init failed" << trace::DumpSourceLines(cnode);
}
}
@ -689,7 +689,7 @@ std::string JoinBranchesFailedInfo(const AbstractBasePtr &spec, const AbstractBa
}
}
}
buffer << ". trace: " << trace::DumpSourceLines(node);
buffer << trace::DumpSourceLines(node);
return buffer.str();
}
@ -632,8 +632,7 @@ void AscendKernelRuntime::TaskFailCallback(rtExceptionInfo *task_fail_info) {
<< "Task overflow infos task_id: " << task_fail_info->taskid
<< ", stream_id: " << task_fail_info->streamid << ", tid: " << task_fail_info->tid
<< ", device_id: " << task_fail_info->deviceid << ", retcode: " << task_fail_info->retcode
<< " (" << GetErrorMsg(task_fail_info->retcode) << ")"
<< ", trace: " << trace::DumpSourceLines(node);
<< " (" << GetErrorMsg(task_fail_info->retcode) << ")" << trace::DumpSourceLines(node);
overflow_tasks_[key] = 1;
} else {
overflow_tasks_[key]++;
@ -709,7 +708,7 @@ void AscendKernelRuntime::DumpTaskExceptionInfo(const session::KernelGraph & /*
}
auto full_scope_name = node->fullname_with_scope();
MS_LOG(ERROR) << "Dump node (" << full_scope_name << ") task error input/output data to: " << path
<< " trace: " << trace::DumpSourceLines(node);
<< trace::DumpSourceLines(node);
E2eDump::DumpInputImpl(node, false, path, &full_scope_name, nullptr);
E2eDump::DumpOutputImpl(node, false, path, &full_scope_name, nullptr);
}
@ -330,8 +330,7 @@ void SetCastAndWeightFormat(const CNodePtr &kernel_node) {
if (!AnfAlgo::HasNodeAttr(kAttrPynativeNextIndex, kernel_node) ||
!AnfAlgo::HasNodeAttr(kAttrPynativeNextOpName, kernel_node)) {
MS_LOG(EXCEPTION) << "The node [" << kernel_node->DebugString() << "] attr of " << kAttrPynativeNextIndex << " or "
<< kAttrPynativeNextOpName << " has not been set yet!"
<< " trace: " << trace::DumpSourceLines(kernel_node);
<< kAttrPynativeNextOpName << " has not been set yet!" << trace::DumpSourceLines(kernel_node);
}
auto next_index = AnfAlgo::GetNodeAttr<size_t>(kernel_node, kAttrPynativeNextIndex);
auto next_op_name = AnfAlgo::GetNodeAttr<std::string>(kernel_node, kAttrPynativeNextOpName);
@ -342,7 +341,7 @@ void SetCastAndWeightFormat(const CNodePtr &kernel_node) {
}
if (iter->second.size() < next_index) {
MS_LOG(EXCEPTION) << "Next input index " << next_index << "is out of range in the next op map max size is "
<< iter->second.size() << " trace: " << trace::DumpSourceLines(kernel_node);
<< iter->second.size() << trace::DumpSourceLines(kernel_node);
}
if (AnfAlgo::GetCNodeName(kernel_node) != prim::kPrimCast->name()) {
MS_LOG(INFO) << "Only supported to change the node Cast's build info!!!";
@ -705,7 +704,7 @@ void PrintNotMatchMessage(const std::vector<std::shared_ptr<kernel::KernelBuildI
auto full_name = kernel_node->fullname_with_scope();
if (ai_core.empty() && ai_cpu.empty()) {
MS_LOG(EXCEPTION) << "Can not find any available kernel info for: " << full_name
<< ". Maybe the operator can not supported on Ascend platform.\nTrace: "
<< ". Maybe the operator can not supported on Ascend platform."
<< trace::DumpSourceLines(kernel_node);
} else {
auto candidates = KernelInfoCandidateList(ai_core, ai_cpu);
@ -714,7 +713,7 @@ void PrintNotMatchMessage(const std::vector<std::shared_ptr<kernel::KernelBuildI
<< "Please check the given data type or shape:\n"
<< buffer.str()
<< "\nFor more details, please refer to 'Kernel Select Failed' at "
"https://www.mindspore.cn\nTrace:"
"https://www.mindspore.cn"
<< trace::DumpSourceLines(kernel_node);
}
}
@ -469,7 +469,7 @@ bool CPUKernelRuntime::Run(const session::KernelGraph &kernel_graph, bool) {
try {
ret = kernel_mod->Launch(kernel_inputs, kernel_workspaces, kernel_outputs, 0);
} catch (std::exception &e) {
MS_LOG(EXCEPTION) << e.what() << "\nTrace:" << trace::DumpSourceLines(kernel);
MS_LOG(EXCEPTION) << e.what() << trace::DumpSourceLines(kernel);
}
#ifndef ENABLE_SECURITY
if (iter_dump_flag) {
@ -483,7 +483,7 @@ bool CPUKernelRuntime::Run(const session::KernelGraph &kernel_graph, bool) {
#ifdef ENABLE_DUMP_IR
mindspore::RDR::TriggerAll();
#endif
MS_LOG(EXCEPTION) << "Launch kernel failed. Trace:" << trace::DumpSourceLines(kernel);
MS_LOG(EXCEPTION) << "Launch kernel failed." << trace::DumpSourceLines(kernel);
}
static_cast<CPUMemoryManager *>(mem_manager_.get())->DecreaseAddressRefCount(kernel);
#ifdef ENABLE_PROFILE
@ -232,7 +232,7 @@ void KernelNotSupportException(const AnfNodePtr &kernel_node, const std::vector<
operator_info
<< "is not support. This error means the current input type is not supported, please refer to the MindSpore "
"doc for supported types.\n";
MS_EXCEPTION(TypeError) << operator_info.str() << "Trace: " << trace::DumpSourceLines(kernel_node);
MS_EXCEPTION(TypeError) << operator_info.str() << trace::DumpSourceLines(kernel_node);
}

void UpdateDynamicKernelBuildInfoAndAttrs(const CNodePtr &kernel_node) {
@ -44,7 +44,7 @@ inline void AbstractTypeJoinLogging(const AbstractBasePtr &abstract1, const Abst
<< "this: " << abstract1->ToString() << ", other: " << abstract2->ToString();
auto node = GetTraceNode(abstract1);
if (node != nullptr) {
oss << ". Please check the node " << node->DebugString() << ". trace: " << trace::DumpSourceLines(node);
oss << ". Please check the node " << node->DebugString() << trace::DumpSourceLines(node);
}
MS_EXCEPTION(TypeError) << oss.str();
}
@ -57,7 +57,7 @@ inline void TypeJoinLogging(const TypePtr &type1, const TypePtr &type2, const Ab
<< "this: " << abstract1->ToString() << ", other: " << abstract2->ToString();
auto node = GetTraceNode(abstract1);
if (node != nullptr) {
oss << ". Please check the node " << node->DebugString() << ". trace: " << trace::DumpSourceLines(node);
oss << ". Please check the node " << node->DebugString() << trace::DumpSourceLines(node);
}
MS_EXCEPTION(TypeError) << oss.str();
}
@ -70,7 +70,7 @@ inline void ShapeJoinLogging(const BaseShapePtr &shape1, const BaseShapePtr &sha
<< "this: " << abstract1->ToString() << ", other: " << abstract2->ToString();
auto node = GetTraceNode(abstract1);
if (node != nullptr) {
oss << ". Please check the node " << node->DebugString() << ". trace: " << trace::DumpSourceLines(node);
oss << ". Please check the node " << node->DebugString() << trace::DumpSourceLines(node);
}
MS_EXCEPTION(ValueError) << oss.str();
}
@ -106,8 +106,7 @@ bool AnfUtils::IsRealKernel(const AnfNodePtr &node) {
return true;
}
if (cnode->size() == 0) {
MS_LOG(EXCEPTION) << "Illegal null input of cnode(%s)" << node->DebugString()
<< " trace: " << trace::DumpSourceLines(node);
MS_LOG(EXCEPTION) << "Illegal null input of cnode(%s)" << node->DebugString() << trace::DumpSourceLines(node);
}
return !IsOneOfPrimitive(cnode->input(kAnfPrimitiveIndex), virtual_prims);
}
@ -145,7 +144,7 @@ std::string AnfUtils::GetCNodeName(const AnfNodePtr &node) {
}
return func_graph->ToString();
}
MS_LOG(EXCEPTION) << "Unknown anf node type " << node->DebugString() << " trace: " << trace::DumpSourceLines(node);
MS_LOG(EXCEPTION) << "Unknown anf node type " << node->DebugString() << trace::DumpSourceLines(node);
}

size_t AnfUtils::GetInputTensorNum(const AnfNodePtr &node) {
@ -153,7 +152,7 @@ size_t AnfUtils::GetInputTensorNum(const AnfNodePtr &node) {
auto cnode = node->cast<CNodePtr>();
if (cnode == nullptr) {
MS_LOG(EXCEPTION) << "Only cnode has real input, but this anf is " << node->DebugString()
<< " trace: " << trace::DumpSourceLines(node);
<< trace::DumpSourceLines(node);
}
ssize_t input_tensor_num = cnode->input_tensor_num();
if (input_tensor_num >= 0) {
@ -161,8 +160,7 @@ size_t AnfUtils::GetInputTensorNum(const AnfNodePtr &node) {
}
size_t input_num = cnode->inputs().size();
if (input_num == 0) {
MS_LOG(EXCEPTION) << "Cnode inputs size can't be zero"
<< " trace: " << trace::DumpSourceLines(node);
MS_LOG(EXCEPTION) << "Cnode inputs size can't be zero" << trace::DumpSourceLines(node);
}
// Exclude inputs[0].
--input_num;
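
Taken together, every hunk above makes the same edit: the ad-hoc labels at the call sites (" trace: ", ", trace: ", ". trace: ", "\nTrace:", " Trace: ") are deleted, and the "trace" key is emitted by trace::DumpSourceLines itself. Below is a minimal sketch of that centralization; it is an illustration, not the MindSpore implementation. The real interface takes the node (an AnfNodePtr) and resolves its recorded source locations, so GetSourceLines and the exact "\ntrace:\n" prefix here are assumptions standing in for that machinery.

#include <sstream>
#include <string>

namespace trace {
// Hypothetical stand-in: the real helper walks the node's debug info to
// collect the originating Python source lines.
std::string GetSourceLines() { return "In file model.py(42)/    y = conv(x)/"; }

// Sketch of the new contract: the label lives in exactly one place, so a
// call site just streams the result directly after its message.
std::string DumpSourceLines() {
  std::string lines = GetSourceLines();
  if (lines.empty()) {
    return "";  // no trace available: the caller's message is left untouched
  }
  std::ostringstream oss;
  oss << "\ntrace:\n" << lines;  // assumed label format, applied once here
  return oss.str();
}
}  // namespace trace

The payoff is uniformity: before this commit each message picked its own separator and capitalization, so the trace section of a log line was hard to match mechanically and easy to omit; after it, every exception carries an identical suffix that no call site can forget or misspell.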