From 6cfd71106481d87f654c4c5ed6540b77c4f0402a Mon Sep 17 00:00:00 2001
From: ZPaC
Date: Tue, 21 Feb 2023 11:44:59 +0800
Subject: [PATCH] Move get detail shape to backend anfalgo.

---
 .../ccsrc/backend/common/optimizer/helper.cc  |  4 +-
 .../common/session/anf_runtime_algorithm.cc   | 52 ++++++++++--
 .../common/session/anf_runtime_algorithm.h    |  4 +
 .../ccsrc/include/common/utils/anfalgo.h      |  7 +-
 .../ascend/hal/hardware/ascend_auto_monad.cc  |  4 +-
 .../kernel/tbe/tbe_json/tbe_json_utils.cc     |  2 +-
 .../tbe/tbe_kernel_select/tbe_select_utils.cc |  4 +-
 .../device/ascend/optimizer/ascend_helper.cc  |  7 +-
 .../buffer_fusion/ub_pattern_fusion.cc        |  4 +-
 .../enhancer/concat_outputs_for_all_gather.cc |  2 +-
 .../enhancer/insert_pad_for_nms_with_mask.cc  |  2 +-
 .../enhancer/insert_transpose_for_sort.cc     |  4 +-
 .../optimizer/format_type/deal_ref_output.cc  |  2 +-
 .../eliminate_graph_output_transdata.cc       |  2 +-
 .../optimizer/format_type/insert_cast.cc      |  4 +-
 .../format_type/insert_transdata_for_runop.cc |  4 +-
 .../optimizer/format_type/merge_cast_to_op.cc |  4 +-
 .../ascend/optimizer/ge/lamb_fission.cc       |  4 +-
 ...softmax_cross_entropy_with_logits_split.cc |  2 +-
 .../ir_fission/batch_norm_grad_split.cc       |  5 +-
 .../bce_with_logits_loss_fission.cc           |  4 +-
 .../optimizer/ir_fission/bn_grad_split.cc     |  5 +-
 .../ascend/optimizer/ir_fission/bn_split.cc   |  6 +-
 .../optimizer/ir_fission/concat_fission.cc    |  4 +-
 .../optimizer/ir_fission/lamb_fission.cc      |  4 +-
 .../ir_fission/layer_norm_grad_split.cc       |  8 +-
 .../optimizer/ir_fission/pack_fission.cc      |  2 +-
 .../ir_fission/reduce_sum_fission.cc          |  2 +-
 .../optimizer/ir_fission/transdata_split.cc   |  4 +-
 .../adam_apply_one_with_decay_rule.cc         |  4 +-
 .../ir_fusion/adaptive_max_pool2d_fusion.cc   |  6 +-
 ...duce_grad_conv2d_backprop_filter_fusion.cc |  3 +-
 .../clip_by_norm_no_div_square_sum_fusion.cc  |  2 +-
 .../ir_fusion/clip_by_value_fusion.cc         |  2 +-
 .../ir_fusion/confusion_mul_grad_fusion.cc    |  2 +-
 .../lamb_next_mv_with_decay_v1_rule.cc        |  4 +-
 .../lamb_update_with_lr_rule_fusion.cc        |  2 +-
 .../softmax_dropout_do_mask_v3_fusion.cc      |  2 +-
 .../optimizer/ir_fusion/square_sum_fusion.cc  |  4 +-
 .../mindir/all_to_all_unify_mindir.cc         |  3 +-
 .../optimizer/mindir/bn_grad_unify_mindir.cc  |  9 +-
 .../mindir/maxpool_to_maxpool_with_argmax.cc  |  2 +-
 .../neighbor_exchange_v2_unify_mindir.cc      |  2 +-
 ..._cross_entropy_with_logits_unify_mindir.cc |  6 +-
 .../device/cpu/optimizer/insert_cast_cpu.cc   |  5 +-
 .../optimizer/insert_format_transform_op.cc   |  2 +-
 .../device/cpu/optimizer/print_value_type.cc  |  2 +-
 .../device/gpu/optimizer/adam_fusion.cc       |  2 +-
 .../gpu/optimizer/adam_weight_decay_fusion.cc |  2 +-
 .../gpu/optimizer/add_relu_grad_v2_fusion.cc  |  2 +-
 .../gpu/optimizer/add_relu_v2_fusion.cc       |  2 +-
 .../device/gpu/optimizer/alltoall_fusion.cc   |  2 +-
 .../optimizer/apply_momentum_scale_fusion.cc  |  2 +-
 .../optimizer/apply_momentum_weight_fusion.cc |  2 +-
 .../apply_momentum_weight_scale_fusion.cc     |  2 +-
 .../optimizer/batch_norm_add_relu_fusion.cc   |  2 +-
 .../batch_norm_add_relu_grad_fusion.cc        |  4 +-
 .../gpu/optimizer/batch_norm_relu_fusion.cc   |  2 +-
 .../optimizer/batch_norm_relu_grad_fusion.cc  |  2 +-
 .../optimizer/bce_with_logits_loss_fusion.cc  |  4 +-
 .../concat_outputs_for_all_gather.cc          |  2 +-
 .../gpu/optimizer/cudnn_inplace_fusion.cc     |  2 +-
 .../device/gpu/optimizer/insert_cast_gpu.cc   |  4 +-
 .../optimizer/insert_format_transform_op.cc   |  2 +-
 .../gpu/optimizer/matmul_biasadd_fusion.cc    |  2 +-
 .../post_batch_norm_add_relu_fusion.cc        |  2 +-
 .../gpu/optimizer/print_reduce_fusion.cc      |  2 +-
 .../device/gpu/optimizer/relu_v2_pass.cc      |  4 +-
 .../gpu/optimizer/replace_addn_fusion.cc      |  2 +-
 .../optimizer/replace_momentum_cast_fusion.cc |  2 +-
 .../gpu/optimizer/trt_pass/graph_converter.cc |  2 +-
 mindspore/ccsrc/utils/anfalgo.cc              | 84 +++++--------------
 .../mindspore/communication/management.py     |  1 +
 73 files changed, 182 insertions(+), 183 deletions(-)

diff --git a/mindspore/ccsrc/backend/common/optimizer/helper.cc b/mindspore/ccsrc/backend/common/optimizer/helper.cc
index cf5dd036e53..65557a67c09 100644
--- a/mindspore/ccsrc/backend/common/optimizer/helper.cc
+++ b/mindspore/ccsrc/backend/common/optimizer/helper.cc
@@ -633,10 +633,10 @@ CNodePtr AddCastNode(const FuncGraphPtr &func_graph, const TypeId dst_type, cons
   if (is_input) {
     auto node_input = common::AnfAlgo::GetInputNode(node, 0);
     (void)new_cast_inputs.emplace_back(node_input);
-    shape = common::AnfAlgo::GetOutputDetailShape(node_input, 0);
+    shape = AnfAlgo::GetOutputDetailShape(node_input, 0);
   } else {
     (void)new_cast_inputs.emplace_back(node);
-    shape = common::AnfAlgo::GetOutputDetailShape(node, 0);
+    shape = AnfAlgo::GetOutputDetailShape(node, 0);
   }
   CNodePtr new_cast = NewCNode(new_cast_inputs, func_graph, {node});
   new_cast->set_scope(node->scope());
diff --git a/mindspore/ccsrc/backend/common/session/anf_runtime_algorithm.cc b/mindspore/ccsrc/backend/common/session/anf_runtime_algorithm.cc
index ac56c742c82..00a9cef1a2f 100644
--- a/mindspore/ccsrc/backend/common/session/anf_runtime_algorithm.cc
+++ b/mindspore/ccsrc/backend/common/session/anf_runtime_algorithm.cc
@@ -549,7 +549,7 @@ bool AnfRuntimeAlgorithm::IsRealSquenceOutput(const AnfNodePtr &node) {
 std::vector<int64_t> AnfRuntimeAlgorithm::GetOutputDeviceShapeForTbeBuild(const AnfNodePtr &node, size_t output_idx,
                                                                           const std::string &format) {
-  auto output_shape = common::AnfAlgo::GetOutputDetailShape(node, output_idx);
+  auto output_shape = AnfAlgo::GetOutputDetailShape(node, output_idx);
   std::vector<int64_t> infer_shape;
   if (output_shape->isa<abstract::Shape>()) {
     auto shape_ptr = output_shape->cast<abstract::ShapePtr>();
@@ -589,7 +589,7 @@ ShapeVector AnfRuntimeAlgorithm::GetOutputDeviceShape(const AnfNodePtr &node, si
 std::vector<int64_t> AnfRuntimeAlgorithm::GetInputDeviceShapeForTbeBuild(const AnfNodePtr &node, size_t input_idx,
                                                                          const std::string &format) {
-  auto output_shape = common::AnfAlgo::GetPrevNodeOutputDetailShape(node, input_idx);
+  auto output_shape = AnfAlgo::GetPrevNodeOutputDetailShape(node, input_idx);
   std::vector<int64_t> infer_shape;
   if (output_shape->isa<abstract::Shape>()) {
     auto shape_ptr = output_shape->cast<abstract::ShapePtr>();
@@ -1733,7 +1733,49 @@ std::vector<TypeId> AnfRuntimeAlgorithm::GetAllOutputObjectType(const AnfNodePtr
   return {AnfAlgo::GetAbstractObjectType(node->abstract())};
 }
 
-std::vector<TypeId> AnfAlgo::GetAllOutputInferDataTypes(const AnfNodePtr &node) {
+abstract::BaseShapePtr AnfRuntimeAlgorithm::GetOutputDetailShape(const AnfNodePtr &node, size_t output_idx) {
+  MS_EXCEPTION_IF_NULL(node);
+  auto base_shape = node->Shape();
+  MS_EXCEPTION_IF_NULL(base_shape);
+  if (base_shape->isa<abstract::Shape>()) {
+    if (output_idx == 0) {
+      return base_shape;
+    }
+    MS_LOG(EXCEPTION) << "The node " << node->DebugString() << "is a single output node but got index [" << output_idx
+                      << "]."
+                      << trace::DumpSourceLines(node);
+  } else if (base_shape->isa<abstract::TupleShape>()) {
+    auto tuple_shape = base_shape->cast<abstract::TupleShapePtr>();
+    MS_EXCEPTION_IF_NULL(tuple_shape);
+    if (IsRealSquenceOutput(node)) {
+      return tuple_shape;
+    }
+    if (output_idx >= tuple_shape->size()) {
+      MS_LOG(EXCEPTION) << "Output index " << output_idx << "is larger than output number " << tuple_shape->size()
+                        << " node:" << node->DebugString() << "." << trace::DumpSourceLines(node);
+    }
+    auto b_shp = (*tuple_shape)[output_idx];
+    if (b_shp->isa<abstract::Shape>() || b_shp->isa<abstract::NoShape>()) {
+      return b_shp;
+    } else {
+      MS_LOG(EXCEPTION) << "The output type of ApplyKernel index:" << output_idx
+                        << " should be a NoShape , ArrayShape or a TupleShape, but it is " << base_shape->ToString()
+                        << "node :" << node->DebugString() << "." << trace::DumpSourceLines(node);
+    }
+  } else if (base_shape->isa<abstract::NoShape>()) {
+    return base_shape;
+  } else if (base_shape->isa<abstract::DynamicSequenceShape>()) {
+    return common::AnfAlgo::GetDynamicSequenceShape(node, output_idx);
+  }
+  MS_LOG(EXCEPTION) << "The output type of ApplyKernel should be a NoShape , ArrayShape or a TupleShape, but it is "
+                    << base_shape->ToString() << " node : " << node->DebugString() << trace::DumpSourceLines(node);
+}
+
+abstract::BaseShapePtr AnfRuntimeAlgorithm::GetPrevNodeOutputDetailShape(const AnfNodePtr &node, size_t input_idx) {
+  KernelWithIndex kernel_with_index = common::AnfAlgo::GetPrevNodeOutput(node, input_idx);
+  return AnfAlgo::GetOutputDetailShape(kernel_with_index.first, kernel_with_index.second);
+}
+
+std::vector<TypeId> AnfRuntimeAlgorithm::GetAllOutputInferDataTypes(const AnfNodePtr &node) {
   MS_EXCEPTION_IF_NULL(node);
   std::vector<TypeId> outputs;
   auto out_nums = AnfAlgo::GetOutputElementNum(node);
@@ -1746,7 +1788,7 @@ std::vector<TypeId> AnfAlgo::GetAllOutputInferDataTypes(const AnfNodePtr &node)
 
 // if input node is MakeTuple, find the PrevNodeNum recursively;
 // The monad node in the end is not included in the num;
-size_t AnfAlgo::GetInputElementNum(const AnfNodePtr &node) {
+size_t AnfRuntimeAlgorithm::GetInputElementNum(const AnfNodePtr &node) {
   MS_EXCEPTION_IF_NULL(node);
   auto cnode = node->cast<CNodePtr>();
   MS_EXCEPTION_IF_NULL(cnode);
@@ -1772,7 +1814,7 @@ size_t AnfAlgo::GetInputElementNum(const AnfNodePtr &node) {
   return element_num;
 }
 
-void AnfAlgo::SetDynamicAttrToPrim(const PrimitivePtr &prim) {
+void AnfRuntimeAlgorithm::SetDynamicAttrToPrim(const PrimitivePtr &prim) {
   prim->AddAttr(kAttrMutableKernel, MakeValue(true));
   prim->AddAttr(kAttrInputIsDynamicShape, MakeValue(true));
   prim->AddAttr(kAttrOutputIsDynamicShape, MakeValue(true));
diff --git a/mindspore/ccsrc/backend/common/session/anf_runtime_algorithm.h b/mindspore/ccsrc/backend/common/session/anf_runtime_algorithm.h
index 76f13a5766f..dabbfb7255b 100644
--- a/mindspore/ccsrc/backend/common/session/anf_runtime_algorithm.h
+++ b/mindspore/ccsrc/backend/common/session/anf_runtime_algorithm.h
@@ -225,6 +225,10 @@ class BACKEND_EXPORT AnfRuntimeAlgorithm {
   static size_t GetInputElementNum(const AnfNodePtr &node);
   static bool IsRealSquenceOutput(const AnfNodePtr &node);
   static void SetDynamicAttrToPrim(const PrimitivePtr &prim);
+
+  // Get output detail shape. These interfaces should take TUPLE output into consideration.
+  static abstract::BaseShapePtr GetOutputDetailShape(const AnfNodePtr &node, size_t output_idx);
+  static abstract::BaseShapePtr GetPrevNodeOutputDetailShape(const AnfNodePtr &node, size_t input_idx);
 };
 }  // namespace session
 using AnfAlgo = session::AnfRuntimeAlgorithm;
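The two methods added above are the substance of this patch; nearly everything after this point is call-site churn from common::AnfAlgo to the backend AnfAlgo. For orientation, the sketch below models the same tuple-aware dispatch in isolation. It is a simplified, self-contained C++ stand-in: Shape and TupleShape here only mirror MindSpore's abstract::Shape and abstract::TupleShape, exceptions stand in for MS_LOG(EXCEPTION), and the NoShape and dynamic-sequence branches are omitted.

    #include <cstdint>
    #include <iostream>
    #include <memory>
    #include <sstream>
    #include <stdexcept>
    #include <string>
    #include <vector>

    // Stand-ins for abstract::BaseShape / abstract::Shape / abstract::TupleShape.
    struct BaseShape {
      virtual ~BaseShape() = default;
      virtual std::string ToString() const = 0;
    };
    using BaseShapePtr = std::shared_ptr<BaseShape>;

    struct Shape final : BaseShape {
      std::vector<int64_t> dims;  // -1 marks a dynamic dimension
      explicit Shape(std::vector<int64_t> d) : dims(std::move(d)) {}
      std::string ToString() const override {
        std::ostringstream oss;
        oss << "(";
        for (size_t i = 0; i < dims.size(); ++i) {
          oss << dims[i] << (i + 1 < dims.size() ? ", " : "");
        }
        oss << ")";
        return oss.str();
      }
    };

    struct TupleShape final : BaseShape {
      std::vector<BaseShapePtr> elements;  // one entry per output
      explicit TupleShape(std::vector<BaseShapePtr> e) : elements(std::move(e)) {}
      std::string ToString() const override { return "tuple[" + std::to_string(elements.size()) + "]"; }
    };

    // Same control flow as the new AnfRuntimeAlgorithm::GetOutputDetailShape:
    // tuple outputs are indexed, single outputs only accept output_idx == 0.
    BaseShapePtr GetOutputDetailShape(const BaseShapePtr &node_shape, size_t output_idx) {
      if (auto tuple = std::dynamic_pointer_cast<TupleShape>(node_shape)) {
        if (output_idx >= tuple->elements.size()) {
          throw std::out_of_range("output index is larger than output number");
        }
        return tuple->elements[output_idx];
      }
      if (output_idx != 0) {
        throw std::invalid_argument("single output node queried with index != 0");
      }
      return node_shape;
    }

    int main() {
      auto mean = std::make_shared<Shape>(std::vector<int64_t>{32});
      auto var = std::make_shared<Shape>(std::vector<int64_t>{32});
      auto bn_out = std::make_shared<TupleShape>(std::vector<BaseShapePtr>{mean, var});
      std::cout << GetOutputDetailShape(bn_out, 1)->ToString() << "\n";  // prints (32)
      return 0;
    }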
diff --git a/mindspore/ccsrc/include/common/utils/anfalgo.h b/mindspore/ccsrc/include/common/utils/anfalgo.h
index 0c4fdb9c740..5ff27033ef4 100644
--- a/mindspore/ccsrc/include/common/utils/anfalgo.h
+++ b/mindspore/ccsrc/include/common/utils/anfalgo.h
@@ -139,9 +139,7 @@ class COMMON_EXPORT AnfAlgo {
   static void SetOutputInferTypeAndShape(const std::vector<TypeId> &types, const std::vector<ShapeVector> &shapes,
                                          AnfNode *node, bool disable_dynamic_len = false);
   static void SetScalarTupleOutputInferType(const std::vector<TypeId> &types, const AnfNodePtr &node);
-  // get and set output shape ptr
-  static abstract::BaseShapePtr GetOutputDetailShape(const AnfNodePtr &node, size_t output_idx);
-  static abstract::BaseShapePtr GetPrevNodeOutputDetailShape(const AnfNodePtr &node, size_t input_idx);
+  // set output shape ptr
   static void SetOutputTypeAndDetailShape(const std::vector<TypeId> &types,
                                           const std::vector<abstract::BaseShapePtr> &shapes, AnfNode *node);
   static void CopyAbstract(const AnfNodePtr &from_node, AnfNode *to_node);
@@ -296,6 +294,9 @@ class COMMON_EXPORT AnfAlgo {
   static bool HasTupleInput(const CNodePtr &node);
   static bool HasDynamicTupleInput(const CNodePtr &node);
   static bool IsReduceOp(const std::string &op_name);
+
+  // Get the element shape of dynamic sequence shape.
+  static abstract::BaseShapePtr GetDynamicSequenceShape(const AnfNodePtr &node, size_t output_idx);
 };
 }  // namespace common
 }  // namespace mindspore
diff --git a/mindspore/ccsrc/plugin/device/ascend/hal/hardware/ascend_auto_monad.cc b/mindspore/ccsrc/plugin/device/ascend/hal/hardware/ascend_auto_monad.cc
index 67b2de750e6..adfa4950835 100644
--- a/mindspore/ccsrc/plugin/device/ascend/hal/hardware/ascend_auto_monad.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/hal/hardware/ascend_auto_monad.cc
@@ -1416,8 +1416,8 @@ class AscendAutoMonadConverter {
     std::vector<AnfNodePtr> cast_inputs = {NewValueNode(std::make_shared<Primitive>(prim::kPrimCast->name())),
                                            source};
     auto cast_node = kernel_graph_->NewCNode(cast_inputs);
-    auto origin_shape = common::AnfAlgo::GetOutputDetailShape(source, kFirstOutput);
-    auto shape = common::AnfAlgo::GetOutputDetailShape(target, kFirstOutput);
+    auto origin_shape = AnfAlgo::GetOutputDetailShape(source, kFirstOutput);
+    auto shape = AnfAlgo::GetOutputDetailShape(target, kFirstOutput);
     if (!common::IsEqual(origin_shape, shape)) {
       MS_LOG(EXCEPTION) << "Assign: " << target->DebugString() << " and " << source->DebugString()
                         << " has different shape, source shape: " << origin_shape->ToString()
diff --git a/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/tbe_json/tbe_json_utils.cc b/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/tbe_json/tbe_json_utils.cc
index dd05752c2ea..a6ece455582 100644
--- a/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/tbe_json/tbe_json_utils.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/tbe_json/tbe_json_utils.cc
@@ -97,7 +97,7 @@ std::vector<int64_t> TbeJsonUtils::GetInputDeviceShapeForTbeBuild(const AnfNodeP
 std::vector<int64_t> TbeJsonUtils::GetOutputOriShapeForTbeBuild(const AnfNodePtr &anf_node, size_t real_idx) {
   MS_EXCEPTION_IF_NULL(anf_node);
   std::vector<int64_t> shape;
-  auto out_shape = common::AnfAlgo::GetOutputDetailShape(anf_node, real_idx);
+  auto out_shape = AnfAlgo::GetOutputDetailShape(anf_node, real_idx);
   MS_EXCEPTION_IF_NULL(out_shape);
   if (out_shape->isa<abstract::Shape>()) {
     auto shape_ptr = out_shape->cast<abstract::ShapePtr>();
diff --git a/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/tbe_kernel_select/tbe_select_utils.cc b/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/tbe_kernel_select/tbe_select_utils.cc
index bef25ef3087..194ea053715 100644
--- a/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/tbe_kernel_select/tbe_select_utils.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/tbe_kernel_select/tbe_select_utils.cc
@@ -71,8 +71,8 @@ bool HostCheck::CheckValidDeviceShape(const AnfNodePtr &node) {
 std::vector<int64_t> HostCheck::GetFinalInferShape(const AnfNodePtr &node, size_t index, bool is_output,
                                                    const std::string &format) {
-  auto shape = is_output ? common::AnfAlgo::GetOutputDetailShape(node, index)
-                         : common::AnfAlgo::GetPrevNodeOutputDetailShape(node, index);
+  auto shape =
+    is_output ? AnfAlgo::GetOutputDetailShape(node, index) : AnfAlgo::GetPrevNodeOutputDetailShape(node, index);
   std::vector<int64_t> infer_shape;
   if (shape->isa<abstract::Shape>()) {
     auto shape_ptr = shape->cast<abstract::ShapePtr>();
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/ascend_helper.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/ascend_helper.cc
index 7b9dfbe5f40..0ad6cfee0e6 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/ascend_helper.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/ascend_helper.cc
@@ -238,7 +238,7 @@ AnfNodePtr AddTransOpNodeToGraphWithFormat(const FuncGraphPtr &func_graph, const
                       << input_format << " and dst format " << dst_format;
   }
   std::string spec_format = input_format == kOpFormat_DEFAULT ? dst_format : input_format;
-  auto input_node_out_shape = common::AnfAlgo::GetOutputDetailShape(input_node, 0);
+  auto input_node_out_shape = AnfAlgo::GetOutputDetailShape(input_node, 0);
   MS_EXCEPTION_IF_NULL(input_node_out_shape);
   auto out_shape_ptr = input_node_out_shape->cast<abstract::ShapePtr>();
   MS_EXCEPTION_IF_NULL(out_shape_ptr);
@@ -365,7 +365,7 @@ CNodePtr NewTransOpNode(const FuncGraphPtr &func_graph, const AnfNodePtr &input,
   MS_EXCEPTION_IF_NULL(trans_node);
   auto infer_type = common::AnfAlgo::GetOutputInferDataType(input, 0);
-  auto out_shape_base = common::AnfAlgo::GetOutputDetailShape(input, 0);
+  auto out_shape_base = AnfAlgo::GetOutputDetailShape(input, 0);
   MS_EXCEPTION_IF_NULL(out_shape_base);
   ShapeVector out_shape;
   bool is_dynamic_shape = false;
@@ -555,8 +555,7 @@ CNodePtr InsertCastForInput(const FuncGraphPtr &func_graph, const CNodePtr &cnod
       origin_type = common::AnfAlgo::GetOutputInferDataType(prev_node.first, prev_node.second);
     }
     const std::string dev_fmt = AnfAlgo::GetInputFormat(cnode, input_index);
-    const abstract::BaseShapePtr origin_shape =
-      common::AnfAlgo::GetOutputDetailShape(prev_node.first, prev_node.second);
+    const abstract::BaseShapePtr origin_shape = AnfAlgo::GetOutputDetailShape(prev_node.first, prev_node.second);
     // In graph kernel, we check parameter,
     // the eliminate pass will not eliminate this case, so we just do not insert the no used cast.
     if (TypeId device_type = AnfAlgo::GetInputDeviceDataType(cnode, input_index); origin_type != device_type) {
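A pattern that repeats across the remaining hunks: when a pass materializes a Cast or TransData node, it propagates the producer's detail shape object, as in SetOutputTypeAndDetailShape({dst_type}, {GetOutputDetailShape(input, 0)}, cast.get()) above, rather than a flattened static shape, so dynamic dimensions survive the rewrite. A minimal stand-alone sketch of that idea, with a hypothetical MiniNode type in place of AnfNode:

    #include <cstdint>
    #include <memory>
    #include <string>
    #include <vector>

    // Hypothetical miniature node; ShapeVector uses -1 for dynamic dims, as in MindSpore.
    using ShapeVector = std::vector<int64_t>;
    enum class TypeId { kFloat16, kFloat32 };

    struct MiniNode {
      std::string name;
      TypeId dtype;
      std::shared_ptr<const ShapeVector> detail_shape;  // shared "detail shape" object
    };

    // Spirit of the passes above: build the Cast, then copy the producer's *detail*
    // shape, so a dynamic dim like {-1, 224, 224, 3} is not flattened to a static guess.
    std::shared_ptr<MiniNode> InsertCast(const std::shared_ptr<MiniNode> &input, TypeId dst_type) {
      if (input->dtype == dst_type) {
        return input;  // the passes skip the cast when types already match
      }
      auto cast = std::make_shared<MiniNode>();
      cast->name = "Cast(" + input->name + ")";
      cast->dtype = dst_type;
      cast->detail_shape = input->detail_shape;  // share the pointer, keep dynamic dims
      return cast;
    }

    int main() {
      auto producer = std::make_shared<MiniNode>(
        MiniNode{"conv", TypeId::kFloat16, std::make_shared<ShapeVector>(ShapeVector{-1, 224, 224, 3})});
      auto cast = InsertCast(producer, TypeId::kFloat32);
      return cast->detail_shape == producer->detail_shape ? 0 : 1;  // same shape object
    }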
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/buffer_fusion/ub_pattern_fusion.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/buffer_fusion/ub_pattern_fusion.cc
index 3cb27e7f99a..7fa3ab44306 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/buffer_fusion/ub_pattern_fusion.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/buffer_fusion/ub_pattern_fusion.cc
@@ -196,7 +196,7 @@ AnfNodePtr CreateTupleGetItem(const AnfNodePtr &buffer_fusion_kernel, session::K
   MS_EXCEPTION_IF_NULL(tuple_item);
   common::AnfAlgo::SetOutputTypeAndDetailShape(
     {common::AnfAlgo::GetOutputInferDataType(buffer_fusion_kernel, output_index)},
-    {common::AnfAlgo::GetOutputDetailShape(buffer_fusion_kernel, output_index)}, tuple_item.get());
+    {AnfAlgo::GetOutputDetailShape(buffer_fusion_kernel, output_index)}, tuple_item.get());
   return tuple_item;
 }
@@ -582,7 +582,7 @@ bool UbPatternFusion::ReplaceFusionOp(mindspore::HashMap
[...]
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/enhancer/concat_outputs_for_all_gather.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/enhancer/concat_outputs_for_all_gather.cc
[...]
                                                 (output_info)[i]},
-      {common::AnfAlgo::GetOutputDetailShape(node, i)}, tuple_getitem.get());
+      {AnfAlgo::GetOutputDetailShape(node, i)}, tuple_getitem.get());
     (void)new_outputs.emplace_back(std::move(tuple_getitem));
   }
   return InsertConcatForOutput(func_graph, node, output_info, new_outputs, rank_size);
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/enhancer/insert_pad_for_nms_with_mask.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/enhancer/insert_pad_for_nms_with_mask.cc
index 9e3cf627937..fa7b0f67d88 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/enhancer/insert_pad_for_nms_with_mask.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/enhancer/insert_pad_for_nms_with_mask.cc
@@ -64,7 +64,7 @@ const AnfNodePtr InsertPadForNMSWithMask::Process(const FuncGraphPtr &func_graph
   for (size_t input_idx = 0; input_idx < input_num; input_idx++) {
     auto cur_input = common::AnfAlgo::GetInputNode(cnode, input_idx);
     auto origin_type = common::AnfAlgo::GetPrevNodeOutputInferDataType(cnode, input_idx);
-    auto origin_shape_base_ptr = common::AnfAlgo::GetPrevNodeOutputDetailShape(cnode, input_idx);
+    auto origin_shape_base_ptr = AnfAlgo::GetPrevNodeOutputDetailShape(cnode, input_idx);
     auto origin_shape_ptr = origin_shape_base_ptr->cast<abstract::ShapePtr>();
     MS_EXCEPTION_IF_NULL(origin_shape_ptr);
     auto origin_shape = origin_shape_ptr->shape();
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/enhancer/insert_transpose_for_sort.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/enhancer/insert_transpose_for_sort.cc
index c0825655dfe..c52db518e4d 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/enhancer/insert_transpose_for_sort.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/enhancer/insert_transpose_for_sort.cc
@@ -54,7 +54,7 @@ CNodePtr InsertForInput(const FuncGraphPtr &func_graph, const CNodePtr &node, co
   auto in_node = common::AnfAlgo::GetInputNode(node, 0);
   auto type = common::AnfAlgo::GetPrevNodeOutputInferDataType(node, 0);
-  auto in_shape = common::AnfAlgo::GetPrevNodeOutputDetailShape(node, 0);
+  auto in_shape = AnfAlgo::GetPrevNodeOutputDetailShape(node, 0);
   auto transpose_out_shape = InferTransposeOutputShape(in_shape, perm);
 
   auto ori_out_types = AnfAlgo::GetAllOutputInferDataTypes(node);
@@ -112,7 +112,7 @@ AnfNodePtr InsertForOutput(const FuncGraphPtr &func_graph, const CNodePtr &orig_
     (void)transpose_inputs.push_back(tuple_getitem);
     (void)transpose_inputs.push_back(perm_value_input);
-    auto shape = common::AnfAlgo::GetOutputDetailShape(node, output_idx);
+    auto shape = AnfAlgo::GetOutputDetailShape(node, output_idx);
     auto type = common::AnfAlgo::GetOutputInferDataType(node, output_idx);
     auto transpose_out_shape = InferTransposeOutputShape(shape, perm);
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/format_type/deal_ref_output.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/format_type/deal_ref_output.cc
index 942ed33ae10..c1e7de3c828 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/format_type/deal_ref_output.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/format_type/deal_ref_output.cc
@@ -113,7 +113,7 @@ AnfNodePtr DealRefOutput::AddAdditionalToRefOutput(const FuncGraphPtr &func_grap
   auto cur_format = AnfAlgo::GetOutputFormat(cnode, output_index);
   auto cur_type = AnfAlgo::GetOutputDeviceDataType(cnode, output_index);
   auto cur_shape = common::AnfAlgo::GetOutputInferShape(cnode, output_index);
-  auto detail_shape = common::AnfAlgo::GetOutputDetailShape(cnode, output_index);
+  auto detail_shape = AnfAlgo::GetOutputDetailShape(cnode, output_index);
   // insert trans
   if (origin_format != cur_format && cur_shape.size() > 1) {
     auto kernel_select = std::make_shared<KernelSelect>();
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/format_type/eliminate_graph_output_transdata.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/format_type/eliminate_graph_output_transdata.cc
index 1e7ea3ae437..b4e0e4a898a 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/format_type/eliminate_graph_output_transdata.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/format_type/eliminate_graph_output_transdata.cc
@@ -35,7 +35,7 @@ bool IsDepthwiseCase(const AnfNodePtr &node, size_t index, const std::string &fo
   if (format != kOpFormat_FRAC_Z) {
     return false;
   }
-  abstract::BaseShapePtr base_shape = common::AnfAlgo::GetOutputDetailShape(node, index);
+  abstract::BaseShapePtr base_shape = AnfAlgo::GetOutputDetailShape(node, index);
   MS_EXCEPTION_IF_NULL(base_shape);
   if (base_shape->isa<abstract::Shape>()) {
     auto shape_ptr = base_shape->cast<abstract::ShapePtr>();
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/format_type/insert_cast.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/format_type/insert_cast.cc
index 25df70acf5a..2bc5916d656 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/format_type/insert_cast.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/format_type/insert_cast.cc
@@ -51,7 +51,7 @@ AnfNodePtr InsertCastForMultipleOutput(const FuncGraphPtr &func_graph, const CNo
   size_t out_num = AnfAlgo::GetOutputTensorNum(cnode);
   for (size_t output_idx = 0; output_idx < out_num; ++output_idx) {
     AnfNodePtr replace_node = nullptr;
-    const auto origin_shape = common::AnfAlgo::GetOutputDetailShape(cnode, output_idx);
+    const auto origin_shape = AnfAlgo::GetOutputDetailShape(cnode, output_idx);
     const auto origin_type = common::AnfAlgo::GetOutputInferDataType(cnode, output_idx);
     auto idx = NewValueNode(SizeToLong(output_idx));
     MS_EXCEPTION_IF_NULL(idx);
@@ -105,7 +105,7 @@ AnfNodePtr InsertCastForOutput(const FuncGraphPtr &func_graph, const CNodePtr &o
   // Single output, output is not TUPLE
   if (!cnode->Type()->isa<Tuple>()) {
     const std::string dev_fmt = AnfAlgo::GetOutputFormat(cnode, 0);
-    const abstract::BaseShapePtr origin_shape = common::AnfAlgo::GetOutputDetailShape(cnode, 0);
+    const abstract::BaseShapePtr origin_shape = AnfAlgo::GetOutputDetailShape(cnode, 0);
     const TypeId origin_type = common::AnfAlgo::GetOutputInferDataType(cnode, 0);
     const TypeId device_type = AnfAlgo::GetOutputDeviceDataType(cnode, 0);
     AnfNodePtr replace_node = cnode;
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/format_type/insert_transdata_for_runop.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/format_type/insert_transdata_for_runop.cc
index 0bea32c4213..7c754603be8 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/format_type/insert_transdata_for_runop.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/format_type/insert_transdata_for_runop.cc
@@ -31,8 +31,8 @@ bool IsDepthwiseCase(const CNodePtr &node, size_t index, const std::string &form
   if (format != kOpFormat_FRAC_Z) {
     return false;
   }
-  abstract::BaseShapePtr base_shape = is_tuple ? common::AnfAlgo::GetPrevNodeOutputDetailShape(node, index)
-                                               : common::AnfAlgo::GetOutputDetailShape(node, index);
+  abstract::BaseShapePtr base_shape =
+    is_tuple ? AnfAlgo::GetPrevNodeOutputDetailShape(node, index) : AnfAlgo::GetOutputDetailShape(node, index);
   MS_EXCEPTION_IF_NULL(base_shape);
   if (base_shape->isa<abstract::Shape>()) {
     auto shape_ptr = base_shape->cast<abstract::ShapePtr>();
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/format_type/merge_cast_to_op.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/format_type/merge_cast_to_op.cc
index c2f5ff8014e..0a6e7e3705a 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/format_type/merge_cast_to_op.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/format_type/merge_cast_to_op.cc
@@ -125,7 +125,7 @@ void ChangeNodeInferInfo(const CNodePtr &cnode, const CNodePtr &cast, const size
   MS_EXCEPTION_IF_NULL(cnode);
   MS_EXCEPTION_IF_NULL(cast);
   auto cast_dtype = common::AnfAlgo::GetOutputInferDataType(cast, 0);
-  auto cast_shape = common::AnfAlgo::GetOutputDetailShape(cast, 0);
+  auto cast_shape = AnfAlgo::GetOutputDetailShape(cast, 0);
   std::vector<abstract::BaseShapePtr> shapes;
   std::vector<TypeId> types;
   size_t output_num = AnfAlgo::GetOutputTensorNum(cnode);
@@ -135,7 +135,7 @@ void ChangeNodeInferInfo(const CNodePtr &cnode, const CNodePtr &cast, const size
       (void)types.emplace_back(cast_dtype);
       continue;
     }
-    (void)shapes.emplace_back(common::AnfAlgo::GetOutputDetailShape(cnode, index));
+    (void)shapes.emplace_back(AnfAlgo::GetOutputDetailShape(cnode, index));
     (void)types.emplace_back(common::AnfAlgo::GetOutputInferDataType(cnode, index));
   }
   common::AnfAlgo::SetOutputTypeAndDetailShape(types, shapes, cnode.get());
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/ge/lamb_fission.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/ge/lamb_fission.cc
index 5d8129684a3..bc425008f68 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/ge/lamb_fission.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/ge/lamb_fission.cc
@@ -20,6 +20,7 @@
 #include "include/common/utils/anfalgo.h"
 #include "mindspore/core/ops/core_ops.h"
 #include "backend/common/optimizer/optimizer.h"
+#include "backend/common/session/anf_runtime_algorithm.h"
 
 namespace mindspore {
 namespace opt {
@@ -47,8 +48,7 @@ AnfNodePtr CreateCastNode(const FuncGraphPtr &graph, const AnfNodePtr &input, co
   if (common::AnfAlgo::GetOutputInferDataType(input, 0) != dst_type) {
     AnfNodePtr cast = graph->NewCNode({NewValueNode(std::make_shared<Primitive>(kCastOpName)), input});
     MS_EXCEPTION_IF_NULL(cast);
-    common::AnfAlgo::SetOutputTypeAndDetailShape({dst_type}, {common::AnfAlgo::GetOutputDetailShape(input, 0)},
-                                                 cast.get());
+    common::AnfAlgo::SetOutputTypeAndDetailShape({dst_type}, {AnfAlgo::GetOutputDetailShape(input, 0)}, cast.get());
     common::AnfAlgo::SetNodeAttr(kAttrDstType, TypeIdToType(dst_type), cast);
     cast->set_scope(input->scope());
     return cast;
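Of the hunks just above, merge_cast_to_op.cc's ChangeNodeInferInfo illustrates the per-slot variant of this bookkeeping: every output slot's {type, shape} pair is copied verbatim except the one slot the Cast was merged into. A self-contained sketch of that substitution, with plain structs standing in for CNode and the AnfAlgo accessors:

    #include <cassert>
    #include <cstddef>
    #include <memory>
    #include <vector>

    // Stand-ins: a node's per-output metadata is a pair of parallel vectors.
    enum class TypeId { kFloat16, kFloat32 };
    struct BaseShape {};
    using BaseShapePtr = std::shared_ptr<BaseShape>;

    struct NodeMeta {
      std::vector<TypeId> types;         // one per output
      std::vector<BaseShapePtr> shapes;  // one per output, parallel to `types`
    };

    // Spirit of ChangeNodeInferInfo: rebuild all output slots, substituting the
    // cast's {type, shape} at `cast_index` and copying every other slot verbatim.
    NodeMeta MergeCastIntoOp(const NodeMeta &node, const NodeMeta &cast, size_t cast_index) {
      assert(cast.types.size() == 1 && cast.shapes.size() == 1);
      NodeMeta merged;
      for (size_t i = 0; i < node.types.size(); ++i) {
        bool replaced = (i == cast_index);
        merged.types.push_back(replaced ? cast.types[0] : node.types[i]);
        merged.shapes.push_back(replaced ? cast.shapes[0] : node.shapes[i]);
      }
      return merged;
    }

    int main() {
      NodeMeta node{{TypeId::kFloat16, TypeId::kFloat16}, {nullptr, nullptr}};
      NodeMeta cast{{TypeId::kFloat32}, {nullptr}};
      return MergeCastIntoOp(node, cast, 0).types[0] == TypeId::kFloat32 ? 0 : 1;
    }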
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/ge/sparse_softmax_cross_entropy_with_logits_split.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/ge/sparse_softmax_cross_entropy_with_logits_split.cc
index bee14b758f0..d8fb3ad65d6 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/ge/sparse_softmax_cross_entropy_with_logits_split.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/ge/sparse_softmax_cross_entropy_with_logits_split.cc
@@ -246,7 +246,7 @@ CNodePtr CreateTile(const FuncGraphPtr &graph, const CNodePtr &sparse_softmax_no
   tile_node->set_scope(mul_node->scope());
   common::AnfAlgo::SetOutputTypeAndDetailShape({common::AnfAlgo::GetPrevNodeOutputInferDataType(mul_node, 1)},
-                                               {common::AnfAlgo::GetPrevNodeOutputDetailShape(sparse_softmax_node, 1)},
+                                               {AnfAlgo::GetPrevNodeOutputDetailShape(sparse_softmax_node, 1)},
                                                tile_node.get());
   // Feature map set
   std::vector<size_t> feature_map_input_indexs;
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/batch_norm_grad_split.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/batch_norm_grad_split.cc
index e2474228797..3c876613de3 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/batch_norm_grad_split.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/batch_norm_grad_split.cc
@@ -44,8 +44,7 @@ void BatchNormGradSplit::CreateOutputsOfUpdateGrad(const FuncGraphPtr &graph, co
   auto types = {common::AnfAlgo::GetOutputInferDataType(bn_grad_node, 1),
                 common::AnfAlgo::GetOutputInferDataType(bn_grad_node, 2)};
-  auto shapes = {common::AnfAlgo::GetOutputDetailShape(bn_grad_node, 1),
-                 common::AnfAlgo::GetOutputDetailShape(bn_grad_node, 2)};
+  auto shapes = {AnfAlgo::GetOutputDetailShape(bn_grad_node, 1), AnfAlgo::GetOutputDetailShape(bn_grad_node, 2)};
   common::AnfAlgo::SetOutputTypeAndDetailShape(types, shapes, bn_update_grad.get());
   common::AnfAlgo::CopyNodeAttr(kAttrEpsilon, bn_grad_node, bn_update_grad);
 
@@ -79,7 +78,7 @@ void BatchNormGradSplit::CreateOutputsOfReduceGrad(const FuncGraphPtr &graph, co
   bn_reduce_grad->set_scope(bn_grad_node->scope());
   auto types = {common::AnfAlgo::GetOutputInferDataType(bn_grad_node, 0)};
-  auto shapes = {common::AnfAlgo::GetOutputDetailShape(bn_grad_node, 0)};
+  auto shapes = {AnfAlgo::GetOutputDetailShape(bn_grad_node, 0)};
   common::AnfAlgo::SetOutputTypeAndDetailShape(types, shapes, bn_reduce_grad.get());
   common::AnfAlgo::CopyNodeAttr(kAttrEpsilon, bn_grad_node, bn_reduce_grad);
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/bce_with_logits_loss_fission.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/bce_with_logits_loss_fission.cc
index 7b7b961b047..72fe54e5158 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/bce_with_logits_loss_fission.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/bce_with_logits_loss_fission.cc
@@ -41,7 +41,7 @@ AnfNodePtr BCEWithLogitsLossFission::AddReduceNode(const FuncGraphPtr &func_grap
   MS_EXCEPTION_IF_NULL(new_cnode);
   auto predict_input = cnode->inputs()[kIndex1];
   auto new_node_dtype = {common::AnfAlgo::GetOutputInferDataType(predict_input, 0)};
-  auto new_node_shape = {common::AnfAlgo::GetOutputDetailShape(predict_input, 0)};
+  auto new_node_shape = {AnfAlgo::GetOutputDetailShape(predict_input, 0)};
   // The kAttrReduction is necessary for InferShape of BCEWithLogitsLoss op
   common::AnfAlgo::SetNodeAttr(kAttrReduction, MakeValue("none"), new_cnode);
   common::AnfAlgo::SetOutputTypeAndDetailShape(new_node_dtype, new_node_shape, new_cnode.get());
@@ -60,7 +60,7 @@ AnfNodePtr BCEWithLogitsLossFission::AddReduceNode(const FuncGraphPtr &func_grap
   }
   auto reduce_node = NewCNode(reduce_inputs, func_graph);
   MS_EXCEPTION_IF_NULL(reduce_node);
-  auto shape = {common::AnfAlgo::GetOutputDetailShape(node, 0)};
+  auto shape = {AnfAlgo::GetOutputDetailShape(node, 0)};
   auto type = common::AnfAlgo::GetOutputInferDataType(node, 0);
   if (type == kNumberTypeFloat16) {
     common::AnfAlgo::SetOutputTypeAndDetailShape({kNumberTypeFloat32}, shape, reduce_node.get());
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/bn_grad_split.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/bn_grad_split.cc
index a64e8fcf5c5..72f24ac79d9 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/bn_grad_split.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/bn_grad_split.cc
@@ -45,8 +45,7 @@ void BnGradSplit::CreateOutputsOfUpdateGrad(const FuncGraphPtr &graph, const CNo
   auto types = {common::AnfAlgo::GetOutputInferDataType(bn_grad_node, 1),
                 common::AnfAlgo::GetOutputInferDataType(bn_grad_node, 2)};
-  auto shapes = {common::AnfAlgo::GetOutputDetailShape(bn_grad_node, 1),
-                 common::AnfAlgo::GetOutputDetailShape(bn_grad_node, 2)};
+  auto shapes = {AnfAlgo::GetOutputDetailShape(bn_grad_node, 1), AnfAlgo::GetOutputDetailShape(bn_grad_node, 2)};
   common::AnfAlgo::SetOutputTypeAndDetailShape(types, shapes, bn_update_grad.get());
   common::AnfAlgo::CopyNodeAttr(kAttrEpsilon, bn_grad_node, bn_update_grad);
   if (common::AnfAlgo::HasNodeAttr(kAttrFormat, bn_grad_node)) {
@@ -86,7 +85,7 @@ void BnGradSplit::CreateOutputsOfReduceGrad(const FuncGraphPtr &graph, const CNo
   bn_reduce_grad->set_scope(bn_grad_node->scope());
   auto types = {common::AnfAlgo::GetOutputInferDataType(bn_grad_node, 0)};
-  auto shapes = {common::AnfAlgo::GetOutputDetailShape(bn_grad_node, 0)};
+  auto shapes = {AnfAlgo::GetOutputDetailShape(bn_grad_node, 0)};
   common::AnfAlgo::SetOutputTypeAndDetailShape(types, shapes, bn_reduce_grad.get());
   common::AnfAlgo::CopyNodeAttr(kAttrEpsilon, bn_grad_node, bn_reduce_grad);
   if (common::AnfAlgo::HasNodeAttr(kAttrFormat, bn_grad_node)) {
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/bn_split.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/bn_split.cc
index f9ebb24a1a7..c079b797b07 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/bn_split.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/bn_split.cc
@@ -56,8 +56,7 @@ bool BnSplit::CreateOutputsOfBNTrainingReduce(const FuncGraphPtr &graph, const C
   bn_training_reduce->set_kernel_info(kernel_info);
   auto types = {common::AnfAlgo::GetOutputInferDataType(bn_cnode, 1),
                 common::AnfAlgo::GetOutputInferDataType(bn_cnode, 1)};
-  auto shapes = {common::AnfAlgo::GetOutputDetailShape(bn_cnode, 1),
-                 common::AnfAlgo::GetOutputDetailShape(bn_cnode, 1)};
+  auto shapes = {AnfAlgo::GetOutputDetailShape(bn_cnode, 1), AnfAlgo::GetOutputDetailShape(bn_cnode, 1)};
   common::AnfAlgo::SetOutputTypeAndDetailShape(types, shapes, bn_training_reduce.get());
   bn_training_reduce->set_scope(bn_cnode->scope());
   if (is_dynamic) {
@@ -205,8 +204,7 @@ AnfNodePtr InsertCast(const FuncGraphPtr &graph, const AnfNodePtr &input, const
   MS_EXCEPTION_IF_NULL(input);
   if (common::AnfAlgo::GetOutputInferDataType(input, 0) != dst_type) {
     AnfNodePtr cast = graph->NewCNode({NewValueNode(std::make_shared<Primitive>(kCastOpName)), input});
-    common::AnfAlgo::SetOutputTypeAndDetailShape({dst_type}, {common::AnfAlgo::GetOutputDetailShape(input, 0)},
-                                                 cast.get());
+    common::AnfAlgo::SetOutputTypeAndDetailShape({dst_type}, {AnfAlgo::GetOutputDetailShape(input, 0)}, cast.get());
     common::AnfAlgo::SetNodeAttr(kIsBackendCast, MakeValue(true), cast);
     cast->set_scope(input->scope());
     return cast;
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/concat_fission.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/concat_fission.cc
index e93eb15c703..4c68ca225fb 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/concat_fission.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/concat_fission.cc
@@ -50,7 +50,7 @@ AnfNodePtr ConcatFission::CreateNewConcat(const FuncGraphPtr &func_graph, const
   if (axis_from_attr < 0) {
     axis_from_attr += SizeToLong(input_shape.size());
   }
-  auto output_shape_ptr = common::AnfAlgo::GetOutputDetailShape(origin_concat_cnode, 0);
+  auto output_shape_ptr = AnfAlgo::GetOutputDetailShape(origin_concat_cnode, 0);
   MS_EXCEPTION_IF_NULL(output_shape_ptr);
   auto output_shapeptr = output_shape_ptr->cast<abstract::ShapePtr>();
   MS_EXCEPTION_IF_NULL(output_shapeptr);
@@ -63,7 +63,7 @@ AnfNodePtr ConcatFission::CreateNewConcat(const FuncGraphPtr &func_graph, const
   auto axis = LongToSize(axis_from_attr);
   output_shape[axis] = 0;
   for (size_t i = begin_index; i < begin_index + offset; ++i) {
-    auto last_input_shape_ptr = common::AnfAlgo::GetPrevNodeOutputDetailShape(origin_concat_cnode, i - 1);
+    auto last_input_shape_ptr = AnfAlgo::GetPrevNodeOutputDetailShape(origin_concat_cnode, i - 1);
     MS_EXCEPTION_IF_NULL(last_input_shape_ptr);
     auto last_input_shapeptr = last_input_shape_ptr->cast<abstract::ShapePtr>();
     MS_EXCEPTION_IF_NULL(last_input_shapeptr);
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/lamb_fission.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/lamb_fission.cc
index 14d8022a1ec..c591d7e11b5 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/lamb_fission.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/lamb_fission.cc
@@ -20,6 +20,7 @@
 #include "include/common/utils/anfalgo.h"
 #include "mindspore/core/ops/core_ops.h"
 #include "backend/common/optimizer/optimizer.h"
+#include "backend/common/session/anf_runtime_algorithm.h"
 
 namespace mindspore {
 namespace opt {
@@ -46,8 +47,7 @@ AnfNodePtr CreateCastNode(const FuncGraphPtr &graph, const AnfNodePtr &input, co
   MS_EXCEPTION_IF_NULL(input);
   if (common::AnfAlgo::GetOutputInferDataType(input, 0) != dst_type) {
     AnfNodePtr cast = graph->NewCNode({NewValueNode(std::make_shared<Primitive>(kCastOpName)), input});
-    common::AnfAlgo::SetOutputTypeAndDetailShape({dst_type}, {common::AnfAlgo::GetOutputDetailShape(input, 0)},
-                                                 cast.get());
+    common::AnfAlgo::SetOutputTypeAndDetailShape({dst_type}, {AnfAlgo::GetOutputDetailShape(input, 0)}, cast.get());
     common::AnfAlgo::SetNodeAttr(kAttrDstType, MakeValue(static_cast<int64_t>(dst_type)), cast);
     cast->set_scope(input->scope());
     return cast;
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/layer_norm_grad_split.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/layer_norm_grad_split.cc
index a27fb8f3942..726dbbbd759 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/layer_norm_grad_split.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/layer_norm_grad_split.cc
@@ -48,8 +48,8 @@ void LayerNormGradSplit::CreateOutputsOfLayerNormXBackpropV2(const FuncGraphPtr
   MS_EXCEPTION_IF_NULL(layer_norm_x_backprop);
   layer_norm_x_backprop->set_scope(layer_norm_grad->scope());
   auto types = {common::AnfAlgo::GetOutputInferDataType(layer_norm_grad, 0), kNumberTypeFloat32};
-  auto shapes = {common::AnfAlgo::GetOutputDetailShape(layer_norm_grad, 0),
-                 common::AnfAlgo::GetPrevNodeOutputDetailShape(layer_norm_grad, 1)};
+  auto shapes = {AnfAlgo::GetOutputDetailShape(layer_norm_grad, 0),
+                 AnfAlgo::GetPrevNodeOutputDetailShape(layer_norm_grad, 1)};
   if (is_dynamic) {
     common::AnfAlgo::SetNodeAttr(kAttrInputIsDynamicShape, MakeValue(true), layer_norm_x_backprop);
     common::AnfAlgo::SetNodeAttr(kAttrOutputIsDynamicShape, MakeValue(true), layer_norm_x_backprop);
@@ -78,8 +78,8 @@ void LayerNormGradSplit::CreateOutputsOfLayerNormBetaGammaBackpropV2(
   }
   auto types = {common::AnfAlgo::GetOutputInferDataType(layer_norm_grad, kLayerNormGradOutputGammaIndex),
                 common::AnfAlgo::GetOutputInferDataType(layer_norm_grad, kLayerNormGradOutputBetaIndex)};
-  auto shapes = {common::AnfAlgo::GetOutputDetailShape(layer_norm_grad, kLayerNormGradOutputGammaIndex),
-                 common::AnfAlgo::GetOutputDetailShape(layer_norm_grad, kLayerNormGradOutputBetaIndex)};
+  auto shapes = {AnfAlgo::GetOutputDetailShape(layer_norm_grad, kLayerNormGradOutputGammaIndex),
+                 AnfAlgo::GetOutputDetailShape(layer_norm_grad, kLayerNormGradOutputBetaIndex)};
   common::AnfAlgo::SetOutputTypeAndDetailShape(types, shapes, layer_norm_beta_gamma_backprop.get());
 
   // get device shape of LayerNormGrad's 5th Input, and convert it to attr
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/pack_fission.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/pack_fission.cc
index 4ef9a5efb8a..a68e911a33e 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/pack_fission.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/pack_fission.cc
@@ -40,7 +40,7 @@ AnfNodePtr PackFission::CreateNewPack(const FuncGraphPtr &func_graph, const CNod
   std::vector<int64_t> dyn_input_sizes{SizeToLong(offset)};
   common::AnfAlgo::SetNodeAttr(kAttrDynInputSizes, MakeValue(dyn_input_sizes), new_pack);
   // infer shape
-  auto output_shape_ptr = common::AnfAlgo::GetOutputDetailShape(origin_pack_cnode, 0);
+  auto output_shape_ptr = AnfAlgo::GetOutputDetailShape(origin_pack_cnode, 0);
   MS_EXCEPTION_IF_NULL(output_shape_ptr);
   auto output_shape = output_shape_ptr->cast<abstract::ShapePtr>();
   MS_EXCEPTION_IF_NULL(output_shape);
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/reduce_sum_fission.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/reduce_sum_fission.cc
index 6ec6d09615c..32fafcdab16 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/reduce_sum_fission.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/reduce_sum_fission.cc
@@ -54,7 +54,7 @@ const AnfNodePtr ReduceSumFission::Process(const FuncGraphPtr &graph, const AnfN
   auto cnode = node->cast<CNodePtr>();
   auto prim = common::AnfAlgo::GetCNodePrimitive(cnode);
   auto keep_dims = common::AnfAlgo::GetNodeAttr<bool>(cnode, kAttrKeepDims);
-  auto out_shape = common::AnfAlgo::GetOutputDetailShape(cnode, 0);
+  auto out_shape = AnfAlgo::GetOutputDetailShape(cnode, 0);
   std::vector<int64_t> inp_axis;
   auto axis_value = prim->GetAttr(kAttrAxis);
   MS_EXCEPTION_IF_NULL(axis_value);
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/transdata_split.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/transdata_split.cc
index 15618a333a1..1ebbac540ef 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/transdata_split.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/transdata_split.cc
@@ -32,9 +32,9 @@ bool IsDepthwiseCase(const CNodePtr &node, const std::string &input_format, cons
   MS_EXCEPTION_IF_NULL(node);
   abstract::BaseShapePtr base_shape;
   if (input_format == kOpFormat_FRAC_Z && output_format == kOpFormat_DEFAULT) {
-    base_shape = common::AnfAlgo::GetPrevNodeOutputDetailShape(node, 0);
+    base_shape = AnfAlgo::GetPrevNodeOutputDetailShape(node, 0);
   } else if (input_format == kOpFormat_DEFAULT && output_format == kOpFormat_FRAC_Z) {
-    base_shape = common::AnfAlgo::GetOutputDetailShape(node, 0);
+    base_shape = AnfAlgo::GetOutputDetailShape(node, 0);
   } else {
     return false;
   }
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/adam_apply_one_with_decay_rule.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/adam_apply_one_with_decay_rule.cc
index efd74cb8c1d..e39220cd053 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/adam_apply_one_with_decay_rule.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/adam_apply_one_with_decay_rule.cc
@@ -330,8 +330,8 @@ const AnfNodePtr AdamApplyOneWithDecayRule::Process(const FuncGraphPtr &graph, c
   MS_EXCEPTION_IF_NULL(add1);
   auto types = {common::AnfAlgo::GetOutputInferDataType(add1, 0), common::AnfAlgo::GetOutputInferDataType(add0, 0),
                 common::AnfAlgo::GetOutputInferDataType(sub0, 0)};
-  auto shapes = {common::AnfAlgo::GetOutputDetailShape(add1, 0), common::AnfAlgo::GetOutputDetailShape(add0, 0),
-                 common::AnfAlgo::GetOutputDetailShape(sub0, 0)};
+  auto shapes = {AnfAlgo::GetOutputDetailShape(add1, 0), AnfAlgo::GetOutputDetailShape(add0, 0),
+                 AnfAlgo::GetOutputDetailShape(sub0, 0)};
   common::AnfAlgo::SetOutputTypeAndDetailShape(types, shapes, fusion_node.get());
 
   std::vector<AnfNodePtr> fusion_node_outputs;
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/adaptive_max_pool2d_fusion.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/adaptive_max_pool2d_fusion.cc
index 8bd431f3ae2..2a25be4b076 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/adaptive_max_pool2d_fusion.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/adaptive_max_pool2d_fusion.cc
@@ -141,8 +141,8 @@ const AnfNodePtr AdaptiveMaxPool2DFusion::Process(const FuncGraphPtr &func_graph
   if (height % output_h != 0 || width % output_w != 0) {
     auto types = {common::AnfAlgo::GetOutputInferDataType(adaptive_max_pool2d, 0), kNumberTypeInt64};
-    auto shapes = {common::AnfAlgo::GetOutputDetailShape(adaptive_max_pool2d, 0),
-                   common::AnfAlgo::GetOutputDetailShape(adaptive_max_pool2d, 0)};
+    auto shapes = {AnfAlgo::GetOutputDetailShape(adaptive_max_pool2d, 0),
+                   AnfAlgo::GetOutputDetailShape(adaptive_max_pool2d, 0)};
     common::AnfAlgo::SetOutputTypeAndDetailShape(types, shapes, adaptive_max_pool2d.get());
     std::vector<AnfNodePtr> multi_outputs;
     CreateMultipleOutputsOfAnfNode(func_graph, adaptive_max_pool2d, kAdaptiveMaxpool2DOutputNumber, &multi_outputs);
@@ -159,7 +159,7 @@ const AnfNodePtr AdaptiveMaxPool2DFusion::Process(const FuncGraphPtr &func_graph
                                adaptive_max_pool2d->inputs().end());
   auto pooling = NewCNode(pooling_inputs, kernel_graph);
   auto types = {common::AnfAlgo::GetOutputInferDataType(adaptive_max_pool2d, 0)};
-  auto shapes = {common::AnfAlgo::GetOutputDetailShape(adaptive_max_pool2d, 0)};
+  auto shapes = {AnfAlgo::GetOutputDetailShape(adaptive_max_pool2d, 0)};
   common::AnfAlgo::SetOutputTypeAndDetailShape(types, shapes, pooling.get());
   pooling->set_scope(adaptive_max_pool2d->scope());
   SetNodeAttr(pooling, height_attr, width_attr);
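The fusion passes in this patch all finish the same way: parallel types and shapes vectors, one entry per output of the fused node, handed to SetOutputTypeAndDetailShape. The same shape pointer may legitimately fill several slots, as in the AdaptiveMaxPool2D hunk above, where the value and index outputs share one shape. A minimal stand-in for that contract (not the real AnfAlgo signature):

    #include <cstddef>
    #include <memory>
    #include <stdexcept>
    #include <vector>

    enum class TypeId { kFloat16, kFloat32, kInt64 };
    struct BaseShape {};
    using BaseShapePtr = std::shared_ptr<BaseShape>;

    struct FusedNode {
      std::vector<TypeId> out_types;
      std::vector<BaseShapePtr> out_shapes;
    };

    // Spirit of SetOutputTypeAndDetailShape: the two vectors describe the fused
    // node's outputs slot by slot and must stay the same length.
    void SetOutputTypeAndDetailShape(const std::vector<TypeId> &types,
                                     const std::vector<BaseShapePtr> &shapes, FusedNode *node) {
      if (types.size() != shapes.size()) {
        throw std::invalid_argument("types and shapes must be parallel");
      }
      node->out_types = types;
      node->out_shapes = shapes;
    }

    int main() {
      auto s = std::make_shared<BaseShape>();
      FusedNode pool;
      // Two outputs (value: float32, index: int64) sharing one detail shape.
      SetOutputTypeAndDetailShape({TypeId::kFloat32, TypeId::kInt64}, {s, s}, &pool);
      return pool.out_shapes[0] == pool.out_shapes[1] ? 0 : 1;
    }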
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/bn_reduce_grad_conv2d_backprop_filter_fusion.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/bn_reduce_grad_conv2d_backprop_filter_fusion.cc
index 87eb2d6c01f..76f88e370c7 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/bn_reduce_grad_conv2d_backprop_filter_fusion.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/bn_reduce_grad_conv2d_backprop_filter_fusion.cc
@@ -95,8 +95,7 @@ const AnfNodePtr BNReduceGradConv2dBackpropFilterFusion::Process(const FuncGraph
   MS_EXCEPTION_IF_NULL(fused_dbn_dw);
   auto types = {common::AnfAlgo::GetOutputInferDataType(bnreduce_grad, 0),
                 common::AnfAlgo::GetOutputInferDataType(conv_back_filter, 0)};
-  auto shapes = {common::AnfAlgo::GetOutputDetailShape(bnreduce_grad, 0),
-                 common::AnfAlgo::GetOutputDetailShape(conv_back_filter, 0)};
+  auto shapes = {AnfAlgo::GetOutputDetailShape(bnreduce_grad, 0), AnfAlgo::GetOutputDetailShape(conv_back_filter, 0)};
   common::AnfAlgo::SetOutputTypeAndDetailShape(types, shapes, fused_dbn_dw.get());
   fused_dbn_dw->set_scope(bnreduce_grad->scope());
   common::AnfAlgo::CopyNodeAttr(kAttrFilterSizes, conv_back_filter, fused_dbn_dw);
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/clip_by_norm_no_div_square_sum_fusion.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/clip_by_norm_no_div_square_sum_fusion.cc
index d5dc8c766e3..9097ac28c79 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/clip_by_norm_no_div_square_sum_fusion.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/clip_by_norm_no_div_square_sum_fusion.cc
@@ -65,7 +65,7 @@ const AnfNodePtr ClipByNormNoDivSquareSumFusion::Process(const FuncGraphPtr &gra
   auto fusion_node = NewCNode(inputs, graph);
   MS_EXCEPTION_IF_NULL(fusion_node);
   auto types = {common::AnfAlgo::GetOutputInferDataType(node, 0)};
-  auto shapes = {common::AnfAlgo::GetOutputDetailShape(node, 0)};
+  auto shapes = {AnfAlgo::GetOutputDetailShape(node, 0)};
   common::AnfAlgo::SetOutputTypeAndDetailShape(types, shapes, fusion_node.get());
   fusion_node->set_scope(node->scope());
   return fusion_node;
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/clip_by_value_fusion.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/clip_by_value_fusion.cc
index 3f7e5d46de2..080522fbb86 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/clip_by_value_fusion.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/clip_by_value_fusion.cc
@@ -91,7 +91,7 @@ const AnfNodePtr ClipByValueFusion::Process(const FuncGraphPtr &graph, const Anf
   auto clip_by_value = NewCNode(inputs, graph);
   MS_EXCEPTION_IF_NULL(clip_by_value);
   auto types = {common::AnfAlgo::GetOutputInferDataType(node, 0)};
-  auto shapes = {common::AnfAlgo::GetOutputDetailShape(node, 0)};
+  auto shapes = {AnfAlgo::GetOutputDetailShape(node, 0)};
   common::AnfAlgo::SetOutputTypeAndDetailShape(types, shapes, clip_by_value.get());
   clip_by_value->set_scope(node->scope());
   return clip_by_value;
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/confusion_mul_grad_fusion.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/confusion_mul_grad_fusion.cc
index bce4d24f384..cfdf1174fb0 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/confusion_mul_grad_fusion.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/confusion_mul_grad_fusion.cc
@@ -113,7 +113,7 @@ CNodePtr ConfusionMulGradFusion::CreateFusionNode(const FuncGraphPtr &graph, con
   common::AnfAlgo::CopyNodeAttr(kAttrKeepDims, reduce_sum, fusion_node);
   auto types = {common::AnfAlgo::GetOutputInferDataType(mul0, 0),
                 common::AnfAlgo::GetOutputInferDataType(reduce_sum, 0)};
-  auto shapes = {common::AnfAlgo::GetOutputDetailShape(mul0, 0), common::AnfAlgo::GetOutputDetailShape(reduce_sum, 0)};
+  auto shapes = {AnfAlgo::GetOutputDetailShape(mul0, 0), AnfAlgo::GetOutputDetailShape(reduce_sum, 0)};
   common::AnfAlgo::SetOutputTypeAndDetailShape(types, shapes, fusion_node.get());
   return fusion_node;
 }
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/lamb_next_mv_with_decay_v1_rule.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/lamb_next_mv_with_decay_v1_rule.cc
index b3db5cd4b3f..bb8a34beac9 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/lamb_next_mv_with_decay_v1_rule.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/lamb_next_mv_with_decay_v1_rule.cc
@@ -183,8 +183,8 @@ const AnfNodePtr LambNextMVWithDecayV1Rule::Process(const FuncGraphPtr &func_gra
   std::tie(add0, add1) = GetAdd0Add1Nodes(real_div0, real_div1);
   auto types = {common::AnfAlgo::GetOutputInferDataType(node, 0), common::AnfAlgo::GetOutputInferDataType(add0, 0),
                 common::AnfAlgo::GetOutputInferDataType(add1, 0), common::AnfAlgo::GetOutputInferDataType(add5, 0)};
-  auto shapes = {common::AnfAlgo::GetOutputDetailShape(node, 0), common::AnfAlgo::GetOutputDetailShape(add0, 0),
-                 common::AnfAlgo::GetOutputDetailShape(add1, 0), common::AnfAlgo::GetOutputDetailShape(add5, 0)};
+  auto shapes = {AnfAlgo::GetOutputDetailShape(node, 0), AnfAlgo::GetOutputDetailShape(add0, 0),
+                 AnfAlgo::GetOutputDetailShape(add1, 0), AnfAlgo::GetOutputDetailShape(add5, 0)};
   common::AnfAlgo::SetOutputTypeAndDetailShape(types, shapes, fusion_node.get());
 
   std::vector<AnfNodePtr> fusion_node_outputs;
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/lamb_update_with_lr_rule_fusion.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/lamb_update_with_lr_rule_fusion.cc
index 1373171cadc..1f639fc31be 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/lamb_update_with_lr_rule_fusion.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/lamb_update_with_lr_rule_fusion.cc
@@ -70,7 +70,7 @@ const AnfNodePtr LambUpdateWithLRRuleFusion::Process(const FuncGraphPtr &graph,
   MS_EXCEPTION_IF_NULL(lamb_update_with_lr);
 
   auto types = {common::AnfAlgo::GetOutputInferDataType(node, 0)};
-  auto shapes = {common::AnfAlgo::GetOutputDetailShape(node, 0)};
+  auto shapes = {AnfAlgo::GetOutputDetailShape(node, 0)};
   common::AnfAlgo::SetOutputTypeAndDetailShape(types, shapes, lamb_update_with_lr.get());
   lamb_update_with_lr->set_scope(node->scope());
   return lamb_update_with_lr;
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/softmax_dropout_do_mask_v3_fusion.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/softmax_dropout_do_mask_v3_fusion.cc
index 7a5a309d9b9..539be1810ff 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/softmax_dropout_do_mask_v3_fusion.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/softmax_dropout_do_mask_v3_fusion.cc
@@ -83,7 +83,7 @@ const AnfNodePtr SoftmaxDropoutDoMaskV3Fusion::Process(const FuncGraphPtr &graph
   MS_EXCEPTION_IF_NULL(softmax_dropout);
   auto types = {common::AnfAlgo::GetOutputInferDataType(softmax, 0),
                 common::AnfAlgo::GetOutputInferDataType(dropout, 0)};
-  auto shapes = {common::AnfAlgo::GetOutputDetailShape(softmax, 0), common::AnfAlgo::GetOutputDetailShape(dropout, 0)};
+  auto shapes = {AnfAlgo::GetOutputDetailShape(softmax, 0), AnfAlgo::GetOutputDetailShape(dropout, 0)};
   common::AnfAlgo::SetOutputTypeAndDetailShape(types, shapes, softmax_dropout.get());
   softmax_dropout->set_scope(softmax->scope());
   common::AnfAlgo::CopyNodeAttr(kAttrAxis, softmax, softmax_dropout);
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/square_sum_fusion.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/square_sum_fusion.cc
index f5ddc2a6f21..8738e3de7ba 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/square_sum_fusion.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/square_sum_fusion.cc
@@ -60,7 +60,7 @@ CNodePtr SquareSumFusion::GenerateSquareSumV1(const FuncGraphPtr &graph, const C
   MS_EXCEPTION_IF_NULL(kernel_info);
   square_sumv1->set_kernel_info(kernel_info);
   auto types = {common::AnfAlgo::GetOutputInferDataType(sum, 0)};
-  auto shapes = {common::AnfAlgo::GetOutputDetailShape(sum, 0)};
+  auto shapes = {AnfAlgo::GetOutputDetailShape(sum, 0)};
   common::AnfAlgo::SetOutputTypeAndDetailShape(types, shapes, square_sumv1.get());
   square_sumv1->set_scope(sum->scope());
   common::AnfAlgo::CopyNodeAttr(kAttrAxis, sum, square_sumv1);
@@ -82,7 +82,7 @@ CNodePtr SquareSumFusion::GenerateSquareSumV2(const FuncGraphPtr &graph, const C
   auto square_sumv2 = NewCNode(square_sumv2_inputs, graph);
   MS_EXCEPTION_IF_NULL(square_sumv2);
   auto types = {common::AnfAlgo::GetOutputInferDataType(sum, 0), common::AnfAlgo::GetOutputInferDataType(square, 0)};
-  auto shapes = {common::AnfAlgo::GetOutputDetailShape(sum, 0), common::AnfAlgo::GetOutputDetailShape(square, 0)};
+  auto shapes = {AnfAlgo::GetOutputDetailShape(sum, 0), AnfAlgo::GetOutputDetailShape(square, 0)};
   common::AnfAlgo::SetOutputTypeAndDetailShape(types, shapes, square_sumv2.get());
   square_sumv2->set_scope(sum->scope());
   common::AnfAlgo::CopyNodeAttr(kAttrAxis, sum, square_sumv2);
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/mindir/all_to_all_unify_mindir.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/mindir/all_to_all_unify_mindir.cc
index 3d173f0673f..3b78be462cc 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/mindir/all_to_all_unify_mindir.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/mindir/all_to_all_unify_mindir.cc
@@ -22,6 +22,7 @@
 #include "include/common/utils/comm_manager.h"
 #include "backend/common/optimizer/helper.h"
 #include "frontend/parallel/ops_info/ops_utils.h"
+#include "backend/common/session/anf_runtime_algorithm.h"
 
 namespace mindspore {
 namespace opt {
@@ -117,7 +118,7 @@ CNodePtr AllToAllUnifyMindIR::CreateAllToAllvNode(const FuncGraphPtr &graph, con
   (void)all_to_all_v_input.insert(all_to_all_v_input.end(), split_outputs.begin(), split_outputs.end());
   auto all_to_all_v = NewCNode(all_to_all_v_input, graph);
   MS_EXCEPTION_IF_NULL(all_to_all_v);
-  auto single_shape = common::AnfAlgo::GetOutputDetailShape(split_outputs[0], 0UL);
+  auto single_shape = AnfAlgo::GetOutputDetailShape(split_outputs[0], 0UL);
   auto single_type = common::AnfAlgo::GetOutputInferDataType(split_outputs[0], 0UL);
   std::vector<TypeId> dtypes(split_count, single_type);
   std::vector<abstract::BaseShapePtr> shapes(split_count, single_shape);
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/mindir/bn_grad_unify_mindir.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/mindir/bn_grad_unify_mindir.cc
index d990a69af9f..8a99015237b 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/mindir/bn_grad_unify_mindir.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/mindir/bn_grad_unify_mindir.cc
@@ -49,11 +49,10 @@ AnfNodePtr BuildBatchNormGrad(const PatternMap &m, const AnfNodePtr &new_node) {
                 common::AnfAlgo::GetOutputInferDataType(bn_grad_node, 2UL),
                 common::AnfAlgo::GetPrevNodeOutputInferDataType(bn_grad_node, 3UL),
                 common::AnfAlgo::GetPrevNodeOutputInferDataType(bn_grad_node, 4UL)};
-  auto shapes = {common::AnfAlgo::GetOutputDetailShape(bn_grad_node, 0UL),
-                 common::AnfAlgo::GetOutputDetailShape(bn_grad_node, 1UL),
-                 common::AnfAlgo::GetOutputDetailShape(bn_grad_node, 2UL),
-                 common::AnfAlgo::GetPrevNodeOutputDetailShape(bn_grad_node, 3UL),
-                 common::AnfAlgo::GetPrevNodeOutputDetailShape(bn_grad_node, 4UL)};
+  auto shapes = {AnfAlgo::GetOutputDetailShape(bn_grad_node, 0UL), AnfAlgo::GetOutputDetailShape(bn_grad_node, 1UL),
+                 AnfAlgo::GetOutputDetailShape(bn_grad_node, 2UL),
+                 AnfAlgo::GetPrevNodeOutputDetailShape(bn_grad_node, 3UL),
+                 AnfAlgo::GetPrevNodeOutputDetailShape(bn_grad_node, 4UL)};
   common::AnfAlgo::SetOutputTypeAndDetailShape(types, shapes, new_bn_grad.get());
   common::AnfAlgo::CopyNodeAttrs(bn_grad_node, new_bn_grad);
   common::AnfAlgo::SetNodeAttr(kAttrUnifyIRPassed, MakeValue(true), new_bn_grad);
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/mindir/maxpool_to_maxpool_with_argmax.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/mindir/maxpool_to_maxpool_with_argmax.cc
index e0c1b7fdacc..367690fde9a 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/mindir/maxpool_to_maxpool_with_argmax.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/mindir/maxpool_to_maxpool_with_argmax.cc
@@ -64,7 +64,7 @@ CNodePtr MaxPool2MaxPoolWithArgmax::CreateMaxPoolWithArgmax(const FuncGraphPtr &
   // MaxPoolWithArgmax's second output is argmax, whose datatype is uint16 and with same shape as first output
   TypeId argmax_dtype = kNumberTypeUInt16;
   auto types = {common::AnfAlgo::GetOutputInferDataType(maxpool, 0UL), argmax_dtype};
-  auto out_shape = common::AnfAlgo::GetOutputDetailShape(maxpool, 0UL);
+  auto out_shape = AnfAlgo::GetOutputDetailShape(maxpool, 0UL);
   std::vector<abstract::BaseShapePtr> shapes = {out_shape, out_shape};
   common::AnfAlgo::SetOutputTypeAndDetailShape(types, shapes, maxpool_argmax.get());
   return maxpool_argmax;
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/mindir/neighbor_exchange_v2_unify_mindir.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/mindir/neighbor_exchange_v2_unify_mindir.cc
index fde4c06ec0f..434be2ba67d 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/mindir/neighbor_exchange_v2_unify_mindir.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/mindir/neighbor_exchange_v2_unify_mindir.cc
@@ -835,7 +835,7 @@ CNodePtr NeighborExchangeV2GradUnifyMindIR::CreateSplitGradNodes(const FuncGraph
   auto centerx = GetCenter(graph, neighbor_exchange_v2_grad, split_nodes, split_num, send_rank_ids);
   auto centerx_dtype = common::AnfAlgo::GetOutputInferDataType(centerx, 0UL);
   auto centerx_shape = common::AnfAlgo::GetOutputInferShape(centerx, 0UL);
-  auto base_shape = common::AnfAlgo::GetOutputDetailShape(centerx, 0UL);
+  auto base_shape = AnfAlgo::GetOutputDetailShape(centerx, 0UL);
   // empty
   int64_t all_to_all_output_num =
     std::count_if(recv_rank_ids.begin(), recv_rank_ids.end(), [](int64_t ids) { return ids != kInvalidId; });
a/mindspore/ccsrc/plugin/device/ascend/optimizer/mindir/sparse_softmax_cross_entropy_with_logits_unify_mindir.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/mindir/sparse_softmax_cross_entropy_with_logits_unify_mindir.cc index 36b61ddf05a..f997b6f570c 100644 --- a/mindspore/ccsrc/plugin/device/ascend/optimizer/mindir/sparse_softmax_cross_entropy_with_logits_unify_mindir.cc +++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/mindir/sparse_softmax_cross_entropy_with_logits_unify_mindir.cc @@ -294,9 +294,9 @@ CNodePtr CreateTile(const FuncGraphPtr &graph, const CNodePtr &sparse_softmax_no auto tile_node = pass.NewCNode(tile_inputs, graph); MS_EXCEPTION_IF_NULL(tile_node); tile_node->set_scope(mul_node->scope()); - common::AnfAlgo::SetOutputTypeAndDetailShape( - {common::AnfAlgo::GetPrevNodeOutputInferDataType(mul_node, 1UL)}, - {common::AnfAlgo::GetPrevNodeOutputDetailShape(sparse_softmax_node, 1UL)}, tile_node.get()); + common::AnfAlgo::SetOutputTypeAndDetailShape({common::AnfAlgo::GetPrevNodeOutputInferDataType(mul_node, 1UL)}, + {AnfAlgo::GetPrevNodeOutputDetailShape(sparse_softmax_node, 1UL)}, + tile_node.get()); if (is_convert_const_to_attr) { common::AnfAlgo::SetNodeAttr(kAttrMultiples, MakeValue(multiples), tile_node); } diff --git a/mindspore/ccsrc/plugin/device/cpu/optimizer/insert_cast_cpu.cc b/mindspore/ccsrc/plugin/device/cpu/optimizer/insert_cast_cpu.cc index e2d9a974d7e..43a19adc1e2 100644 --- a/mindspore/ccsrc/plugin/device/cpu/optimizer/insert_cast_cpu.cc +++ b/mindspore/ccsrc/plugin/device/cpu/optimizer/insert_cast_cpu.cc @@ -134,8 +134,7 @@ void InsertCast(const FuncGraphPtr &func_graph, const CNodePtr &cnode) { auto cur_input = common::AnfAlgo::GetInputNode(cnode, input_index); MS_EXCEPTION_IF_NULL(cur_input); const std::string dev_fmt = AnfAlgo::GetInputFormat(cnode, input_index); - const abstract::BaseShapePtr origin_shape = - common::AnfAlgo::GetOutputDetailShape(prev_node.first, prev_node.second); + const abstract::BaseShapePtr origin_shape = AnfAlgo::GetOutputDetailShape(prev_node.first, prev_node.second); TypeId device_type = AnfAlgo::GetInputDeviceDataType(cnode, input_index); if (origin_type != device_type && origin_type != kTypeUnknown && device_type != kTypeUnknown) { auto cast = AddCastOpNodeToGraph(func_graph, cur_input, dev_fmt, origin_type, device_type, origin_shape); @@ -199,7 +198,7 @@ void InsertCastForGraphOutput(const FuncGraphPtr &func_graph, const AnfNodePtr & auto device_type = AnfAlgo::GetPrevNodeOutputDeviceDataType(func_output, i); const std::string dev_fmt = AnfAlgo::GetPrevNodeOutputFormat(func_output, i); if (infer_type != device_type && device_type != kTypeUnknown) { - const abstract::BaseShapePtr origin_shape = common::AnfAlgo::GetPrevNodeOutputDetailShape(func_output_node, i); + const abstract::BaseShapePtr origin_shape = AnfAlgo::GetPrevNodeOutputDetailShape(func_output_node, i); auto cast = AddCastOpNodeToGraph(func_graph, input_node, dev_fmt, device_type, infer_type, origin_shape); MS_EXCEPTION_IF_NULL(cast); cast->set_scope(func_output->scope()); diff --git a/mindspore/ccsrc/plugin/device/cpu/optimizer/insert_format_transform_op.cc b/mindspore/ccsrc/plugin/device/cpu/optimizer/insert_format_transform_op.cc index 7d34e4e72ed..7aacb192a2c 100644 --- a/mindspore/ccsrc/plugin/device/cpu/optimizer/insert_format_transform_op.cc +++ b/mindspore/ccsrc/plugin/device/cpu/optimizer/insert_format_transform_op.cc @@ -86,7 +86,7 @@ CNodePtr InsertTransposeOp(const FuncGraphPtr &graph, const AnfNodePtr &node, co auto transpose_op = 
graph->NewCNode(transpose_input); // 3.Set the output info of transpose. auto transpose_type = {common::AnfAlgo::GetPrevNodeOutputInferDataType(used_node, IntToSize(used_node_index))}; - auto transpose_shape = {common::AnfAlgo::GetPrevNodeOutputDetailShape(used_node, IntToSize(used_node_index))}; + auto transpose_shape = {AnfAlgo::GetPrevNodeOutputDetailShape(used_node, IntToSize(used_node_index))}; common::AnfAlgo::SetOutputTypeAndDetailShape(transpose_type, transpose_shape, transpose_op.get()); // 4. Set the new edge of transpose op. FuncGraphManagerPtr manager = graph->manager(); diff --git a/mindspore/ccsrc/plugin/device/cpu/optimizer/print_value_type.cc b/mindspore/ccsrc/plugin/device/cpu/optimizer/print_value_type.cc index c9716b4dba5..266c476e24a 100644 --- a/mindspore/ccsrc/plugin/device/cpu/optimizer/print_value_type.cc +++ b/mindspore/ccsrc/plugin/device/cpu/optimizer/print_value_type.cc @@ -131,7 +131,7 @@ bool PrintValueType::Run(const FuncGraphPtr &graph) { size_t output_num = AnfAlgo::GetOutputTensorNum(cnode); for (size_t i = 0; i < output_num; i++) { types.push_back(common::AnfAlgo::GetOutputInferDataType(cnode, i)); - shapes.push_back(common::AnfAlgo::GetOutputDetailShape(cnode, i)); + shapes.push_back(AnfAlgo::GetOutputDetailShape(cnode, i)); } common::AnfAlgo::SetOutputTypeAndDetailShape(types, shapes, cnode.get()); // add build info diff --git a/mindspore/ccsrc/plugin/device/gpu/optimizer/adam_fusion.cc b/mindspore/ccsrc/plugin/device/gpu/optimizer/adam_fusion.cc index 51729905729..b31a4dad68b 100644 --- a/mindspore/ccsrc/plugin/device/gpu/optimizer/adam_fusion.cc +++ b/mindspore/ccsrc/plugin/device/gpu/optimizer/adam_fusion.cc @@ -165,7 +165,7 @@ const AnfNodePtr AdamFusion::Process(const FuncGraphPtr &graph, const AnfNodePtr auto adam = graph->NewCNode(inputs); MS_EXCEPTION_IF_NULL(adam); auto types = {common::AnfAlgo::GetOutputInferDataType(node, 0)}; - auto shapes = {common::AnfAlgo::GetOutputDetailShape(node, 0)}; + auto shapes = {AnfAlgo::GetOutputDetailShape(node, 0)}; common::AnfAlgo::SetOutputTypeAndDetailShape(types, shapes, adam.get()); adam->set_scope(node->scope()); auto build_info = GenerateKernelBuildInfo(adam); diff --git a/mindspore/ccsrc/plugin/device/gpu/optimizer/adam_weight_decay_fusion.cc b/mindspore/ccsrc/plugin/device/gpu/optimizer/adam_weight_decay_fusion.cc index 7dc11ee8f9e..6521d3d89d2 100644 --- a/mindspore/ccsrc/plugin/device/gpu/optimizer/adam_weight_decay_fusion.cc +++ b/mindspore/ccsrc/plugin/device/gpu/optimizer/adam_weight_decay_fusion.cc @@ -170,7 +170,7 @@ const AnfNodePtr AdamWeightDecayFusion::Process(const FuncGraphPtr &graph, const auto adam_weight_decay = graph->NewCNode(inputs); MS_EXCEPTION_IF_NULL(adam_weight_decay); auto types = {common::AnfAlgo::GetOutputInferDataType(node, 0)}; - auto shapes = {common::AnfAlgo::GetOutputDetailShape(node, 0)}; + auto shapes = {AnfAlgo::GetOutputDetailShape(node, 0)}; common::AnfAlgo::SetOutputTypeAndDetailShape(types, shapes, adam_weight_decay.get()); adam_weight_decay->set_scope(node->scope()); diff --git a/mindspore/ccsrc/plugin/device/gpu/optimizer/add_relu_grad_v2_fusion.cc b/mindspore/ccsrc/plugin/device/gpu/optimizer/add_relu_grad_v2_fusion.cc index d9b4a828d43..fd9d8246e7a 100644 --- a/mindspore/ccsrc/plugin/device/gpu/optimizer/add_relu_grad_v2_fusion.cc +++ b/mindspore/ccsrc/plugin/device/gpu/optimizer/add_relu_grad_v2_fusion.cc @@ -89,7 +89,7 @@ const AnfNodePtr AddReluGradV2Fusion::Process(const FuncGraphPtr &graph, const A auto add_relugrad = graph->NewCNode(inputs); 
MS_EXCEPTION_IF_NULL(add_relugrad); auto types = {common::AnfAlgo::GetOutputInferDataType(node, 0)}; - auto shapes = {common::AnfAlgo::GetOutputDetailShape(node, 0)}; + auto shapes = {AnfAlgo::GetOutputDetailShape(node, 0)}; common::AnfAlgo::SetOutputTypeAndDetailShape(types, shapes, add_relugrad.get()); add_relugrad->set_scope(node->scope()); diff --git a/mindspore/ccsrc/plugin/device/gpu/optimizer/add_relu_v2_fusion.cc b/mindspore/ccsrc/plugin/device/gpu/optimizer/add_relu_v2_fusion.cc index f375d5d72ec..a446691beae 100644 --- a/mindspore/ccsrc/plugin/device/gpu/optimizer/add_relu_v2_fusion.cc +++ b/mindspore/ccsrc/plugin/device/gpu/optimizer/add_relu_v2_fusion.cc @@ -92,7 +92,7 @@ const AnfNodePtr AddReluV2Fusion::Process(const FuncGraphPtr &graph, const AnfNo size_t output_num = AnfAlgo::GetOutputElementNum(node); for (size_t i = 0; i < output_num; i++) { types.push_back(common::AnfAlgo::GetOutputInferDataType(node, i)); - shapes.push_back(common::AnfAlgo::GetOutputDetailShape(node, i)); + shapes.push_back(AnfAlgo::GetOutputDetailShape(node, i)); } common::AnfAlgo::SetOutputTypeAndDetailShape(types, shapes, add_relu.get()); diff --git a/mindspore/ccsrc/plugin/device/gpu/optimizer/alltoall_fusion.cc b/mindspore/ccsrc/plugin/device/gpu/optimizer/alltoall_fusion.cc index 7e4d1048bbb..b3beccdbed9 100644 --- a/mindspore/ccsrc/plugin/device/gpu/optimizer/alltoall_fusion.cc +++ b/mindspore/ccsrc/plugin/device/gpu/optimizer/alltoall_fusion.cc @@ -100,7 +100,7 @@ CNodePtr CreateAllToAllvNode(const FuncGraphPtr &graph, const CNodePtr &all_to_a MS_EXCEPTION_IF_NULL(all_to_all_v); // Prepare dtypes, shapes and ranks vectors. - auto single_shape = common::AnfAlgo::GetOutputDetailShape(split_outputs[0], 0); + auto single_shape = AnfAlgo::GetOutputDetailShape(split_outputs[0], 0); auto single_type = common::AnfAlgo::GetOutputInferDataType(split_outputs[0], 0); std::vector<TypeId> dtypes(split_count, single_type); std::vector<abstract::BaseShapePtr> shapes(split_count, single_shape); diff --git a/mindspore/ccsrc/plugin/device/gpu/optimizer/apply_momentum_scale_fusion.cc b/mindspore/ccsrc/plugin/device/gpu/optimizer/apply_momentum_scale_fusion.cc index eb3c7cc9511..c8683727665 100644 --- a/mindspore/ccsrc/plugin/device/gpu/optimizer/apply_momentum_scale_fusion.cc +++ b/mindspore/ccsrc/plugin/device/gpu/optimizer/apply_momentum_scale_fusion.cc @@ -89,7 +89,7 @@ const AnfNodePtr ApplyMomentumScaleFusion::Process(const FuncGraphPtr &graph, co auto replace_node = graph->NewCNode(inputs); MS_EXCEPTION_IF_NULL(replace_node); auto types = {common::AnfAlgo::GetOutputInferDataType(node, 0)}; - auto shapes = {common::AnfAlgo::GetOutputDetailShape(node, 0)}; + auto shapes = {AnfAlgo::GetOutputDetailShape(node, 0)}; common::AnfAlgo::SetOutputTypeAndDetailShape(types, shapes, replace_node.get()); replace_node->set_scope(node->scope()); return replace_node; diff --git a/mindspore/ccsrc/plugin/device/gpu/optimizer/apply_momentum_weight_fusion.cc b/mindspore/ccsrc/plugin/device/gpu/optimizer/apply_momentum_weight_fusion.cc index c96fb7d4ad9..d6b24473c8a 100644 --- a/mindspore/ccsrc/plugin/device/gpu/optimizer/apply_momentum_weight_fusion.cc +++ b/mindspore/ccsrc/plugin/device/gpu/optimizer/apply_momentum_weight_fusion.cc @@ -61,7 +61,7 @@ const AnfNodePtr ApplyMomentumWeightDecayFusion::Process(const FuncGraphPtr &gra auto replace_node = graph->NewCNode(inputs); MS_EXCEPTION_IF_NULL(replace_node); auto types = {common::AnfAlgo::GetOutputInferDataType(node, 0)}; - auto shapes = {common::AnfAlgo::GetOutputDetailShape(node, 0)}; + auto shapes = 
{AnfAlgo::GetOutputDetailShape(node, 0)}; common::AnfAlgo::SetOutputTypeAndDetailShape(types, shapes, replace_node.get()); replace_node->set_scope(node->scope()); return replace_node; diff --git a/mindspore/ccsrc/plugin/device/gpu/optimizer/apply_momentum_weight_scale_fusion.cc b/mindspore/ccsrc/plugin/device/gpu/optimizer/apply_momentum_weight_scale_fusion.cc index 39162e9082b..4a240eaeca7 100644 --- a/mindspore/ccsrc/plugin/device/gpu/optimizer/apply_momentum_weight_scale_fusion.cc +++ b/mindspore/ccsrc/plugin/device/gpu/optimizer/apply_momentum_weight_scale_fusion.cc @@ -127,7 +127,7 @@ const AnfNodePtr ApplyMomentumWeightDecayScaleFusion::Process(const FuncGraphPtr auto replace_node = graph->NewCNode(inputs); MS_EXCEPTION_IF_NULL(replace_node); auto types = {common::AnfAlgo::GetOutputInferDataType(node, 0)}; - auto shapes = {common::AnfAlgo::GetOutputDetailShape(node, 0)}; + auto shapes = {AnfAlgo::GetOutputDetailShape(node, 0)}; common::AnfAlgo::SetOutputTypeAndDetailShape(types, shapes, replace_node.get()); replace_node->set_scope(node->scope()); return replace_node; diff --git a/mindspore/ccsrc/plugin/device/gpu/optimizer/batch_norm_add_relu_fusion.cc b/mindspore/ccsrc/plugin/device/gpu/optimizer/batch_norm_add_relu_fusion.cc index b28aa6519b5..6f12b35233b 100644 --- a/mindspore/ccsrc/plugin/device/gpu/optimizer/batch_norm_add_relu_fusion.cc +++ b/mindspore/ccsrc/plugin/device/gpu/optimizer/batch_norm_add_relu_fusion.cc @@ -149,7 +149,7 @@ const AnfNodePtr BatchNormAddReluFusion::Process(const FuncGraphPtr &graph, cons auto output_num = AnfAlgo::GetOutputTensorNum(batch_norm); for (size_t i = 0; i < output_num; i++) { outputs_type.push_back(common::AnfAlgo::GetOutputInferDataType(batch_norm, i)); - outputs_shape.push_back(common::AnfAlgo::GetOutputDetailShape(batch_norm, i)); + outputs_shape.push_back(AnfAlgo::GetOutputDetailShape(batch_norm, i)); } common::AnfAlgo::SetOutputTypeAndDetailShape(outputs_type, outputs_shape, fused_batch_norm_with_add_relu.get()); common::AnfAlgo::CopyNodeAttrs(batch_norm, fused_batch_norm_with_add_relu); diff --git a/mindspore/ccsrc/plugin/device/gpu/optimizer/batch_norm_add_relu_grad_fusion.cc b/mindspore/ccsrc/plugin/device/gpu/optimizer/batch_norm_add_relu_grad_fusion.cc index 7a43451710f..908968915ce 100644 --- a/mindspore/ccsrc/plugin/device/gpu/optimizer/batch_norm_add_relu_grad_fusion.cc +++ b/mindspore/ccsrc/plugin/device/gpu/optimizer/batch_norm_add_relu_grad_fusion.cc @@ -74,11 +74,11 @@ void SetShapeAndType(const CNodePtr &bn_add_relu_grad, const AnfNodePtr &bn_grad auto output_num = AnfAlgo::GetOutputTensorNum(bn_grad); for (size_t i = 0; i < output_num; ++i) { outputs_type.push_back(common::AnfAlgo::GetOutputInferDataType(bn_grad, i)); - outputs_shape.push_back(common::AnfAlgo::GetOutputDetailShape(bn_grad, i)); + outputs_shape.push_back(AnfAlgo::GetOutputDetailShape(bn_grad, i)); } outputs_type.push_back(common::AnfAlgo::GetOutputInferDataType(relu_grad, 0)); - outputs_shape.push_back(common::AnfAlgo::GetOutputDetailShape(relu_grad, 0)); + outputs_shape.push_back(AnfAlgo::GetOutputDetailShape(relu_grad, 0)); common::AnfAlgo::SetOutputTypeAndDetailShape(outputs_type, outputs_shape, bn_add_relu_grad.get()); } diff --git a/mindspore/ccsrc/plugin/device/gpu/optimizer/batch_norm_relu_fusion.cc b/mindspore/ccsrc/plugin/device/gpu/optimizer/batch_norm_relu_fusion.cc index c20858cdc17..0b1274bbd46 100644 --- a/mindspore/ccsrc/plugin/device/gpu/optimizer/batch_norm_relu_fusion.cc +++ 
b/mindspore/ccsrc/plugin/device/gpu/optimizer/batch_norm_relu_fusion.cc @@ -101,7 +101,7 @@ const AnfNodePtr BatchNormReluFusion::Process(const FuncGraphPtr &graph, const A auto output_num = AnfAlgo::GetOutputTensorNum(batch_norm); for (size_t i = 0; i < output_num; i++) { outputs_type.push_back(common::AnfAlgo::GetOutputInferDataType(batch_norm, i)); - outputs_shape.push_back(common::AnfAlgo::GetOutputDetailShape(batch_norm, i)); + outputs_shape.push_back(AnfAlgo::GetOutputDetailShape(batch_norm, i)); } common::AnfAlgo::SetOutputTypeAndDetailShape(outputs_type, outputs_shape, fused_batch_norm_with_relu.get()); common::AnfAlgo::CopyNodeAttrs(batch_norm, fused_batch_norm_with_relu); diff --git a/mindspore/ccsrc/plugin/device/gpu/optimizer/batch_norm_relu_grad_fusion.cc b/mindspore/ccsrc/plugin/device/gpu/optimizer/batch_norm_relu_grad_fusion.cc index 2573888b3f8..1f663bd435e 100644 --- a/mindspore/ccsrc/plugin/device/gpu/optimizer/batch_norm_relu_grad_fusion.cc +++ b/mindspore/ccsrc/plugin/device/gpu/optimizer/batch_norm_relu_grad_fusion.cc @@ -101,7 +101,7 @@ const AnfNodePtr BatchNormReluGradFusion::Process(const FuncGraphPtr &graph, con auto output_num = AnfAlgo::GetOutputTensorNum(node); for (size_t i = 0; i < output_num; i++) { outputs_type.push_back(common::AnfAlgo::GetOutputInferDataType(node, i)); - outputs_shape.push_back(common::AnfAlgo::GetOutputDetailShape(node, i)); + outputs_shape.push_back(AnfAlgo::GetOutputDetailShape(node, i)); } common::AnfAlgo::SetOutputTypeAndDetailShape(outputs_type, outputs_shape, fused_batch_norm_grad_with_relu.get()); common::AnfAlgo::CopyNodeAttrs(node, fused_batch_norm_grad_with_relu); diff --git a/mindspore/ccsrc/plugin/device/gpu/optimizer/bce_with_logits_loss_fusion.cc b/mindspore/ccsrc/plugin/device/gpu/optimizer/bce_with_logits_loss_fusion.cc index fa24bc72854..63b3ade24ce 100644 --- a/mindspore/ccsrc/plugin/device/gpu/optimizer/bce_with_logits_loss_fusion.cc +++ b/mindspore/ccsrc/plugin/device/gpu/optimizer/bce_with_logits_loss_fusion.cc @@ -41,7 +41,7 @@ AnfNodePtr AddReduceNode(const FuncGraphPtr &func_graph, const AnfNodePtr &node) MS_EXCEPTION_IF_NULL(kernel_graph); auto predict_input = cnode->inputs()[1]; auto new_node_dtype = {common::AnfAlgo::GetOutputInferDataType(predict_input, 0)}; - auto new_node_shape = {common::AnfAlgo::GetOutputDetailShape(predict_input, 0)}; + auto new_node_shape = {AnfAlgo::GetOutputDetailShape(predict_input, 0)}; common::AnfAlgo::SetOutputTypeAndDetailShape(new_node_dtype, new_node_shape, new_cnode.get()); // Add reduce node @@ -69,7 +69,7 @@ AnfNodePtr AddReduceNode(const FuncGraphPtr &func_graph, const AnfNodePtr &node) auto reduce_node = func_graph->NewCNode(reduce_inputs); MS_EXCEPTION_IF_NULL(reduce_node); auto type = common::AnfAlgo::GetOutputInferDataType(node, 0); - auto shape = {common::AnfAlgo::GetOutputDetailShape(node, 0)}; + auto shape = {AnfAlgo::GetOutputDetailShape(node, 0)}; common::AnfAlgo::SetOutputTypeAndDetailShape({type}, shape, reduce_node.get()); common::AnfAlgo::SetNodeAttr("keep_dims", MakeValue(false), reduce_node); reduce_node->set_scope(cnode->scope()); diff --git a/mindspore/ccsrc/plugin/device/gpu/optimizer/concat_outputs_for_all_gather.cc b/mindspore/ccsrc/plugin/device/gpu/optimizer/concat_outputs_for_all_gather.cc index 5d63741cda8..716cb996fc9 100644 --- a/mindspore/ccsrc/plugin/device/gpu/optimizer/concat_outputs_for_all_gather.cc +++ b/mindspore/ccsrc/plugin/device/gpu/optimizer/concat_outputs_for_all_gather.cc @@ -155,7 +155,7 @@ const AnfNodePtr 
ConcatOutputsForAllGather::Process(const FuncGraphPtr &func_gra idx->set_abstract(abstract_scalar); auto tuple_getitem = func_graph->NewCNode({NewValueNode(prim::kPrimTupleGetItem), node, idx}); MS_EXCEPTION_IF_NULL(tuple_getitem); - auto shape = common::AnfAlgo::GetOutputDetailShape(node, i); + auto shape = AnfAlgo::GetOutputDetailShape(node, i); common::AnfAlgo::SetOutputTypeAndDetailShape({std::get<0>(output_info)[i]}, {shape}, tuple_getitem.get()); new_outputs.emplace_back(std::move(tuple_getitem)); } diff --git a/mindspore/ccsrc/plugin/device/gpu/optimizer/cudnn_inplace_fusion.cc b/mindspore/ccsrc/plugin/device/gpu/optimizer/cudnn_inplace_fusion.cc index a6c65e099c8..ad3b8d1f72b 100644 --- a/mindspore/ccsrc/plugin/device/gpu/optimizer/cudnn_inplace_fusion.cc +++ b/mindspore/ccsrc/plugin/device/gpu/optimizer/cudnn_inplace_fusion.cc @@ -158,7 +158,7 @@ void CopyKernelInfo(AnfNodePtr src, AnfNodePtr dst) { std::vector<abstract::BaseShapePtr> shapes; for (size_t i = 0; i < output_num; i++) { types.emplace_back(common::AnfAlgo::GetOutputInferDataType(src, i)); - shapes.emplace_back(common::AnfAlgo::GetOutputDetailShape(src, i)); + shapes.emplace_back(AnfAlgo::GetOutputDetailShape(src, i)); } common::AnfAlgo::SetOutputTypeAndDetailShape(types, shapes, dst.get()); } diff --git a/mindspore/ccsrc/plugin/device/gpu/optimizer/insert_cast_gpu.cc b/mindspore/ccsrc/plugin/device/gpu/optimizer/insert_cast_gpu.cc index 09e525ef97e..6febf3d78b3 100644 --- a/mindspore/ccsrc/plugin/device/gpu/optimizer/insert_cast_gpu.cc +++ b/mindspore/ccsrc/plugin/device/gpu/optimizer/insert_cast_gpu.cc @@ -38,7 +38,7 @@ void InsertCast(const FuncGraphPtr &graph, const AnfNodePtr &node, size_t i, con auto cast = graph->NewCNode(inputs); MS_EXCEPTION_IF_NULL(cast); common::AnfAlgo::SetNodeAttr(kAttrDstType, TypeIdToType(cast_type), cast); - auto cast_shape = {common::AnfAlgo::GetPrevNodeOutputDetailShape(node, i)}; + auto cast_shape = {AnfAlgo::GetPrevNodeOutputDetailShape(node, i)}; common::AnfAlgo::SetOutputTypeAndDetailShape({cast_type}, cast_shape, cast.get()); FuncGraphManagerPtr manager = graph->manager(); MS_EXCEPTION_IF_NULL(manager); @@ -110,7 +110,7 @@ bool InsertCastGPU::Run(const FuncGraphPtr &graph) { auto output_types = std::vector<TypeId>(output_num, kNumberTypeFloat32); std::vector<abstract::BaseShapePtr> output_shapes; for (size_t output_index = 0; output_index < output_num; ++output_index) { - auto shape = common::AnfAlgo::GetOutputDetailShape(node, output_index); + auto shape = AnfAlgo::GetOutputDetailShape(node, output_index); (void)output_shapes.emplace_back(shape); } common::AnfAlgo::SetOutputTypeAndDetailShape(output_types, output_shapes, node.get()); diff --git a/mindspore/ccsrc/plugin/device/gpu/optimizer/insert_format_transform_op.cc b/mindspore/ccsrc/plugin/device/gpu/optimizer/insert_format_transform_op.cc index 0ab3ea1d4ee..28d5f17220d 100644 --- a/mindspore/ccsrc/plugin/device/gpu/optimizer/insert_format_transform_op.cc +++ b/mindspore/ccsrc/plugin/device/gpu/optimizer/insert_format_transform_op.cc @@ -130,7 +130,7 @@ CNodePtr InsertTransposeOp(const FuncGraphPtr &graph, const AnfNodePtr &node, co MS_EXCEPTION_IF_NULL(transpose_op); // 3.Set the output info of transpose. 
auto transpose_type = {common::AnfAlgo::GetPrevNodeOutputInferDataType(used_node, used_node_index)}; - auto base_shape = common::AnfAlgo::GetPrevNodeOutputDetailShape(used_node, used_node_index); + auto base_shape = AnfAlgo::GetPrevNodeOutputDetailShape(used_node, used_node_index); common::AnfAlgo::SetOutputTypeAndDetailShape(transpose_type, {base_shape}, transpose_op.get()); // 4. Set the new edge of transpose op. diff --git a/mindspore/ccsrc/plugin/device/gpu/optimizer/matmul_biasadd_fusion.cc b/mindspore/ccsrc/plugin/device/gpu/optimizer/matmul_biasadd_fusion.cc index 834faa7d57d..242af1e3a9f 100644 --- a/mindspore/ccsrc/plugin/device/gpu/optimizer/matmul_biasadd_fusion.cc +++ b/mindspore/ccsrc/plugin/device/gpu/optimizer/matmul_biasadd_fusion.cc @@ -100,7 +100,7 @@ const AnfNodePtr MatMulBiasAddFusion::Process(const FuncGraphPtr &graph, const A // Copy Abstract and KernelBuildInfo. auto types = {common::AnfAlgo::GetOutputInferDataType(node, 0)}; - auto shapes = {common::AnfAlgo::GetOutputDetailShape(node, 0)}; + auto shapes = {AnfAlgo::GetOutputDetailShape(node, 0)}; common::AnfAlgo::SetOutputTypeAndDetailShape(types, shapes, fused_node.get()); common::AnfAlgo::CopyNodeAttrs(matmul, fused_node); fused_node->set_scope(node->scope()); diff --git a/mindspore/ccsrc/plugin/device/gpu/optimizer/post_batch_norm_add_relu_fusion.cc b/mindspore/ccsrc/plugin/device/gpu/optimizer/post_batch_norm_add_relu_fusion.cc index a068041e89f..67d3dd9dbe6 100644 --- a/mindspore/ccsrc/plugin/device/gpu/optimizer/post_batch_norm_add_relu_fusion.cc +++ b/mindspore/ccsrc/plugin/device/gpu/optimizer/post_batch_norm_add_relu_fusion.cc @@ -91,7 +91,7 @@ const AnfNodePtr PostBatchNormAddReluFusion::Process(const FuncGraphPtr &graph, auto output_num = AnfAlgo::GetOutputTensorNum(batch_norm); for (size_t i = 0; i < output_num; i++) { outputs_type.push_back(common::AnfAlgo::GetOutputInferDataType(batch_norm, i)); - outputs_shape.push_back(common::AnfAlgo::GetOutputDetailShape(batch_norm, i)); + outputs_shape.push_back(AnfAlgo::GetOutputDetailShape(batch_norm, i)); } common::AnfAlgo::SetOutputTypeAndDetailShape(outputs_type, outputs_shape, fused_batch_norm_with_add_relu.get()); common::AnfAlgo::CopyNodeAttrs(batch_norm, fused_batch_norm_with_add_relu); diff --git a/mindspore/ccsrc/plugin/device/gpu/optimizer/print_reduce_fusion.cc b/mindspore/ccsrc/plugin/device/gpu/optimizer/print_reduce_fusion.cc index 3ddabf16fc0..2b48ba6190d 100644 --- a/mindspore/ccsrc/plugin/device/gpu/optimizer/print_reduce_fusion.cc +++ b/mindspore/ccsrc/plugin/device/gpu/optimizer/print_reduce_fusion.cc @@ -191,7 +191,7 @@ bool PrintReduceFusion::Run(const FuncGraphPtr &graph) { size_t output_num = AnfAlgo::GetOutputTensorNum(cnode); for (size_t i = 0; i < output_num; i++) { types.push_back(common::AnfAlgo::GetOutputInferDataType(cnode, i)); - shapes.push_back(common::AnfAlgo::GetOutputDetailShape(cnode, i)); + shapes.push_back(AnfAlgo::GetOutputDetailShape(cnode, i)); } common::AnfAlgo::SetOutputTypeAndDetailShape(types, shapes, print_fused.get()); // add build info diff --git a/mindspore/ccsrc/plugin/device/gpu/optimizer/relu_v2_pass.cc b/mindspore/ccsrc/plugin/device/gpu/optimizer/relu_v2_pass.cc index 930cd989dfc..91e955c0f21 100644 --- a/mindspore/ccsrc/plugin/device/gpu/optimizer/relu_v2_pass.cc +++ b/mindspore/ccsrc/plugin/device/gpu/optimizer/relu_v2_pass.cc @@ -83,7 +83,7 @@ CNodePtr CreateReluV2(const FuncGraphPtr &graph, const CNodePtr &relu) { auto element_num = std::accumulate(output_shape.begin(), output_shape.end(), int64_t(1), 
std::multiplies<int64_t>()); std::vector<int64_t> mask_shape = {(element_num + kBitPerUInt - 1) / kBitPerUInt}; - std::vector<abstract::BaseShapePtr> shapes = {common::AnfAlgo::GetOutputDetailShape(relu, 0), + std::vector<abstract::BaseShapePtr> shapes = {AnfAlgo::GetOutputDetailShape(relu, 0), std::make_shared<abstract::Shape>(mask_shape)}; auto types = {common::AnfAlgo::GetOutputInferDataType(relu, 0), kNumberTypeUInt32}; common::AnfAlgo::SetOutputTypeAndDetailShape(types, shapes, new_node.get()); @@ -110,7 +110,7 @@ CNodePtr CreateReluGradV2(const FuncGraphPtr &graph, const CNodePtr &relu_grad, size_t output_num = AnfAlgo::GetOutputTensorNum(relu_grad); for (size_t i = 0; i < output_num; i++) { types.push_back(common::AnfAlgo::GetOutputInferDataType(relu_grad, i)); - shapes.push_back(common::AnfAlgo::GetOutputDetailShape(relu_grad, i)); + shapes.push_back(AnfAlgo::GetOutputDetailShape(relu_grad, i)); } common::AnfAlgo::SetOutputTypeAndDetailShape(types, shapes, new_node.get()); diff --git a/mindspore/ccsrc/plugin/device/gpu/optimizer/replace_addn_fusion.cc b/mindspore/ccsrc/plugin/device/gpu/optimizer/replace_addn_fusion.cc index 80c18709c86..5c39d16e6e3 100644 --- a/mindspore/ccsrc/plugin/device/gpu/optimizer/replace_addn_fusion.cc +++ b/mindspore/ccsrc/plugin/device/gpu/optimizer/replace_addn_fusion.cc @@ -44,7 +44,7 @@ AnfNodePtr BuildAdd(const PatternMap &m, const AnfNodePtr &default_node) { std::vector<TypeId> outputs_type; std::vector<abstract::BaseShapePtr> outputs_shape; outputs_type.push_back(common::AnfAlgo::GetOutputInferDataType(m.Get(A), 0)); - outputs_shape.push_back(common::AnfAlgo::GetOutputDetailShape(m.Get(A), 0)); + outputs_shape.push_back(AnfAlgo::GetOutputDetailShape(m.Get(A), 0)); common::AnfAlgo::SetOutputTypeAndDetailShape(outputs_type, outputs_shape, default_node.get()); AnfAlgo::SetSelectKernelBuildInfo(AnfAlgo::GetSelectKernelBuildInfo(m.Get(m_addn)), default_node.get()); return default_node; diff --git a/mindspore/ccsrc/plugin/device/gpu/optimizer/replace_momentum_cast_fusion.cc b/mindspore/ccsrc/plugin/device/gpu/optimizer/replace_momentum_cast_fusion.cc index 81c72013a92..206dfa5152b 100644 --- a/mindspore/ccsrc/plugin/device/gpu/optimizer/replace_momentum_cast_fusion.cc +++ b/mindspore/ccsrc/plugin/device/gpu/optimizer/replace_momentum_cast_fusion.cc @@ -51,7 +51,7 @@ const AnfNodePtr ReplaceMomentumCastFusion::Process(const FuncGraphPtr &graph, c auto output_num = AnfAlgo::GetOutputTensorNum(node); for (size_t i = 0; i < output_num; i++) { outputs_type.push_back(common::AnfAlgo::GetOutputInferDataType(node, i)); - outputs_shape.push_back(common::AnfAlgo::GetOutputDetailShape(node, i)); + outputs_shape.push_back(AnfAlgo::GetOutputDetailShape(node, i)); } outputs_type[kGradIndex] = common::AnfAlgo::GetPrevNodeOutputInferDataType(grad_cast, 0); diff --git a/mindspore/ccsrc/plugin/device/gpu/optimizer/trt_pass/graph_converter.cc b/mindspore/ccsrc/plugin/device/gpu/optimizer/trt_pass/graph_converter.cc index 83a475140aa..6daedbba899 100644 --- a/mindspore/ccsrc/plugin/device/gpu/optimizer/trt_pass/graph_converter.cc +++ b/mindspore/ccsrc/plugin/device/gpu/optimizer/trt_pass/graph_converter.cc @@ -39,7 +39,7 @@ void CopyGraphOutputTypeAndShape(const std::vector<session::KernelWithIndex> &gr std::vector<abstract::BaseShapePtr> shapes; for (const auto &item : graph_outputs) { types.push_back(common::AnfAlgo::GetOutputInferDataType(item.first, item.second)); - shapes.push_back(common::AnfAlgo::GetOutputDetailShape(item.first, item.second)); + shapes.push_back(AnfAlgo::GetOutputDetailShape(item.first, item.second)); } common::AnfAlgo::SetOutputTypeAndDetailShape(types, shapes, trt_node.get()); diff --git 
a/mindspore/ccsrc/utils/anfalgo.cc b/mindspore/ccsrc/utils/anfalgo.cc index d385d648971..6741c4427ed 100644 --- a/mindspore/ccsrc/utils/anfalgo.cc +++ b/mindspore/ccsrc/utils/anfalgo.cc @@ -625,30 +625,6 @@ inline ShapeVector GetShape(const abstract::BaseShapePtr &base_shape) { return shape_ptr->shape(); } -namespace { -// Get the element shape of dynamic sequence shape. -abstract::BaseShapePtr GetDynamicSequenceShape(const AnfNodePtr &node, size_t output_idx) { - MS_EXCEPTION_IF_NULL(node); - if (node->Shape() == nullptr || (!node->Shape()->isa<abstract::DynamicSequenceShape>())) { - MS_LOG(EXCEPTION) << "Invalid dynamic shape in node:" << node->DebugString() << "."; - } - if (node->abstract() == nullptr) { - MS_LOG(EXCEPTION) << "Empty abstract in node:" << node->DebugString() << " for dynamic sequence shape."; - } - if (!node->abstract()->isa<abstract::AbstractSequence>()) { - MS_LOG(EXCEPTION) << "Not sequence abstract in node:" << node->DebugString() << " for dynamic sequence shape."; - } - const auto &sequence_abs = node->abstract()->cast<abstract::AbstractSequencePtr>(); - MS_EXCEPTION_IF_NULL(sequence_abs); - if (!sequence_abs->dynamic_len()) { - MS_LOG(EXCEPTION) << "Not dynamic abstract in node:" << node->DebugString() << " for dynamic sequence shape."; - } - const auto &element_abs = sequence_abs->dynamic_len_element_abs(); - MS_EXCEPTION_IF_NULL(element_abs); - return element_abs->BuildShape(); -} -} // namespace - ShapeVector AnfAlgo::GetOutputInferShape(const AnfNodePtr &node, const abstract::BaseShapePtr &base_shape, size_t output_idx, bool is_real_squence_output) { MS_EXCEPTION_IF_NULL(node); @@ -758,45 +734,6 @@ TypeId AnfAlgo::GetPrevNodeOutputInferDataType(const AnfNodePtr &node, size_t in return AnfAlgo::GetOutputInferDataType(kernel_with_index.first, kernel_with_index.second); } -abstract::BaseShapePtr AnfAlgo::GetOutputDetailShape(const AnfNodePtr &node, size_t output_idx) { - MS_EXCEPTION_IF_NULL(node); - auto base_shape = node->Shape(); - MS_EXCEPTION_IF_NULL(base_shape); - if (base_shape->isa<abstract::Shape>()) { - if (output_idx == 0) { - return base_shape; - } - MS_LOG(EXCEPTION) << "The node " << node->DebugString() << "is a single output node but got index [" << output_idx - << "]." << trace::DumpSourceLines(node); - } else if (base_shape->isa<abstract::TupleShape>()) { - auto tuple_shape = base_shape->cast<abstract::TupleShapePtr>(); - MS_EXCEPTION_IF_NULL(tuple_shape); - if (output_idx >= tuple_shape->size()) { - MS_LOG(EXCEPTION) << "Output index " << output_idx << "is larger than output number " << tuple_shape->size() - << " node:" << node->DebugString() << "." << trace::DumpSourceLines(node); - } - auto b_shp = (*tuple_shape)[output_idx]; - if (b_shp->isa<abstract::Shape>() || b_shp->isa<abstract::NoShape>()) { - return b_shp; - } else { - MS_LOG(EXCEPTION) << "The output type of ApplyKernel index:" << output_idx - << " should be a NoShape , ArrayShape or a TupleShape, but it is " << base_shape->ToString() - << "node :" << node->DebugString() << "."
<< trace::DumpSourceLines(node); - } - } else if (base_shape->isa<abstract::NoShape>()) { - return base_shape; - } else if (base_shape->isa<abstract::DynamicSequenceShape>()) { - return GetDynamicSequenceShape(node, output_idx); - } - MS_LOG(EXCEPTION) << "The output type of ApplyKernel should be a NoShape , ArrayShape or a TupleShape, but it is " - << base_shape->ToString() << " node : " << node->DebugString() << trace::DumpSourceLines(node); -} - -abstract::BaseShapePtr AnfAlgo::GetPrevNodeOutputDetailShape(const AnfNodePtr &node, size_t input_idx) { - KernelWithIndex kernel_with_index = AnfAlgo::GetPrevNodeOutput(node, input_idx); - return AnfAlgo::GetOutputDetailShape(kernel_with_index.first, kernel_with_index.second); -} - // set infer shapes and types of anf node void AnfAlgo::SetOutputTypeAndDetailShape(const std::vector<TypeId> &types, const std::vector<abstract::BaseShapePtr> &shapes, AnfNode *node) { @@ -1936,5 +1873,26 @@ bool AnfAlgo::IsReduceOp(const std::string &op_name) { prim::kPrimReduceSum->name(), prim::kPrimSquareSumV1->name()}; return reduce_op_type.find(op_name) != reduce_op_type.end(); } + +abstract::BaseShapePtr AnfAlgo::GetDynamicSequenceShape(const AnfNodePtr &node, size_t output_idx) { + MS_EXCEPTION_IF_NULL(node); + if (node->Shape() == nullptr || (!node->Shape()->isa<abstract::DynamicSequenceShape>())) { + MS_LOG(EXCEPTION) << "Invalid dynamic shape in node:" << node->DebugString() << "."; + } + if (node->abstract() == nullptr) { + MS_LOG(EXCEPTION) << "Empty abstract in node:" << node->DebugString() << " for dynamic sequence shape."; + } + if (!node->abstract()->isa<abstract::AbstractSequence>()) { + MS_LOG(EXCEPTION) << "Not sequence abstract in node:" << node->DebugString() << " for dynamic sequence shape."; + } + const auto &sequence_abs = node->abstract()->cast<abstract::AbstractSequencePtr>(); + MS_EXCEPTION_IF_NULL(sequence_abs); + if (!sequence_abs->dynamic_len()) { + MS_LOG(EXCEPTION) << "Not dynamic abstract in node:" << node->DebugString() << " for dynamic sequence shape."; + } + const auto &element_abs = sequence_abs->dynamic_len_element_abs(); + MS_EXCEPTION_IF_NULL(element_abs); + return element_abs->BuildShape(); +} } // namespace common } // namespace mindspore diff --git a/mindspore/python/mindspore/communication/management.py b/mindspore/python/mindspore/communication/management.py index 144621391a8..749c5e81e0b 100755 --- a/mindspore/python/mindspore/communication/management.py +++ b/mindspore/python/mindspore/communication/management.py @@ -151,6 +151,7 @@ def init(backend_name=None): raise RuntimeError("Parameter server and scheduler should use 'CPU' as backend instead of 'Ascend'") if _get_ps_context("worker_num") == 1: GlobalComm.INITED = True + _set_elegant_exit_handle() return if device_target != "Ascend": raise RuntimeError("For 'init', the argument 'backend_name' should be 'Ascend' to init hccl, "
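
Note for reviewers: the bulk of this patch is the mechanical caller-side switch from common::AnfAlgo to the backend AnfRuntimeAlgorithm (available as AnfAlgo once "backend/common/session/anf_runtime_algorithm.h" is included) for detail-shape queries. A minimal sketch of the resulting pattern, assuming a pass that rebuilds a node's abstract from an existing node; the helper name and the "anchor" argument are illustrative and not part of this patch:

// Sketch only, not part of the patch.
#include "backend/common/session/anf_runtime_algorithm.h"

void SetAbstractLikeAnchor(const CNodePtr &anchor, const CNodePtr &new_node) {
  // Infer data types are still read through the common helper ...
  auto types = {common::AnfAlgo::GetOutputInferDataType(anchor, 0)};
  // ... while detail (Base)shapes now come from the backend helper, which
  // also understands dynamic sequence shapes.
  auto shapes = {AnfAlgo::GetOutputDetailShape(anchor, 0)};
  // Writing the abstract back is unchanged.
  common::AnfAlgo::SetOutputTypeAndDetailShape(types, shapes, new_node.get());
}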
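
The one non-mechanical piece is the DynamicSequenceShape branch, now reached through the public AnfAlgo::GetDynamicSequenceShape added above. A sketch of what that branch computes, assuming a node whose abstract is a dynamic-length sequence; the function name here is illustrative:

// Sketch only: for a node whose Shape() is abstract::DynamicSequenceShape,
// the per-element shape cannot be read from a TupleShape, so it is rebuilt
// from the dynamic-length element abstract.
abstract::BaseShapePtr ElementShapeOfDynamicSequence(const AnfNodePtr &node) {
  const auto &seq_abs = node->abstract()->cast<abstract::AbstractSequencePtr>();
  MS_EXCEPTION_IF_NULL(seq_abs);
  // Every element of a dynamic-length sequence shares one abstract; its
  // BuildShape() is the detail shape reported for any output index.
  return seq_abs->dynamic_len_element_abs()->BuildShape();
}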