From 663278112f23cf9a84365270453aba10be4a159d Mon Sep 17 00:00:00 2001
From: zhoufeng
Date: Fri, 14 Aug 2020 16:24:49 +0800
Subject: [PATCH] optimize code compile performance

Signed-off-by: zhoufeng
---
 .../aicpu/aicpu_kernel_build.cc | 3 -
 .../kernel_compiler/aicpu/aicpu_kernel_mod.cc | 1 -
 .../kernel_compiler/aicpu/aicpu_util.cc | 4 -
 .../kernel_compiler/akg/akg_kernel_build.cc | 6 -
 .../akg/ascend/akg_ascend_kernel_mod.cc | 5 -
 .../backend/kernel_compiler/common_utils.cc | 1 -
 .../kernel_compiler/cpu/cpu_kernel_factory.cc | 1 -
 .../cpu/mkldnn/lstm_grad_cpu_kernel.cc | 2 -
 .../cpu/mkldnn/matmul_cpu_kernel.cc | 1 -
 .../cpu/mkldnn/mkl_cpu_kernel.cc | 1 -
 .../cpu/ps/embedding_look_up_ps_kernel.cc | 1 -
 .../kernel_compiler/cpu/ps/pserver_kernel.cc | 3 -
 .../kernel_compiler/cpu/reduce_cpu_kernel.cc | 2 +-
 .../kernel_compiler/hccl/hccl_kernel_build.cc | 2 -
 .../hccl/hcom_all_broadcast.cc | 4 -
 .../kernel_compiler/hccl/hcom_all_gather.cc | 4 -
 .../kernel_compiler/hccl/hcom_all_reduce.cc | 4 -
 .../hccl/hcom_all_reduce_scatter.cc | 4 -
 .../backend/kernel_compiler/hccl/hcom_util.cc | 2 -
 .../backend/kernel_compiler/kernel_fusion.cc | 2 -
 .../backend/kernel_compiler/oplib/oplib.cc | 1 -
 .../backend/kernel_compiler/rts/assign.cc | 3 -
 .../backend/kernel_compiler/rts/label_goto.cc | 2 -
 .../backend/kernel_compiler/rts/label_set.cc | 2 -
 .../kernel_compiler/rts/label_switch.cc | 2 -
 .../kernel_compiler/rts/memcpy_async.cc | 3 -
 .../rts/profiling_kernel_mod.cc | 2 -
 .../ccsrc/backend/kernel_compiler/rts/recv.cc | 3 -
 .../kernel_compiler/rts/rt_kernel_build.cc | 3 -
 .../ccsrc/backend/kernel_compiler/rts/send.cc | 1 -
 .../kernel_compiler/rts/stream_active.cc | 2 -
 .../kernel_compiler/rts/stream_switch.cc | 3 -
 .../tbe/tbe_kernel_parallel_build.cc | 1 -
 .../tbe_kernel_select/tbe_kernel_select.cc | 1 -
 .../backend/kernel_compiler/tbe/tbe_utils.cc | 7 -
 .../ascend/ascend_backend_optimization.cc | 1 -
 ...v2dbackprop_eltwise_eltwise_fusion_pass.cc | 1 -
 .../conv2dbackprop_eltwise_fusion_pass.cc | 1 -
 .../conv_double_in_fusion_pass.cc | 1 -
 .../conv_single_in_fusion_pass.cc | 1 -
 .../buffer_fusion/eltwise_fusion_pass.cc | 1 -
 .../matmul_eltwise_fusion_pass.cc | 1 -
 .../buffer_fusion/multi_output_fusion_pass.cc | 1 -
 .../reduce_eltwise_fusion_pass.cc | 1 -
 .../segment_eltwise_fusion_pass.cc | 1 -
 .../ascend/buffer_fusion/ub_pattern_fusion.cc | 2 -
 .../insert_memcpy_async_for_cascade.cc | 2 -
 .../enhancer/insert_pad_for_nms_with_mask.cc | 4 -
 .../chang_axis_of_reduce_kernel.cc | 1 -
 .../ascend/format_type/check_consistency.cc | 1 -
 .../convert_unsupported_transnode_to_aicpu.cc | 2 +-
 .../ascend/format_type/insert_cast.cc | 3 -
 .../ascend/format_type/insert_trans_op.cc | 2 -
 .../format_type/insert_transdata_for_runop.cc | 2 -
 .../ir_fission/batch_norm_grad_split.cc | 2 -
 .../ascend/ir_fission/bn_grad_split.cc | 2 -
 .../optimizer/ascend/ir_fission/bn_split.cc | 1 -
 .../ir_fission/layer_norm_grad_split.cc | 2 -
 .../ir_fission/single_batch_norm_fission.cc | 1 -
 .../ascend/ir_fusion/add_input_to_output.cc | 1 -
 .../clip_by_norm_no_div_square_sum_fusion.cc | 2 -
 .../ascend/ir_fusion/lamb_next_mv_rule.cc | 3 -
 .../lamb_update_with_lr_rule_fusion.cc | 2 -
 .../ir_fusion/lamb_update_with_lr_v2.cc | 1 -
 .../ascend/ir_fusion/mul_add_fusion.cc | 1 -
 .../ascend/ir_fusion/mul_addn_fusion.cc | 2 -
 .../ir_fusion/parameter_and_transop_fusion.cc | 2 -
 .../ir_fusion/refresh_parameter_format.cc | 3 -
 .../backend/optimizer/common/node_pass.cc | 2 -
 .../backend/optimizer/common/pass_manager.cc | 4 -
 .../optimizer/common/pattern_engine.cc | 3 -
 .../ccsrc/backend/optimizer/common/visit.cc | 2 -
 .../optimizer/mem_reuse/kernel_refcount.cc | 2 +-
 .../optimizer/pass/add_atomic_clean.cc | 2 -
 .../pass/convert_const_input_to_attr.cc | 1 -
 .../convert_const_input_to_tensor_input.cc | 1 -
 .../pass/convert_const_scalar_to_tensor.cc | 6 -
 .../optimizer/pass/eliminate_redundant_op.cc | 1 -
 .../backend/optimizer/pass/fuse_basic.cc | 2 -
 .../optimizer/pass/fuse_graph_kernel.cc | 2 -
 .../backend/session/anf_runtime_algorithm.cc | 1 -
 .../session/ascend_inference_session.cc | 1 -
 .../ccsrc/backend/session/ascend_session.cc | 4 -
 .../ccsrc/backend/session/cpu_session.cc | 2 -
 .../backend/session/kernel_build_client.cc | 2 -
 .../ccsrc/backend/session/session_basic.cc | 3 -
 .../ccsrc/backend/session/session_factory.cc | 2 +-
 mindspore/ccsrc/debug/anf_ir_dump.cc | 2 -
 mindspore/ccsrc/debug/anf_ir_utils.cc | 2 -
 mindspore/ccsrc/debug/common.cc | 1 -
 mindspore/ccsrc/debug/draw.cc | 4 -
 mindspore/ccsrc/debug/dump_proto.cc | 3 -
 .../frontend/operator/cc_implementations.cc | 2 -
 .../frontend/operator/composite/composite.cc | 1 -
 .../operator/composite/do_signature.cc | 1 -
 .../ccsrc/frontend/operator/composite/map.cc | 1 -
 .../operator/composite/multitype_funcgraph.cc | 7 -
 .../operator/composite/unpack_call.cc | 2 -
 .../ccsrc/frontend/operator/ops_extends.cc | 2 -
 .../frontend/operator/prim_to_function.cc | 3 -
 .../ccsrc/frontend/optimizer/ad/adjoint.cc | 1 -
 .../ccsrc/frontend/optimizer/ad/dfunctor.cc | 5 -
 mindspore/ccsrc/frontend/optimizer/ad/grad.cc | 1 -
 .../ccsrc/frontend/optimizer/ad/kprim.cc | 3 -
 mindspore/ccsrc/frontend/optimizer/clean.cc | 2 -
 .../frontend/optimizer/control_depend.cc | 1 -
 mindspore/ccsrc/frontend/optimizer/cse.cc | 1 -
 .../frontend/optimizer/graph_kernel_reuse.cc | 1 -
 mindspore/ccsrc/frontend/optimizer/irpass.cc | 2 -
 .../optimizer/irpass/branch_culling.cc | 1 -
 .../optimizer/irpass/cast_eliminate.cc | 1 -
 .../optimizer/irpass/grad_var_prepare.cc | 2 -
 .../optimizer/irpass/gradient_eliminate.cc | 2 -
 mindspore/ccsrc/frontend/optimizer/opt.cc | 3 -
 mindspore/ccsrc/frontend/optimizer/pattern.cc | 1 -
 mindspore/ccsrc/frontend/optimizer/py_pass.cc | 3 -
 .../frontend/optimizer/py_pass_manager.cc | 2 -
 .../parallel/auto_parallel/graph_costmodel.cc | 1 -
 .../auto_parallel/rec_core/rec_cost.cc | 2 -
 .../auto_parallel/rec_core/rec_partition.cc | 1 -
 mindspore/ccsrc/frontend/parallel/context.cc | 2 -
 .../ccsrc/frontend/parallel/device_manager.cc | 2 -
 .../parallel/graph_util/generate_graph.cc | 1 -
 .../parallel/graph_util/get_parallel_info.cc | 2 -
 .../parallel/graph_util/graph_info.cc | 1 -
 .../ccsrc/frontend/parallel/group_manager.cc | 1 -
 .../parallel/ops_info/arithmetic_info.cc | 1 -
 .../parallel/ops_info/batch_parallel_info.cc | 1 -
 .../parallel/ops_info/bias_add_info.cc | 1 -
 .../parallel/ops_info/dropout_do_mask_info.cc | 1 -
 .../parallel/ops_info/l2_normalize_info.cc | 1 -
 .../frontend/parallel/ops_info/loss_info.cc | 1 -
 .../parallel/ops_info/operator_info.cc | 2 -
 .../frontend/parallel/ops_info/prelu_info.cc | 1 -
 .../parallel/ps/optimizer_info_builder.cc | 1 -
 .../ccsrc/frontend/parallel/ps/scheduler.cc | 1 -
 .../frontend/parallel/step_auto_parallel.cc | 2 -
 .../ccsrc/frontend/parallel/step_parallel.cc | 1 -
 .../parallel/tensor_layout/arrangement.cc | 1 -
 .../frontend/parallel/tensor_layout/map.cc | 1 -
 .../parallel/tensor_layout/shape_util.cc | 1 -
 .../parallel/tensor_layout/tensor_layout.cc | 1 -
 .../tensor_layout/tensor_redistribution.cc | 1 -
 mindspore/ccsrc/pipeline/jit/init.cc | 3 -
 .../pipeline/jit/parse/data_converter.cc | 4 -
 .../pipeline/jit/parse/function_block.cc | 1 -
 mindspore/ccsrc/pipeline/jit/parse/parse.cc | 1 -
 .../pipeline/jit/parse/python_adapter.cc | 1 -
 mindspore/ccsrc/pipeline/jit/parse/resolve.cc | 2 -
 mindspore/ccsrc/pipeline/jit/pass.cc | 5 -
 mindspore/ccsrc/pipeline/jit/pipeline.cc | 1 -
 mindspore/ccsrc/pipeline/jit/resource.cc | 1 -
 .../jit/static_analysis/program_specialize.cc | 2 -
 .../jit/static_analysis/static_analysis.cc | 1 -
 mindspore/ccsrc/pipeline/jit/validator.cc | 1 -
 .../pipeline/pynative/pynative_execute_ge.cc | 1 +
 mindspore/ccsrc/pybind_api/api_register.cc | 2 -
 mindspore/ccsrc/pybind_api/export_flags.cc | 2 +-
 mindspore/ccsrc/pybind_api/ir/dtype_py.cc | 4 -
 .../ccsrc/pybind_api/ir/func_graph_py.cc | 1 -
 mindspore/ccsrc/pybind_api/ir/signature_py.cc | 1 -
 mindspore/ccsrc/pybind_api/ir/tensor_py.cc | 3 -
 .../device/ascend/ascend_device_address.cc | 2 -
 .../device/ascend/ascend_kernel_runtime.cc | 5 +-
 .../device/ascend/kernel_build_ascend.cc | 2 -
 .../device/ascend/kernel_select_ascend.cc | 6 -
 .../device/ascend/tasksink/task_generator.cc | 1 -
 .../runtime/device/cpu/cpu_kernel_runtime.cc | 6 -
 .../ccsrc/runtime/device/kernel_adjust.cc | 3 -
 .../ccsrc/runtime/device/kernel_runtime.cc | 4 -
 mindspore/ccsrc/transform/graph_ir/convert.cc | 426 +---
 mindspore/ccsrc/transform/graph_ir/convert.h | 63 +-
 .../transform/graph_ir/df_graph_manager.cc | 5 -
 .../ccsrc/transform/graph_ir/graph_builder.cc | 3 +-
 .../ccsrc/transform/graph_ir/graph_runner.cc | 1 -
 .../ccsrc/transform/graph_ir/op_adapter.cc | 664 ++++++++
 .../ccsrc/transform/graph_ir/op_adapter.h | 709 ++-------
 .../transform/graph_ir/op_adapter_base.h | 1 -
 .../transform/graph_ir/op_adapter_desc.h | 75 +
 .../transform/graph_ir/op_adapter_map.cc | 32 +
 .../ccsrc/transform/graph_ir/op_adapter_map.h | 205 +++
 .../ccsrc/transform/graph_ir/op_declare.cc | 1370 -----------------
 .../ccsrc/transform/graph_ir/op_declare.h | 516 -------
 .../graph_ir/op_declare/array_ops_declare.cc | 90 ++
 .../graph_ir/op_declare/array_ops_declare.h | 58 +
 .../op_declare/control_flow_ops_declare.cc | 32 +
 .../op_declare/control_flow_ops_declare.h | 33 +
 .../graph_ir/op_declare/ctc_ops_declare.cc | 31 +
 .../graph_ir/op_declare/ctc_ops_declare.h | 29 +
 .../elewise_calculation_ops_declare.cc | 364 +++++
 .../elewise_calculation_ops_declare.h | 193 +++
 .../op_declare/functional_ops_declare.cc | 27 +
 .../op_declare/functional_ops_declare.h | 31 +
 .../graph_ir/op_declare/hcom_ops_declare.cc | 50 +
 .../graph_ir/op_declare/hcom_ops_declare.h | 39 +
 .../graph_ir/op_declare/image_ops_declare.cc | 48 +
 .../graph_ir/op_declare/image_ops_declare.h | 38 +
 .../op_declare/logging_ops_declare.cc | 27 +
 .../graph_ir/op_declare/logging_ops_declare.h | 31 +
 .../matrix_calculation_ops_declare.cc | 69 +
 .../matrix_calculation_ops_declare.h | 50 +
 .../op_declare/nn_batch_norm_ops_declare.cc | 68 +
 .../op_declare/nn_batch_norm_ops_declare.h | 38 +
 .../op_declare/nn_calculation_ops_declare.cc | 103 ++
 .../op_declare/nn_calculation_ops_declare.h | 55 +
 .../op_declare/nn_detect_ops_declare.cc | 79 +
 .../op_declare/nn_detect_ops_declare.h | 44 +
 .../op_declare/nn_norm_ops_declare.cc | 116 ++
 .../graph_ir/op_declare/nn_norm_ops_declare.h | 68 +
 .../op_declare/nn_pooling_ops_declare.cc | 72 +
 .../op_declare/nn_pooling_ops_declare.h | 44 +
 .../op_declare/nn_training_ops_declare.cc | 128 ++
 .../op_declare/nn_training_ops_declare.h | 60 +
 .../op_declare/nonlinear_fuc_ops_declare.cc | 103 ++
 .../op_declare/nonlinear_fuc_ops_declare.h | 68 +
 .../op_declare/npu_loss_scale_ops_declare.cc | 37 +
 .../op_declare/npu_loss_scale_ops_declare.h | 35 +
 .../graph_ir/op_declare/op_declare_macro.h | 169 ++
 .../graph_ir/op_declare/pad_ops_declare.cc | 32 +
 .../graph_ir/op_declare/pad_ops_declare.h | 32 +
 .../op_declare/quantize_ops_declare.cc | 36 +
 .../op_declare/quantize_ops_declare.h | 32 +
 .../graph_ir/op_declare/random_ops_declare.cc | 41 +
 .../graph_ir/op_declare/random_ops_declare.h | 35 +
 .../graph_ir/op_declare/reduce_ops_declare.cc | 68 +
 .../graph_ir/op_declare/reduce_ops_declare.h | 52 +
 .../graph_ir/op_declare/rnn_declare.cc | 51 +
 .../graph_ir/op_declare/rnn_declare.h | 38 +
 .../graph_ir/op_declare/rpn_ops_declare.cc | 26 +
 .../graph_ir/op_declare/rpn_ops_declare.h | 29 +
 .../op_declare/selection_ops_declare.cc | 136 ++
 .../op_declare/selection_ops_declare.h | 81 +
 .../split_combination_ops_declare.cc | 43 +
 .../split_combination_ops_declare.h | 37 +
 .../graph_ir/op_declare/state_ops_declare.cc | 23 +
 .../graph_ir/op_declare/state_ops_declare.h | 28 +
 .../op_declare/transformation_ops_declare.cc | 76 +
 .../op_declare/transformation_ops_declare.h | 50 +
 mindspore/ccsrc/transform/graph_ir/util.cc | 1 -
 mindspore/ccsrc/transform/onnx/ir_exporter.cc | 2 -
 .../ccsrc/transform/onnx/onnx_exporter.cc | 3 -
 mindspore/ccsrc/transform/op_declare.cc | 1295 ----------------
 mindspore/ccsrc/utils/callbacks.cc | 2 -
 .../ccsrc/utils/context/context_extends.cc | 1 -
 .../ccsrc/utils/load_onnx/anf_converter.cc | 3 -
 .../ccsrc/utils/load_onnx/anf_model_parser.cc | 1 -
 mindspore/ccsrc/utils/summary/event_writer.cc | 1 -
 mindspore/ccsrc/utils/system/file_system.cc | 2 -
 mindspore/ccsrc/utils/tensorprint_utils.cc | 3 -
 mindspore/ccsrc/vm/vmimpl.cc | 6 -
 mindspore/core/utils/trace_base.cc | 6 -
 mindspore/core/utils/trace_info.cc | 2 -
 tests/ut/cpp/CMakeLists.txt | 1 +
 tests/ut/cpp/transform/op_adapter_test.cc | 4 +-
 254 files changed, 4617 insertions(+), 4606 deletions(-)
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_adapter.cc
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_adapter_desc.h
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_adapter_map.cc
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_adapter_map.h
 delete mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare.cc
 delete mode 100755 mindspore/ccsrc/transform/graph_ir/op_declare.h
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/array_ops_declare.cc
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/array_ops_declare.h
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/control_flow_ops_declare.cc
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/control_flow_ops_declare.h
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/ctc_ops_declare.cc
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/ctc_ops_declare.h
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/elewise_calculation_ops_declare.cc
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/elewise_calculation_ops_declare.h
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/functional_ops_declare.cc
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/functional_ops_declare.h
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/hcom_ops_declare.cc
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/hcom_ops_declare.h
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/image_ops_declare.cc
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/image_ops_declare.h
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/logging_ops_declare.cc
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/logging_ops_declare.h
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/matrix_calculation_ops_declare.cc
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/matrix_calculation_ops_declare.h
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/nn_batch_norm_ops_declare.cc
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/nn_batch_norm_ops_declare.h
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/nn_calculation_ops_declare.cc
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/nn_calculation_ops_declare.h
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/nn_detect_ops_declare.cc
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/nn_detect_ops_declare.h
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/nn_norm_ops_declare.cc
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/nn_norm_ops_declare.h
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/nn_pooling_ops_declare.cc
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/nn_pooling_ops_declare.h
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/nn_training_ops_declare.cc
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/nn_training_ops_declare.h
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/nonlinear_fuc_ops_declare.cc
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/nonlinear_fuc_ops_declare.h
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/npu_loss_scale_ops_declare.cc
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/npu_loss_scale_ops_declare.h
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/op_declare_macro.h
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/pad_ops_declare.cc
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/pad_ops_declare.h
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/quantize_ops_declare.cc
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/quantize_ops_declare.h
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/random_ops_declare.cc
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/random_ops_declare.h
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/reduce_ops_declare.cc
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/reduce_ops_declare.h
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/rnn_declare.cc
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/rnn_declare.h
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/rpn_ops_declare.cc
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/rpn_ops_declare.h
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/selection_ops_declare.cc
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/selection_ops_declare.h
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/split_combination_ops_declare.cc
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/split_combination_ops_declare.h
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/state_ops_declare.cc
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/state_ops_declare.h
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/transformation_ops_declare.cc
 create mode 100644 mindspore/ccsrc/transform/graph_ir/op_declare/transformation_ops_declare.h
 delete mode 100644 mindspore/ccsrc/transform/op_declare.cc

diff --git a/mindspore/ccsrc/backend/kernel_compiler/aicpu/aicpu_kernel_build.cc b/mindspore/ccsrc/backend/kernel_compiler/aicpu/aicpu_kernel_build.cc
index 1e855f8fc0f..cdb2fe10477 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/aicpu/aicpu_kernel_build.cc
+++ b/mindspore/ccsrc/backend/kernel_compiler/aicpu/aicpu_kernel_build.cc
@@ -15,13 +15,11 @@
  */
 #include "backend/kernel_compiler/aicpu/aicpu_kernel_build.h"
 #include
-#include
 #include
 #include
 #include
 #include
 #include
-#include
 #include "runtime/device/kernel_runtime.h"
 #include "backend/kernel_compiler/aicpu/aicpu_kernel_mod.h"
 #include "backend/kernel_compiler/akg/akg_kernel_build.h"
@@ -30,7 +28,6 @@
 #include "proto/attr.pb.h"
 #include "proto/node_def.pb.h"
 #include "backend/session/anf_runtime_algorithm.h"
-#include "utils/ms_utils.h"
 #include "backend/kernel_compiler/aicpu/aicpu_util.h"
 #include "backend/session/kernel_graph.h"
 #include "backend/kernel_compiler/common_utils.h"
diff --git a/mindspore/ccsrc/backend/kernel_compiler/aicpu/aicpu_kernel_mod.cc b/mindspore/ccsrc/backend/kernel_compiler/aicpu/aicpu_kernel_mod.cc
index b2f992fc829..d00fab381ec 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/aicpu/aicpu_kernel_mod.cc
+++ b/mindspore/ccsrc/backend/kernel_compiler/aicpu/aicpu_kernel_mod.cc
@@ -23,7 +23,6 @@
 #include "runtime/mem.h"
 #include "runtime/rt.h"
-#include "backend/kernel_compiler/aicpu/aicpu_kernel_build.h"
 #include "utils/convert_utils.h"
 #include "backend/kernel_compiler/aicpu/aicpu_util.h"
 #include "utils/ms_context.h"
diff --git a/mindspore/ccsrc/backend/kernel_compiler/aicpu/aicpu_util.cc b/mindspore/ccsrc/backend/kernel_compiler/aicpu/aicpu_util.cc
index 790319daa6d..2f17967c035 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/aicpu/aicpu_util.cc
+++ b/mindspore/ccsrc/backend/kernel_compiler/aicpu/aicpu_util.cc
@@ -14,13 +14,9 @@
  * limitations under the License.
  */
 #include "backend/kernel_compiler/aicpu/aicpu_util.h"
-#include
-#include
 #include "proto/types.pb.h"
 #include "runtime/mem.h"
-#include "runtime/rt.h"
 #include "utils/convert_utils.h"
-#include "backend/session/anf_runtime_algorithm.h"
 namespace mindspore {
 namespace kernel {
diff --git a/mindspore/ccsrc/backend/kernel_compiler/akg/akg_kernel_build.cc b/mindspore/ccsrc/backend/kernel_compiler/akg/akg_kernel_build.cc
index 1dad3d4e57b..c4420af6d79 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/akg/akg_kernel_build.cc
+++ b/mindspore/ccsrc/backend/kernel_compiler/akg/akg_kernel_build.cc
@@ -15,22 +15,16 @@
  */
 #include "backend/kernel_compiler/akg/akg_kernel_build.h"
-#include
-#include
 #include
 #include
-#include
-#include
 #include
 #include
 #include
 #include
 #include
-#include
 #include
 #include
 #include
-#include "utils/ms_utils.h"
 #include "utils/convert_utils.h"
 #include "utils/any.h"
 #include "utils/utils.h"
diff --git a/mindspore/ccsrc/backend/kernel_compiler/akg/ascend/akg_ascend_kernel_mod.cc b/mindspore/ccsrc/backend/kernel_compiler/akg/ascend/akg_ascend_kernel_mod.cc
index 8affa12c320..8fea3eefd65 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/akg/ascend/akg_ascend_kernel_mod.cc
+++ b/mindspore/ccsrc/backend/kernel_compiler/akg/ascend/akg_ascend_kernel_mod.cc
@@ -16,17 +16,12 @@
 #include "backend/kernel_compiler/akg/ascend/akg_ascend_kernel_mod.h"
 #include
-#include
-#include
 #include
-#include
 #include
 #include
-#include "nlohmann/json.hpp"
 #include "runtime/rt.h"
 #include "utils/log_adapter.h"
 #include "utils/convert_utils.h"
-#include "utils/ms_context.h"
 namespace mindspore {
 namespace kernel {
diff --git a/mindspore/ccsrc/backend/kernel_compiler/common_utils.cc b/mindspore/ccsrc/backend/kernel_compiler/common_utils.cc
index bf383ff9d7d..bd9c1a26fa6 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/common_utils.cc
+++ b/mindspore/ccsrc/backend/kernel_compiler/common_utils.cc
@@ -27,7 +27,6 @@
 #include "utils/ms_utils.h"
 #include "ir/manager.h"
 #include "ir/meta_tensor.h"
-#include "ir/func_graph.h"
 #include "frontend/operator/ops.h"
 #include "ir/graph_utils.h"
diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/cpu_kernel_factory.cc b/mindspore/ccsrc/backend/kernel_compiler/cpu/cpu_kernel_factory.cc
index accd7429760..0836696ec3b 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/cpu/cpu_kernel_factory.cc
+++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/cpu_kernel_factory.cc
@@ -17,7 +17,6 @@
 #include "backend/kernel_compiler/cpu/cpu_kernel_factory.h"
 #include
-#include
 #include
 #include "runtime/device/kernel_info.h"
diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/lstm_grad_cpu_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/lstm_grad_cpu_kernel.cc
index ea2ea9824df..52642911078 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/lstm_grad_cpu_kernel.cc
+++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/lstm_grad_cpu_kernel.cc
@@ -15,8 +15,6 @@
  */
 #include "backend/kernel_compiler/cpu/mkldnn/lstm_grad_cpu_kernel.h"
 #include
-#include
-#include
 #include
 #include "utils/ms_utils.h"
 #include "backend/kernel_compiler/cpu/mkldnn/mkl_kernel_engine.h"
diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/matmul_cpu_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/matmul_cpu_kernel.cc
index ee9c4eb300c..924c49c3eb9 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/matmul_cpu_kernel.cc
+++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/matmul_cpu_kernel.cc
@@ -14,7 +14,6 @@
  * limitations under the License.
  */
 #include "backend/kernel_compiler/cpu/mkldnn/matmul_cpu_kernel.h"
-#include
 #include
 #include "backend/kernel_compiler/cpu/mkldnn/mkl_kernel_engine.h"
 #include "utils/ms_utils.h"
diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/mkl_cpu_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/mkl_cpu_kernel.cc
index 7f66b81b82f..922d0b5e668 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/mkl_cpu_kernel.cc
+++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/mkl_cpu_kernel.cc
@@ -16,7 +16,6 @@
 #include "backend/kernel_compiler/cpu/mkldnn/mkl_cpu_kernel.h"
 #include
 #include
-#include
 #include "utils/ms_utils.h"
 #include "backend/kernel_compiler/cpu/mkldnn/mkl_kernel_engine.h"
diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/ps/embedding_look_up_ps_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/cpu/ps/embedding_look_up_ps_kernel.cc
index 4a36628dc7a..66e154f954a 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/cpu/ps/embedding_look_up_ps_kernel.cc
+++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/ps/embedding_look_up_ps_kernel.cc
@@ -15,7 +15,6 @@
  */
 #include "backend/kernel_compiler/cpu/ps/embedding_look_up_ps_kernel.h"
-#include
 #include
 #include
 #include "backend/kernel_compiler/common_utils.h"
diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/ps/pserver_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/cpu/ps/pserver_kernel.cc
index 3aa421881a0..e6a62a1daac 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/cpu/ps/pserver_kernel.cc
+++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/ps/pserver_kernel.cc
@@ -14,9 +14,6 @@
  * limitations under the License.
  */
-#include "backend/kernel_compiler/cpu/ps/pserver_kernel.h"
-#include "frontend/parallel/ps/util.h"
-
 namespace mindspore {
 namespace kernel {
 namespace ps {}  // namespace ps
diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/reduce_cpu_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/cpu/reduce_cpu_kernel.cc
index c2075b76c56..ef5b06ad2f5 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/cpu/reduce_cpu_kernel.cc
+++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/reduce_cpu_kernel.cc
@@ -13,7 +13,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
*/ -#include + #include #include #include "backend/kernel_compiler/cpu/reduce_cpu_kernel.h" diff --git a/mindspore/ccsrc/backend/kernel_compiler/hccl/hccl_kernel_build.cc b/mindspore/ccsrc/backend/kernel_compiler/hccl/hccl_kernel_build.cc index 8297be0b6d7..5339dde6e78 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/hccl/hccl_kernel_build.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/hccl/hccl_kernel_build.cc @@ -18,8 +18,6 @@ #include #include -#include - #include "backend/kernel_compiler/hccl/hccl_kernel.h" #include "backend/session/anf_runtime_algorithm.h" diff --git a/mindspore/ccsrc/backend/kernel_compiler/hccl/hcom_all_broadcast.cc b/mindspore/ccsrc/backend/kernel_compiler/hccl/hcom_all_broadcast.cc index 29f3e954815..b0be4c4ef18 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/hccl/hcom_all_broadcast.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/hccl/hcom_all_broadcast.cc @@ -15,11 +15,7 @@ */ #include "backend/kernel_compiler/hccl/hcom_all_broadcast.h" - -#include -#include #include - #include "utils/ms_context.h" namespace mindspore { diff --git a/mindspore/ccsrc/backend/kernel_compiler/hccl/hcom_all_gather.cc b/mindspore/ccsrc/backend/kernel_compiler/hccl/hcom_all_gather.cc index 2502ec799bb..088ca37db92 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/hccl/hcom_all_gather.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/hccl/hcom_all_gather.cc @@ -15,11 +15,7 @@ */ #include "backend/kernel_compiler/hccl/hcom_all_gather.h" - -#include -#include #include - #include "utils/ms_context.h" namespace mindspore { diff --git a/mindspore/ccsrc/backend/kernel_compiler/hccl/hcom_all_reduce.cc b/mindspore/ccsrc/backend/kernel_compiler/hccl/hcom_all_reduce.cc index 70857d6a6cd..70bf880f64b 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/hccl/hcom_all_reduce.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/hccl/hcom_all_reduce.cc @@ -15,11 +15,7 @@ */ #include "backend/kernel_compiler/hccl/hcom_all_reduce.h" - -#include #include -#include - #include "utils/ms_context.h" namespace mindspore { diff --git a/mindspore/ccsrc/backend/kernel_compiler/hccl/hcom_all_reduce_scatter.cc b/mindspore/ccsrc/backend/kernel_compiler/hccl/hcom_all_reduce_scatter.cc index ca38f3e73b3..cbeab9e7e18 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/hccl/hcom_all_reduce_scatter.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/hccl/hcom_all_reduce_scatter.cc @@ -15,11 +15,7 @@ */ #include "backend/kernel_compiler/hccl/hcom_all_reduce_scatter.h" - -#include -#include #include - #include "utils/ms_context.h" namespace mindspore { diff --git a/mindspore/ccsrc/backend/kernel_compiler/hccl/hcom_util.cc b/mindspore/ccsrc/backend/kernel_compiler/hccl/hcom_util.cc index 721c1b6ba04..8dec3669804 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/hccl/hcom_util.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/hccl/hcom_util.cc @@ -15,9 +15,7 @@ */ #include "backend/kernel_compiler/hccl/hcom_util.h" - #include - #include "backend/kernel_compiler/common_utils.h" #include "backend/session/anf_runtime_algorithm.h" #include "utils/utils.h" diff --git a/mindspore/ccsrc/backend/kernel_compiler/kernel_fusion.cc b/mindspore/ccsrc/backend/kernel_compiler/kernel_fusion.cc index d4ef905729c..1bae3fa2257 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/kernel_fusion.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/kernel_fusion.cc @@ -20,8 +20,6 @@ #include #include #include - -#include "utils/ms_utils.h" #include "backend/kernel_compiler/tbe/tbe_kernel_build.h" #include 
"backend/kernel_compiler/tbe/tbe_kernel_parallel_build.h" #include "backend/kernel_compiler/tbe/tbe_utils.h" diff --git a/mindspore/ccsrc/backend/kernel_compiler/oplib/oplib.cc b/mindspore/ccsrc/backend/kernel_compiler/oplib/oplib.cc index 9f3099c415e..0d955540b7d 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/oplib/oplib.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/oplib/oplib.cc @@ -16,7 +16,6 @@ #include "backend/kernel_compiler/oplib/oplib.h" #include -#include #include #include #include diff --git a/mindspore/ccsrc/backend/kernel_compiler/rts/assign.cc b/mindspore/ccsrc/backend/kernel_compiler/rts/assign.cc index 49666293b4c..7e98fb5994f 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/rts/assign.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/rts/assign.cc @@ -15,11 +15,8 @@ */ #include "backend/kernel_compiler/rts/assign.h" - #include - #include "runtime/mem.h" -#include "utils/ms_utils.h" using ge::model_runner::MemcpyAsyncTaskInfo; using MemcpyAsyncTaskInfoPtr = std::shared_ptr; diff --git a/mindspore/ccsrc/backend/kernel_compiler/rts/label_goto.cc b/mindspore/ccsrc/backend/kernel_compiler/rts/label_goto.cc index 81c85bd370f..eb651b34667 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/rts/label_goto.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/rts/label_goto.cc @@ -15,12 +15,10 @@ */ #include "backend/kernel_compiler/rts/label_goto.h" -#include #include #include "runtime/stream.h" #include "framework/ge_runtime/task_info.h" #include "backend/session/anf_runtime_algorithm.h" -#include "utils/ms_utils.h" using ge::model_runner::LabelGotoTaskInfo; using LabelGotoTaskInfoPtr = std::shared_ptr; diff --git a/mindspore/ccsrc/backend/kernel_compiler/rts/label_set.cc b/mindspore/ccsrc/backend/kernel_compiler/rts/label_set.cc index 5945dc9f528..69f0bfd49b4 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/rts/label_set.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/rts/label_set.cc @@ -15,12 +15,10 @@ */ #include "backend/kernel_compiler/rts/label_set.h" -#include #include #include "runtime/stream.h" #include "framework/ge_runtime/task_info.h" #include "backend/session/anf_runtime_algorithm.h" -#include "utils/ms_utils.h" using ge::model_runner::LabelSetTaskInfo; using LabelSetTaskInfoPtr = std::shared_ptr; diff --git a/mindspore/ccsrc/backend/kernel_compiler/rts/label_switch.cc b/mindspore/ccsrc/backend/kernel_compiler/rts/label_switch.cc index 8e065693706..e5315efc6b7 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/rts/label_switch.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/rts/label_switch.cc @@ -15,13 +15,11 @@ */ #include "backend/kernel_compiler/rts/label_switch.h" -#include #include #include #include "runtime/stream.h" #include "framework/ge_runtime/task_info.h" #include "backend/session/anf_runtime_algorithm.h" -#include "utils/ms_utils.h" using ge::model_runner::LabelSwitchTaskInfo; using LabelSwitchTaskInfoPtr = std::shared_ptr; diff --git a/mindspore/ccsrc/backend/kernel_compiler/rts/memcpy_async.cc b/mindspore/ccsrc/backend/kernel_compiler/rts/memcpy_async.cc index 9546f38e6b6..fa175b3805b 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/rts/memcpy_async.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/rts/memcpy_async.cc @@ -15,12 +15,9 @@ */ #include "backend/kernel_compiler/rts/memcpy_async.h" - #include #include - #include "runtime/mem.h" -#include "utils/ms_utils.h" #include "backend/session/anf_runtime_algorithm.h" #include "common/trans.h" #include "utils/ms_context.h" diff --git 
a/mindspore/ccsrc/backend/kernel_compiler/rts/profiling_kernel_mod.cc b/mindspore/ccsrc/backend/kernel_compiler/rts/profiling_kernel_mod.cc index 8213468b488..e9548481e6c 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/rts/profiling_kernel_mod.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/rts/profiling_kernel_mod.cc @@ -15,11 +15,9 @@ */ #include "backend/kernel_compiler/rts/profiling_kernel_mod.h" - #include #include #include - #include "framework/ge_runtime/task_info.h" #include "runtime/device/ascend/profiling/profiling_utils.h" #include "backend/session/anf_runtime_algorithm.h" diff --git a/mindspore/ccsrc/backend/kernel_compiler/rts/recv.cc b/mindspore/ccsrc/backend/kernel_compiler/rts/recv.cc index d51663f3544..1661ecf256d 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/rts/recv.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/rts/recv.cc @@ -18,15 +18,12 @@ #include #include "runtime/stream.h" #include "utils/ms_context.h" -#include "runtime/device/ascend/ascend_stream_assign.h" #include "framework/ge_runtime/task_info.h" #include "backend/session/anf_runtime_algorithm.h" -#include "utils/ms_utils.h" namespace mindspore { namespace kernel { using ge::model_runner::EventWaitTaskInfo; -using mindspore::device::ascend::AscendStreamAssign; using EventWaitTaskInfoPtr = std::shared_ptr; RecvKernel::RecvKernel() { event_id_ = 0; } diff --git a/mindspore/ccsrc/backend/kernel_compiler/rts/rt_kernel_build.cc b/mindspore/ccsrc/backend/kernel_compiler/rts/rt_kernel_build.cc index 9704a9b97fe..bf181c487e6 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/rts/rt_kernel_build.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/rts/rt_kernel_build.cc @@ -15,12 +15,9 @@ */ #include "backend/kernel_compiler/rts/rt_kernel_build.h" - #include #include -#include #include - #include "backend/kernel_compiler/rts/rt_kernel.h" #include "backend/session/anf_runtime_algorithm.h" diff --git a/mindspore/ccsrc/backend/kernel_compiler/rts/send.cc b/mindspore/ccsrc/backend/kernel_compiler/rts/send.cc index 3b284abe306..53081f47918 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/rts/send.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/rts/send.cc @@ -19,7 +19,6 @@ #include "runtime/event.h" #include "framework/ge_runtime/task_info.h" #include "backend/session/anf_runtime_algorithm.h" -#include "utils/ms_utils.h" using ge::model_runner::EventRecordTaskInfo; using EventRecordTaskInfoPtr = std::shared_ptr; diff --git a/mindspore/ccsrc/backend/kernel_compiler/rts/stream_active.cc b/mindspore/ccsrc/backend/kernel_compiler/rts/stream_active.cc index 4e48366f452..77b80346d15 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/rts/stream_active.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/rts/stream_active.cc @@ -15,12 +15,10 @@ */ #include "backend/kernel_compiler/rts/stream_active.h" -#include #include #include "runtime/stream.h" #include "framework/ge_runtime/task_info.h" #include "backend/session/anf_runtime_algorithm.h" -#include "utils/ms_utils.h" using ge::model_runner::StreamActiveTaskInfo; using StreamActiveTaskInfoPtr = std::shared_ptr; diff --git a/mindspore/ccsrc/backend/kernel_compiler/rts/stream_switch.cc b/mindspore/ccsrc/backend/kernel_compiler/rts/stream_switch.cc index aecd7f69e8e..e5947487546 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/rts/stream_switch.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/rts/stream_switch.cc @@ -15,14 +15,11 @@ */ #include "backend/kernel_compiler/rts/stream_switch.h" - #include #include - #include "runtime/stream.h" #include 
"framework/ge_runtime/task_info.h" #include "backend/session/anf_runtime_algorithm.h" -#include "utils/ms_utils.h" using ge::model_runner::StreamSwitchTaskInfo; using StreamSwitchTaskInfoPtr = std::shared_ptr; diff --git a/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_kernel_parallel_build.cc b/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_kernel_parallel_build.cc index 7a625268d36..9a079dd3e0c 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_kernel_parallel_build.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_kernel_parallel_build.cc @@ -17,7 +17,6 @@ #include "backend/kernel_compiler/tbe/tbe_kernel_parallel_build.h" #include -#include #include #include #include diff --git a/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_kernel_select/tbe_kernel_select.cc b/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_kernel_select/tbe_kernel_select.cc index ec176e324d2..9f19de9d013 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_kernel_select/tbe_kernel_select.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_kernel_select/tbe_kernel_select.cc @@ -23,7 +23,6 @@ #include "backend/kernel_compiler/oplib/oplib.h" #include "backend/kernel_compiler/tbe/tbe_kernel_build.h" #include "nlohmann/json.hpp" -#include "utils/ms_context.h" #include "backend/optimizer/common/helper.h" #include "backend/kernel_compiler/tbe/tbe_convert_utils.h" #include "frontend/parallel/ops_info/ops_utils.h" diff --git a/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_utils.cc b/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_utils.cc index b68d30633d4..3d01a1b3fd2 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_utils.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_utils.cc @@ -16,26 +16,19 @@ #include "backend/kernel_compiler/tbe/tbe_utils.h" -#include #include -#include #include -#include #include #include #include #include #include "runtime/kernel.h" -#include "backend/kernel_compiler/oplib/oplib.h" #include "utils/utils.h" -#include "backend/session/anf_runtime_algorithm.h" #include "utils/ms_utils.h" -#include "runtime/device/kernel_info.h" #include "ir/dtype/type.h" #include "backend/kernel_compiler/tbe/tbe_convert_utils.h" #include "securec/include/securec.h" -#include "frontend/operator/ops.h" namespace mindspore { namespace kernel { diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ascend_backend_optimization.cc b/mindspore/ccsrc/backend/optimizer/ascend/ascend_backend_optimization.cc index 4dde74a5afd..0b137c6fc97 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/ascend_backend_optimization.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/ascend_backend_optimization.cc @@ -81,7 +81,6 @@ #include "backend/optimizer/ascend/buffer_fusion/conv_single_in_fusion_pass.h" #include "backend/optimizer/ascend/buffer_fusion/conv_double_in_fusion_pass.h" #include "backend/optimizer/ascend/buffer_fusion/matmul_eltwise_fusion_pass.h" -#include "backend/optimizer/ascend/buffer_fusion/matmul_confusiontranspose_fusion_pass.h" #include "backend/optimizer/ascend/buffer_fusion/depthwiseconv_eltwise_fusion_pass.h" #include "backend/optimizer/ascend/buffer_fusion/bnupdate_eltwise_fusion_pass.h" #include "backend/optimizer/ascend/buffer_fusion/bnupdate_eltwise_eltwise_fusion_pass.h" diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv2dbackprop_eltwise_eltwise_fusion_pass.cc b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv2dbackprop_eltwise_eltwise_fusion_pass.cc index eecda879d96..dbaef1805b3 100644 --- 
a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv2dbackprop_eltwise_eltwise_fusion_pass.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv2dbackprop_eltwise_eltwise_fusion_pass.cc @@ -17,7 +17,6 @@ #include #include #include -#include #include "backend/kernel_compiler/kernel_fusion.h" #include "debug/anf_ir_dump.h" #include "backend/session/anf_runtime_algorithm.h" diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv2dbackprop_eltwise_fusion_pass.cc b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv2dbackprop_eltwise_fusion_pass.cc index d9d6c37848f..c3967f39633 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv2dbackprop_eltwise_fusion_pass.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv2dbackprop_eltwise_fusion_pass.cc @@ -17,7 +17,6 @@ #include #include #include -#include #include "backend/kernel_compiler/kernel_fusion.h" #include "debug/anf_ir_dump.h" #include "backend/session/anf_runtime_algorithm.h" diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv_double_in_fusion_pass.cc b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv_double_in_fusion_pass.cc index d5626bc4145..50d93dd0191 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv_double_in_fusion_pass.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv_double_in_fusion_pass.cc @@ -17,7 +17,6 @@ #include #include #include -#include #include "backend/kernel_compiler/kernel_fusion.h" #include "debug/anf_ir_dump.h" #include "backend/session/anf_runtime_algorithm.h" diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv_single_in_fusion_pass.cc b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv_single_in_fusion_pass.cc index f31d8f75807..d58ce8aa483 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv_single_in_fusion_pass.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv_single_in_fusion_pass.cc @@ -17,7 +17,6 @@ #include #include #include -#include #include "backend/kernel_compiler/kernel_fusion.h" #include "debug/anf_ir_dump.h" #include "backend/session/anf_runtime_algorithm.h" diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/eltwise_fusion_pass.cc b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/eltwise_fusion_pass.cc index 865ae57349d..e899561eb97 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/eltwise_fusion_pass.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/eltwise_fusion_pass.cc @@ -17,7 +17,6 @@ #include #include #include -#include #include "backend/kernel_compiler/kernel_fusion.h" #include "debug/anf_ir_dump.h" #include "backend/session/anf_runtime_algorithm.h" diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/matmul_eltwise_fusion_pass.cc b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/matmul_eltwise_fusion_pass.cc index f026bc8b019..472b6a86154 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/matmul_eltwise_fusion_pass.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/matmul_eltwise_fusion_pass.cc @@ -17,7 +17,6 @@ #include #include #include -#include #include "backend/kernel_compiler/kernel_fusion.h" #include "debug/anf_ir_dump.h" #include "backend/session/anf_runtime_algorithm.h" diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/multi_output_fusion_pass.cc b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/multi_output_fusion_pass.cc index 
bf29ca14161..038ed424aea 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/multi_output_fusion_pass.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/multi_output_fusion_pass.cc @@ -17,7 +17,6 @@ #include #include #include -#include #include "backend/kernel_compiler/kernel_fusion.h" #include "debug/anf_ir_dump.h" #include "backend/session/anf_runtime_algorithm.h" diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/reduce_eltwise_fusion_pass.cc b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/reduce_eltwise_fusion_pass.cc index 741be4835de..4a74e6b7ad6 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/reduce_eltwise_fusion_pass.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/reduce_eltwise_fusion_pass.cc @@ -18,7 +18,6 @@ #include #include #include -#include #include "backend/kernel_compiler/kernel_fusion.h" #include "debug/anf_ir_dump.h" #include "backend/session/anf_runtime_algorithm.h" diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/segment_eltwise_fusion_pass.cc b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/segment_eltwise_fusion_pass.cc index 6ae541c8cb7..1b8c0159945 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/segment_eltwise_fusion_pass.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/segment_eltwise_fusion_pass.cc @@ -17,7 +17,6 @@ #include #include #include -#include #include "backend/kernel_compiler/kernel_fusion.h" #include "debug/anf_ir_dump.h" #include "backend/session/anf_runtime_algorithm.h" diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/ub_pattern_fusion.cc b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/ub_pattern_fusion.cc index 353d3f080a1..62ec43e19bc 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/ub_pattern_fusion.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/ub_pattern_fusion.cc @@ -15,9 +15,7 @@ */ #include "backend/optimizer/ascend/buffer_fusion/ub_pattern_fusion.h" #include -#include #include -#include #include #include #include diff --git a/mindspore/ccsrc/backend/optimizer/ascend/enhancer/insert_memcpy_async_for_cascade.cc b/mindspore/ccsrc/backend/optimizer/ascend/enhancer/insert_memcpy_async_for_cascade.cc index 0f1946926d8..4d6079575c9 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/enhancer/insert_memcpy_async_for_cascade.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/enhancer/insert_memcpy_async_for_cascade.cc @@ -15,8 +15,6 @@ */ #include "backend/optimizer/ascend/enhancer/insert_memcpy_async_for_cascade.h" #include -#include -#include #include "utils/utils.h" #include "backend/session/anf_runtime_algorithm.h" #include "frontend/optimizer/opt.h" diff --git a/mindspore/ccsrc/backend/optimizer/ascend/enhancer/insert_pad_for_nms_with_mask.cc b/mindspore/ccsrc/backend/optimizer/ascend/enhancer/insert_pad_for_nms_with_mask.cc index be61833fe4c..9adbf945017 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/enhancer/insert_pad_for_nms_with_mask.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/enhancer/insert_pad_for_nms_with_mask.cc @@ -16,14 +16,10 @@ #include "backend/optimizer/ascend/enhancer/insert_pad_for_nms_with_mask.h" #include -#include #include -#include "backend/optimizer/ascend/ascend_helper.h" #include "backend/optimizer/common/helper.h" #include "backend/session/anf_runtime_algorithm.h" #include "utils/utils.h" -#include "runtime/device/kernel_info.h" -#include "backend/kernel_compiler//oplib/oplib.h" #include 
"frontend/operator/ops.h" namespace mindspore { diff --git a/mindspore/ccsrc/backend/optimizer/ascend/format_type/chang_axis_of_reduce_kernel.cc b/mindspore/ccsrc/backend/optimizer/ascend/format_type/chang_axis_of_reduce_kernel.cc index 03c4618069f..1e7cd4c710a 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/format_type/chang_axis_of_reduce_kernel.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/format_type/chang_axis_of_reduce_kernel.cc @@ -22,7 +22,6 @@ #include "utils/utils.h" #include "backend/session/anf_runtime_algorithm.h" -#include "utils/ms_utils.h" #include "backend/kernel_compiler/common_utils.h" namespace mindspore { diff --git a/mindspore/ccsrc/backend/optimizer/ascend/format_type/check_consistency.cc b/mindspore/ccsrc/backend/optimizer/ascend/format_type/check_consistency.cc index 24fda289ac6..19d8572f44d 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/format_type/check_consistency.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/format_type/check_consistency.cc @@ -21,7 +21,6 @@ #include "utils/utils.h" #include "backend/session/anf_runtime_algorithm.h" -#include "utils/ms_utils.h" #include "backend/kernel_compiler/common_utils.h" namespace mindspore { diff --git a/mindspore/ccsrc/backend/optimizer/ascend/format_type/convert_unsupported_transnode_to_aicpu.cc b/mindspore/ccsrc/backend/optimizer/ascend/format_type/convert_unsupported_transnode_to_aicpu.cc index 48948dca06e..7b4e525c352 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/format_type/convert_unsupported_transnode_to_aicpu.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/format_type/convert_unsupported_transnode_to_aicpu.cc @@ -18,7 +18,7 @@ #include #include "backend/session/anf_runtime_algorithm.h" #include "backend/kernel_compiler/kernel_build_info.h" -#include "backend/kernel_compiler/kernel_query.h" + namespace mindspore { namespace opt { const BaseRef ConvertUnSupportNodeToAICPU::DefinePattern() const { diff --git a/mindspore/ccsrc/backend/optimizer/ascend/format_type/insert_cast.cc b/mindspore/ccsrc/backend/optimizer/ascend/format_type/insert_cast.cc index 11dcb148556..0029d6c56d7 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/format_type/insert_cast.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/format_type/insert_cast.cc @@ -19,12 +19,9 @@ #include #include #include - -#include "runtime/device/kernel_info.h" #include "backend/optimizer/ascend/ascend_helper.h" #include "backend/optimizer/common/helper.h" #include "backend/kernel_compiler/kernel_build_info.h" -#include "backend/kernel_compiler/oplib/oplib.h" #include "backend/session/anf_runtime_algorithm.h" #include "backend/session/kernel_graph.h" #include "utils/utils.h" diff --git a/mindspore/ccsrc/backend/optimizer/ascend/format_type/insert_trans_op.cc b/mindspore/ccsrc/backend/optimizer/ascend/format_type/insert_trans_op.cc index 9788db6773f..88afc5444e4 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/format_type/insert_trans_op.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/format_type/insert_trans_op.cc @@ -20,8 +20,6 @@ #include "utils/utils.h" #include "backend/optimizer/ascend/ascend_helper.h" #include "backend/session/anf_runtime_algorithm.h" -#include "runtime/device/kernel_info.h" -#include "backend/kernel_compiler/oplib/oplib.h" #include "utils/ms_context.h" namespace mindspore { diff --git a/mindspore/ccsrc/backend/optimizer/ascend/format_type/insert_transdata_for_runop.cc b/mindspore/ccsrc/backend/optimizer/ascend/format_type/insert_transdata_for_runop.cc index d0b92b250da..a8289d8fef4 100644 --- 
a/mindspore/ccsrc/backend/optimizer/ascend/format_type/insert_transdata_for_runop.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/format_type/insert_transdata_for_runop.cc @@ -19,8 +19,6 @@ #include "utils/utils.h" #include "backend/optimizer/ascend/ascend_helper.h" #include "backend/session/anf_runtime_algorithm.h" -#include "runtime/device/kernel_info.h" -#include "backend/kernel_compiler/oplib/oplib.h" namespace mindspore { namespace opt { diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/batch_norm_grad_split.cc b/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/batch_norm_grad_split.cc index 260fc90f3d1..e63249ca465 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/batch_norm_grad_split.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/batch_norm_grad_split.cc @@ -16,12 +16,10 @@ #include "backend/optimizer/ascend/ir_fission/batch_norm_grad_split.h" #include -#include #include #include "utils/utils.h" #include "utils/ms_context.h" -#include "utils/ms_utils.h" #include "backend/optimizer/common/helper.h" #include "runtime/device/kernel_info.h" #include "backend/session/anf_runtime_algorithm.h" diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/bn_grad_split.cc b/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/bn_grad_split.cc index 45d629d4e90..72ad8d4366a 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/bn_grad_split.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/bn_grad_split.cc @@ -16,12 +16,10 @@ #include "backend/optimizer/ascend/ir_fission/bn_grad_split.h" #include -#include #include #include "utils/utils.h" #include "utils/ms_context.h" -#include "utils/ms_utils.h" #include "backend/optimizer/common/helper.h" #include "runtime/device/kernel_info.h" #include "backend/session/anf_runtime_algorithm.h" diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/bn_split.cc b/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/bn_split.cc index 8ebbd9269ae..36dc2a30dad 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/bn_split.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/bn_split.cc @@ -16,7 +16,6 @@ #include "backend/optimizer/ascend/ir_fission/bn_split.h" #include -#include #include #include "utils/utils.h" diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/layer_norm_grad_split.cc b/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/layer_norm_grad_split.cc index 15e04b697b4..1eca3298e44 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/layer_norm_grad_split.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/layer_norm_grad_split.cc @@ -17,12 +17,10 @@ #include #include -#include #include "backend/session/anf_runtime_algorithm.h" #include "runtime/device/kernel_info.h" #include "ir/primitive.h" -#include "utils/ms_utils.h" #include "utils/utils.h" namespace mindspore { diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/single_batch_norm_fission.cc b/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/single_batch_norm_fission.cc index 133d51734fe..9df2de87ce6 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/single_batch_norm_fission.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/single_batch_norm_fission.cc @@ -16,7 +16,6 @@ #include "backend/optimizer/ascend/ir_fission/single_batch_norm_fission.h" #include #include -#include #include "backend/session/anf_runtime_algorithm.h" #include "backend/optimizer/common/helper.h" diff --git 
a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/add_input_to_output.cc b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/add_input_to_output.cc index cc58d2b0574..52b2deaa4ef 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/add_input_to_output.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/add_input_to_output.cc @@ -15,7 +15,6 @@ */ #include "backend/optimizer/ascend/ir_fusion/add_input_to_output.h" #include -#include #include "backend/optimizer/ascend/ir_fusion/input_to_output_registry.h" #include "backend/session/anf_runtime_algorithm.h" #include "backend/kernel_compiler/oplib/oplib.h" diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/clip_by_norm_no_div_square_sum_fusion.cc b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/clip_by_norm_no_div_square_sum_fusion.cc index de6d4e6a51b..6b123c63598 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/clip_by_norm_no_div_square_sum_fusion.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/clip_by_norm_no_div_square_sum_fusion.cc @@ -17,11 +17,9 @@ #include #include -#include #include "backend/session/anf_runtime_algorithm.h" #include "ir/primitive.h" -#include "utils/ms_utils.h" #include "utils/utils.h" namespace mindspore { diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_next_mv_rule.cc b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_next_mv_rule.cc index fd9fd31f12f..e55f16e9915 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_next_mv_rule.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_next_mv_rule.cc @@ -17,10 +17,7 @@ #include "backend/optimizer/ascend/ir_fusion/lamb_next_mv_rule.h" #include #include -#include #include -#include -#include "backend/session/anf_runtime_algorithm.h" #include "utils/utils.h" #include "backend/optimizer/common/helper.h" #include "frontend/operator/ops.h" diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_update_with_lr_rule_fusion.cc b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_update_with_lr_rule_fusion.cc index 550301dc114..fcdb4763f1d 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_update_with_lr_rule_fusion.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_update_with_lr_rule_fusion.cc @@ -17,11 +17,9 @@ #include #include -#include #include "backend/session/anf_runtime_algorithm.h" #include "ir/primitive.h" -#include "utils/ms_utils.h" #include "utils/utils.h" namespace mindspore { diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_update_with_lr_v2.cc b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_update_with_lr_v2.cc index 59511a611ac..ccbfd3adb9c 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_update_with_lr_v2.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_update_with_lr_v2.cc @@ -17,7 +17,6 @@ #include "backend/optimizer/ascend/ir_fusion/lamb_update_with_lr_v2.h" #include #include -#include #include "utils/utils.h" #include "frontend/operator/ops.h" diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/mul_add_fusion.cc b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/mul_add_fusion.cc index 2d766891a06..c0b9f634133 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/mul_add_fusion.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/mul_add_fusion.cc @@ -17,7 +17,6 @@ #include #include #include -#include #include #include "backend/session/anf_runtime_algorithm.h" #include 
"frontend/optimizer/opt.h" diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/mul_addn_fusion.cc b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/mul_addn_fusion.cc index 3567864e2fe..73f4e612410 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/mul_addn_fusion.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/mul_addn_fusion.cc @@ -16,8 +16,6 @@ #include "backend/optimizer/ascend/ir_fusion/mul_addn_fusion.h" #include #include -#include -#include #include #include "backend/session/anf_runtime_algorithm.h" #include "frontend/optimizer/opt.h" diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/parameter_and_transop_fusion.cc b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/parameter_and_transop_fusion.cc index 0c2667e4d9d..73ec39cffa2 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/parameter_and_transop_fusion.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/parameter_and_transop_fusion.cc @@ -21,8 +21,6 @@ #include "frontend/operator/ops.h" #include "runtime/device/kernel_info.h" #include "backend/optimizer/common/helper.h" -#include "backend/optimizer/common/optimizer.h" -#include "backend/optimizer/ascend/ascend_helper.h" namespace mindspore { namespace opt { diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/refresh_parameter_format.cc b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/refresh_parameter_format.cc index ebaa429ebfe..b6b179b6250 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/refresh_parameter_format.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/refresh_parameter_format.cc @@ -17,11 +17,8 @@ #include "backend/optimizer/ascend/ir_fusion/refresh_parameter_format.h" #include "backend/session/anf_runtime_algorithm.h" #include "utils/utils.h" -#include "frontend/operator/ops.h" #include "runtime/device/kernel_info.h" #include "backend/optimizer/common/helper.h" -#include "backend/optimizer/common/optimizer.h" -#include "backend/optimizer/ascend/ascend_helper.h" namespace mindspore { namespace opt { diff --git a/mindspore/ccsrc/backend/optimizer/common/node_pass.cc b/mindspore/ccsrc/backend/optimizer/common/node_pass.cc index 16f5284a570..581361f4ae3 100644 --- a/mindspore/ccsrc/backend/optimizer/common/node_pass.cc +++ b/mindspore/ccsrc/backend/optimizer/common/node_pass.cc @@ -17,8 +17,6 @@ #include #include -#include - #include "ir/anf.h" #include "ir/func_graph.h" #include "ir/manager.h" diff --git a/mindspore/ccsrc/backend/optimizer/common/pass_manager.cc b/mindspore/ccsrc/backend/optimizer/common/pass_manager.cc index a0f03e85ad5..fc83d3e389a 100644 --- a/mindspore/ccsrc/backend/optimizer/common/pass_manager.cc +++ b/mindspore/ccsrc/backend/optimizer/common/pass_manager.cc @@ -16,15 +16,11 @@ #include "backend/optimizer/common/pass_manager.h" #include -#include #include #include -#include - #include "ir/anf.h" #include "ir/func_graph.h" #include "ir/manager.h" -#include "utils/utils.h" #include "utils/ms_context.h" #include "debug/anf_ir_dump.h" diff --git a/mindspore/ccsrc/backend/optimizer/common/pattern_engine.cc b/mindspore/ccsrc/backend/optimizer/common/pattern_engine.cc index bd4efd82ef8..21da6b3fb72 100644 --- a/mindspore/ccsrc/backend/optimizer/common/pattern_engine.cc +++ b/mindspore/ccsrc/backend/optimizer/common/pattern_engine.cc @@ -21,10 +21,7 @@ #include #include #include -#include - #include "frontend/optimizer/opt.h" - #include "ir/anf.h" #include "utils/convert_utils_base.h" #include "utils/overload.h" diff --git 
diff --git a/mindspore/ccsrc/backend/optimizer/common/visit.cc b/mindspore/ccsrc/backend/optimizer/common/visit.cc
index d0b52609f82..46b5b394970 100644
--- a/mindspore/ccsrc/backend/optimizer/common/visit.cc
+++ b/mindspore/ccsrc/backend/optimizer/common/visit.cc
@@ -21,8 +21,6 @@
 #include
 #include
 #include
-#include
-
 #include "backend/optimizer/common/pattern_engine.h"
 #include "utils/any.h"
 #include "ir/anf.h"
diff --git a/mindspore/ccsrc/backend/optimizer/mem_reuse/kernel_refcount.cc b/mindspore/ccsrc/backend/optimizer/mem_reuse/kernel_refcount.cc
index b531b0caa58..bc7ad78d70b 100644
--- a/mindspore/ccsrc/backend/optimizer/mem_reuse/kernel_refcount.cc
+++ b/mindspore/ccsrc/backend/optimizer/mem_reuse/kernel_refcount.cc
@@ -15,7 +15,7 @@
 */
 #include "backend/optimizer/mem_reuse/kernel_refcount.h"
 #include
-#include "utils/log_adapter.h"
+
 namespace mindspore {
 namespace memreuse {
 /**
diff --git a/mindspore/ccsrc/backend/optimizer/pass/add_atomic_clean.cc b/mindspore/ccsrc/backend/optimizer/pass/add_atomic_clean.cc
index 1bdf464d266..e5612aefa5b 100644
--- a/mindspore/ccsrc/backend/optimizer/pass/add_atomic_clean.cc
+++ b/mindspore/ccsrc/backend/optimizer/pass/add_atomic_clean.cc
@@ -17,10 +17,8 @@
 #include "backend/optimizer/pass/add_atomic_clean.h"
 #include
 #include
-#include
 #include "frontend/operator/ops.h"
 #include "utils/utils.h"
-#include "ir/graph_utils.h"
 #include "utils/log_adapter.h"
 #include "backend/session/anf_runtime_algorithm.h"
 #include "backend/session/kernel_graph.h"
diff --git a/mindspore/ccsrc/backend/optimizer/pass/convert_const_input_to_attr.cc b/mindspore/ccsrc/backend/optimizer/pass/convert_const_input_to_attr.cc
index 153faea9271..afc1a5e9e25 100644
--- a/mindspore/ccsrc/backend/optimizer/pass/convert_const_input_to_attr.cc
+++ b/mindspore/ccsrc/backend/optimizer/pass/convert_const_input_to_attr.cc
@@ -17,7 +17,6 @@
 #include
 #include
-#include
 #include
 #include "backend/optimizer/pass/const_input_to_attr_registry.h"
diff --git a/mindspore/ccsrc/backend/optimizer/pass/convert_const_input_to_tensor_input.cc b/mindspore/ccsrc/backend/optimizer/pass/convert_const_input_to_tensor_input.cc
index c4e3f38befc..05e86cdff79 100644
--- a/mindspore/ccsrc/backend/optimizer/pass/convert_const_input_to_tensor_input.cc
+++ b/mindspore/ccsrc/backend/optimizer/pass/convert_const_input_to_tensor_input.cc
@@ -24,7 +24,6 @@
 #include "backend/session/anf_runtime_algorithm.h"
 #include "backend/session/kernel_graph.h"
 #include "backend/kernel_compiler/common_utils.h"
-#include "runtime/device/kernel_info.h"
 namespace mindspore {
 namespace opt {
diff --git a/mindspore/ccsrc/backend/optimizer/pass/convert_const_scalar_to_tensor.cc b/mindspore/ccsrc/backend/optimizer/pass/convert_const_scalar_to_tensor.cc
index 61825854bcf..7ba67ff74f2 100644
--- a/mindspore/ccsrc/backend/optimizer/pass/convert_const_scalar_to_tensor.cc
+++ b/mindspore/ccsrc/backend/optimizer/pass/convert_const_scalar_to_tensor.cc
@@ -14,18 +14,12 @@
 * limitations under the License.
 */
 #include "backend/optimizer/pass/convert_const_scalar_to_tensor.h"
-
-#include
 #include
 #include
-
-#include "ir/graph_utils.h"
 #include "utils/convert_utils.h"
 #include "backend/optimizer/common/helper.h"
 #include "backend/session/anf_runtime_algorithm.h"
 #include "backend/session/kernel_graph.h"
-#include "backend/kernel_compiler/common_utils.h"
-#include "runtime/device/kernel_info.h"
 namespace mindspore {
 namespace opt {
diff --git a/mindspore/ccsrc/backend/optimizer/pass/eliminate_redundant_op.cc b/mindspore/ccsrc/backend/optimizer/pass/eliminate_redundant_op.cc
index 3ef912bcecd..a22045f8471 100644
--- a/mindspore/ccsrc/backend/optimizer/pass/eliminate_redundant_op.cc
+++ b/mindspore/ccsrc/backend/optimizer/pass/eliminate_redundant_op.cc
@@ -18,7 +18,6 @@
 #include
 #include
 #include
-#include
 #include "backend/session/anf_runtime_algorithm.h"
 #include "utils/utils.h"
 #include "backend/optimizer/common/helper.h"
diff --git a/mindspore/ccsrc/backend/optimizer/pass/fuse_basic.cc b/mindspore/ccsrc/backend/optimizer/pass/fuse_basic.cc
index 59f7e0f4018..1deb1bc96aa 100644
--- a/mindspore/ccsrc/backend/optimizer/pass/fuse_basic.cc
+++ b/mindspore/ccsrc/backend/optimizer/pass/fuse_basic.cc
@@ -25,12 +25,10 @@
 #include
 #include "frontend/operator/ops.h"
-#include "utils/utils.h"
 #include "ir/graph_utils.h"
 #include "backend/optimizer/common/helper.h"
 #include "backend/session/anf_runtime_algorithm.h"
 #include "vm/segment_runner.h"
-#include "debug/draw.h"
 #include "debug/anf_ir_dump.h"
 #include "ir/func_graph_cloner.h"
diff --git a/mindspore/ccsrc/backend/optimizer/pass/fuse_graph_kernel.cc b/mindspore/ccsrc/backend/optimizer/pass/fuse_graph_kernel.cc
index 238d3573d0f..a564f0edff2 100644
--- a/mindspore/ccsrc/backend/optimizer/pass/fuse_graph_kernel.cc
+++ b/mindspore/ccsrc/backend/optimizer/pass/fuse_graph_kernel.cc
@@ -19,7 +19,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -31,7 +30,6 @@
 #include "backend/optimizer/common/helper.h"
 #include "backend/session/anf_runtime_algorithm.h"
 #include "vm/segment_runner.h"
-#include "debug/draw.h"
 #include "debug/anf_ir_dump.h"
 #include "ir/func_graph_cloner.h"
diff --git a/mindspore/ccsrc/backend/session/anf_runtime_algorithm.cc b/mindspore/ccsrc/backend/session/anf_runtime_algorithm.cc
index 06c1219e93d..ab40932c71c 100644
--- a/mindspore/ccsrc/backend/session/anf_runtime_algorithm.cc
+++ b/mindspore/ccsrc/backend/session/anf_runtime_algorithm.cc
@@ -27,7 +27,6 @@
 #include "backend/optimizer/common/helper.h"
 #include "backend/kernel_compiler/kernel.h"
 #include "backend/kernel_compiler/kernel_build_info.h"
-#include "utils/ms_utils.h"
 #include "common/trans.h"
 namespace mindspore {
diff --git a/mindspore/ccsrc/backend/session/ascend_inference_session.cc b/mindspore/ccsrc/backend/session/ascend_inference_session.cc
index 2168b9f15fd..267cd9331c6 100644
--- a/mindspore/ccsrc/backend/session/ascend_inference_session.cc
+++ b/mindspore/ccsrc/backend/session/ascend_inference_session.cc
@@ -16,7 +16,6 @@
 #include
 #include "backend/session/ascend_inference_session.h"
-#include "frontend/operator/ops.h"
 #include "ir/tensor.h"
 #include "ir/anf.h"
 #include "ir/param_info.h"
diff --git a/mindspore/ccsrc/backend/session/ascend_session.cc b/mindspore/ccsrc/backend/session/ascend_session.cc
index 84fc142516f..298a5bbbd2c 100644
--- a/mindspore/ccsrc/backend/session/ascend_session.cc
+++ b/mindspore/ccsrc/backend/session/ascend_session.cc
@@ -28,17 +28,13 @@
 #include "runtime/device/ascend/kernel_select_ascend.h"
"runtime/device/ascend/kernel_build_ascend.h" #include "runtime/device/ascend/ascend_kernel_runtime.h" -#include "runtime/device/ascend/ascend_device_address.h" #include "backend/optimizer/ascend/ascend_backend_optimization.h" #include "backend/optimizer/common/common_backend_optimization.h" #include "runtime/device/kernel_adjust.h" #include "runtime/device/ascend/ascend_stream_assign.h" -#include "runtime/device/ascend/ascend_label_assign.h" #include "backend/session/anf_runtime_algorithm.h" -#include "ir/scalar.h" #include "debug/anf_ir_dump.h" #include "debug/anf_ir_utils.h" -#include "debug/draw.h" #include "utils/ms_utils.h" #include "backend/optimizer/common/helper.h" #include "runtime/device/kernel_runtime_manager.h" diff --git a/mindspore/ccsrc/backend/session/cpu_session.cc b/mindspore/ccsrc/backend/session/cpu_session.cc index 4ba62e53b78..9a9b404f117 100644 --- a/mindspore/ccsrc/backend/session/cpu_session.cc +++ b/mindspore/ccsrc/backend/session/cpu_session.cc @@ -17,9 +17,7 @@ #include "backend/session/cpu_session.h" #include #include -#include "ir/tensor.h" #include "ir/anf.h" -#include "backend/kernel_compiler/kernel.h" #include "utils/ms_utils.h" #include "backend/session/anf_runtime_algorithm.h" #include "runtime/device/kernel_runtime.h" diff --git a/mindspore/ccsrc/backend/session/kernel_build_client.cc b/mindspore/ccsrc/backend/session/kernel_build_client.cc index e12b6de9bfb..db76b9f020a 100644 --- a/mindspore/ccsrc/backend/session/kernel_build_client.cc +++ b/mindspore/ccsrc/backend/session/kernel_build_client.cc @@ -15,8 +15,6 @@ */ #include "backend/session/kernel_build_client.h" - -#include #include namespace mindspore { diff --git a/mindspore/ccsrc/backend/session/session_basic.cc b/mindspore/ccsrc/backend/session/session_basic.cc index 7028cb79a31..5ae230ed9a7 100644 --- a/mindspore/ccsrc/backend/session/session_basic.cc +++ b/mindspore/ccsrc/backend/session/session_basic.cc @@ -17,7 +17,6 @@ #include #include #include -#include #include "pipeline/jit/parse/data_converter.h" #include "ir/manager.h" #include "ir/param_info.h" @@ -26,9 +25,7 @@ #include "common/trans.h" #include "utils/config_manager.h" #include "backend/session/anf_runtime_algorithm.h" -#include "backend/kernel_compiler/oplib/oplib.h" #include "backend/optimizer/common/common_backend_optimization.h" -#include "backend/optimizer/pass/const_input_to_attr_registry.h" #include "backend/optimizer/common/helper.h" #include "utils/ms_utils.h" #include "ir/dtype.h" diff --git a/mindspore/ccsrc/backend/session/session_factory.cc b/mindspore/ccsrc/backend/session/session_factory.cc index 8a8f9a9cea1..e0a1865020c 100644 --- a/mindspore/ccsrc/backend/session/session_factory.cc +++ b/mindspore/ccsrc/backend/session/session_factory.cc @@ -15,8 +15,8 @@ */ #include "backend/session/session_factory.h" #include -#include #include + namespace mindspore { namespace session { SessionFactory &SessionFactory::Get() { diff --git a/mindspore/ccsrc/debug/anf_ir_dump.cc b/mindspore/ccsrc/debug/anf_ir_dump.cc index a1cc80f96ba..c54df9766ec 100644 --- a/mindspore/ccsrc/debug/anf_ir_dump.cc +++ b/mindspore/ccsrc/debug/anf_ir_dump.cc @@ -20,8 +20,6 @@ #include #include #include -#include - #include "ir/primitive.h" #include "ir/func_graph.h" #include "runtime/device/kernel_info.h" diff --git a/mindspore/ccsrc/debug/anf_ir_utils.cc b/mindspore/ccsrc/debug/anf_ir_utils.cc index 34b2875257a..d2049b1afec 100644 --- a/mindspore/ccsrc/debug/anf_ir_utils.cc +++ b/mindspore/ccsrc/debug/anf_ir_utils.cc @@ -20,9 +20,7 @@ #include 
 #include
 #include
-#include
 #include
-
 #include "ir/graph_utils.h"
 #include "utils/symbolic.h"
 #include "ir/meta_func_graph.h"
diff --git a/mindspore/ccsrc/debug/common.cc b/mindspore/ccsrc/debug/common.cc
index 1683a3f8033..36b36753387 100644
--- a/mindspore/ccsrc/debug/common.cc
+++ b/mindspore/ccsrc/debug/common.cc
@@ -21,7 +21,6 @@
 #include "utils/system/env.h"
 #include "utils/system/file_system.h"
 #include "utils/log_adapter.h"
-#include "utils/ms_context.h"
 namespace mindspore {
 std::optional Common::GetRealPath(const std::string &input_path) {
diff --git a/mindspore/ccsrc/debug/draw.cc b/mindspore/ccsrc/debug/draw.cc
index eac616510ad..3305348572b 100644
--- a/mindspore/ccsrc/debug/draw.cc
+++ b/mindspore/ccsrc/debug/draw.cc
@@ -15,14 +15,10 @@
 */
 #include "debug/draw.h"
-
-#include
 #include
 #include
-#include
 #include
 #include
-
 #include "ir/meta_func_graph.h"
 #include "ir/param_info.h"
 #include "ir/primitive.h"
diff --git a/mindspore/ccsrc/debug/dump_proto.cc b/mindspore/ccsrc/debug/dump_proto.cc
index 9594fa6b52b..1f266123ec6 100644
--- a/mindspore/ccsrc/debug/dump_proto.cc
+++ b/mindspore/ccsrc/debug/dump_proto.cc
@@ -14,11 +14,8 @@
 * limitations under the License.
 */
-#include
 #include
 #include
-#include
-#include
 #include
 #include
diff --git a/mindspore/ccsrc/frontend/operator/cc_implementations.cc b/mindspore/ccsrc/frontend/operator/cc_implementations.cc
index 3526a001f99..e817b420b68 100644
--- a/mindspore/ccsrc/frontend/operator/cc_implementations.cc
+++ b/mindspore/ccsrc/frontend/operator/cc_implementations.cc
@@ -15,12 +15,10 @@
 */
 #include "frontend/operator/cc_implementations.h"
-#include
 #include
 #include
 #include
 #include
-#include "utils/misc.h"
 #include "utils/log_adapter.h"
 #include "utils/convert_utils.h"
 #include "utils/ms_utils.h"
diff --git a/mindspore/ccsrc/frontend/operator/composite/composite.cc b/mindspore/ccsrc/frontend/operator/composite/composite.cc
index b7fa2cf67dc..4f0b42952a7 100644
--- a/mindspore/ccsrc/frontend/operator/composite/composite.cc
+++ b/mindspore/ccsrc/frontend/operator/composite/composite.cc
@@ -32,7 +32,6 @@
 #include "frontend/optimizer/opt.h"
 #include "utils/symbolic.h"
 #include "pybind_api/api_register.h"
-#include "./common.h"
 #include "ir/signature.h"
 #include "debug/trace.h"
diff --git a/mindspore/ccsrc/frontend/operator/composite/do_signature.cc b/mindspore/ccsrc/frontend/operator/composite/do_signature.cc
index b5d17d27644..50554a37b5e 100644
--- a/mindspore/ccsrc/frontend/operator/composite/do_signature.cc
+++ b/mindspore/ccsrc/frontend/operator/composite/do_signature.cc
@@ -24,7 +24,6 @@
 #include "abstract/param_validator.h"
 #include "frontend/operator/cc_implementations.h"
 #include "frontend/optimizer/opt.h"
-#include "utils/symbolic.h"
 #include "./common.h"
 #include "pybind_api/api_register.h"
diff --git a/mindspore/ccsrc/frontend/operator/composite/map.cc b/mindspore/ccsrc/frontend/operator/composite/map.cc
index f49c19aa9cf..a402d30be6a 100644
--- a/mindspore/ccsrc/frontend/operator/composite/map.cc
+++ b/mindspore/ccsrc/frontend/operator/composite/map.cc
@@ -28,7 +28,6 @@
 #include "pybind_api/api_register.h"
 #include "debug/trace.h"
 #include "frontend/operator/ops.h"
-#include "./common.h"
 namespace mindspore {
 // namespace to support composite operators definition
diff --git a/mindspore/ccsrc/frontend/operator/composite/multitype_funcgraph.cc b/mindspore/ccsrc/frontend/operator/composite/multitype_funcgraph.cc
index e9418bb5dbb..d7b7e18d4ee 100644
--- a/mindspore/ccsrc/frontend/operator/composite/multitype_funcgraph.cc
+++ b/mindspore/ccsrc/frontend/operator/composite/multitype_funcgraph.cc
@@ -18,20 +18,13 @@
 */
 #include "frontend/operator/composite/multitype_funcgraph.h"
-#include
 #include
 #include
-#include "ir/anf.h"
-#include "ir/func_graph.h"
-#include "abstract/abstract_value.h"
 #include "abstract/abstract_function.h"
 #include "abstract/dshape.h"
-#include "abstract/param_validator.h"
-#include "frontend/operator/cc_implementations.h"
 #include "frontend/optimizer/opt.h"
 #include "utils/ms_context.h"
-#include "utils/symbolic.h"
 #include "pybind_api/api_register.h"
 #include "./common.h"
 #include "ir/signature.h"
diff --git a/mindspore/ccsrc/frontend/operator/composite/unpack_call.cc b/mindspore/ccsrc/frontend/operator/composite/unpack_call.cc
index 1b6f358edd5..27f90f76174 100644
--- a/mindspore/ccsrc/frontend/operator/composite/unpack_call.cc
+++ b/mindspore/ccsrc/frontend/operator/composite/unpack_call.cc
@@ -21,11 +21,9 @@
 #include "./common.h"
 #include "abstract/abstract_value.h"
 #include "abstract/dshape.h"
-#include "abstract/param_validator.h"
 #include "frontend/operator/cc_implementations.h"
 #include "ir/anf.h"
 #include "frontend/optimizer/opt.h"
-#include "utils/symbolic.h"
 #include "pybind_api/api_register.h"
 namespace mindspore {
diff --git a/mindspore/ccsrc/frontend/operator/ops_extends.cc b/mindspore/ccsrc/frontend/operator/ops_extends.cc
index c406682c3ec..0cab7fa7f5d 100755
--- a/mindspore/ccsrc/frontend/operator/ops_extends.cc
+++ b/mindspore/ccsrc/frontend/operator/ops_extends.cc
@@ -15,10 +15,8 @@
 */
 #include "frontend/operator/ops.h"
-#include
 #include
 #include "pipeline/jit/parse/python_adapter.h"
-#include "pipeline/jit/parse/data_converter.h"
 namespace mindspore {
 // namespace to support primitive operators
diff --git a/mindspore/ccsrc/frontend/operator/prim_to_function.cc b/mindspore/ccsrc/frontend/operator/prim_to_function.cc
index 7b9592e80e2..f135a2c86cb 100644
--- a/mindspore/ccsrc/frontend/operator/prim_to_function.cc
+++ b/mindspore/ccsrc/frontend/operator/prim_to_function.cc
@@ -15,9 +15,6 @@
 */
 #include "frontend/operator/prim_to_function.h"
-#include
-#include
-#include
 namespace mindspore {
 // namespace to support prim related definition
diff --git a/mindspore/ccsrc/frontend/optimizer/ad/adjoint.cc b/mindspore/ccsrc/frontend/optimizer/ad/adjoint.cc
index 60ccf28df46..559836e4fb2 100644
--- a/mindspore/ccsrc/frontend/optimizer/ad/adjoint.cc
+++ b/mindspore/ccsrc/frontend/optimizer/ad/adjoint.cc
@@ -17,7 +17,6 @@
 #include "frontend/optimizer/ad/adjoint.h"
 #include
-#include
 #include "ir/anf.h"
 #include "frontend/optimizer/ad/dfunctor.h"
diff --git a/mindspore/ccsrc/frontend/optimizer/ad/dfunctor.cc b/mindspore/ccsrc/frontend/optimizer/ad/dfunctor.cc
index 5b66f5a933a..ceccf0feb9d 100644
--- a/mindspore/ccsrc/frontend/optimizer/ad/dfunctor.cc
+++ b/mindspore/ccsrc/frontend/optimizer/ad/dfunctor.cc
@@ -21,19 +21,14 @@
 #include
 #include "ir/anf.h"
-#include "ir/meta_func_graph.h"
 #include "utils/info.h"
 #include "ir/func_graph_cloner.h"
 #include "ir/manager.h"
 #include "pipeline/jit/resource.h"
-#include "pipeline/jit/parse/parse.h"
 #include "frontend/optimizer/ad/adjoint.h"
-#include "frontend/optimizer/opt.h"
 #include "frontend/operator/ops.h"
-#include "frontend/operator/composite/composite.h"
 #include "utils/symbolic.h"
 #include "utils/ms_context.h"
-#include "./common.h"
 namespace mindspore {
 namespace ad {
diff --git a/mindspore/ccsrc/frontend/optimizer/ad/grad.cc b/mindspore/ccsrc/frontend/optimizer/ad/grad.cc
index b11d063db64..bc08bd94bfe 100644
--- a/mindspore/ccsrc/frontend/optimizer/ad/grad.cc
+++ b/mindspore/ccsrc/frontend/optimizer/ad/grad.cc
@@ -19,7 +19,6 @@
 #include "ir/func_graph_cloner.h"
 #include "utils/ms_context.h"
 #include "utils/symbolic.h"
-#include "ir/graph_utils.h"
 namespace mindspore {
 namespace ad {
diff --git a/mindspore/ccsrc/frontend/optimizer/ad/kprim.cc b/mindspore/ccsrc/frontend/optimizer/ad/kprim.cc
index 2d22012aab5..1b669865806 100644
--- a/mindspore/ccsrc/frontend/optimizer/ad/kprim.cc
+++ b/mindspore/ccsrc/frontend/optimizer/ad/kprim.cc
@@ -27,7 +27,6 @@
 #include "pipeline/jit/resource.h"
 #include "pipeline/jit/parse/parse.h"
 #include "frontend/optimizer/ad/dfunctor.h"
-#include "frontend/optimizer/opt.h"
 #include "frontend/operator/ops.h"
 #include "frontend/operator/composite/composite.h"
 #include "utils/symbolic.h"
@@ -36,8 +35,6 @@
 #include "utils/info.h"
 #include "debug/trace.h"
-#include "./common.h"
-
 namespace mindspore {
 namespace ad {
 using PatternListType = std::initializer_list;
diff --git a/mindspore/ccsrc/frontend/optimizer/clean.cc b/mindspore/ccsrc/frontend/optimizer/clean.cc
index edff3a8d790..87ae0b78dec 100644
--- a/mindspore/ccsrc/frontend/optimizer/clean.cc
+++ b/mindspore/ccsrc/frontend/optimizer/clean.cc
@@ -17,11 +17,9 @@
 */
 #include "frontend/optimizer/clean.h"
-#include
 #include
 #include
 #include
-#include
 #include "./common.h"
 #include "debug/trace.h"
 #include "frontend/operator/composite/composite.h"
diff --git a/mindspore/ccsrc/frontend/optimizer/control_depend.cc b/mindspore/ccsrc/frontend/optimizer/control_depend.cc
index 8cc9bdb7f4f..871cc7d0033 100644
--- a/mindspore/ccsrc/frontend/optimizer/control_depend.cc
+++ b/mindspore/ccsrc/frontend/optimizer/control_depend.cc
@@ -20,7 +20,6 @@
 #include
 #include
 #include
-#include
 #include "frontend/optimizer/optimizer.h"
diff --git a/mindspore/ccsrc/frontend/optimizer/cse.cc b/mindspore/ccsrc/frontend/optimizer/cse.cc
index c80b54097d8..9e287103cf9 100644
--- a/mindspore/ccsrc/frontend/optimizer/cse.cc
+++ b/mindspore/ccsrc/frontend/optimizer/cse.cc
@@ -20,7 +20,6 @@
 #include
 #include
 #include
-#include "./common.h"
 namespace mindspore {
 /* namespace to support opt */
diff --git a/mindspore/ccsrc/frontend/optimizer/graph_kernel_reuse.cc b/mindspore/ccsrc/frontend/optimizer/graph_kernel_reuse.cc
index 1bc00122d18..ce3a5d434ed 100644
--- a/mindspore/ccsrc/frontend/optimizer/graph_kernel_reuse.cc
+++ b/mindspore/ccsrc/frontend/optimizer/graph_kernel_reuse.cc
@@ -18,7 +18,6 @@
 #include
 #include
 #include
-#include "./common.h"
 #include "ir/graph_utils.h"
 namespace mindspore {
diff --git a/mindspore/ccsrc/frontend/optimizer/irpass.cc b/mindspore/ccsrc/frontend/optimizer/irpass.cc
index 3cfcfefc927..acdcb93083a 100644
--- a/mindspore/ccsrc/frontend/optimizer/irpass.cc
+++ b/mindspore/ccsrc/frontend/optimizer/irpass.cc
@@ -14,8 +14,6 @@
 * limitations under the License.
 */
-#include
-
 #include "frontend/optimizer/irpass.h"
 #include "frontend/optimizer/irpass/arithmetic_simplify.h"
 #include "frontend/optimizer/irpass/branch_culling.h"
diff --git a/mindspore/ccsrc/frontend/optimizer/irpass/branch_culling.cc b/mindspore/ccsrc/frontend/optimizer/irpass/branch_culling.cc
index dc580f6b633..01f5b2f1ba8 100644
--- a/mindspore/ccsrc/frontend/optimizer/irpass/branch_culling.cc
+++ b/mindspore/ccsrc/frontend/optimizer/irpass/branch_culling.cc
@@ -21,7 +21,6 @@
 #include
 #include "ir/func_graph.h"
-#include "ir/func_graph_cloner.h"
 #include "frontend/operator/ops.h"
 namespace mindspore {
diff --git a/mindspore/ccsrc/frontend/optimizer/irpass/cast_eliminate.cc b/mindspore/ccsrc/frontend/optimizer/irpass/cast_eliminate.cc
index e556402c44c..069012f77f8 100644
--- a/mindspore/ccsrc/frontend/optimizer/irpass/cast_eliminate.cc
+++ b/mindspore/ccsrc/frontend/optimizer/irpass/cast_eliminate.cc
@@ -16,7 +16,6 @@
 #include "frontend/optimizer/irpass/cast_eliminate.h"
 #include "frontend/optimizer/irpass.h"
-#include "frontend/optimizer/optimizer.h"
 #include "frontend/optimizer/anf_visitor.h"
 #include "frontend/operator/ops.h"
 #include "ir/func_graph.h"
diff --git a/mindspore/ccsrc/frontend/optimizer/irpass/grad_var_prepare.cc b/mindspore/ccsrc/frontend/optimizer/irpass/grad_var_prepare.cc
index 89500844370..86cf60f84a3 100644
--- a/mindspore/ccsrc/frontend/optimizer/irpass/grad_var_prepare.cc
+++ b/mindspore/ccsrc/frontend/optimizer/irpass/grad_var_prepare.cc
@@ -23,10 +23,8 @@
 #include "frontend/operator/composite/composite.h"
 #include "frontend/operator/ops.h"
 #include "frontend/optimizer/irpass.h"
-#include "frontend/optimizer/optimizer.h"
 #include "frontend/optimizer/anf_visitor.h"
 #include "ir/func_graph.h"
-#include "ir/func_graph_cloner.h"
 namespace mindspore {
 namespace opt {
diff --git a/mindspore/ccsrc/frontend/optimizer/irpass/gradient_eliminate.cc b/mindspore/ccsrc/frontend/optimizer/irpass/gradient_eliminate.cc
index 0d98cffa371..671ca63a356 100644
--- a/mindspore/ccsrc/frontend/optimizer/irpass/gradient_eliminate.cc
+++ b/mindspore/ccsrc/frontend/optimizer/irpass/gradient_eliminate.cc
@@ -16,8 +16,6 @@
 #include "frontend/optimizer/irpass/gradient_eliminate.h"
-#include
-
 namespace mindspore {
 namespace opt {
 namespace irpass {
diff --git a/mindspore/ccsrc/frontend/optimizer/opt.cc b/mindspore/ccsrc/frontend/optimizer/opt.cc
index 44917106fad..30f186d499d 100644
--- a/mindspore/ccsrc/frontend/optimizer/opt.cc
+++ b/mindspore/ccsrc/frontend/optimizer/opt.cc
@@ -16,17 +16,14 @@
 #include "frontend/optimizer/opt.h"
-#include
 #include
 #include
-#include
 #include
 #include "ir/anf.h"
 #include "ir/manager.h"
 #include "frontend/optimizer/optimizer.h"
 #include "utils/log_adapter.h"
-#include "utils/ordered_set.h"
 namespace mindspore {
 /* namespace to support opt */
diff --git a/mindspore/ccsrc/frontend/optimizer/pattern.cc b/mindspore/ccsrc/frontend/optimizer/pattern.cc
index 412c0bdb46c..9011cde7c80 100644
--- a/mindspore/ccsrc/frontend/optimizer/pattern.cc
+++ b/mindspore/ccsrc/frontend/optimizer/pattern.cc
@@ -15,7 +15,6 @@
 */
 #include "frontend/optimizer/pattern.h"
 #include "pybind_api/api_register.h"
-#include "pybind_api/export_flags.h"
 namespace mindspore {
 namespace opt {
diff --git a/mindspore/ccsrc/frontend/optimizer/py_pass.cc b/mindspore/ccsrc/frontend/optimizer/py_pass.cc
index d15a53705f8..dc51842bc7c 100644
--- a/mindspore/ccsrc/frontend/optimizer/py_pass.cc
+++ b/mindspore/ccsrc/frontend/optimizer/py_pass.cc
@@ -16,14 +16,11 @@
"frontend/optimizer/py_pass.h" #include #include -#include -#include #include #include "ir/func_graph.h" #include "ir/manager.h" #include "pybind_api/ir/primitive_py.h" -#include "pipeline/jit/parse/parse_base.h" #include "pipeline/jit/resource.h" namespace mindspore { diff --git a/mindspore/ccsrc/frontend/optimizer/py_pass_manager.cc b/mindspore/ccsrc/frontend/optimizer/py_pass_manager.cc index a269788dfe0..ac80136f7e2 100644 --- a/mindspore/ccsrc/frontend/optimizer/py_pass_manager.cc +++ b/mindspore/ccsrc/frontend/optimizer/py_pass_manager.cc @@ -16,9 +16,7 @@ #include "frontend/optimizer/py_pass_manager.h" #include -#include #include -#include #include "ir/manager.h" #include "frontend/optimizer/pass_group.h" diff --git a/mindspore/ccsrc/frontend/parallel/auto_parallel/graph_costmodel.cc b/mindspore/ccsrc/frontend/parallel/auto_parallel/graph_costmodel.cc index 5313062e9c0..1876c94f8fd 100644 --- a/mindspore/ccsrc/frontend/parallel/auto_parallel/graph_costmodel.cc +++ b/mindspore/ccsrc/frontend/parallel/auto_parallel/graph_costmodel.cc @@ -15,7 +15,6 @@ */ #include #include -#include #include #include #include diff --git a/mindspore/ccsrc/frontend/parallel/auto_parallel/rec_core/rec_cost.cc b/mindspore/ccsrc/frontend/parallel/auto_parallel/rec_core/rec_cost.cc index 0a7e6c59d4d..21f073e3496 100644 --- a/mindspore/ccsrc/frontend/parallel/auto_parallel/rec_core/rec_cost.cc +++ b/mindspore/ccsrc/frontend/parallel/auto_parallel/rec_core/rec_cost.cc @@ -17,9 +17,7 @@ #include "frontend/parallel/auto_parallel/rec_core/rec_cost.h" #include -#include #include -#include #include #include #include diff --git a/mindspore/ccsrc/frontend/parallel/auto_parallel/rec_core/rec_partition.cc b/mindspore/ccsrc/frontend/parallel/auto_parallel/rec_core/rec_partition.cc index 97d230a49f7..9b6417a1971 100644 --- a/mindspore/ccsrc/frontend/parallel/auto_parallel/rec_core/rec_partition.cc +++ b/mindspore/ccsrc/frontend/parallel/auto_parallel/rec_core/rec_partition.cc @@ -18,7 +18,6 @@ #include #include -#include #include #include #include diff --git a/mindspore/ccsrc/frontend/parallel/context.cc b/mindspore/ccsrc/frontend/parallel/context.cc index 24c6823a1a0..20445f9c6a7 100644 --- a/mindspore/ccsrc/frontend/parallel/context.cc +++ b/mindspore/ccsrc/frontend/parallel/context.cc @@ -20,11 +20,9 @@ #include #include #include -#include #include #include -#include "utils/ms_utils.h" #include "frontend/parallel/device_manager.h" namespace mindspore { diff --git a/mindspore/ccsrc/frontend/parallel/device_manager.cc b/mindspore/ccsrc/frontend/parallel/device_manager.cc index 3a6f6878c63..a272702cd1e 100644 --- a/mindspore/ccsrc/frontend/parallel/device_manager.cc +++ b/mindspore/ccsrc/frontend/parallel/device_manager.cc @@ -17,8 +17,6 @@ #include "frontend/parallel/device_manager.h" #include -#include -#include #include #include #include diff --git a/mindspore/ccsrc/frontend/parallel/graph_util/generate_graph.cc b/mindspore/ccsrc/frontend/parallel/graph_util/generate_graph.cc index 7813332765d..91bd3e9dfe6 100644 --- a/mindspore/ccsrc/frontend/parallel/graph_util/generate_graph.cc +++ b/mindspore/ccsrc/frontend/parallel/graph_util/generate_graph.cc @@ -19,7 +19,6 @@ #include #include #include -#include using mindspore::tensor::Tensor; diff --git a/mindspore/ccsrc/frontend/parallel/graph_util/get_parallel_info.cc b/mindspore/ccsrc/frontend/parallel/graph_util/get_parallel_info.cc index 0f9a9d4f8c3..65742d1d47e 100644 --- a/mindspore/ccsrc/frontend/parallel/graph_util/get_parallel_info.cc +++ 
+++ b/mindspore/ccsrc/frontend/parallel/graph_util/get_parallel_info.cc
@@ -18,10 +18,8 @@
 #include
 #include
-#include
 #include
-#include "utils/ms_utils.h"
 #include "ir/func_graph.h"
 #include "frontend/parallel/ops_info/operator_info.h"
 #include "frontend/parallel/graph_util/graph_info.h"
diff --git a/mindspore/ccsrc/frontend/parallel/graph_util/graph_info.cc b/mindspore/ccsrc/frontend/parallel/graph_util/graph_info.cc
index 0121c70d40a..6b360e4a978 100644
--- a/mindspore/ccsrc/frontend/parallel/graph_util/graph_info.cc
+++ b/mindspore/ccsrc/frontend/parallel/graph_util/graph_info.cc
@@ -18,7 +18,6 @@
 #include "debug/anf_ir_dump.h"
 #include "debug/anf_ir_utils.h"
 #include "debug/draw.h"
-#include "ir/func_graph.h"
 #include "utils/ms_context.h"
 #include "ir/graph_utils.h"
diff --git a/mindspore/ccsrc/frontend/parallel/group_manager.cc b/mindspore/ccsrc/frontend/parallel/group_manager.cc
index 98fca25b3d2..8c707d7fbd1 100644
--- a/mindspore/ccsrc/frontend/parallel/group_manager.cc
+++ b/mindspore/ccsrc/frontend/parallel/group_manager.cc
@@ -20,7 +20,6 @@
 #include
 #include "frontend/parallel/device_manager.h"
-#include "frontend/parallel/ops_info/ops_utils.h"
 #include "utils/comm_manager.h"
 namespace mindspore {
diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/arithmetic_info.cc b/mindspore/ccsrc/frontend/parallel/ops_info/arithmetic_info.cc
index 3517bea32f7..5813008d564 100644
--- a/mindspore/ccsrc/frontend/parallel/ops_info/arithmetic_info.cc
+++ b/mindspore/ccsrc/frontend/parallel/ops_info/arithmetic_info.cc
@@ -17,7 +17,6 @@
 #include "frontend/parallel/ops_info/arithmetic_info.h"
 #include
-#include
 #include
 #include
diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/batch_parallel_info.cc b/mindspore/ccsrc/frontend/parallel/ops_info/batch_parallel_info.cc
index 5f727ab55ce..51a7b7dd124 100644
--- a/mindspore/ccsrc/frontend/parallel/ops_info/batch_parallel_info.cc
+++ b/mindspore/ccsrc/frontend/parallel/ops_info/batch_parallel_info.cc
@@ -18,7 +18,6 @@
 #include
 #include
-#include
 #include "ir/value.h"
 #include "frontend/parallel/device_manager.h"
diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/bias_add_info.cc b/mindspore/ccsrc/frontend/parallel/ops_info/bias_add_info.cc
index 25d5e721129..fd92069254e 100644
--- a/mindspore/ccsrc/frontend/parallel/ops_info/bias_add_info.cc
+++ b/mindspore/ccsrc/frontend/parallel/ops_info/bias_add_info.cc
@@ -17,7 +17,6 @@
 #include "frontend/parallel/ops_info/bias_add_info.h"
 #include
-#include
 #include
 #include
diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/dropout_do_mask_info.cc b/mindspore/ccsrc/frontend/parallel/ops_info/dropout_do_mask_info.cc
index c17389ae563..91ee4446c78 100644
--- a/mindspore/ccsrc/frontend/parallel/ops_info/dropout_do_mask_info.cc
+++ b/mindspore/ccsrc/frontend/parallel/ops_info/dropout_do_mask_info.cc
@@ -26,7 +26,6 @@
 #include "frontend/parallel/auto_parallel/costmodel.h"
 #include "frontend/parallel/device_matrix.h"
 #include "frontend/parallel/strategy.h"
-#include "frontend/parallel/tensor_layout/tensor_redistribution.h"
 namespace mindspore {
 namespace parallel {
diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/l2_normalize_info.cc b/mindspore/ccsrc/frontend/parallel/ops_info/l2_normalize_info.cc
index 2a513b2d2e7..e0f0155b2c8 100644
--- a/mindspore/ccsrc/frontend/parallel/ops_info/l2_normalize_info.cc
+++ b/mindspore/ccsrc/frontend/parallel/ops_info/l2_normalize_info.cc
@@ -23,7 +23,6 @@
 #include "frontend/parallel/device_matrix.h"
 #include "frontend/parallel/strategy.h"
"frontend/parallel/tensor_layout/tensor_redistribution.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/loss_info.cc b/mindspore/ccsrc/frontend/parallel/ops_info/loss_info.cc index 0ef7fa8e4f7..1f472be0a20 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/loss_info.cc +++ b/mindspore/ccsrc/frontend/parallel/ops_info/loss_info.cc @@ -17,7 +17,6 @@ #include "frontend/parallel/ops_info/loss_info.h" #include -#include #include #include diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/operator_info.cc b/mindspore/ccsrc/frontend/parallel/ops_info/operator_info.cc index 420e7f9f96a..b7ecb7e34b8 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/operator_info.cc +++ b/mindspore/ccsrc/frontend/parallel/ops_info/operator_info.cc @@ -17,7 +17,6 @@ #include "frontend/parallel/ops_info/operator_info.h" #include -#include #include #include #include @@ -30,7 +29,6 @@ #include "frontend/parallel/auto_parallel/edge_costmodel.h" #include "frontend/parallel/auto_parallel/graph_costmodel.h" #include "frontend/parallel/context.h" -#include "utils/ms_context.h" #include "utils/log_adapter.h" namespace mindspore { diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/prelu_info.cc b/mindspore/ccsrc/frontend/parallel/ops_info/prelu_info.cc index 90513b712f5..48f06cb7fef 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/prelu_info.cc +++ b/mindspore/ccsrc/frontend/parallel/ops_info/prelu_info.cc @@ -23,7 +23,6 @@ #include "frontend/parallel/device_manager.h" #include "frontend/parallel/device_matrix.h" #include "frontend/parallel/step_parallel.h" -#include "utils/convert_utils.h" #include "utils/log_adapter.h" namespace mindspore { diff --git a/mindspore/ccsrc/frontend/parallel/ps/optimizer_info_builder.cc b/mindspore/ccsrc/frontend/parallel/ps/optimizer_info_builder.cc index e1d5ffb32ad..03b11c6f415 100644 --- a/mindspore/ccsrc/frontend/parallel/ps/optimizer_info_builder.cc +++ b/mindspore/ccsrc/frontend/parallel/ps/optimizer_info_builder.cc @@ -15,7 +15,6 @@ */ #include "frontend/parallel/ps/optimizer_info_builder.h" -#include #include #include diff --git a/mindspore/ccsrc/frontend/parallel/ps/scheduler.cc b/mindspore/ccsrc/frontend/parallel/ps/scheduler.cc index 04c259487fa..6ce4e4ac89d 100755 --- a/mindspore/ccsrc/frontend/parallel/ps/scheduler.cc +++ b/mindspore/ccsrc/frontend/parallel/ps/scheduler.cc @@ -15,7 +15,6 @@ */ #include "frontend/parallel/ps/scheduler.h" -#include #include "ps/ps.h" namespace mindspore { diff --git a/mindspore/ccsrc/frontend/parallel/step_auto_parallel.cc b/mindspore/ccsrc/frontend/parallel/step_auto_parallel.cc index 168c84c7503..9a45a05772b 100644 --- a/mindspore/ccsrc/frontend/parallel/step_auto_parallel.cc +++ b/mindspore/ccsrc/frontend/parallel/step_auto_parallel.cc @@ -44,8 +44,6 @@ #include "frontend/parallel/graph_util/node_info.h" #include "frontend/parallel/step_parallel.h" #include "frontend/parallel/strategy_checkpoint/parallel_strategy_checkpoint.h" -#include "pipeline/jit/parse/python_adapter.h" -#include "pipeline/jit/pipeline.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/frontend/parallel/step_parallel.cc b/mindspore/ccsrc/frontend/parallel/step_parallel.cc index bad1dcb9c13..7e7a44ed970 100644 --- a/mindspore/ccsrc/frontend/parallel/step_parallel.cc +++ b/mindspore/ccsrc/frontend/parallel/step_parallel.cc @@ -43,7 +43,6 @@ #include "frontend/parallel/strategy_checkpoint/parallel_strategy_checkpoint.h" #include "utils/comm_manager.h" #include 
"utils/symbolic.h" -#include "pipeline/jit/static_analysis/prim.h" using mindspore::tensor::Tensor; diff --git a/mindspore/ccsrc/frontend/parallel/tensor_layout/arrangement.cc b/mindspore/ccsrc/frontend/parallel/tensor_layout/arrangement.cc index 565750b9445..40a793b0e3f 100644 --- a/mindspore/ccsrc/frontend/parallel/tensor_layout/arrangement.cc +++ b/mindspore/ccsrc/frontend/parallel/tensor_layout/arrangement.cc @@ -16,7 +16,6 @@ #include "frontend/parallel/tensor_layout/arrangement.h" #include -#include #include #include "utils/ms_utils.h" #include "frontend/parallel/status.h" diff --git a/mindspore/ccsrc/frontend/parallel/tensor_layout/map.cc b/mindspore/ccsrc/frontend/parallel/tensor_layout/map.cc index b6c6904d4e4..d8d5e475122 100644 --- a/mindspore/ccsrc/frontend/parallel/tensor_layout/map.cc +++ b/mindspore/ccsrc/frontend/parallel/tensor_layout/map.cc @@ -16,7 +16,6 @@ #include "frontend/parallel/tensor_layout/map.h" #include -#include #include #include "utils/ms_utils.h" #include "frontend/parallel/status.h" diff --git a/mindspore/ccsrc/frontend/parallel/tensor_layout/shape_util.cc b/mindspore/ccsrc/frontend/parallel/tensor_layout/shape_util.cc index 453ad8066f6..98285585080 100644 --- a/mindspore/ccsrc/frontend/parallel/tensor_layout/shape_util.cc +++ b/mindspore/ccsrc/frontend/parallel/tensor_layout/shape_util.cc @@ -15,7 +15,6 @@ */ #include "frontend/parallel/tensor_layout/shape_util.h" -#include #include "frontend/parallel/status.h" #include "utils/log_adapter.h" diff --git a/mindspore/ccsrc/frontend/parallel/tensor_layout/tensor_layout.cc b/mindspore/ccsrc/frontend/parallel/tensor_layout/tensor_layout.cc index 203b4f9958c..f3f548738dc 100644 --- a/mindspore/ccsrc/frontend/parallel/tensor_layout/tensor_layout.cc +++ b/mindspore/ccsrc/frontend/parallel/tensor_layout/tensor_layout.cc @@ -21,7 +21,6 @@ #include "ir/value.h" #include "frontend/parallel/device_matrix.h" #include "frontend/parallel/status.h" -#include "frontend/parallel/tensor_layout/array.h" #include "frontend/parallel/tensor_layout/shape_util.h" #include "utils/log_adapter.h" diff --git a/mindspore/ccsrc/frontend/parallel/tensor_layout/tensor_redistribution.cc b/mindspore/ccsrc/frontend/parallel/tensor_layout/tensor_redistribution.cc index 87d385c81bf..d03dceead6a 100644 --- a/mindspore/ccsrc/frontend/parallel/tensor_layout/tensor_redistribution.cc +++ b/mindspore/ccsrc/frontend/parallel/tensor_layout/tensor_redistribution.cc @@ -15,7 +15,6 @@ */ #include "frontend/parallel/tensor_layout/tensor_redistribution.h" -#include #include #include #include "utils/ms_utils.h" diff --git a/mindspore/ccsrc/pipeline/jit/init.cc b/mindspore/ccsrc/pipeline/jit/init.cc index 94a0a5f92a7..3b96b8938e0 100644 --- a/mindspore/ccsrc/pipeline/jit/init.cc +++ b/mindspore/ccsrc/pipeline/jit/init.cc @@ -15,12 +15,10 @@ */ #include -#include #include "backend/kernel_compiler/oplib/oplib.h" #include "backend/kernel_compiler/oplib/oploader.h" #include "pipeline/jit/pipeline.h" #include "frontend/operator/composite/composite.h" -#include "ir/signature.h" #include "pipeline/pynative/pynative_execute.h" #include "utils/symbolic.h" #include "pybind_api/api_register.h" @@ -29,7 +27,6 @@ #include "utils/config_manager.h" #include "utils/mpi/mpi_config.h" #include "frontend/parallel/context.h" -#include "frontend/parallel/device_manager.h" #include "frontend/parallel/costmodel_context.h" #ifdef ENABLE_GPU_COLLECTIVE #include "runtime/device/gpu/distribution/collective_init.h" diff --git a/mindspore/ccsrc/pipeline/jit/parse/data_converter.cc 
diff --git a/mindspore/ccsrc/pipeline/jit/parse/data_converter.cc b/mindspore/ccsrc/pipeline/jit/parse/data_converter.cc
index 13dfaeb6c07..f9be48c85d1 100644
--- a/mindspore/ccsrc/pipeline/jit/parse/data_converter.cc
+++ b/mindspore/ccsrc/pipeline/jit/parse/data_converter.cc
@@ -18,12 +18,10 @@
 #include "pipeline/jit/parse/data_converter.h"
 #include
-#include
 #include
 #include
 #include
 #include
-#include
 #include "pipeline/jit/parse/resolve.h"
 #include "pipeline/jit/parse/python_adapter.h"
 #include "frontend/operator/ops.h"
@@ -31,8 +29,6 @@
 #include "ir/func_graph_cloner.h"
 #include "utils/symbolic.h"
 #include "utils/ms_context.h"
-#include "debug/trace.h"
-#include "frontend/optimizer/ad/grad.h"
 namespace mindspore {
 namespace parse {
diff --git a/mindspore/ccsrc/pipeline/jit/parse/function_block.cc b/mindspore/ccsrc/pipeline/jit/parse/function_block.cc
index e424c9c2f7b..1b342f5ec34 100644
--- a/mindspore/ccsrc/pipeline/jit/parse/function_block.cc
+++ b/mindspore/ccsrc/pipeline/jit/parse/function_block.cc
@@ -19,7 +19,6 @@
 #include "pipeline/jit/parse/function_block.h"
 #include
 #include
-#include
 #include "pipeline/jit/parse/resolve.h"
 #include "pipeline/jit/parse/parse.h"
 #include "frontend/operator/ops.h"
diff --git a/mindspore/ccsrc/pipeline/jit/parse/parse.cc b/mindspore/ccsrc/pipeline/jit/parse/parse.cc
index be75d6ac2e6..4ef457427d9 100644
--- a/mindspore/ccsrc/pipeline/jit/parse/parse.cc
+++ b/mindspore/ccsrc/pipeline/jit/parse/parse.cc
@@ -21,7 +21,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include "pipeline/jit/parse/resolve.h"
diff --git a/mindspore/ccsrc/pipeline/jit/parse/python_adapter.cc b/mindspore/ccsrc/pipeline/jit/parse/python_adapter.cc
index 17be74b2a1f..b4b4b1b1afc 100644
--- a/mindspore/ccsrc/pipeline/jit/parse/python_adapter.cc
+++ b/mindspore/ccsrc/pipeline/jit/parse/python_adapter.cc
@@ -15,7 +15,6 @@
 */
 #include "pipeline/jit/parse/python_adapter.h"
-#include
 #include
 #include
diff --git a/mindspore/ccsrc/pipeline/jit/parse/resolve.cc b/mindspore/ccsrc/pipeline/jit/parse/resolve.cc
index aa0e542829a..484989d7ba7 100644
--- a/mindspore/ccsrc/pipeline/jit/parse/resolve.cc
+++ b/mindspore/ccsrc/pipeline/jit/parse/resolve.cc
@@ -19,7 +19,6 @@
 #include
 #include
 #include
-#include
 #include "ir/param_info.h"
 #include "pipeline/jit/parse/data_converter.h"
@@ -29,7 +28,6 @@
 #include "frontend/operator/ops.h"
 #include "frontend/optimizer/opt.h"
 #include "frontend/optimizer/irpass.h"
-#include "./common.h"
 namespace mindspore {
 namespace parse {
diff --git a/mindspore/ccsrc/pipeline/jit/pass.cc b/mindspore/ccsrc/pipeline/jit/pass.cc
index e282b589cd6..29d2358cd0d 100644
--- a/mindspore/ccsrc/pipeline/jit/pass.cc
+++ b/mindspore/ccsrc/pipeline/jit/pass.cc
@@ -17,17 +17,13 @@
 #include "pipeline/jit/pass.h"
 #include
-#include
 #include
 #include
 #include
 #include
-#include
 #include "ir/func_graph_cloner.h"
-#include "debug/anf_ir_utils.h"
 #include "pipeline/jit/parse/parse_base.h"
-#include "pipeline/jit/parse/data_converter.h"
 #include "pipeline/jit/resource.h"
 #include "pipeline/jit/validator.h"
 #include "pipeline/jit/remove_value_node_dup.h"
@@ -40,7 +36,6 @@
 #include "frontend/parallel/step_parallel.h"
 #include "frontend/parallel/step_auto_parallel.h"
 #include "frontend/parallel/allreduce_fusion/step_allreduce_fusion.h"
-#include "utils/any.h"
 #include "utils/log_adapter.h"
 namespace mindspore {
diff --git a/mindspore/ccsrc/pipeline/jit/pipeline.cc b/mindspore/ccsrc/pipeline/jit/pipeline.cc
index 3cd1445539b..7a70f68a185 100644
--- a/mindspore/ccsrc/pipeline/jit/pipeline.cc
+++ b/mindspore/ccsrc/pipeline/jit/pipeline.cc
@@ -33,7 +33,6 @@
 #include "utils/config_manager.h"
 #include "utils/convert_utils.h"
 #include "utils/context/context_extends.h"
-#include "utils/utils.h"
 #include "vm/segment_runner.h"
 #include "frontend/parallel/context.h"
 #include "frontend/parallel/graph_util/get_parallel_info.h"
diff --git a/mindspore/ccsrc/pipeline/jit/resource.cc b/mindspore/ccsrc/pipeline/jit/resource.cc
index 01251087340..aff91ae22ac 100644
--- a/mindspore/ccsrc/pipeline/jit/resource.cc
+++ b/mindspore/ccsrc/pipeline/jit/resource.cc
@@ -22,7 +22,6 @@
 #include "ir/dtype.h"
 #include "pipeline/jit/parse/data_converter.h"
 #include "frontend/operator/ops.h"
-#include "ir/graph_utils.h"
 #include "frontend/optimizer/ad/dfunctor.h"
 namespace mindspore {
diff --git a/mindspore/ccsrc/pipeline/jit/static_analysis/program_specialize.cc b/mindspore/ccsrc/pipeline/jit/static_analysis/program_specialize.cc
index 25b34d3681f..91d865d7190 100644
--- a/mindspore/ccsrc/pipeline/jit/static_analysis/program_specialize.cc
+++ b/mindspore/ccsrc/pipeline/jit/static_analysis/program_specialize.cc
@@ -20,13 +20,11 @@
 #include
 #include
-#include "./common.h"
 #include "frontend/operator/ops.h"
 #include "frontend/operator/composite/do_signature.h"
 #include "abstract/abstract_function.h"
 #include "ir/graph_utils.h"
 #include "utils/log_adapter.h"
-#include "utils/profile.h"
 #include "debug/trace.h"
 namespace mindspore {
diff --git a/mindspore/ccsrc/pipeline/jit/static_analysis/static_analysis.cc b/mindspore/ccsrc/pipeline/jit/static_analysis/static_analysis.cc
index e5f9cdb6b21..a7f014705ec 100644
--- a/mindspore/ccsrc/pipeline/jit/static_analysis/static_analysis.cc
+++ b/mindspore/ccsrc/pipeline/jit/static_analysis/static_analysis.cc
@@ -29,7 +29,6 @@
 #include "ir/func_graph_cloner.h"
 #include "./common.h"
 #include "pipeline/jit/parse/data_converter.h"
-#include "debug/draw.h"
 #include "pipeline/jit/static_analysis/evaluator.h"
 #include "debug/trace.h"
diff --git a/mindspore/ccsrc/pipeline/jit/validator.cc b/mindspore/ccsrc/pipeline/jit/validator.cc
index 95a54eebb2f..7cf8071ec3a 100644
--- a/mindspore/ccsrc/pipeline/jit/validator.cc
+++ b/mindspore/ccsrc/pipeline/jit/validator.cc
@@ -23,7 +23,6 @@
 #include "ir/manager.h"
 #include "ir/dtype.h"
-#include "./common.h"
 #include "pipeline/jit/static_analysis/prim.h"
 namespace mindspore {
diff --git a/mindspore/ccsrc/pipeline/pynative/pynative_execute_ge.cc b/mindspore/ccsrc/pipeline/pynative/pynative_execute_ge.cc
index 6552c118642..0eb8b202062 100644
--- a/mindspore/ccsrc/pipeline/pynative/pynative_execute_ge.cc
+++ b/mindspore/ccsrc/pipeline/pynative/pynative_execute_ge.cc
@@ -29,6 +29,7 @@
 #include "pipeline/jit/static_analysis/prim.h"
 #include "backend/session/session_factory.h"
 #include "pybind_api/ir/tensor_py.h"
+#include "transform/graph_ir/op_declare/array_ops_declare.h"
 const char SINGLE_OP_GRAPH[] = "single_op_graph";
diff --git a/mindspore/ccsrc/pybind_api/api_register.cc b/mindspore/ccsrc/pybind_api/api_register.cc
index 0c8c47d6589..7e575da0b07 100644
--- a/mindspore/ccsrc/pybind_api/api_register.cc
+++ b/mindspore/ccsrc/pybind_api/api_register.cc
@@ -16,8 +16,6 @@
 #include "pybind_api/api_register.h"
-
-#include
-
 namespace mindspore {
 PybindDefineRegister &PybindDefineRegister::GetSingleton() {
diff --git a/mindspore/ccsrc/pybind_api/export_flags.cc b/mindspore/ccsrc/pybind_api/export_flags.cc
index c8f988d4ef6..909951d4700 100644
--- a/mindspore/ccsrc/pybind_api/export_flags.cc
+++ b/mindspore/ccsrc/pybind_api/export_flags.cc
@@ -13,8 +13,8 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
-
 #include "pybind_api/export_flags.h"
+
 namespace mindspore {
 const char PYTHON_PRIMITIVE_FLAG[] = "__primitive_flag__";
 const char PYTHON_CELL_AS_LIST[] = "__cell_as_list__";
diff --git a/mindspore/ccsrc/pybind_api/ir/dtype_py.cc b/mindspore/ccsrc/pybind_api/ir/dtype_py.cc
index 2b62bc0b845..b279532d061 100644
--- a/mindspore/ccsrc/pybind_api/ir/dtype_py.cc
+++ b/mindspore/ccsrc/pybind_api/ir/dtype_py.cc
@@ -15,13 +15,9 @@
 */
 #include "ir/dtype.h"
-#include
-#include
-#include
 #include "utils/log_adapter.h"
 #include "abstract/abstract_value.h"
 #include "pybind_api/api_register.h"
-#include "pybind_api/export_flags.h"
 namespace mindspore {
 // Define python wrapper to handle data types.
diff --git a/mindspore/ccsrc/pybind_api/ir/func_graph_py.cc b/mindspore/ccsrc/pybind_api/ir/func_graph_py.cc
index cdddb7b08d0..768befffe3a 100644
--- a/mindspore/ccsrc/pybind_api/ir/func_graph_py.cc
+++ b/mindspore/ccsrc/pybind_api/ir/func_graph_py.cc
@@ -18,7 +18,6 @@
 #include "ir/func_graph.h"
 #include "pybind_api/api_register.h"
-#include "pybind_api/export_flags.h"
 namespace mindspore {
 REGISTER_PYBIND_DEFINE(FuncGraph, ([](const pybind11::module *m) {
diff --git a/mindspore/ccsrc/pybind_api/ir/signature_py.cc b/mindspore/ccsrc/pybind_api/ir/signature_py.cc
index f513df8533f..4dbf070462f 100644
--- a/mindspore/ccsrc/pybind_api/ir/signature_py.cc
+++ b/mindspore/ccsrc/pybind_api/ir/signature_py.cc
@@ -17,7 +17,6 @@
 #include "ir/signature.h"
 #include "pybind11/operators.h"
 #include "pybind_api/api_register.h"
-#include "pipeline/jit/parse/data_converter.h"
 namespace py = pybind11;
diff --git a/mindspore/ccsrc/pybind_api/ir/tensor_py.cc b/mindspore/ccsrc/pybind_api/ir/tensor_py.cc
index 76453f8662f..7596d75c284 100644
--- a/mindspore/ccsrc/pybind_api/ir/tensor_py.cc
+++ b/mindspore/ccsrc/pybind_api/ir/tensor_py.cc
@@ -16,14 +16,11 @@
 #include "pybind_api/ir/tensor_py.h"
-
-#include
-#include
 #include
 #include
 #include
 #include "pybind_api/api_register.h"
-#include "pybind_api/export_flags.h"
 #include "abstract/abstract_value.h"
 namespace mindspore {
diff --git a/mindspore/ccsrc/runtime/device/ascend/ascend_device_address.cc b/mindspore/ccsrc/runtime/device/ascend/ascend_device_address.cc
index 8494656d3d4..484a2b1a12b 100644
--- a/mindspore/ccsrc/runtime/device/ascend/ascend_device_address.cc
+++ b/mindspore/ccsrc/runtime/device/ascend/ascend_device_address.cc
@@ -26,11 +26,9 @@
 #include "runtime/device/convert_tensor_utils.h"
 #include "ir/dtype/type.h"
 #include "ir/tensor.h"
-#include "backend/kernel_compiler/common_utils.h"
 #include "backend/kernel_compiler/tbe/tbe_kernel_build.h"
 #include "backend/kernel_compiler/tbe/tbe_kernel_parallel_build.h"
 #include "utils/utils.h"
-#include "utils/ms_utils.h"
 #include "common/trans.h"
 #ifdef ENABLE_DUMP_E2E
 #include "debug/e2e_dump.h"
diff --git a/mindspore/ccsrc/runtime/device/ascend/ascend_kernel_runtime.cc b/mindspore/ccsrc/runtime/device/ascend/ascend_kernel_runtime.cc
index 3f86010de8b..38d03ced80d 100644
--- a/mindspore/ccsrc/runtime/device/ascend/ascend_kernel_runtime.cc
+++ b/mindspore/ccsrc/runtime/device/ascend/ascend_kernel_runtime.cc
@@ -32,15 +32,16 @@
 #include "runtime/context.h"
 #include "runtime/device/ascend/ascend_label_assign.h"
 #include "runtime/device/ascend/ascend_stream_assign.h"
-#include "runtime/device/ascend/ascend_memory_pool.h"
 #include "framework/ge_runtime/model_runner.h"
"runtime/device/ascend/tasksink/task_generator.h" #include "backend/session/anf_runtime_algorithm.h" #include "runtime/device/ascend/profiling/profiling_utils.h" #include "backend/kernel_compiler/tbe/tbe_utils.h" -#include "backend/optimizer/mem_reuse/mem_reuse_checker.h" #include "runtime/device/ascend/ascend_memory_manager.h" #include "debug/tensor_load.h" +#ifdef MEM_REUSE_DEBUG +#include "backend/optimizer/mem_reuse/mem_reuse_checker.h" +#endif using ge::model_runner::ModelRunner; using mindspore::device::ascend::ProfilingManager; diff --git a/mindspore/ccsrc/runtime/device/ascend/kernel_build_ascend.cc b/mindspore/ccsrc/runtime/device/ascend/kernel_build_ascend.cc index 39cefcb0201..6a891c6f2af 100644 --- a/mindspore/ccsrc/runtime/device/ascend/kernel_build_ascend.cc +++ b/mindspore/ccsrc/runtime/device/ascend/kernel_build_ascend.cc @@ -19,12 +19,10 @@ #include #include #include -#include #include "runtime/device/ascend/kernel_select_ascend.h" #include "runtime/device/kernel_info.h" #include "backend/kernel_compiler/kernel.h" -#include "backend/kernel_compiler/tbe/tbe_kernel_build.h" #include "backend/kernel_compiler/tbe/tbe_kernel_parallel_build.h" #include "backend/kernel_compiler/akg/ascend/akg_ascend_kernel_build.h" #include "backend/kernel_compiler/aicpu/aicpu_kernel_build.h" diff --git a/mindspore/ccsrc/runtime/device/ascend/kernel_select_ascend.cc b/mindspore/ccsrc/runtime/device/ascend/kernel_select_ascend.cc index f4a135ce70b..ac37bb5f1af 100644 --- a/mindspore/ccsrc/runtime/device/ascend/kernel_select_ascend.cc +++ b/mindspore/ccsrc/runtime/device/ascend/kernel_select_ascend.cc @@ -22,16 +22,10 @@ #include #include #include -#include -#include -#include "utils/ms_utils.h" #include "debug/anf_ir_dump.h" #include "frontend/operator/ops.h" -#include "ir/func_graph.h" #include "utils/ms_context.h" #include "backend/session/anf_runtime_algorithm.h" -#include "runtime/device/kernel_info.h" -#include "backend/kernel_compiler/common_utils.h" #include "backend/kernel_compiler/kernel_query.h" #include "backend/kernel_compiler/oplib/oplib.h" #include "backend/kernel_compiler/kernel_build_info.h" diff --git a/mindspore/ccsrc/runtime/device/ascend/tasksink/task_generator.cc b/mindspore/ccsrc/runtime/device/ascend/tasksink/task_generator.cc index 2b419367f9d..8b5dc8cfe28 100644 --- a/mindspore/ccsrc/runtime/device/ascend/tasksink/task_generator.cc +++ b/mindspore/ccsrc/runtime/device/ascend/tasksink/task_generator.cc @@ -18,7 +18,6 @@ #include #include "backend/kernel_compiler/task_stream.h" -#include "utils/ms_context.h" #include "utils/ms_utils.h" #include "runtime/device/ascend/profiling/profiling_utils.h" #include "runtime/device/ascend/profiling/profiling_manager.h" diff --git a/mindspore/ccsrc/runtime/device/cpu/cpu_kernel_runtime.cc b/mindspore/ccsrc/runtime/device/cpu/cpu_kernel_runtime.cc index 77389f16e5f..114c20ae653 100644 --- a/mindspore/ccsrc/runtime/device/cpu/cpu_kernel_runtime.cc +++ b/mindspore/ccsrc/runtime/device/cpu/cpu_kernel_runtime.cc @@ -19,15 +19,9 @@ #include #include #include -#include -#include -#include #include "backend/kernel_compiler/kernel.h" #include "runtime/device/cpu/cpu_device_address.h" #include "utils/ms_context.h" -#include "utils/config_manager.h" -#include "utils/profile.h" -#include "utils/ms_utils.h" #include "backend/session/anf_runtime_algorithm.h" #include "backend/session/session_basic.h" #include "frontend/operator/ops.h" diff --git a/mindspore/ccsrc/runtime/device/kernel_adjust.cc b/mindspore/ccsrc/runtime/device/kernel_adjust.cc index 
index 2aa44397c11..b3a94a1f6cd 100644
--- a/mindspore/ccsrc/runtime/device/kernel_adjust.cc
+++ b/mindspore/ccsrc/runtime/device/kernel_adjust.cc
@@ -19,8 +19,6 @@
 #include
 #include
 #include
-#include
-#include
 #include
 #include "backend/session/anf_runtime_algorithm.h"
@@ -31,7 +29,6 @@
 #include "backend/kernel_compiler/kernel_build_info.h"
 #include "utils/utils.h"
 #include "runtime/device/ascend/profiling/profiling_manager.h"
-#include "runtime/device/ascend/kernel_select_ascend.h"
 #include "runtime/base.h"
 #include "runtime/device/ascend/ascend_stream_assign.h"
diff --git a/mindspore/ccsrc/runtime/device/kernel_runtime.cc b/mindspore/ccsrc/runtime/device/kernel_runtime.cc
index 6173aa4faf4..6e3ee23bb48 100644
--- a/mindspore/ccsrc/runtime/device/kernel_runtime.cc
+++ b/mindspore/ccsrc/runtime/device/kernel_runtime.cc
@@ -18,17 +18,13 @@
 #include
 #include
 #include
-#include
 #include "utils/ms_utils.h"
 #include "common/trans.h"
 #include "utils/utils.h"
 #include "utils/ms_context.h"
 #include "frontend/operator/ops.h"
-#include "pipeline/jit/parse/python_adapter.h"
 #include "backend/session/kernel_graph.h"
 #include "backend/session/anf_runtime_algorithm.h"
-#include "backend/kernel_compiler/common_utils.h"
-#include "backend/kernel_compiler/oplib/oplib.h"
 #include "backend/optimizer/common/helper.h"
 #include "ir/value.h"
 using mindspore::kernel::Address;
diff --git a/mindspore/ccsrc/transform/graph_ir/convert.cc b/mindspore/ccsrc/transform/graph_ir/convert.cc
index 1562df80f8a..c48e0833e1a 100644
--- a/mindspore/ccsrc/transform/graph_ir/convert.cc
+++ b/mindspore/ccsrc/transform/graph_ir/convert.cc
@@ -29,415 +29,26 @@
 #include "utils/convert_utils.h"
 #include "./common.h"
 #include "utils/ms_context.h"
+#include "transform/graph_ir/op_adapter_map.h"
+#include "ops/state_ops.h"
+#include "ops/array_ops.h"
+#include "ops/elewise_calculation_ops.h"
+#include "ops/math_ops.h"
 namespace mindspore {
 namespace transform {
 using std::endl;
-#define ADPT_DESC_ONE(T) std::make_shared<OpAdapterDesc>(std::make_shared<OpAdapter<T>>())
-#define ADPT_DESC_TWO(T, I) \
-  std::make_shared<OpAdapterDesc>(std::make_shared<OpAdapter<T>>(), std::make_shared<OpAdapter<I>>())
-#define GET_MACRO(_1, _2, DESC, ...) DESC
GET_MACRO(__VA_ARGS__, ADPT_DESC_TWO, ADPT_DESC_ONE, ...)(__VA_ARGS__) - using ge::Operator; using mindspore::kAnyValue; using std::make_shared; using std::shared_ptr; using std::string; using std::vector; - -const char kNameCustomOp[] = "CustomOp"; -const char kNameConst[] = "Const"; -const char kNameParam[] = "parameter"; -const char kNameRandomUniform[] = "RandomUniform"; -const char kNameSimpleMean[] = "SimpleMean"; -const char kNameSimpleMeanGrad[] = "SimpleMeanGrad"; -const char kNameAllReduce[] = "AllReduce"; -const char kNameBroadcast[] = "Broadcast"; -const char kNameAllgather[] = "AllGather"; -const char kNameReduceScatter[] = "ReduceScatter"; -const char kNameReduceSum[] = "ReduceSum"; -const char kNameIsFinite[] = "isFinite"; -const char kNameReciprocal[] = "Reciprocal"; -const char kNameRsqrt[] = "Rsqrt"; -const char kNameSqrt[] = "Sqrt"; -const char kNameSquare[] = "Square"; -const char kNameSquaredDifference[] = "SquaredDifference"; -const char kNamePow[] = "Pow"; -const char kNameBatchMatMul[] = "BatchMatMul"; -const char kNameStridedSlice[] = "StridedSlice"; -const char kNameStridedSliceGrad[] = "StridedSliceGrad"; -const char kNameExpandDims[] = "ExpandDims"; -const char kNameLog[] = "Log"; -const char kNameLogicalAnd[] = "LogicalAnd"; -const char kNameLogicalNot[] = "LogicalNot"; -const char kNameLogicalOr[] = "LogicalOr"; -const char kNameExp[] = "Exp"; -const char kNameLessEqual[] = "LessEqual"; -const char kNameGreaterEqual[] = "GreaterEqual"; -const char kNameEqual[] = "Equal"; -const char kNameNotEqual[] = "NotEqual"; -const char kNameFlattenGrad[] = "FlattenGrad"; -const char kNameConvolution[] = "Convolution"; -const char kNameBiasAdd[] = "BiasAdd"; -const char kNameMaxPoolGrad[] = "MaxPoolGrad"; -const char kNameRsqrtGrad[] = "RsqrtGrad"; -const char kNameSqrtGrad[] = "SqrtGrad"; -const char kNameReciprocalGrad[] = "ReciprocalGrad"; -const char kNameAvgPoolGrad[] = "AvgPoolGrad"; -const char kNameMaxPoolGradWithArgmax[] = "MaxPoolGradWithArgmax"; -const char kNameApplyMomentum[] = "ApplyMomentum"; -const char kNameDropoutDoMask[] = "DropoutDoMask"; -const char kNameResizeBilinear[] = "ResizeBilinear"; -const char kNameResizeBilinearGrad[] = "ResizeBilinearGrad"; -const char kNameZerosLike[] = "ZerosLike"; -const char kNameOnesLike[] = "OnesLike"; -const char kNameTruncatedNormal[] = "TruncatedNormal"; -const char kNameSpaceToBatchNd[] = "SpaceToBatchNd"; -const char kNameConfusionMatrix[] = "ConfusionMatrix"; -const char kNameResizeNearestNeighborD[] = "ResizeNearestNeighbor"; -const char kNameResizeNearestNeighborGrad[] = "ResizeNearestNeighborGrad"; -const char kNameApplyAdam[] = "Adam"; -const char kNameExtractImagePatches[] = "ExtractImagePatches"; -const char kNameReLU6[] = "ReLU6"; -const char kNameReLU6Grad[] = "ReLU6Grad"; -const char kNameElu[] = "Elu"; -const char kNameEluGrad[] = "EluGrad"; -const char kNameTensorScatterUpdate[] = "TensorScatterUpdate"; -const char kNameScatterUpdate[] = "ScatterUpdate"; -const char kNameScatterNdUpdate[] = "ScatterNdUpdate"; -const char kNameScatterMax[] = "ScatterMax"; -const char kNameNMSWithMask[] = "NMSWithMask"; -const char kNameCheckValid[] = "CheckValid"; -const char kNameSmoothL1Loss[] = "SmoothL1Loss"; -const char kNameSmoothL1LossGrad[] = "SmoothL1LossGrad"; -const char kNameSGD[] = "SGD"; -const char kNameSigmoidCrossEntropyWithLogits[] = "SigmoidCrossEntropyWithLogits"; -const char kNameSigmoidCrossEntropyWithLogitsGrad[] = "SigmoidCrossEntropyWithLogitsGrad"; -const char kNameScatterNdD[] = "ScatterNd"; 
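For context on the ADPT_DESC macros deleted above: GET_MACRO is the standard arity-dispatch trick. Padding the argument list with the candidate macro names makes GET_MACRO's third parameter resolve to the two-argument form when two arguments are passed and to the one-argument form when one is. A standalone sketch of the same trick (F1, F2, PICK, and DISPATCH are illustrative names):

#include <iostream>

// With one user argument, the third parameter of PICK lands on F1;
// with two user arguments, everything shifts and it lands on F2.
#define F1(a) std::cout << "one: " << (a) << "\n"
#define F2(a, b) std::cout << "two: " << (a) << ", " << (b) << "\n"
#define PICK(_1, _2, NAME, ...) NAME
#define DISPATCH(...) PICK(__VA_ARGS__, F2, F1, unused)(__VA_ARGS__)

int main() {
  DISPATCH(1);     // expands to F1(1)
  DISPATCH(1, 2);  // expands to F2(1, 2)
}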
-const char kNamePadD[] = "Pad"; -const char kNameMirrorPad[] = "MirrorPad"; -const char kNameMirrorPadGrad[] = "MirrorPadGrad"; -const char kNameGatherNd[] = "GatherNd"; -const char kNameArgmax[] = "Argmax"; -const char kNameArgmin[] = "Argmin"; -const char kNameArgMaxWithValue[] = "ArgMaxWithValue"; -const char kNameArgMinWithValue[] = "ArgMinWithValue"; -const char kNameReduceProd[] = "ReduceProd"; -const char kNameCumProd[] = "CumProd"; -const char kNameDiagpart[] = "Diagpart"; -const char kNameSplitD[] = "Split"; -const char kNameBatchToSpaceNd[] = "BatchToSpaceNd"; -const char kNameFloor[] = "Floor"; -const char kNameNPUGetFloatStatus[] = "NPUGetFloatStatus"; -const char kNameAssign[] = "Assign"; -const char kNameAssignAdd[] = "AssignAdd"; -const char kNameAssignSub[] = "AssignSub"; -const char kNameNPUAllocFloatStatus[] = "NPUAllocFloatStatus"; -const char kNameNPUClearFloatStatus[] = "NPUClearFloatStatus"; -const char kNameReshape[] = "Reshape"; -const char kNameTransShape[] = "TransShape"; -const char kNameRealDiv[] = "RealDiv"; -const char kNameTile[] = "Tile"; -const char kNameCos[] = "Cos"; -const char kNameACos[] = "ACos"; -const char kNameACosGrad[] = "ACosGrad"; -const char kNameFloorDiv[] = "FloorDiv"; -const char kNameSin[] = "Sin"; -const char kNamePrelu[] = "PReLU"; -const char kNamePreluGrad[] = "PReLUGrad"; -const char kNameSigmoid[] = "Sigmoid"; -const char kNameSigmoidGrad[] = "SigmoidGrad"; -const char kNameL2Normalize[] = "L2Normalize"; -const char kNameL2NormalizeGrad[] = "L2NormalizeGrad"; -const char kNameSoftmax[] = "Softmax"; -const char kNameIOU[] = "IOU"; -const char kNameBoundingBoxDecode[] = "BoundingBoxDecode"; -const char kNameBoundingBoxEncode[] = "BoundingBoxEncode"; -const char kNameSlice[] = "Slice"; -const char kNameAddN[] = "AddN"; -const char kNameLess[] = "Less"; -const char kNameGreater[] = "Greater"; -const char kNamePack[] = "Pack"; -const char kNameUnpack[] = "Unpack"; -const char kNameMerge[] = "Merge"; -const char kNameGeSwitch[] = "GeSwitch"; - -const char kNameHuberLoss[] = "HuberLoss"; -const char kNameCumSum[] = "CumSum"; -const char kNameHuberLossGrad[] = "HuberLossGrad"; -const char kNameSparseSoftmaxCrossEntropy[] = "SparseSoftmaxCrossEntropy"; -const char kNameSparseSoftmaxCrossEntropyGrad[] = "SparseSoftmaxCrossEntropyGrad"; -const char kNameTopK[] = "TopK"; -const char kNameSoftmaxGrad[] = "SoftmaxGrad"; -const char kNameMaxPool[] = "MaxPool"; -const char kNameAvgPool[] = "AvgPool"; -const char kNameMaxPoolWithArgmax[] = "MaxPoolWithArgmax"; -const char kNameBatchNorm[] = "BatchNorm"; -const char kNameBatchNormGrad[] = "BatchNormGrad"; -const char kNameROIAlign[] = "ROIAlign"; -const char kNameROIAlignGrad[] = "ROIAlignGrad"; -const char kNameRandomChoiceWithMask[] = "RandomChoiceWithMask"; -const char kNameAbs[] = "Abs"; -const char kNameAbsGrad[] = "AbsGrad"; -const char kNameBinaryCrossEntropy[] = "BinaryCrossEntropy"; -const char kNameBinaryCrossEntropyGrad[] = "BinaryCrossEntropyGrad"; -const char kNameSparseApplyAdagrad[] = "SparseApplyAdagrad"; -const char kNameSparseApplyFtrlD[] = "SparseApplyFtrlD"; -const char kNameApplyProximalAdagrad[] = "ApplyProximalAdagrad"; -const char kNameAcosh[] = "Acosh"; -const char kNameAcoshGrad[] = "AcoshGrad"; -const char kNameFloorMod[] = "FloorMod"; -const char kNameSpaceToDepth[] = "SpaceToDepth"; -const char kNameDepthToSpace[] = "DepthToSpace"; -const char kNameSign[] = "Sign"; -const char kNameLARSUpdate[] = "LARSUpdate"; -const char kNameRound[] = "Round"; -const char kNamePrint[] = 
"Print"; -const char kNameApplyFtrl[] = "ApplyFtrl"; -const char kNameDiag[] = "Diag"; -const char kNameDiagPart[] = "DiagPart"; -const char kNameSpaceToBatch[] = "SpaceToBatch"; -const char kNameBatchToSpace[] = "BatchToSpace"; -const char kNameAtan2[] = "Atan2"; -const char kNameApplyRMSProp[] = "ApplyRMSProp"; -const char kNameApplyCenteredRMSProp[] = "ApplyCenteredRMSProp"; -const char kNameBasicLSTMCell[] = "BasicLSTMCell"; -const char kNameBasicLSTMCellInputGrad[] = "BasicLSTMCellInputGrad"; -const char kNameBasicLSTMCellWeightGrad[] = "BasicLSTMCellWeightGrad"; -const char kNameBasicLSTMCellCStateGrad[] = "BasicLSTMCellCStateGrad"; -const char kNameL2Loss[] = "L2Loss"; -const char kNameCTCLoss[] = "CTCLoss"; -const char kNameRange[] = "Range"; -const char kNameSquareSumAll[] = "SquareSumAll"; -const char kNameAscendQuant[] = "Quant"; -const char kNameAscendDequant[] = "Dequant"; -const char kNameReverseSequence[] = "ReverseSequence"; -const char kNameCase[] = "Case"; - -// -----------------OpAdapter initialization-------------- -std::unordered_map &DfGraphConvertor::get_adpt_map() { - static std::unordered_map adpt_map = { - {string(kNameCustomOp), ADPT_DESC(Operator)}, - {string(kNameIOU), ADPT_DESC(Iou)}, - {string(kNameGreaterEqual), ADPT_DESC(GreaterEqual)}, - {string(kNameSlice), ADPT_DESC(SliceD)}, - {string(kNameApplyMomentum), ADPT_DESC(ApplyMomentum)}, - {string(kNameMaxPool), ADPT_DESC(MaxPool)}, - {string(kNameAvgPool), ADPT_DESC(AvgPool)}, - {string(kNameMaxPoolWithArgmax), ADPT_DESC(MaxPoolWithArgmax)}, - {string(kNameTopK), ADPT_DESC(TopK)}, - {string(kNamePack), ADPT_DESC(Pack)}, - {string(kNameUnpack), ADPT_DESC(Unpack)}, - {string(kNameSplitD), ADPT_DESC(SplitD)}, - {string(kNameAllReduce), ADPT_DESC(HcomAllReduce)}, - {string(kNameBroadcast), ADPT_DESC(HcomBroadcast)}, - {string(kNameAllgather), ADPT_DESC(HcomAllGather)}, - {string(kNameReduceScatter), ADPT_DESC(HcomReduceScatter)}, - {string(kNameMaxPoolGrad), ADPT_DESC(MaxPoolGrad)}, - {string(kNameSqrtGrad), ADPT_DESC(SqrtGrad)}, - {string(kNameReciprocalGrad), ADPT_DESC(ReciprocalGrad)}, - {string(kNameRsqrtGrad), ADPT_DESC(RsqrtGrad)}, - {string(kNameAvgPoolGrad), ADPT_DESC(AvgPoolGrad)}, - {string(kNameMaxPoolGradWithArgmax), ADPT_DESC(MaxPoolGradWithArgmax)}, - {string(kNameExtractImagePatches), ADPT_DESC(ExtractImagePatches)}, - {prim::kPrimAssign->name(), ADPT_DESC(Assign)}, - {prim::kPrimStateSetItem->name(), ADPT_DESC(Assign)}, - {prim::kPrimReluGrad->name(), ADPT_DESC(ReluGrad)}, - {prim::kPrimBiasAddGrad->name(), ADPT_DESC(BiasAddGrad)}, - {prim::kPrimConv2D->name(), ADPT_DESC(Conv2D)}, - {prim::kPrimConv2DBackpropInput->name(), ADPT_DESC(Conv2DBackpropInputD)}, - {prim::kPrimConv2DBackpropFilter->name(), ADPT_DESC(Conv2DBackpropFilterD)}, - {prim::kPrimDepthwiseConv2dNative->name(), ADPT_DESC(DepthwiseConv2D)}, - {prim::kPrimDepthwiseConv2dNativeBackpropFilter->name(), ADPT_DESC(DepthwiseConv2DBackpropFilterD)}, - {prim::kPrimDepthwiseConv2dNativeBackpropInput->name(), ADPT_DESC(DepthwiseConv2DBackpropInputD)}, - {string(kNameBatchNorm), ADPT_DESC(BatchNorm)}, - {string(kNameBatchNormGrad), ADPT_DESC(BatchNormGrad)}, - {string(kNameReshape), ADPT_DESC(Reshape)}, - {string(kNameTransShape), ADPT_DESC(TransShape)}, - {string(kNameFlattenGrad), ADPT_DESC(Reshape)}, - {prim::kPrimFlatten->name(), ADPT_DESC(Flatten)}, - {string(kNameAddN), ADPT_DESC(AddN)}, - {string(kNameLess), ADPT_DESC(Less)}, - {string(kNameSqrt), ADPT_DESC(Sqrt)}, - {string(kNameRsqrt), ADPT_DESC(Rsqrt)}, - {string(kNameSquare), 
ADPT_DESC(Square)}, - {prim::kPrimTanh->name(), ADPT_DESC(Tanh)}, - {prim::kPrimTanhGrad->name(), ADPT_DESC(TanhGrad)}, - {string(kNameResizeNearestNeighborD), ADPT_DESC(ResizeNearestNeighborV2D)}, - {string(kNameResizeNearestNeighborGrad), ADPT_DESC(ResizeNearestNeighborV2Grad)}, - {string(kNameApplyAdam), ADPT_DESC(ApplyAdam)}, - {string(kNameReLU6), ADPT_DESC(Relu6)}, - {string(kNameReLU6Grad), ADPT_DESC(Relu6Grad)}, - {string(kNameElu), ADPT_DESC(Elu)}, - {string(kNameEluGrad), ADPT_DESC(EluGrad)}, - {string(kNameResizeBilinearGrad), ADPT_DESC(ResizeBilinearV2Grad)}, - {string(kNameResizeBilinear), ADPT_DESC(ResizeBilinearV2D)}, - {string(kNameZerosLike), ADPT_DESC(ZerosLike)}, - {string(kNameOnesLike), ADPT_DESC(OnesLike)}, - {string(kNameTensorScatterUpdate), ADPT_DESC(TensorScatterUpdate)}, - {string(kNameScatterUpdate), ADPT_DESC(ScatterUpdate)}, - {string(kNameScatterNdUpdate), ADPT_DESC(ScatterNdUpdate)}, - {string(kNameScatterMax), ADPT_DESC(ScatterMax)}, - {string(kNameNMSWithMask), ADPT_DESC(NMSWithMask)}, - {string(kNameCheckValid), ADPT_DESC(CheckValid)}, - {string(kNameSmoothL1Loss), ADPT_DESC(SmoothL1Loss)}, - {string(kNameSmoothL1LossGrad), ADPT_DESC(SmoothL1LossGrad)}, - {string(kNameSigmoidCrossEntropyWithLogits), ADPT_DESC(SigmoidCrossEntropyWithLogits)}, - {string(kNameSigmoidCrossEntropyWithLogitsGrad), ADPT_DESC(SigmoidCrossEntropyWithLogitsGrad)}, - {string(kNameScatterNdD), ADPT_DESC(ScatterNdD)}, - {string(kNamePadD), ADPT_DESC(PadD)}, - {string(kNameMirrorPad), ADPT_DESC(MirrorPad)}, - {string(kNameMirrorPadGrad), ADPT_DESC(MirrorPadGrad)}, - {string(kNameGatherNd), ADPT_DESC(GatherNd)}, - {string(kNameArgmax), ADPT_DESC(ArgMaxD)}, - {string(kNameArgmin), ADPT_DESC(ArgMinD)}, - {string(kNameArgMaxWithValue), ADPT_DESC(ArgMaxWithValue)}, - {string(kNameArgMinWithValue), ADPT_DESC(ArgMinWithValue)}, - {prim::kPrimReduceSum->name(), ADPT_DESC(ReduceSumD)}, - {prim::kPrimReduceMean->name(), ADPT_DESC(ReduceMeanD)}, - {prim::kPrimReduceAll->name(), ADPT_DESC(ReduceAllD)}, - {prim::kPrimReduceMin->name(), ADPT_DESC(ReduceMinD)}, - {prim::kPrimReduceMax->name(), ADPT_DESC(ReduceMaxD)}, - {string(kNameLARSUpdate), ADPT_DESC(LarsV2Update)}, - {string(kNameReduceProd), ADPT_DESC(ReduceProdD)}, - {string(kNameCumProd), ADPT_DESC(CumprodD)}, - {string(kNameMerge), ADPT_DESC(Merge)}, - {string(kNameGeSwitch), ADPT_DESC(Switch)}, - {string(kNameCumSum), ADPT_DESC(CumsumD)}, - - {prim::kPrimMul->name(), ADPT_DESC(Mul)}, - {string(kNameTile), ADPT_DESC(TileD)}, - {prim::kPrimOneHot->name(), ADPT_DESC(OneHot)}, - - {prim::kPrimGatherV2->name(), ADPT_DESC(GatherV2D)}, - {string(kNameCos), ADPT_DESC(Cos)}, - {string(kNameACos), ADPT_DESC(Acos)}, - {string(kNameACosGrad), ADPT_DESC(AcosGrad)}, - {string(kNameFloor), ADPT_DESC(Floor)}, - {string(kNameFloorDiv), ADPT_DESC(FloorDiv)}, - {string(kNameSin), ADPT_DESC(Sin)}, - {string(kNameExp), ADPT_DESC(Exp)}, - {string(kNameBoundingBoxEncode), ADPT_DESC(BoundingBoxEncode)}, - {string(kNameBoundingBoxDecode), ADPT_DESC(BoundingBoxDecode)}, - - {prim::kPrimCast->name(), ADPT_DESC(Cast)}, - {string(kNameRealDiv), ADPT_DESC(RealDiv)}, - {prim::kPrimNeg->name(), ADPT_DESC(Neg)}, - {prim::kPrimTranspose->name(), ADPT_DESC(TransposeD)}, - {prim::kPrimSub->name(), ADPT_DESC(Sub)}, - {string(kNameReciprocal), ADPT_DESC(Reciprocal)}, - {prim::kPrimDropoutGenMask->name(), ADPT_DESC(DropOutGenMask)}, - {string(kNameAssignAdd), ADPT_DESC(AssignAdd)}, - {string(kNameAssignSub), ADPT_DESC(AssignSub)}, - {prim::kPrimConcat->name(), 
ADPT_DESC(ConcatD)}, - {string(kNamePow), ADPT_DESC(Pow)}, - {string(kNameExp), ADPT_DESC(Exp)}, - {string(kNameEqual), ADPT_DESC(Equal)}, - {string(kNameNotEqual), ADPT_DESC(NotEqual)}, - {string(kNameLog), ADPT_DESC(Log)}, - {string(kNameLogicalAnd), ADPT_DESC(LogicalAnd)}, - {string(kNameLogicalNot), ADPT_DESC(LogicalNot)}, - {string(kNameLogicalOr), ADPT_DESC(LogicalOr)}, - {string(kNameGreater), ADPT_DESC(Greater)}, - {prim::kPrimMaximum->name(), ADPT_DESC(Maximum)}, - {prim::kPrimRelu->name(), ADPT_DESC(Relu)}, - {string(kNamePrelu), ADPT_DESC(PRelu)}, - {string(kNamePreluGrad), ADPT_DESC(PReluGrad)}, - {string(kNameSigmoid), ADPT_DESC(Sigmoid)}, - {string(kNameSigmoidGrad), ADPT_DESC(SigmoidGrad)}, - {string(kNameSGD), ADPT_DESC(SGD)}, - {prim::kPrimLogSoftmaxGrad->name(), ADPT_DESC(LogSoftmaxGrad)}, - {prim::kPrimMaximumGrad->name(), ADPT_DESC(MaximumGrad)}, - {prim::kPrimMinimumGrad->name(), ADPT_DESC(MinimumGrad)}, - {string(kNameL2Normalize), ADPT_DESC(L2Normalize)}, - {string(kNameL2NormalizeGrad), ADPT_DESC(L2NormalizeGrad)}, - - {prim::kPrimMinimum->name(), ADPT_DESC(Minimum)}, - {prim::kPrimSelect->name(), ADPT_DESC(Select)}, - {string(kNameLessEqual), ADPT_DESC(LessEqual)}, - {prim::kPrimLogSoftmax->name(), ADPT_DESC(LogSoftmaxV2)}, - {string(kNameTruncatedNormal), ADPT_DESC(TruncatedNormal)}, - {string(kNameStridedSliceGrad), ADPT_DESC(StridedSliceGrad)}, - {prim::kPrimGelu->name(), ADPT_DESC(Gelu)}, - {prim::kPrimGeluGrad->name(), ADPT_DESC(GeluGrad)}, - {string(kNameStridedSlice), ADPT_DESC(StridedSlice)}, - {prim::kPrimUnsortedSegmentMin->name(), ADPT_DESC(UnsortedSegmentMin)}, - {prim::kPrimUnsortedSegmentSum->name(), ADPT_DESC(UnsortedSegmentSumD)}, - {string(kNameExpandDims), ADPT_DESC(ExpandDims)}, - {prim::kPrimSqueeze->name(), ADPT_DESC(Squeeze)}, - {prim::kPrimLayerNorm->name(), ADPT_DESC(LayerNorm)}, - {prim::kPrimLayerNormGrad->name(), ADPT_DESC(LayerNormGrad)}, - {string(kNameBatchMatMul), ADPT_DESC(BatchMatMul)}, - {string(kNameDropoutDoMask), ADPT_DESC(DropOutDoMask)}, - - {string(kNameNPUGetFloatStatus), ADPT_DESC(NPUGetFloatStatus)}, - {string(kNameNPUAllocFloatStatus), ADPT_DESC(NPUAllocFloatStatus)}, - {string(kNameNPUClearFloatStatus), ADPT_DESC(NPUClearFloatStatus)}, - - {string(kNameRandomChoiceWithMask), ADPT_DESC(RandomChoiceWithMask)}, - {prim::kPrimSoftmaxCrossEntropyWithLogits->name(), ADPT_DESC(SoftmaxCrossEntropyWithLogits)}, - - {prim::kPrimScalarSummary->name(), ADPT_DESC(Summary)}, - {prim::kPrimImageSummary->name(), ADPT_DESC(Summary)}, - {prim::kPrimTensorSummary->name(), ADPT_DESC(Summary)}, - {prim::kPrimHistogramSummary->name(), ADPT_DESC(Summary)}, - {prim::kPrimDebug->name(), ADPT_DESC(Summary)}, - {prim::kPrimTensorAdd->name(), - std::make_shared(std::make_shared>(ExtraAttr({{"mode", MakeValue(1)}})), - std::make_shared>(ExtraAttr({{"mode", MakeValue(1)}})))}, - {string(kNameBiasAdd), ADPT_DESC(BiasAdd)}, - {prim::kPrimRelu->name(), ADPT_DESC(Relu)}, - - {prim::kPrimMatMul->name(), ADPT_DESC(MatMulV2)}, - - {string(kNameConst), ADPT_DESC(Constant, Const)}, - {string(kNameSoftmax), ADPT_DESC(SoftmaxV2)}, - {string(kNameSoftmaxGrad), ADPT_DESC(SoftmaxGrad)}, - {string(kNameParam), ADPT_DESC(Data)}, - {string(kNameROIAlign), ADPT_DESC(ROIAlign)}, - {string(kNameROIAlignGrad), ADPT_DESC(ROIAlignGrad)}, - {string(kNameAbs), ADPT_DESC(Abs)}, - {string(kNameAbsGrad), ADPT_DESC(AbsGrad)}, - {string(kNameBinaryCrossEntropy), ADPT_DESC(BinaryCrossEntropy)}, - {string(kNameBinaryCrossEntropyGrad), ADPT_DESC(BinaryCrossEntropyGrad)}, - 
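The table being deleted here is re-homed in OpAdapterMap (see the op_adapter_map.h include added above), so convert.cc no longer has to instantiate every adapter template in one huge translation unit. A minimal sketch of the likely shape of such a registry, assuming a function-local static map; AdapterRegistry and AdapterDesc are illustrative stand-ins for the real types:

#include <memory>
#include <string>
#include <unordered_map>

struct AdapterDesc { /* train/infer adapter pair elided */ };
using AdapterDescPtr = std::shared_ptr<AdapterDesc>;

class AdapterRegistry {
 public:
  // Meyers singleton: one map shared by every translation unit, so adapter
  // registrations no longer have to live in a single giant .cc file.
  static std::unordered_map<std::string, AdapterDescPtr> &get() {
    static std::unordered_map<std::string, AdapterDescPtr> adpt_map;
    return adpt_map;
  }
};

// Any .cc file can now register its own adapters at startup:
[[maybe_unused]] static const bool kRegistered = [] {
  AdapterRegistry::get()["Softmax"] = std::make_shared<AdapterDesc>();
  return true;
}();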
{string(kNameSparseApplyAdagrad), ADPT_DESC(SparseApplyAdagradD)}, - {string(kNameSparseApplyFtrlD), ADPT_DESC(SparseApplyFtrlD)}, - {string(kNameApplyProximalAdagrad), ADPT_DESC(ApplyProximalAdagradD)}, - {string(kNameAcosh), ADPT_DESC(Acosh)}, - {string(kNameAcoshGrad), ADPT_DESC(AcoshGrad)}, - {string(kNameFloorMod), ADPT_DESC(FloorMod)}, - {string(kNameSpaceToDepth), ADPT_DESC(SpaceToDepth)}, - {string(kNameDepthToSpace), ADPT_DESC(DepthToSpace)}, - {string(kNameSign), ADPT_DESC(Sign)}, - {string(kNameRound), ADPT_DESC(Round)}, - {string(kNameApplyFtrl), ADPT_DESC(ApplyFtrlD)}, - {string(kNameDiag), ADPT_DESC(Diag)}, - {string(kNameDiagPart), ADPT_DESC(DiagPart)}, - {string(kNameSpaceToBatch), ADPT_DESC(SpaceToBatchD)}, - {string(kNameBatchToSpace), ADPT_DESC(BatchToSpaceD)}, - {string(kNameAtan2), ADPT_DESC(Atan2)}, - {string(kNameApplyRMSProp), ADPT_DESC(ApplyRMSPropD)}, - {string(kNameApplyCenteredRMSProp), ADPT_DESC(ApplyCenteredRMSPropD)}, - {string(kNameBasicLSTMCell), ADPT_DESC(BasicLSTMCell)}, - {string(kNameBasicLSTMCellInputGrad), ADPT_DESC(BasicLSTMCellInputGrad)}, - {string(kNameBasicLSTMCellWeightGrad), ADPT_DESC(BasicLSTMCellWeightGrad)}, - {string(kNameBasicLSTMCellCStateGrad), ADPT_DESC(BasicLSTMCellCStateGrad)}, - {string(kNameL2Loss), ADPT_DESC(L2Loss)}, - {string(kNameCTCLoss), ADPT_DESC(CTCLoss)}, - {string(kNameRange), ADPT_DESC(RangeD)}, - {string(kNameSquareSumAll), ADPT_DESC(SquareSumAll)}, - {string(kNameAscendQuant), ADPT_DESC(AscendQuant)}, - {string(kNameAscendDequant), ADPT_DESC(AscendDequant)}, - {string(kNameReverseSequence), ADPT_DESC(ReverseSequence)}, - {string(kNameCase), ADPT_DESC(Case)}}; -#ifdef ENABLE_GE - adpt_map[string(kNamePrint)] = ADPT_DESC(Print); - adpt_map[string(kNameApplyAdam)] = ADPT_DESC(ApplyAdamD); -#endif - return adpt_map; -} +using Variable = ge::op::Variable; +using Constant = ge::op::Constant; +using Assign = ge::op::Assign; +using Data = ge::op::Data; // ---------------implement of DfGraphConvertor------------- PrimType GetCNodeFuncType(const CNodePtr cnode) { @@ -481,18 +92,18 @@ OpAdapterPtr DfGraphConvertor::FindAdapter(const AnfNodePtr node, bool train) { name = GetCNodeTargetFuncName(cnode); } - auto it_adpt = get_adpt_map().find(name); - if (it_adpt != get_adpt_map().end()) { + auto it_adpt = OpAdapterMap::get().find(name); + if (it_adpt != OpAdapterMap::get().end()) { return it_adpt->second->Get(train); } MS_LOG(EXCEPTION) << "Can't find OpAdapter for " << name; } if (node->isa()) { - return get_adpt_map()[kNameConst]->Get(train); + return OpAdapterMap::get()[kNameConst]->Get(train); } if (node->isa()) { - return get_adpt_map()[kNameParam]->Get(train); + return OpAdapterMap::get()[kNameParam]->Get(train); } return OpAdapterPtr(nullptr); } @@ -571,8 +182,8 @@ void DfGraphConvertor::InitLoopVar(std::vector *init_input) { } OpAdapterPtr DfGraphConvertor::FindAdapter(const std::string &name, bool train) { - auto it = get_adpt_map().find(name); - if (it != get_adpt_map().end()) { + auto it = OpAdapterMap::get().find(name); + if (it != OpAdapterMap::get().end()) { return it->second->Get(train); } MS_LOG(EXCEPTION) << "Can't find OpAdapter for " << name; @@ -2086,5 +1697,12 @@ void DfGraphConvertor::DrawCNode(const CNodePtr node, const OpAdapterPtr adpt) { compute_sout_ << "> shape=plaintext]" << endl; } +void DfGraphConvertor::RegisterAdapter(const std::string &name, OpAdapterPtr adpt) { + OpAdapterMap::get()[name] = std::make_shared(adpt); +} +void DfGraphConvertor::RegisterAdapter(const std::string &name, OpAdapterPtr 
train_adpt, OpAdapterPtr infer_adpt) { + OpAdapterMap::get()[name] = std::make_shared(train_adpt, infer_adpt); +} + } // namespace transform } // namespace mindspore diff --git a/mindspore/ccsrc/transform/graph_ir/convert.h b/mindspore/ccsrc/transform/graph_ir/convert.h index 24c9fbff671..24dceb3a50b 100644 --- a/mindspore/ccsrc/transform/graph_ir/convert.h +++ b/mindspore/ccsrc/transform/graph_ir/convert.h @@ -35,7 +35,7 @@ #include "ir/tensor.h" #include "transform/graph_ir/df_graph_manager.h" #include "utils/config_manager.h" -#include "transform/graph_ir/op_declare.h" +#include "transform/graph_ir/op_adapter.h" #include "graph/operator_reg.h" #ifdef OPEN_SOURCE #include "ge/client/ge_api.h" @@ -43,60 +43,12 @@ #include "external/ge/ge_api.h" #endif #include "graph/tensor.h" -#include "ops/all_ops.h" +#include "ops/hcom_ops.h" namespace mindspore { namespace transform { -class OpAdapterDesc { - public: - OpAdapterDesc() : train_(nullptr), infer_(nullptr) {} - - OpAdapterDesc(const OpAdapterPtr &train, const OpAdapterPtr &infer) : train_(train), infer_(infer) {} - - explicit OpAdapterDesc(const OpAdapterPtr &common) : train_(common), infer_(common) {} - - OpAdapterDesc(const OpAdapterDesc &desc) { - this->train_ = desc.train_; - this->infer_ = desc.infer_; - } - - OpAdapterDesc(OpAdapterDesc &&desc) { - this->train_ = desc.train_; - this->infer_ = desc.infer_; - desc.train_ = nullptr; - desc.infer_ = nullptr; - } - - ~OpAdapterDesc() = default; - - OpAdapterPtr Get(bool train) const { return train ? train_ : infer_; } - - OpAdapterDesc &operator=(const OpAdapterDesc &desc) { - if (this != &desc) { - this->train_ = desc.train_; - this->infer_ = desc.infer_; - } - return *this; - } - - OpAdapterDesc &operator=(OpAdapterDesc &&desc) { - if (this != &desc) { - this->train_ = desc.train_; - this->infer_ = desc.infer_; - desc.train_ = nullptr; - desc.infer_ = nullptr; - } - return *this; - } - - private: - OpAdapterPtr train_; - OpAdapterPtr infer_; -}; - -using OpAdapterDescPtr = std::shared_ptr; using TensorOrderMap = std::map>; - +using HcomBroadcast = ge::op::HcomBroadcast; class DfGraphConvertor { public: explicit DfGraphConvertor(const AnfGraphPtr &anf_graph) @@ -118,12 +70,8 @@ class DfGraphConvertor { ~DfGraphConvertor() {} - static void RegisterAdapter(const std::string &name, OpAdapterPtr adpt) { - get_adpt_map()[name] = std::make_shared(adpt); - } - static void RegisterAdapter(const std::string &name, OpAdapterPtr train_adpt, OpAdapterPtr infer_adpt) { - get_adpt_map()[name] = std::make_shared(train_adpt, infer_adpt); - } + static void RegisterAdapter(const std::string &name, OpAdapterPtr adpt); + static void RegisterAdapter(const std::string &name, OpAdapterPtr train_adpt, OpAdapterPtr infer_adpt); void DrawComputeGraph(const std::string &name) { std::ofstream fout(name); @@ -174,7 +122,6 @@ class DfGraphConvertor { static OpAdapterPtr FindAdapter(AnfNodePtr node, bool train = false); int ErrCode() const { return static_cast(error_); } - static std::unordered_map &get_adpt_map(); bool is_training() const { return training_; } void set_training(bool is_training) { training_ = is_training; } diff --git a/mindspore/ccsrc/transform/graph_ir/df_graph_manager.cc b/mindspore/ccsrc/transform/graph_ir/df_graph_manager.cc index 29985d6784c..ca8f0286692 100644 --- a/mindspore/ccsrc/transform/graph_ir/df_graph_manager.cc +++ b/mindspore/ccsrc/transform/graph_ir/df_graph_manager.cc @@ -16,15 +16,10 @@ #include "transform/graph_ir/df_graph_manager.h" -#include -#include -#include #include 
-#include "securec/include/securec.h" #include "pipeline/jit/parse/python_adapter.h" #include "pipeline/jit/pipeline.h" -#include "utils/config_manager.h" #ifndef NO_DLIB #include "tdt/tsd_client.h" #endif diff --git a/mindspore/ccsrc/transform/graph_ir/graph_builder.cc b/mindspore/ccsrc/transform/graph_ir/graph_builder.cc index 6ee45feef8c..d807d0f2fc6 100644 --- a/mindspore/ccsrc/transform/graph_ir/graph_builder.cc +++ b/mindspore/ccsrc/transform/graph_ir/graph_builder.cc @@ -17,7 +17,8 @@ #include "transform/graph_ir/graph_builder.h" #include -#include + +#include "ops/math_ops.h" namespace mindspore { namespace transform { diff --git a/mindspore/ccsrc/transform/graph_ir/graph_runner.cc b/mindspore/ccsrc/transform/graph_ir/graph_runner.cc index d20c49a3818..2d3eb028aa9 100644 --- a/mindspore/ccsrc/transform/graph_ir/graph_runner.cc +++ b/mindspore/ccsrc/transform/graph_ir/graph_runner.cc @@ -21,7 +21,6 @@ #include "utils/log_adapter.h" #include "utils/config_manager.h" #include "sys/time.h" -#include "utils/callbacks.h" #include "utils/utils.h" #include "./common.h" #ifdef ENABLE_GE diff --git a/mindspore/ccsrc/transform/graph_ir/op_adapter.cc b/mindspore/ccsrc/transform/graph_ir/op_adapter.cc new file mode 100644 index 00000000000..b72230e2c74 --- /dev/null +++ b/mindspore/ccsrc/transform/graph_ir/op_adapter.cc @@ -0,0 +1,664 @@ +/** + * Copyright 2019 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "transform/graph_ir/op_adapter.h" + +namespace mindspore { +namespace transform { +static uint32_t CustomInferFunc(const Operator &) { return 0; } + +bool OpAdapterImpl::IsCustomOp(const OperatorPtr &op) { + MS_EXCEPTION_IF_NULL(op); + auto it = cus_input_map_->find(op->GetOpType()); + if (it == cus_input_map_->end()) { + return false; + } + return true; +} + +Status OpAdapterImpl::GenerateCustomOpInputMap(const CusOperatorPtr &op, const PrimitivePtr &prim) { + MS_EXCEPTION_IF_NULL(op); + MS_EXCEPTION_IF_NULL(prim); + // Create the map of custom op from input index to input name. + std::unordered_map input_map; + auto value = prim->GetAttr("input_names"); + if (value == nullptr) { + (*cus_output_map_)[prim->name()] = input_map; + return NOT_FOUND; + } + + auto input_names = GetValue>(value); + for (size_t i = 0; i < input_names.size(); ++i) { + // input_map begin form 1 + input_map[i + 1] = input_names[i]; + op->CustomInputRegister(input_names[i]); + } + + if (cus_input_map_->find(prim->name()) == cus_input_map_->end()) { + (*cus_input_map_)[prim->name()] = input_map; + } + return SUCCESS; +} + +Status OpAdapterImpl::GenerateCustomOpOutputMap(const CusOperatorPtr &op, const PrimitivePtr &prim) { + MS_EXCEPTION_IF_NULL(op); + MS_EXCEPTION_IF_NULL(prim); + // Create the map of custom op from output index to output name. 
+ std::unordered_map output_map; + auto value = prim->GetAttr("output_names"); + if (value == nullptr) { + // generate a empty output_map for it + (*cus_output_map_)[prim->name()] = output_map; + return NOT_FOUND; + } + + auto output_names = GetValue>(value); + for (size_t i = 0; i < output_names.size(); ++i) { + // output_map begin form 0 + output_map[i] = output_names[i]; + op->CustomOutputRegister(output_names[i]); + } + + if (cus_output_map_->find(prim->name()) == cus_output_map_->end()) { + (*cus_output_map_)[prim->name()] = output_map; + } + return SUCCESS; +} + +OperatorPtr OpAdapterImpl::GenerateCustomOp(const AnfNodePtr anf) { + MS_EXCEPTION_IF_NULL(anf); + auto node = anf->cast(); + if (node == nullptr) { + return nullptr; + } + + if (node->inputs().empty()) { + MS_LOG(EXCEPTION) << "length of node inputs is empty"; + } + + auto prim = GetValueNode(node->inputs()[0]); + MS_EXCEPTION_IF_NULL(prim); + auto op = std::make_shared(node->fullname_with_scope(), prim->name()); + if (GenerateCustomOpInputMap(op, prim) != SUCCESS) { + MS_LOG(WARNING) << "Custom op node has no input_names, op[" << prim->name() << "]."; + } + + if (GenerateCustomOpOutputMap(op, prim) != SUCCESS) { + MS_LOG(WARNING) << "Custom op node has no output_names, op[" << prim->name() << "]."; + } + + op->CustomInferFuncRegister(CustomInferFunc); + + return op; +} + +Status OpAdapterImpl::SetOpSubgraphFunc(const OperatorPtr &op, int index, + const std::shared_ptr> &branches) { + MS_EXCEPTION_IF_NULL(op); + auto it = dyn_subgraph_map_.find(index); + if (it != dyn_subgraph_map_.end()) { + auto size = branches->size(); + it->second.create_dyn_subgraph(op, static_cast(size)); + for (size_t i = 0; i < size; i++) { + it->second.set_subgraph(op, static_cast(i), std::make_shared((*branches)[i])); + } + return SUCCESS; + } + return NOT_FOUND; +} + +Status OpAdapterImpl::SetCustomOpInput(const CusOperatorPtr &op, int index, const OperatorPtr &input) { + MS_EXCEPTION_IF_NULL(op); + MS_EXCEPTION_IF_NULL(input); + auto it = cus_input_map_->find(op->GetOpType()); + if (it == cus_input_map_->end()) { + return NOT_FOUND; + } + std::unordered_map &input_map = it->second; + + if ((input_map.find(index) != input_map.end())) { + MS_LOG(DEBUG) << "Link op " << input->GetName() << " to " << op->GetName() << ":" << input_map[index]; + (void)op->SetInput(input_map[index], *input); + return SUCCESS; + } + return NOT_FOUND; +} + +Status OpAdapterImpl::SetNormalOpInput(const OperatorPtr &op, int index, const OperatorPtr &input) { + MS_EXCEPTION_IF_NULL(op); + auto it = input_map_.find(index); + if (it != input_map_.end()) { + MS_EXCEPTION_IF_NULL(input); + MS_LOG(DEBUG) << "Link op " << input->GetName() << " to " << op->GetName() << ":" << it->second.name; + it->second.set_op(op, input); + return SUCCESS; + } + return NOT_FOUND; +} + +int OpAdapterImpl::setInput(const OperatorPtr &op, int index, const OperatorPtr &input) { + if (IsCustomOp(op)) { + auto cus_op = std::dynamic_pointer_cast(op); + return static_cast(SetCustomOpInput(cus_op, index, input)); + } else { + return static_cast(SetNormalOpInput(op, index, input)); + } +} + +Status OpAdapterImpl::SetCustomOpInput(const CusOperatorPtr &op, int index, const OutHandler &handle) { + MS_EXCEPTION_IF_NULL(op); + auto it = cus_input_map_->find(op->GetOpType()); + if (it == cus_input_map_->end()) { + return NOT_FOUND; + } + + std::unordered_map &input_map = it->second; + if ((handle.op != nullptr) && (input_map.find(index) != input_map.end())) { + if (handle.out.empty()) { + MS_LOG(DEBUG) << 
"Link op " << handle.op->GetName() << " to " << op->GetName() << ":" << input_map[index]; + (void)op->SetInput(input_map[index], *(handle.op)); + } else { + MS_LOG(DEBUG) << "Link op " << handle.op->GetName() << ":" << handle.out << " to " << op->GetName() << ":" + << input_map[index]; + (void)op->SetInput(input_map[index], *(handle.op), handle.out); + } + return SUCCESS; + } + return NOT_FOUND; +} + +Status OpAdapterImpl::SetNormalOpInput(const OperatorPtr &op, int index, const OutHandler &handle) { + MS_EXCEPTION_IF_NULL(op); + auto it = input_map_.find(index); + if ((handle.op != nullptr) && (it != input_map_.end())) { + if (handle.out.empty()) { + MS_LOG(DEBUG) << "Link op " << handle.op->GetName() << " to " << op->GetName() << ":" << it->second.name; + it->second.set_op(op, handle.op); + } else { + MS_LOG(DEBUG) << "Link op " << handle.op->GetName() << ":" << handle.out << " to " << op->GetName() << ":" + << it->second.name; + it->second.set_handle(op, handle); + } + return SUCCESS; + } + return NOT_FOUND; +} + +int OpAdapterImpl::setInput(const OperatorPtr &op, int index, const OutHandler &handle) { + if (IsCustomOp(op)) { + auto cus_op = std::dynamic_pointer_cast(op); + return static_cast(SetCustomOpInput(cus_op, index, handle)); + } else { + return static_cast(SetNormalOpInput(op, index, handle)); + } +} + +int OpAdapterImpl::setInput(const OperatorPtr &op, int index, + const std::shared_ptr> &handler_vec) { + MS_EXCEPTION_IF_NULL(handler_vec); + if (IsCustomOp(op)) { + MS_LOG(ERROR) << "Custom Op do not support dynamic input"; + return static_cast(FAILED); + } + MS_EXCEPTION_IF_NULL(op); + auto it = dyn_input_map_.find(index); + if (it != dyn_input_map_.end()) { + it->second.create_dyn_input(op, static_cast(handler_vec->size())); + for (unsigned int i = 0; i < handler_vec->size(); ++i) { + OutHandler h = (*handler_vec)[i]; + MS_EXCEPTION_IF_NULL(h.op); + if (h.out.empty()) { + MS_LOG(DEBUG) << "Link op " << h.op->GetName() << " to " << op->GetName() << ":" << it->second.name; + it->second.set_op(op, (i) /* index start from 0 */, h.op); + } else { + MS_LOG(DEBUG) << "Link op " << h.op->GetName() << ":" << h.out << " to " << op->GetName() << ":" + << it->second.name; + it->second.set_handle(op, i, h); + } + } + return 0; + } + return static_cast(NOT_FOUND); +} + +OutHandler OpAdapterImpl::getOutput(const OperatorPtr &op, int index) { + MS_EXCEPTION_IF_NULL(op); + if (IsCustomOp(op)) { + return getCustomOutput(op, index); + } + return getNormalOutput(op, index); +} + +OutHandler OpAdapterImpl::getCustomOutput(const OperatorPtr &op, int index) { + MS_EXCEPTION_IF_NULL(op); + auto it = cus_output_map_->find(op->GetOpType()); + if (it == cus_output_map_->end()) { + MS_LOG(ERROR) << "OpAdpator(" << op->GetName() << ") has both OUTPUT is not supported!"; + return OutHandler(); + } + + std::unordered_map &output_map = it->second; + + if ((output_map.find(index) != output_map.end())) { + return OutHandler(op, output_map[index]); + } + MS_LOG(ERROR) << "OpAdpator(" << op->GetName() << ") has no OUTPUT index(" << index << ")!"; + return OutHandler(); +} + +OutHandler OpAdapterImpl::getNormalOutput(const OperatorPtr &op, int index) { + MS_EXCEPTION_IF_NULL(op); + if (!dyn_output_map_.empty() && !output_map_.empty()) { + MS_LOG(ERROR) << "OpAdpator(" << op->GetName() << ") has both OUTPUT and DYN_OUTPUT is not supported!"; + return OutHandler(); + } + auto it = output_map_.find(index); + if (it != output_map_.end()) { + return OutHandler(op, it->second.name); + } else if 
(!dyn_output_map_.empty()) { + return OutHandler(op, dyn_output_map_.begin()->second.name + std::to_string(index)); + } else { + MS_LOG(ERROR) << "OpAdpator(" << op->GetName() << ") has no OUTPUT and DYN_OUTPUT index(" << index << ")!"; + return OutHandler(); + } +} + +Status OpAdapterImpl::UpdateSingleOutputDesc(const OperatorPtr &op, const abstract::BaseShapePtr &shp, + const TypePtr &type) { + MS_EXCEPTION_IF_NULL(type); + std::string format = "NCHW"; + if (op->GetOpType() == kExtractImagePatchesOpName) { + format = "NHWC"; + } + + auto desc = CreateOutputDesc(dyn_cast(shp), type, format); + if (desc == nullptr) { + MS_LOG(ERROR) << "Update output descriptor failed!"; + return FAILED; + } + + if (IsCustomOp(op)) { + if (cus_output_map_->find(op->GetOpType()) == cus_output_map_->end() || + ((*cus_output_map_)[op->GetOpType()].empty())) { + MS_LOG(ERROR) << "This op does not create custom output map"; + return FAILED; + } + auto cus_op = std::dynamic_pointer_cast(op); + MS_EXCEPTION_IF_NULL(cus_op); + std::unordered_map output_map = (*cus_output_map_)[op->GetOpType()]; + (void)cus_op->UpdateOutputDesc(output_map[0], *desc); + } else { + if (output_map_.empty()) { + MS_LOG(INFO) << "This op does not have output map"; + return FAILED; + } + output_map_.begin()->second.update_out_desc(op, *desc); + } + return SUCCESS; +} + +size_t OpAdapterImpl::GetCustomOpOutputSize(const CusOperatorPtr &cus_op) { + MS_EXCEPTION_IF_NULL(cus_op); + if (cus_output_map_->find(cus_op->GetOpType()) == cus_output_map_->end()) { + MS_LOG(ERROR) << "This op does not create custom output map"; + return 0; + } + size_t output_size = (*cus_output_map_)[cus_op->GetOpType()].size(); + return output_size; +} + +std::shared_ptr OpAdapterImpl::CreateOutputDesc(const abstract::ShapePtr &shape_ptr, const TypePtr &type, + const std::string &format) { + if (shape_ptr == nullptr) { + MS_LOG(ERROR) << "Shape ptr is nullptr"; + return nullptr; + } + + if (type == nullptr) { + MS_LOG(ERROR) << "Type ptr is nullptr"; + return nullptr; + } + + TypeId me_type = type->type_id(); + if (kObjectTypeTensorType == me_type) { + me_type = dyn_cast(type)->element()->type_id(); + } + auto desc = TransformUtil::GetGeTensorDesc(shape_ptr->shape(), me_type, format); + return desc; +} + +Status OpAdapterImpl::UpdateMultiOutputDesc(const OperatorPtr &op, const abstract::BaseShapePtr &shp, + const TypePtr &type) { + auto tuple_shp = dyn_cast(shp); + MS_EXCEPTION_IF_NULL(tuple_shp); + + size_t output_size = 0; + bool is_custom_op = IsCustomOp(op); + if (is_custom_op) { + output_size = GetCustomOpOutputSize(std::dynamic_pointer_cast(op)); + } else { + output_size = output_map_.size(); + } + + if (output_size == 0) { + MS_LOG(INFO) << "This op does not have output map"; + return FAILED; + } + + if (output_size != tuple_shp->shape().size()) { + MS_LOG(ERROR) << "output_map is not equal tuple_shape size"; + return FAILED; + } + std::string format = "NCHW"; + if (op->GetOpType() == kTopKOpName) { + format = "NHWC"; + } + for (size_t i = 0; i < tuple_shp->shape().size(); ++i) { + auto tuple_type = dyn_cast(type); + MS_EXCEPTION_IF_NULL(tuple_type); + TypePtr type_elem = tuple_type->elements()[i]; + + auto desc = CreateOutputDesc(dyn_cast(tuple_shp->shape()[i]), type_elem, format); + if (desc == nullptr) { + MS_LOG(ERROR) << "Create output descriptor failed!"; + return FAILED; + } + + if (is_custom_op) { + (void)std::dynamic_pointer_cast(op)->UpdateOutputDesc((*cus_output_map_)[op->GetOpType()][i], + *desc); + } else { + auto it = output_map_.find(i); + if 
(it != output_map_.end()) { + it->second.update_out_desc(op, *desc); + } + } + } + return SUCCESS; +} + +std::shared_ptr OpAdapterImpl::CreateNodeDesc(const AnfNodePtr &node) { + MS_EXCEPTION_IF_NULL(node); + TypeId me_type = node->Type()->type_id(); + if (kObjectTypeTensorType == me_type) { + me_type = dyn_cast(node->Type())->element()->type_id(); + } + if (me_type <= kNumberTypeBegin || me_type >= kNumberTypeEnd) { + return nullptr; + } + + std::vector shape; + auto shape_ptr = dyn_cast(node->Shape()); + if (nullptr != shape_ptr) { + shape = shape_ptr->shape(); + } + + auto desc = TransformUtil::GetGeTensorDesc(shape, me_type, "NCHW"); + if (desc == nullptr) { + MS_LOG(ERROR) << "Update output descriptor failed!"; + return nullptr; + } + return desc; +} + +void OpAdapterImpl::UpdateNormalOpInputDesc(const OperatorPtr &op, const AnfNodePtr &node) { + if (op == nullptr) { + MS_LOG(ERROR) << "op is nullptr"; + return; + } + MS_EXCEPTION_IF_NULL(node); + + auto inputs = node->cast()->inputs(); + for (size_t i = 1; i < inputs.size(); ++i) { + auto it = input_map_.find(i); + if (it != input_map_.end()) { + auto desc = CreateNodeDesc(inputs[i]); + if (desc == nullptr) { + continue; + } + if (op->GetOpType() == kExtractImagePatchesOpName) { + desc->SetFormat(ge::Format::FORMAT_NHWC); + } + it->second.update_input_desc(op, *desc); + } + } +} + +void OpAdapterImpl::UpdateCustomOpInputDesc(const CusOperatorPtr &op, const AnfNodePtr &node) { + if (op == nullptr) { + MS_LOG(ERROR) << "op is nullptr"; + return; + } + MS_EXCEPTION_IF_NULL(node); + + if (cus_input_map_->find(op->GetOpType()) == cus_input_map_->end() || ((*cus_input_map_)[op->GetOpType()].empty())) { + MS_LOG(ERROR) << "This op does not create custom input map"; + return; + } + + std::unordered_map &input_map = (*cus_input_map_)[op->GetOpType()]; + auto inputs = node->cast()->inputs(); + for (size_t i = 1; i < inputs.size(); ++i) { + if (input_map.find(i) != input_map.end()) { + auto desc = CreateNodeDesc(inputs[i]); + if (desc == nullptr) { + continue; + } + (void)op->UpdateInputDesc(input_map[i], *desc); + } + } +} + +void OpAdapterImpl::updateInputDesc(const OperatorPtr &op, const AnfNodePtr &node) { + MS_EXCEPTION_IF_NULL(op); + MS_EXCEPTION_IF_NULL(node); + if (IsCustomOp(op)) { + auto cus_op = std::dynamic_pointer_cast(op); + UpdateCustomOpInputDesc(cus_op, node); + } else { + UpdateNormalOpInputDesc(op, node); + } +} + +void OpAdapterImpl::updateOutputDesc(const OperatorPtr &op, const abstract::BaseShapePtr &shp, const TypePtr &type, + const AnfNodePtr &node) { + if (op == nullptr) { + MS_LOG(ERROR) << "op is nullptr"; + return; + } + MS_EXCEPTION_IF_NULL(node); + MS_LOG(INFO) << "Op name is " << op->GetName(); + + auto normal_shape_ptr = dyn_cast(shp); + auto no_shape_ptr = dyn_cast(shp); + + if ((nullptr != normal_shape_ptr) || (nullptr != no_shape_ptr)) { + if (UpdateSingleOutputDesc(op, shp, type) != SUCCESS) { + return; + } + } else if (nullptr != dyn_cast(shp)) { + if (UpdateMultiOutputDesc(op, shp, type) != SUCCESS) { + return; + } + } else { + MS_LOG(WARNING) << "Update output desc failed, unknown output shape type"; + return; + } + MS_EXCEPTION_IF_NULL(node); + if (!node->isa()) { + return; + } + + // Need to update input_desc while the output_desc is updated + updateInputDesc(op, node); +} + +int OpAdapterImpl::setAttr(const OperatorPtr &op, const std::string &attr_key, const ValuePtr &attr_value) { + auto it = attr_map_.find(attr_key); + if (it != attr_map_.end()) { + // switch case for each avalilable attribute type + 
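The attr_map_ lookup that setAttr performs above is essentially a dispatch table from framework attribute names to per-attribute setter callbacks. A simplified sketch of that shape, where Op, AttrValue, and AttrSetter are illustrative stand-ins for the real transform types:

#include <functional>
#include <string>
#include <unordered_map>

using AttrValue = std::string;
struct Op { std::unordered_map<std::string, AttrValue> attrs; };
using AttrSetter = std::function<void(Op *, const AttrValue &)>;

int SetAttr(Op *op, const std::string &key, const AttrValue &value,
            const std::unordered_map<std::string, AttrSetter> &attr_map) {
  auto it = attr_map.find(key);
  if (it == attr_map.end()) {
    return -1;  // plays the role of NOT_FOUND in the real code
  }
  it->second(op, value);  // delegate to the per-attribute setter
  return 0;
}

// Example registration; the real attr_map_ is generated per adapter type:
//   attr_map["data_format"] = [](Op *op, const AttrValue &v) { op->attrs["format"] = v; };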
MS_LOG(INFO) << "Set attr: " << attr_key << "(" << it->second.name << "), value: " << attr_value->ToString(); + adpt_->AddAttrToDrawGraph(attr_key + std::string("=") + attr_value->ToString()); + it->second.set_attr(op, attr_value); + return 0; + } + return static_cast(NOT_FOUND); +} + +int OpAdapterImpl::SetCustomOpAttr(const CusOperatorPtr &op, const PrimitivePtr &prim) { + enum ValueType { + SINGLE_VALUE = 0, + SEQUEUE_VALUE, + UNKNOWN_VALUE, + }; + + MS_EXCEPTION_IF_NULL(prim); + MS_EXCEPTION_IF_NULL(op); + + ValueType value_type = SINGLE_VALUE; + for (auto item : prim->attrs()) { + if (item.second->isa()) { + (void)op->SetAttr(item.first, GetValue(item.second)); + } else if (item.second->isa()) { + (void)op->SetAttr(item.first, GetValue(item.second)); + } else if (item.second->isa()) { + (void)op->SetAttr(item.first, GetValue(item.second)); + } else if (item.second->isa()) { + (void)op->SetAttr(item.first, GetValue(item.second)); + } else if (item.second->isa()) { + value_type = SEQUEUE_VALUE; + auto val_seq = item.second->cast(); + if ((*val_seq)[0]->isa()) { + (void)op->SetAttr(item.first, GetValue>(item.second)); + } else if ((*val_seq)[0]->isa()) { + (void)op->SetAttr(item.first, GetValue>(item.second)); + } else if ((*val_seq)[0]->isa()) { + (void)op->SetAttr(item.first, GetValue>(item.second)); + } else if ((*val_seq)[0]->isa()) { + (void)op->SetAttr(item.first, GetValue>(item.second)); + } else { + MS_LOG(EXCEPTION) << "Unsupported custom attribute type in adaptor, prim name: " << prim->name() + << ", attr name: " << item.first << ", value: " << item.second->ToString(); + } + } else { + MS_LOG(WARNING) << "Unsupported custom attribute type in adaptor, prim name: " << prim->name() + << ", attr name: " << item.first << ", value: " << item.second->ToString(); + return static_cast(NOT_FOUND); + } + + if (value_type == SINGLE_VALUE) { + adpt_->AddAttrToDrawGraph(item.first + std::string("=") + item.second->ToString()); + } else if (value_type == SEQUEUE_VALUE) { + adpt_->AddAttrToDrawGraph(item.first + std::string("=") + "[...]"); + } + } + return 0; +} + +int OpAdapterImpl::SetNormalOpAttr(const OperatorPtr &op, const PrimitivePtr &prim) { + MS_EXCEPTION_IF_NULL(prim); + MS_EXCEPTION_IF_NULL(op); + for (auto &it : attr_map_) { + auto value = prim->GetAttr(it.first); + if (value != nullptr) { + // set attr from primitive + int ret = setAttr(op, it.first, value); + if (ret) { + return ret; + } + } else { + // set attr from extra_attr + auto it_extra = extra_attr_->find(it.first); + if (it_extra != extra_attr_->end()) { + int ret = setAttr(op, it.first, it_extra->second); + if (ret) { + return ret; + } + } + } + } + return 0; +} + +int OpAdapterImpl::setAttr(const OperatorPtr &op, const PrimitivePtr &prim) { + int ret = 0; + if (IsCustomPrim(prim)) { + auto cus_op = std::dynamic_pointer_cast(op); + ret = SetCustomOpAttr(cus_op, prim); + } else { + ret = SetNormalOpAttr(op, prim); + } + return ret; +} + +int OpAdapterImpl::setAttr(const OperatorPtr &op, const AnfNodePtr &node) { + // no attribute for lonely node + MS_EXCEPTION_IF_NULL(node); + if (!node->isa()) { + return 0; + } + + auto cnode = node->cast(); + if (cnode == nullptr) { + return 0; + } + + auto &inputs = cnode->inputs(); + if (inputs.empty()) { + return 0; + } + + // get Attr T from abstract of anfnode first, + // if attr "T" appears in primitive, the primitive T will cover this one + if (attr_map_.find("T") != attr_map_.end()) { + // get dtype from inputs[1], if the node has no inputs, set the attr T with output dtype + 
TypePtr type; + if (inputs.size() > 1) { + type = inputs[1]->Type(); + } else { + type = node->Type(); + } + if (type != nullptr) { + (void)setAttr(op, "T", MakeValue(type)); + } + } + + // set attr from primitive and ExtraAttr + if (IsValueNode(inputs[0])) { + // set attr from primitive + PrimitivePtr prim = GetValueNode(inputs[0]); + int ret = setAttr(op, prim); + if (ret != 0) { + return ret; + } + } + + // set attr from const input + for (auto &it : input_attr_map_) { + if (inputs.size() <= it.first || !inputs[it.first]->isa()) { + continue; + } + auto const_value = GetValueNode(inputs[it.first]); + MS_LOG(INFO) << "Set attr: input_" << it.first << "(" << it.second.name << "), value: " << const_value->ToString(); + if (const_value->isa()) { + continue; + } + adpt_->AddAttrToDrawGraph(it.second.name + std::string("=") + const_value->ToString()); + it.second.set_attr(op, const_value); + } + return 0; +} +} // namespace transform +} // namespace mindspore diff --git a/mindspore/ccsrc/transform/graph_ir/op_adapter.h b/mindspore/ccsrc/transform/graph_ir/op_adapter.h index 52dac483ed0..b02fe1886c3 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_adapter.h +++ b/mindspore/ccsrc/transform/graph_ir/op_adapter.h @@ -26,101 +26,119 @@ #include "utils/utils.h" namespace mindspore { namespace transform { -static uint32_t CustomInferFunc(const Operator &) { return 0; } +class OpAdapterImpl { + public: + OpAdapterImpl(const std::unordered_map &input_map, + const std::unordered_map &dyn_input_map, + const std::unordered_map &output_map, + const std::unordered_map &dyn_output_map, + const std::unordered_map &dyn_subgraph_map, + const std::unordered_map &attr_map, + const std::unordered_map &enum_map, + const std::unordered_map &input_attr_map, + std::unordered_map> *cus_input_map, + std::unordered_map> *cus_output_map, + std::unordered_map *extra_attr, + std::unordered_map *name_counts, BaseOpAdapter *adpt) + : input_map_(input_map), + dyn_input_map_(dyn_input_map), + output_map_(output_map), + dyn_output_map_(dyn_output_map), + dyn_subgraph_map_(dyn_subgraph_map), + attr_map_(attr_map), + enum_map_(enum_map), + input_attr_map_(input_attr_map), + cus_input_map_(cus_input_map), + cus_output_map_(cus_output_map), + extra_attr_(extra_attr), + name_counts_(name_counts), + adpt_(adpt) { + MS_EXCEPTION_IF_NULL(cus_input_map_); + MS_EXCEPTION_IF_NULL(cus_output_map_); + MS_EXCEPTION_IF_NULL(extra_attr_); + MS_EXCEPTION_IF_NULL(name_counts_); + MS_EXCEPTION_IF_NULL(adpt_); + } + ~OpAdapterImpl() {} + bool IsCustomOp(const OperatorPtr &op); + Status GenerateCustomOpInputMap(const CusOperatorPtr &op, const PrimitivePtr &prim); + Status GenerateCustomOpOutputMap(const CusOperatorPtr &op, const PrimitivePtr &prim); + OperatorPtr GenerateCustomOp(const AnfNodePtr anf); + Status SetOpSubgraphFunc(const OperatorPtr &op, int index, const std::shared_ptr> &branches); + Status SetCustomOpInput(const CusOperatorPtr &op, int index, const OperatorPtr &input); + Status SetNormalOpInput(const OperatorPtr &op, int index, const OperatorPtr &input); + int setInput(const OperatorPtr &op, int index, const OperatorPtr &input); + Status SetCustomOpInput(const CusOperatorPtr &op, int index, const OutHandler &handle); + Status SetNormalOpInput(const OperatorPtr &op, int index, const OutHandler &handle); + int setInput(const OperatorPtr &op, int index, const OutHandler &handle); + int setInput(const OperatorPtr &op, int index, const std::shared_ptr> &handler_vec); + OutHandler getOutput(const OperatorPtr &op, int index); + 
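The OpAdapterImpl declaration added here is the core compile-time optimization of this file: the method bodies shown earlier in op_adapter.cc are template-independent, so they compile exactly once, and the OpAdapter template below shrinks to thin forwarding wrappers that are re-instantiated cheaply per operator type. A minimal sketch of the shape of that split, with Widget/WidgetImpl as illustrative names:

#include <memory>

class WidgetImpl {  // non-template: compiled once, in a single .cc
 public:
  int DoWork(int x) { return x * 2; }
};

template <typename T>
class Widget {  // template: instantiated per T, but now tiny
 public:
  Widget() : impl_(std::make_shared<WidgetImpl>()) {}
  int DoWork(int x) { return impl_->DoWork(x); }  // forward only

 private:
  std::shared_ptr<WidgetImpl> impl_;
};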
OutHandler getCustomOutput(const OperatorPtr &op, int index); + OutHandler getNormalOutput(const OperatorPtr &op, int index); + Status UpdateSingleOutputDesc(const OperatorPtr &op, const abstract::BaseShapePtr &shp, const TypePtr &type); + size_t GetCustomOpOutputSize(const CusOperatorPtr &cus_op); + std::shared_ptr CreateOutputDesc(const abstract::ShapePtr &shape_ptr, const TypePtr &type, + const std::string &format); + Status UpdateMultiOutputDesc(const OperatorPtr &op, const abstract::BaseShapePtr &shp, const TypePtr &type); + std::shared_ptr CreateNodeDesc(const AnfNodePtr &node); + void UpdateNormalOpInputDesc(const OperatorPtr &op, const AnfNodePtr &node); + void UpdateCustomOpInputDesc(const CusOperatorPtr &op, const AnfNodePtr &node); + void updateInputDesc(const OperatorPtr &op, const AnfNodePtr &node); + void updateOutputDesc(const OperatorPtr &op, const abstract::BaseShapePtr &shp, const TypePtr &type, + const AnfNodePtr &node); + int setAttr(const OperatorPtr &op, const std::string &attr_key, const ValuePtr &attr_value); + int SetCustomOpAttr(const CusOperatorPtr &op, const PrimitivePtr &prim); + int SetNormalOpAttr(const OperatorPtr &op, const PrimitivePtr &prim); + int setAttr(const OperatorPtr &op, const PrimitivePtr &prim); + int setAttr(const OperatorPtr &op, const AnfNodePtr &node); + + private: + const std::unordered_map &input_map_; + const std::unordered_map &dyn_input_map_; + const std::unordered_map &output_map_; + const std::unordered_map &dyn_output_map_; + const std::unordered_map &dyn_subgraph_map_; + const std::unordered_map &attr_map_; + const std::unordered_map &enum_map_; + const std::unordered_map &input_attr_map_; + std::unordered_map> *const cus_input_map_; + std::unordered_map> *const cus_output_map_; + std::unordered_map *const extra_attr_; + std::unordered_map *const name_counts_; + BaseOpAdapter *const adpt_; +}; template class OpAdapter : public BaseOpAdapter { public: using OpType = T; - OpAdapter() {} - explicit OpAdapter(const ExtraAttr &extra_attr) : extra_attr_(extra_attr) {} + OpAdapter() + : impl_(std::make_shared(input_map_, dyn_input_map_, output_map_, dyn_output_map_, + dyn_subgraph_map_, attr_map_, enum_map_, input_attr_map_, &cus_input_map_, + &cus_output_map_, &extra_attr_, &name_counts_, this)) { + MS_EXCEPTION_IF_NULL(impl_); + } + explicit OpAdapter(const ExtraAttr &extra_attr) + : extra_attr_(extra_attr), + impl_(std::make_shared(input_map_, dyn_input_map_, output_map_, dyn_output_map_, + dyn_subgraph_map_, attr_map_, enum_map_, input_attr_map_, &cus_input_map_, + &cus_output_map_, &extra_attr_, &name_counts_, this)) { + MS_EXCEPTION_IF_NULL(impl_); + } ~OpAdapter() override {} - bool IsCustomOp(const OperatorPtr &op) { - MS_EXCEPTION_IF_NULL(op); - auto it = cus_input_map_.find(op->GetOpType()); - if (it == cus_input_map_.end()) { - return false; - } - return true; - } + bool IsCustomOp(const OperatorPtr &op) { return impl_->IsCustomOp(op); } Status GenerateCustomOpInputMap(const CusOperatorPtr &op, const PrimitivePtr &prim) { - MS_EXCEPTION_IF_NULL(op); - MS_EXCEPTION_IF_NULL(prim); - // Create the map of custom op from input index to input name. 
- std::unordered_map input_map; - auto value = prim->GetAttr("input_names"); - if (value == nullptr) { - cus_output_map_[prim->name()] = input_map; - return NOT_FOUND; - } - - auto input_names = GetValue>(value); - for (size_t i = 0; i < input_names.size(); ++i) { - // input_map begin form 1 - input_map[i + 1] = input_names[i]; - op->CustomInputRegister(input_names[i]); - } - - if (cus_input_map_.find(prim->name()) == cus_input_map_.end()) { - cus_input_map_[prim->name()] = input_map; - } - return SUCCESS; + return impl_->GenerateCustomOpInputMap(op, prim); } Status GenerateCustomOpOutputMap(const CusOperatorPtr &op, const PrimitivePtr &prim) { - MS_EXCEPTION_IF_NULL(op); - MS_EXCEPTION_IF_NULL(prim); - // Create the map of custom op from output index to output name. - std::unordered_map output_map; - auto value = prim->GetAttr("output_names"); - if (value == nullptr) { - // generate a empty output_map for it - cus_output_map_[prim->name()] = output_map; - return NOT_FOUND; - } - - auto output_names = GetValue>(value); - for (size_t i = 0; i < output_names.size(); ++i) { - // output_map begin form 0 - output_map[i] = output_names[i]; - op->CustomOutputRegister(output_names[i]); - } - - if (cus_output_map_.find(prim->name()) == cus_output_map_.end()) { - cus_output_map_[prim->name()] = output_map; - } - return SUCCESS; + return impl_->GenerateCustomOpOutputMap(op, prim); } // Convert ME UserCustom AnfNode to GE CustomOp. And set it's attrs. - OperatorPtr GenerateCustomOp(const AnfNodePtr anf) { - MS_EXCEPTION_IF_NULL(anf); - auto node = anf->cast(); - if (node == nullptr) { - return nullptr; - } - - if (node->inputs().empty()) { - MS_LOG(EXCEPTION) << "length of node inputs is empty"; - } - - auto prim = GetValueNode(node->inputs()[0]); - MS_EXCEPTION_IF_NULL(prim); - auto op = std::make_shared(node->fullname_with_scope(), prim->name()); - if (GenerateCustomOpInputMap(op, prim) != SUCCESS) { - MS_LOG(WARNING) << "Custom op node has no input_names, op[" << prim->name() << "]."; - } - - if (GenerateCustomOpOutputMap(op, prim) != SUCCESS) { - MS_LOG(WARNING) << "Custom op node has no output_names, op[" << prim->name() << "]."; - } - - op->CustomInferFuncRegister(CustomInferFunc); - - return op; - } + OperatorPtr GenerateCustomOp(const AnfNodePtr anf) { return impl_->GenerateCustomOp(anf); } OperatorPtr GenerateNormalOp(const AnfNodePtr &anf) { OperatorPtr op = nullptr; @@ -128,10 +146,10 @@ class OpAdapter : public BaseOpAdapter { // GE will generate unique name automatically if (anf != nullptr && anf->fullname_with_scope() != "") { MS_LOG(DEBUG) << anf->fullname_with_scope(); - op = std::make_shared(anf->fullname_with_scope()); + op = std::make_shared(anf->fullname_with_scope()); } else { MS_LOG(DEBUG) << "no fullname_with_scope"; - op = std::make_shared(); + op = std::make_shared(); } // set dynamic output num if op use DYNAMIC_OUTPUT @@ -158,7 +176,7 @@ class OpAdapter : public BaseOpAdapter { return op; } - OperatorPtr generate(const std::string &op_name) override { return std::make_shared(op_name); } + OperatorPtr generate(const std::string &op_name) override { return std::make_shared(op_name); } const std::unordered_map &getInputMap() override { return input_map_; } const std::unordered_map &getInputAttrMap() override { return input_attr_map_; } @@ -167,17 +185,7 @@ class OpAdapter : public BaseOpAdapter { const std::unordered_map &getDynSubgraphMap() override { return dyn_subgraph_map_; } Status SetOpSubgraphFunc(const OperatorPtr &op, int index, std::shared_ptr> branches) { - 
MS_EXCEPTION_IF_NULL(op); - auto it = dyn_subgraph_map_.find(index); - if (it != dyn_subgraph_map_.end()) { - auto size = branches->size(); - it->second.create_dyn_subgraph(op, static_cast(size)); - for (size_t i = 0; i < size; i++) { - it->second.set_subgraph(op, static_cast(i), std::make_shared((*branches)[i])); - } - return SUCCESS; - } - return NOT_FOUND; + return impl_->SetOpSubgraphFunc(op, index, branches); } int setSubgraph(const OperatorPtr &op, int index, std::shared_ptr> branches) override { @@ -185,546 +193,82 @@ class OpAdapter : public BaseOpAdapter { } Status SetCustomOpInput(const CusOperatorPtr &op, int index, const OperatorPtr &input) { - MS_EXCEPTION_IF_NULL(op); - MS_EXCEPTION_IF_NULL(input); - auto it = cus_input_map_.find(op->GetOpType()); - if (it == cus_input_map_.end()) { - return NOT_FOUND; - } - std::unordered_map &input_map = it->second; - - if ((input_map.find(index) != input_map.end())) { - MS_LOG(DEBUG) << "Link op " << input->GetName() << " to " << op->GetName() << ":" << input_map[index]; - (void)op->SetInput(input_map[index], *input); - return SUCCESS; - } - return NOT_FOUND; + return impl_->SetCustomOpInput(op, index, input); } Status SetNormalOpInput(const OperatorPtr &op, int index, const OperatorPtr &input) { - MS_EXCEPTION_IF_NULL(op); - auto it = input_map_.find(index); - if (it != input_map_.end()) { - MS_EXCEPTION_IF_NULL(input); - MS_LOG(DEBUG) << "Link op " << input->GetName() << " to " << op->GetName() << ":" << it->second.name; - it->second.set_op(op, input); - return SUCCESS; - } - return NOT_FOUND; + return impl_->SetNormalOpInput(op, index, input); } int setInput(const OperatorPtr &op, int index, const OperatorPtr &input) override { - if (IsCustomOp(op)) { - auto cus_op = std::dynamic_pointer_cast(op); - return static_cast(SetCustomOpInput(cus_op, index, input)); - } else { - return static_cast(SetNormalOpInput(op, index, input)); - } + return impl_->setInput(op, index, input); } Status SetCustomOpInput(const CusOperatorPtr &op, int index, const OutHandler &handle) { - MS_EXCEPTION_IF_NULL(op); - auto it = cus_input_map_.find(op->GetOpType()); - if (it == cus_input_map_.end()) { - return NOT_FOUND; - } - - std::unordered_map &input_map = it->second; - if ((handle.op != nullptr) && (input_map.find(index) != input_map.end())) { - if (handle.out.empty()) { - MS_LOG(DEBUG) << "Link op " << handle.op->GetName() << " to " << op->GetName() << ":" << input_map[index]; - (void)op->SetInput(input_map[index], *(handle.op)); - } else { - MS_LOG(DEBUG) << "Link op " << handle.op->GetName() << ":" << handle.out << " to " << op->GetName() << ":" - << input_map[index]; - (void)op->SetInput(input_map[index], *(handle.op), handle.out); - } - return SUCCESS; - } - return NOT_FOUND; + return impl_->SetCustomOpInput(op, index, handle); } Status SetNormalOpInput(const OperatorPtr &op, int index, const OutHandler &handle) { - MS_EXCEPTION_IF_NULL(op); - auto it = input_map_.find(index); - if ((handle.op != nullptr) && (it != input_map_.end())) { - if (handle.out.empty()) { - MS_LOG(DEBUG) << "Link op " << handle.op->GetName() << " to " << op->GetName() << ":" << it->second.name; - it->second.set_op(op, handle.op); - } else { - MS_LOG(DEBUG) << "Link op " << handle.op->GetName() << ":" << handle.out << " to " << op->GetName() << ":" - << it->second.name; - it->second.set_handle(op, handle); - } - return SUCCESS; - } - return NOT_FOUND; + return impl_->SetNormalOpInput(op, index, handle); } int setInput(const OperatorPtr &op, int index, const OutHandler &handle) 
override { - if (IsCustomOp(op)) { - auto cus_op = std::dynamic_pointer_cast(op); - return static_cast(SetCustomOpInput(cus_op, index, handle)); - } else { - return static_cast(SetNormalOpInput(op, index, handle)); - } + return impl_->setInput(op, index, handle); } int setInput(const OperatorPtr &op, int index, const std::shared_ptr> &handler_vec) override { - MS_EXCEPTION_IF_NULL(handler_vec); - if (IsCustomOp(op)) { - MS_LOG(ERROR) << "Custom Op do not support dynamic input"; - return static_cast(FAILED); - } - MS_EXCEPTION_IF_NULL(op); - auto it = dyn_input_map_.find(index); - if (it != dyn_input_map_.end()) { - it->second.create_dyn_input(op, static_cast(handler_vec->size())); - for (unsigned int i = 0; i < handler_vec->size(); ++i) { - OutHandler h = (*handler_vec)[i]; - MS_EXCEPTION_IF_NULL(h.op); - if (h.out.empty()) { - MS_LOG(DEBUG) << "Link op " << h.op->GetName() << " to " << op->GetName() << ":" << it->second.name; - it->second.set_op(op, (i) /* index start from 0 */, h.op); - } else { - MS_LOG(DEBUG) << "Link op " << h.op->GetName() << ":" << h.out << " to " << op->GetName() << ":" - << it->second.name; - it->second.set_handle(op, i, h); - } - } - return 0; - } - return static_cast(NOT_FOUND); + return impl_->setInput(op, index, handler_vec); } - OutHandler getOutput(const OperatorPtr &op, int index) override { - MS_EXCEPTION_IF_NULL(op); - if (IsCustomOp(op)) { - return getCustomOutput(op, index); - } - return getNormalOutput(op, index); - } + OutHandler getOutput(const OperatorPtr &op, int index) override { return impl_->getOutput(op, index); } - OutHandler getCustomOutput(const OperatorPtr &op, int index) { - MS_EXCEPTION_IF_NULL(op); - auto it = cus_output_map_.find(op->GetOpType()); - if (it == cus_output_map_.end()) { - MS_LOG(ERROR) << "OpAdpator(" << op->GetName() << ") has both OUTPUT is not supported!"; - return OutHandler(); - } + OutHandler getCustomOutput(const OperatorPtr &op, int index) { return impl_->getCustomOutput(op, index); } - std::unordered_map &output_map = it->second; - - if ((output_map.find(index) != output_map.end())) { - return OutHandler(op, output_map[index]); - } - MS_LOG(ERROR) << "OpAdpator(" << op->GetName() << ") has no OUTPUT index(" << index << ")!"; - return OutHandler(); - } - - OutHandler getNormalOutput(const OperatorPtr &op, int index) { - MS_EXCEPTION_IF_NULL(op); - if (!dyn_output_map_.empty() && !output_map_.empty()) { - MS_LOG(ERROR) << "OpAdpator(" << op->GetName() << ") has both OUTPUT and DYN_OUTPUT is not supported!"; - return OutHandler(); - } - auto it = output_map_.find(index); - if (it != output_map_.end()) { - return OutHandler(op, it->second.name); - } else if (!dyn_output_map_.empty()) { - return OutHandler(op, dyn_output_map_.begin()->second.name + std::to_string(index)); - } else { - MS_LOG(ERROR) << "OpAdpator(" << op->GetName() << ") has no OUTPUT and DYN_OUTPUT index(" << index << ")!"; - return OutHandler(); - } - } + OutHandler getNormalOutput(const OperatorPtr &op, int index) { return impl_->getNormalOutput(op, index); } Status UpdateSingleOutputDesc(const OperatorPtr &op, const abstract::BaseShapePtr &shp, const TypePtr &type) { - MS_EXCEPTION_IF_NULL(type); - std::string format = "NCHW"; - if (op->GetOpType() == kExtractImagePatchesOpName) { - format = "NHWC"; - } - - auto desc = CreateOutputDesc(dyn_cast(shp), type, format); - if (desc == nullptr) { - MS_LOG(ERROR) << "Update output descriptor failed!"; - return FAILED; - } - - if (IsCustomOp(op)) { - if (cus_output_map_.find(op->GetOpType()) == 
cus_output_map_.end() || - (cus_output_map_[op->GetOpType()].empty())) { - MS_LOG(ERROR) << "This op does not create custom output map"; - return FAILED; - } - auto cus_op = std::dynamic_pointer_cast(op); - MS_EXCEPTION_IF_NULL(cus_op); - std::unordered_map output_map = cus_output_map_[op->GetOpType()]; - (void)cus_op->UpdateOutputDesc(output_map[0], *desc); - } else { - if (output_map_.empty()) { - MS_LOG(INFO) << "This op does not have output map"; - return FAILED; - } - output_map_.begin()->second.update_out_desc(op, *desc); - } - return SUCCESS; + return impl_->UpdateSingleOutputDesc(op, shp, type); } - size_t GetCustomOpOutputSize(const CusOperatorPtr &cus_op) { - MS_EXCEPTION_IF_NULL(cus_op); - if (cus_output_map_.find(cus_op->GetOpType()) == cus_output_map_.end()) { - MS_LOG(ERROR) << "This op does not create custom output map"; - return 0; - } - size_t output_size = cus_output_map_[cus_op->GetOpType()].size(); - return output_size; - } + size_t GetCustomOpOutputSize(const CusOperatorPtr &cus_op) { return impl_->GetCustomOpOutputSize(cus_op); } std::shared_ptr CreateOutputDesc(const abstract::ShapePtr &shape_ptr, const TypePtr &type, const std::string &format) { - if (shape_ptr == nullptr) { - MS_LOG(ERROR) << "Shape ptr is nullptr"; - return nullptr; - } - - if (type == nullptr) { - MS_LOG(ERROR) << "Type ptr is nullptr"; - return nullptr; - } - - TypeId me_type = type->type_id(); - if (kObjectTypeTensorType == me_type) { - me_type = dyn_cast(type)->element()->type_id(); - } - auto desc = TransformUtil::GetGeTensorDesc(shape_ptr->shape(), me_type, format); - return desc; + return impl_->CreateOutputDesc(shape_ptr, type, format); } Status UpdateMultiOutputDesc(const OperatorPtr &op, const abstract::BaseShapePtr &shp, const TypePtr &type) { - auto tuple_shp = dyn_cast(shp); - MS_EXCEPTION_IF_NULL(tuple_shp); - - size_t output_size = 0; - bool is_custom_op = IsCustomOp(op); - if (is_custom_op) { - output_size = GetCustomOpOutputSize(std::dynamic_pointer_cast(op)); - } else { - output_size = output_map_.size(); - } - - if (output_size == 0) { - MS_LOG(INFO) << "This op does not have output map"; - return FAILED; - } - - if (output_size != tuple_shp->shape().size()) { - MS_LOG(ERROR) << "output_map is not equal tuple_shape size"; - return FAILED; - } - std::string format = "NCHW"; - if (op->GetOpType() == kTopKOpName) { - format = "NHWC"; - } - for (size_t i = 0; i < tuple_shp->shape().size(); ++i) { - auto tuple_type = dyn_cast(type); - MS_EXCEPTION_IF_NULL(tuple_type); - TypePtr type_elem = tuple_type->elements()[i]; - - auto desc = CreateOutputDesc(dyn_cast(tuple_shp->shape()[i]), type_elem, format); - if (desc == nullptr) { - MS_LOG(ERROR) << "Create output descriptor failed!"; - return FAILED; - } - - if (is_custom_op) { - (void)std::dynamic_pointer_cast(op)->UpdateOutputDesc(cus_output_map_[op->GetOpType()][i], - *desc); - } else { - auto it = output_map_.find(i); - if (it != output_map_.end()) { - it->second.update_out_desc(op, *desc); - } - } - } - return SUCCESS; + return impl_->UpdateMultiOutputDesc(op, shp, type); } - std::shared_ptr CreateNodeDesc(const AnfNodePtr &node) { - MS_EXCEPTION_IF_NULL(node); - TypeId me_type = node->Type()->type_id(); - if (kObjectTypeTensorType == me_type) { - me_type = dyn_cast(node->Type())->element()->type_id(); - } - if (me_type <= kNumberTypeBegin || me_type >= kNumberTypeEnd) { - return nullptr; - } - - std::vector shape; - auto shape_ptr = dyn_cast(node->Shape()); - if (nullptr != shape_ptr) { - shape = shape_ptr->shape(); - } - - auto 
desc = TransformUtil::GetGeTensorDesc(shape, me_type, "NCHW"); - if (desc == nullptr) { - MS_LOG(ERROR) << "Update output descriptor failed!"; - return nullptr; - } - return desc; - } + std::shared_ptr CreateNodeDesc(const AnfNodePtr &node) { return impl_->CreateNodeDesc(node); } void UpdateNormalOpInputDesc(const OperatorPtr &op, const AnfNodePtr node) { - if (op == nullptr) { - MS_LOG(ERROR) << "op is nullptr"; - return; - } - MS_EXCEPTION_IF_NULL(node); - - auto inputs = node->cast()->inputs(); - for (size_t i = 1; i < inputs.size(); ++i) { - auto it = input_map_.find(i); - if (it != input_map_.end()) { - auto desc = CreateNodeDesc(inputs[i]); - if (desc == nullptr) { - continue; - } - if (op->GetOpType() == kExtractImagePatchesOpName) { - desc->SetFormat(ge::Format::FORMAT_NHWC); - } - it->second.update_input_desc(op, *desc); - } - } + return impl_->UpdateNormalOpInputDesc(op, node); } void UpdateCustomOpInputDesc(const CusOperatorPtr &op, const AnfNodePtr &node) { - if (op == nullptr) { - MS_LOG(ERROR) << "op is nullptr"; - return; - } - MS_EXCEPTION_IF_NULL(node); - - if (cus_input_map_.find(op->GetOpType()) == cus_input_map_.end() || (cus_input_map_[op->GetOpType()].empty())) { - MS_LOG(ERROR) << "This op does not create custom input map"; - return; - } - - std::unordered_map &input_map = cus_input_map_[op->GetOpType()]; - auto inputs = node->cast()->inputs(); - for (size_t i = 1; i < inputs.size(); ++i) { - if (input_map.find(i) != input_map.end()) { - auto desc = CreateNodeDesc(inputs[i]); - if (desc == nullptr) { - continue; - } - (void)op->UpdateInputDesc(input_map[i], *desc); - } - } + return impl_->UpdateCustomOpInputDesc(op, node); } - void updateInputDesc(const OperatorPtr &op, const AnfNodePtr &node) { - MS_EXCEPTION_IF_NULL(op); - MS_EXCEPTION_IF_NULL(node); - if (IsCustomOp(op)) { - auto cus_op = std::dynamic_pointer_cast(op); - UpdateCustomOpInputDesc(cus_op, node); - } else { - UpdateNormalOpInputDesc(op, node); - } - } + void updateInputDesc(const OperatorPtr &op, const AnfNodePtr &node) { impl_->updateInputDesc(op, node); } void updateOutputDesc(const OperatorPtr &op, const abstract::BaseShapePtr &shp, const TypePtr &type, const AnfNodePtr &node) override { - if (op == nullptr) { - MS_LOG(ERROR) << "op is nullptr"; - return; - } - MS_EXCEPTION_IF_NULL(node); - MS_LOG(INFO) << "Op name is " << op->GetName(); - - auto normal_shape_ptr = dyn_cast(shp); - auto no_shape_ptr = dyn_cast(shp); - - if ((nullptr != normal_shape_ptr) || (nullptr != no_shape_ptr)) { - if (UpdateSingleOutputDesc(op, shp, type) != SUCCESS) { - return; - } - } else if (nullptr != dyn_cast(shp)) { - if (UpdateMultiOutputDesc(op, shp, type) != SUCCESS) { - return; - } - } else { - MS_LOG(WARNING) << "Update output desc failed, unknow output shape type"; - return; - } - MS_EXCEPTION_IF_NULL(node); - if (!node->isa()) { - return; - } - - // Need to update input_desc while the output_desc is updated - updateInputDesc(op, node); + impl_->updateOutputDesc(op, shp, type, node); } int setAttr(const OperatorPtr &op, const std::string &attrKey, const ValuePtr &attrValue) override { - auto it = attr_map_.find(attrKey); - if (it != attr_map_.end()) { - // switch case for each avalilable attribute type - MS_LOG(INFO) << "Set attr: " << attrKey << "(" << it->second.name << "), value: " << attrValue->ToString(); - AddAttrToDrawGraph(attrKey + std::string("=") + attrValue->ToString()); - it->second.set_attr(op, attrValue); - return 0; - } - return static_cast(NOT_FOUND); + return impl_->setAttr(op, attrKey, 
attrValue); } - int SetCustomOpAttr(const CusOperatorPtr &op, const PrimitivePtr &prim) { - enum ValueType { - SINGLE_VALUE = 0, - SEQUEUE_VALUE, - UNKNOWN_VALUE, - }; + int SetCustomOpAttr(const CusOperatorPtr &op, const PrimitivePtr &prim) { return impl_->SetCustomOpAttr(op, prim); } - MS_EXCEPTION_IF_NULL(prim); - MS_EXCEPTION_IF_NULL(op); + int SetNormalOpAttr(const OperatorPtr &op, const PrimitivePtr &prim) { return impl_->SetNormalOpAttr(op, prim); } - ValueType value_type = SINGLE_VALUE; - for (auto item : prim->attrs()) { - if (item.second->isa()) { - (void)op->SetAttr(item.first, GetValue(item.second)); - } else if (item.second->isa()) { - (void)op->SetAttr(item.first, GetValue(item.second)); - } else if (item.second->isa()) { - (void)op->SetAttr(item.first, GetValue(item.second)); - } else if (item.second->isa()) { - (void)op->SetAttr(item.first, GetValue(item.second)); - } else if (item.second->isa()) { - value_type = SEQUEUE_VALUE; - auto val_seq = item.second->cast(); - if ((*val_seq)[0]->isa()) { - (void)op->SetAttr(item.first, GetValue>(item.second)); - } else if ((*val_seq)[0]->isa()) { - (void)op->SetAttr(item.first, GetValue>(item.second)); - } else if ((*val_seq)[0]->isa()) { - (void)op->SetAttr(item.first, GetValue>(item.second)); - } else if ((*val_seq)[0]->isa()) { - (void)op->SetAttr(item.first, GetValue>(item.second)); - } else { - MS_LOG(EXCEPTION) << "Unsupported custom attribute type in adaptor, prim name: " << prim->name() - << ", attr name: " << item.first << ", value: " << item.second->ToString(); - } - } else { - value_type = UNKNOWN_VALUE; - MS_LOG(WARNING) << "Unsupported custom attribute type in adaptor, prim name: " << prim->name() - << ", attr name: " << item.first << ", value: " << item.second->ToString(); - return static_cast(NOT_FOUND); - } + int setAttr(const OperatorPtr &op, const PrimitivePtr &prim) override { return impl_->setAttr(op, prim); } - if (value_type == SINGLE_VALUE) { - AddAttrToDrawGraph(item.first + std::string("=") + item.second->ToString()); - } else if (value_type == SEQUEUE_VALUE) { - AddAttrToDrawGraph(item.first + std::string("=") + "[...]"); - } - } - return 0; - } - - int SetNormalOpAttr(const OperatorPtr &op, const PrimitivePtr &prim) { - int ret = 0; - MS_EXCEPTION_IF_NULL(prim); - MS_EXCEPTION_IF_NULL(op); - for (auto &it : attr_map_) { - auto value = prim->GetAttr(it.first); - if (value != nullptr) { - // set attr from primitive - ret = setAttr(op, it.first, value); - if (ret) { - return ret; - } - } else { - // set attr from extra_attr - auto it_extra = extra_attr_.find(it.first); - if (it_extra != extra_attr_.end()) { - ret = setAttr(op, it.first, it_extra->second); - if (ret) { - return ret; - } - } - } - } - return 0; - } - - int setAttr(const OperatorPtr &op, const PrimitivePtr &prim) override { - int ret = 0; - if (IsCustomPrim(prim)) { - auto cus_op = std::dynamic_pointer_cast(op); - ret = SetCustomOpAttr(cus_op, prim); - } else { - ret = SetNormalOpAttr(op, prim); - } - return ret; - } - - int setAttr(const OperatorPtr &op, const AnfNodePtr &node) override { - // no attribute for lonely node - MS_EXCEPTION_IF_NULL(node); - if (!node->isa()) { - return 0; - } - - auto cnode = node->cast(); - if (cnode == nullptr) { - return 0; - } - - auto &inputs = cnode->inputs(); - if (inputs.empty()) { - return 0; - } - - // get Attr T from abstract of anfnode first, - // if attr "T" appears in primitive, the primitive T will cover this one - if (attr_map_.find("T") != attr_map_.end()) { - // get dtype from inputs[1], if the 
node has no inputs, set the attr T with output dtype
-    TypePtr type;
-    if (inputs.size() > 1) {
-      type = inputs[1]->Type();
-    } else {
-      type = node->Type();
-    }
-    if (type != nullptr) {
-      (void)setAttr(op, "T", MakeValue(type));
-    }
-  }
-
-  // set attr from primitive and ExtraAttr
-  if (IsValueNode<Primitive>(inputs[0])) {
-    // set attr from primitive
-    PrimitivePtr prim = GetValueNode<PrimitivePtr>(inputs[0]);
-    int ret = setAttr(op, prim);
-    if (ret != 0) {
-      return ret;
-    }
-  }
-
-  // set attr from const input
-  for (auto &it : input_attr_map_) {
-    if (inputs.size() <= it.first || !inputs[it.first]->isa<ValueNode>()) {
-      continue;
-    }
-    auto const_value = GetValueNode(inputs[it.first]);
-    MS_LOG(INFO) << "Set attr: input_" << it.first << "(" << it.second.name
-                 << "), value: " << const_value->ToString();
-    if (const_value->isa<None>()) {
-      continue;
-    }
-    AddAttrToDrawGraph(it.second.name + std::string("=") + const_value->ToString());
-    it.second.set_attr(op, const_value);
-  }
-  return 0;
-}
+  int setAttr(const OperatorPtr &op, const AnfNodePtr &node) override { return impl_->setAttr(op, node); }

   std::unordered_map<std::string, ValuePtr> GetExtraAttr() override { return extra_attr_; }
@@ -883,6 +427,7 @@ class OpAdapter : public BaseOpAdapter {
   static std::unordered_map<std::string, std::unordered_map<int, std::string>> cus_output_map_;
   std::unordered_map<std::string, ValuePtr> extra_attr_;
   std::unordered_map<std::string, int> name_counts_;
+  const std::shared_ptr<OpAdapterImpl> impl_;
 };

 template <typename T>
diff --git a/mindspore/ccsrc/transform/graph_ir/op_adapter_base.h b/mindspore/ccsrc/transform/graph_ir/op_adapter_base.h
index 9d59534fb43..ae63a45960d 100644
--- a/mindspore/ccsrc/transform/graph_ir/op_adapter_base.h
+++ b/mindspore/ccsrc/transform/graph_ir/op_adapter_base.h
@@ -42,7 +42,6 @@
 #include "external/ge/ge_api.h"
 #endif
 #include "graph/tensor.h"
-#include "transform/graph_ir/all_ops.h"

 namespace ge {
 class CustomOperator : public Operator {
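The hunks above are the heart of the change: every heavy OpAdapter<T> method body now forwards to the non-template OpAdapterImpl held in impl_, and op_adapter_base.h no longer drags all_ops.h into every including translation unit. A minimal sketch of the delegation idea, with hypothetical names (Widget/WidgetImpl stand in for OpAdapter<T>/OpAdapterImpl; the real interface lives in op_adapter.h):

#include <memory>

// Compiled once in its own .cc file; independent of the template parameter,
// so instantiating adapters for new types never re-compiles this body.
class WidgetImpl {
 public:
  int SetInput(int index) {
    // ... heavy shared logic (map lookups, logging, GE calls) goes here ...
    return index >= 0 ? 0 : -1;
  }
};

template <typename T>
class Widget {
 public:
  Widget() : impl_(std::make_shared<WidgetImpl>()) {}
  // The template member shrinks to a one-line forwarder, so every new
  // instantiation Widget<T> adds almost nothing for the compiler to do.
  int setInput(int index) { return impl_->SetInput(index); }

 private:
  const std::shared_ptr<WidgetImpl> impl_;
};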
diff --git a/mindspore/ccsrc/transform/graph_ir/op_adapter_desc.h b/mindspore/ccsrc/transform/graph_ir/op_adapter_desc.h
new file mode 100644
index 00000000000..926c53e5559
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_adapter_desc.h
@@ -0,0 +1,75 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_ADAPTER_DESC_H_
+#define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_ADAPTER_DESC_H_
+
+#include <memory>
+#include "transform/graph_ir/op_adapter.h"
+
+namespace mindspore {
+namespace transform {
+class OpAdapterDesc {
+ public:
+  OpAdapterDesc() : train_(nullptr), infer_(nullptr) {}
+
+  OpAdapterDesc(const OpAdapterPtr &train, const OpAdapterPtr &infer) : train_(train), infer_(infer) {}
+
+  explicit OpAdapterDesc(const OpAdapterPtr &common) : train_(common), infer_(common) {}
+
+  OpAdapterDesc(const OpAdapterDesc &desc) {
+    this->train_ = desc.train_;
+    this->infer_ = desc.infer_;
+  }
+
+  OpAdapterDesc(OpAdapterDesc &&desc) {
+    this->train_ = desc.train_;
+    this->infer_ = desc.infer_;
+    desc.train_ = nullptr;
+    desc.infer_ = nullptr;
+  }
+
+  ~OpAdapterDesc() = default;
+
+  OpAdapterPtr Get(bool train) const { return train ? train_ : infer_; }
+
+  OpAdapterDesc &operator=(const OpAdapterDesc &desc) {
+    if (this != &desc) {
+      this->train_ = desc.train_;
+      this->infer_ = desc.infer_;
+    }
+    return *this;
+  }
+
+  OpAdapterDesc &operator=(OpAdapterDesc &&desc) {
+    if (this != &desc) {
+      this->train_ = desc.train_;
+      this->infer_ = desc.infer_;
+      desc.train_ = nullptr;
+      desc.infer_ = nullptr;
+    }
+    return *this;
+  }
+
+ private:
+  OpAdapterPtr train_;
+  OpAdapterPtr infer_;
+};
+
+using OpAdapterDescPtr = std::shared_ptr<OpAdapterDesc>;
+}  // namespace transform
+}  // namespace mindspore
+#endif  // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_ADAPTER_DESC_H_
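For review context, this is how an OpAdapterDesc entry is meant to be consumed once registered: look the op name up in the shared map and pick the train or infer adapter. FindAdapterSketch below is an illustrative helper under those assumptions, not a function added by this patch:

#include <string>
#include "transform/graph_ir/op_adapter_map.h"

namespace mindspore {
namespace transform {
// Illustrative only: resolve an adapter by op name and execution mode.
OpAdapterPtr FindAdapterSketch(const std::string &op_name, bool is_train) {
  auto &adpt_map = OpAdapterMap::get();
  auto it = adpt_map.find(op_name);
  if (it == adpt_map.end()) {
    return nullptr;  // nothing registered under this op name
  }
  // Get(true) yields the training adapter, Get(false) the inference one;
  // both are the same object when the op was registered through the
  // single-adapter ("common") constructor.
  return it->second->Get(is_train);
}
}  // namespace transform
}  // namespace mindspore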
diff --git a/mindspore/ccsrc/transform/graph_ir/op_adapter_map.cc b/mindspore/ccsrc/transform/graph_ir/op_adapter_map.cc
new file mode 100644
index 00000000000..d13c148f342
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_adapter_map.cc
@@ -0,0 +1,32 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "transform/graph_ir/op_adapter_map.h"
+#include <memory>
+#include "graph/operator.h"
+
+namespace mindspore {
+namespace transform {
+template <>
+std::unordered_map<std::string, std::unordered_map<int, std::string>> OpAdapter<ge::Operator>::cus_input_map_{};
+template <>
+std::unordered_map<std::string, std::unordered_map<int, std::string>> OpAdapter<ge::Operator>::cus_output_map_{};
+
+std::unordered_map<std::string, OpAdapterDescPtr> OpAdapterMap::adpt_map_ = {
+  {kNameCustomOp, std::make_shared<OpAdapterDesc>(std::make_shared<OpAdapter<ge::CustomOperator>>())}};
+
+std::unordered_map<std::string, OpAdapterDescPtr> &OpAdapterMap::get() { return adpt_map_; }
+}  // namespace transform
+}  // namespace mindspore
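Note that adpt_map_ is seeded with only the kNameCustomOp entry; every concrete adapter can then be registered from the translation unit that instantiates it, which is what allows the monolithic op_declare.cc below to be deleted. A hypothetical per-op registration unit might look like the sketch below (ge::op::Relu and the static-initializer trick are illustrative assumptions, not code from this patch):

#include <memory>
#include "transform/graph_ir/op_adapter_map.h"

namespace mindspore {
namespace transform {
namespace {
// Hypothetical: register one adapter into the shared map at static-init
// time, so editing this op's adapter re-compiles only this file.
const bool kReluRegistered = []() {
  OpAdapterMap::get()["Relu"] =
      std::make_shared<OpAdapterDesc>(std::make_shared<OpAdapter<ge::op::Relu>>());
  return true;
}();
}  // namespace
}  // namespace transform
}  // namespace mindspore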
diff --git a/mindspore/ccsrc/transform/graph_ir/op_adapter_map.h b/mindspore/ccsrc/transform/graph_ir/op_adapter_map.h
new file mode 100644
index 00000000000..6b806b331d1
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_adapter_map.h
@@ -0,0 +1,205 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_ADAPTER_MAP_H_
+#define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_ADAPTER_MAP_H_
+
+#include <string>
+#include <unordered_map>
+#include "transform/graph_ir/op_adapter_desc.h"
+
+namespace mindspore {
+namespace transform {
+constexpr const char kNameCustomOp[] = "CustomOp";
+constexpr const char kNameConst[] = "Const";
+constexpr const char kNameParam[] = "parameter";
+constexpr const char kNameRandomUniform[] = "RandomUniform";
+constexpr const char kNameSimpleMean[] = "SimpleMean";
+constexpr const char kNameSimpleMeanGrad[] = "SimpleMeanGrad";
+constexpr const char kNameAllReduce[] = "AllReduce";
+constexpr const char kNameBroadcast[] = "Broadcast";
+constexpr const char kNameAllgather[] = "AllGather";
+constexpr const char kNameReduceScatter[] = "ReduceScatter";
+constexpr const char kNameReduceSum[] = "ReduceSum";
+constexpr const char kNameIsFinite[] = "isFinite";
+constexpr const char kNameReciprocal[] = "Reciprocal";
+constexpr const char kNameRsqrt[] = "Rsqrt";
+constexpr const char kNameSqrt[] = "Sqrt";
+constexpr const char kNameSquare[] = "Square";
+constexpr const char kNameSquaredDifference[] = "SquaredDifference";
+constexpr const char kNamePow[] = "Pow";
+constexpr const char kNameBatchMatMul[] = "BatchMatMul";
+constexpr const char kNameStridedSlice[] = "StridedSlice";
+constexpr const char kNameStridedSliceGrad[] = "StridedSliceGrad";
+constexpr const char kNameExpandDims[] = "ExpandDims";
+constexpr const char kNameLog[] = "Log";
+constexpr const char kNameLogicalAnd[] = "LogicalAnd";
+constexpr const char kNameLogicalNot[] = "LogicalNot";
+constexpr const char kNameLogicalOr[] = "LogicalOr";
+constexpr const char kNameExp[] = "Exp";
+constexpr const char kNameLessEqual[] = "LessEqual";
+constexpr const char kNameGreaterEqual[] = "GreaterEqual";
+constexpr const char kNameEqual[] = "Equal";
+constexpr const char kNameNotEqual[] = "NotEqual";
+constexpr const char kNameFlattenGrad[] = "FlattenGrad";
+constexpr const char kNameConvolution[] = "Convolution";
+constexpr const char kNameBiasAdd[] = "BiasAdd";
+constexpr const char kNameMaxPoolGrad[] = "MaxPoolGrad";
+constexpr const char kNameRsqrtGrad[] = "RsqrtGrad";
+constexpr const char kNameSqrtGrad[] = "SqrtGrad";
+constexpr const char kNameReciprocalGrad[] = "ReciprocalGrad";
+constexpr const char kNameAvgPoolGrad[] = "AvgPoolGrad";
+constexpr const char kNameMaxPoolGradWithArgmax[] = "MaxPoolGradWithArgmax";
+constexpr const char kNameApplyMomentum[] = "ApplyMomentum";
+constexpr const char kNameDropoutDoMask[] = "DropoutDoMask";
+constexpr const char kNameResizeBilinear[] = "ResizeBilinear";
+constexpr const char kNameResizeBilinearGrad[] = "ResizeBilinearGrad";
+constexpr const char kNameZerosLike[] = "ZerosLike";
+constexpr const char kNameOnesLike[] = "OnesLike";
+constexpr const char kNameTruncatedNormal[] = "TruncatedNormal";
+constexpr const char kNameSpaceToBatchNd[] = "SpaceToBatchNd";
+constexpr const char kNameConfusionMatrix[] = "ConfusionMatrix";
+constexpr const char kNameResizeNearestNeighborD[] = "ResizeNearestNeighbor";
+constexpr const char kNameResizeNearestNeighborGrad[] = "ResizeNearestNeighborGrad";
+constexpr const char kNameApplyAdam[] = "Adam";
+constexpr const char kNameExtractImagePatches[] = "ExtractImagePatches";
+constexpr const char kNameReLU6[] = "ReLU6";
+constexpr const char kNameReLU6Grad[] = "ReLU6Grad";
+constexpr const char kNameElu[] = "Elu";
+constexpr const char kNameEluGrad[] = "EluGrad";
+constexpr const char kNameTensorScatterUpdate[] = "TensorScatterUpdate";
+constexpr const char kNameScatterUpdate[] = "ScatterUpdate";
+constexpr const char kNameScatterNdUpdate[] = "ScatterNdUpdate";
+constexpr const char kNameScatterMax[] = "ScatterMax";
+constexpr const char kNameNMSWithMask[] = "NMSWithMask";
+constexpr const char kNameCheckValid[] = "CheckValid";
+constexpr const char kNameSmoothL1Loss[] = "SmoothL1Loss";
+constexpr const char kNameSmoothL1LossGrad[] = "SmoothL1LossGrad";
+constexpr const char kNameSGD[] = "SGD";
+constexpr const char kNameSigmoidCrossEntropyWithLogits[] = "SigmoidCrossEntropyWithLogits";
+constexpr const char kNameSigmoidCrossEntropyWithLogitsGrad[] = "SigmoidCrossEntropyWithLogitsGrad";
+constexpr const char kNameScatterNdD[] = "ScatterNd";
+constexpr const char kNamePadD[] = "Pad";
+constexpr const char kNameMirrorPad[] = "MirrorPad";
+constexpr const char kNameMirrorPadGrad[] = "MirrorPadGrad";
+constexpr const char kNameGatherNd[] = "GatherNd";
+constexpr const char kNameArgmax[] = "Argmax";
+constexpr const char kNameArgmin[] = "Argmin";
+constexpr const char kNameArgMaxWithValue[] = "ArgMaxWithValue";
+constexpr const char kNameArgMinWithValue[] = "ArgMinWithValue";
+constexpr const char kNameReduceProd[] = "ReduceProd";
+constexpr const char kNameCumProd[] = "CumProd";
+constexpr const char kNameDiagpart[] = "Diagpart";
+constexpr const char kNameSplitD[] = "Split";
+constexpr const char kNameBatchToSpaceNd[] = "BatchToSpaceNd";
+constexpr const char kNameFloor[] = "Floor";
+constexpr const char kNameNPUGetFloatStatus[] = "NPUGetFloatStatus";
+constexpr const char kNameAssign[] = "Assign";
+constexpr const char kNameAssignAdd[] = "AssignAdd";
+constexpr const char kNameAssignSub[] = "AssignSub";
+constexpr const char kNameNPUAllocFloatStatus[] = "NPUAllocFloatStatus";
+constexpr const char kNameNPUClearFloatStatus[] = "NPUClearFloatStatus";
+constexpr const char kNameReshape[] = "Reshape";
+constexpr const char kNameTransShape[] = "TransShape";
+constexpr const char kNameRealDiv[] = "RealDiv";
+constexpr const char kNameTile[] = "Tile";
+constexpr const char kNameCos[] = "Cos";
+constexpr const char kNameACos[] = "ACos";
+constexpr const char kNameACosGrad[] = "ACosGrad";
+constexpr const char kNameFloorDiv[] = "FloorDiv";
+constexpr const char kNameSin[] = "Sin";
+constexpr const char kNamePrelu[] = "PReLU";
+constexpr const char kNamePreluGrad[] = "PReLUGrad";
+constexpr const char kNameSigmoid[] = "Sigmoid";
+constexpr const char kNameSigmoidGrad[] = "SigmoidGrad";
+constexpr const char kNameL2Normalize[] = "L2Normalize";
+constexpr const char kNameL2NormalizeGrad[] = "L2NormalizeGrad";
+constexpr const char kNameSoftmax[] = "Softmax";
+constexpr const char kNameIOU[] = "IOU";
+constexpr const char kNameBoundingBoxDecode[] = "BoundingBoxDecode";
+constexpr const char kNameBoundingBoxEncode[] = "BoundingBoxEncode";
+constexpr const char kNameSlice[] = "Slice";
+constexpr const char kNameAddN[] = "AddN";
+constexpr const char kNameLess[] = "Less";
+constexpr const char kNameGreater[] = "Greater";
+constexpr const char kNamePack[] = "Pack";
+constexpr const char kNameUnpack[] = "Unpack";
+constexpr const char kNameMerge[] = "Merge";
+constexpr const char kNameGeSwitch[] = "GeSwitch";
+
+constexpr const char kNameHuberLoss[] = "HuberLoss";
+constexpr const char kNameCumSum[] = "CumSum";
+constexpr const char kNameHuberLossGrad[] = "HuberLossGrad";
+constexpr const char kNameSparseSoftmaxCrossEntropy[] = "SparseSoftmaxCrossEntropy";
+constexpr const char kNameSparseSoftmaxCrossEntropyGrad[] = "SparseSoftmaxCrossEntropyGrad";
+constexpr const char kNameTopK[] = "TopK";
+constexpr const char kNameSoftmaxGrad[] = "SoftmaxGrad";
+constexpr const char kNameMaxPool[] = "MaxPool";
+constexpr const char kNameAvgPool[] = "AvgPool";
+constexpr const char kNameMaxPoolWithArgmax[] = "MaxPoolWithArgmax";
+constexpr const char kNameBatchNorm[] = "BatchNorm";
+constexpr const char kNameBatchNormGrad[] = "BatchNormGrad";
+constexpr const char kNameROIAlign[] = "ROIAlign";
+constexpr const char kNameROIAlignGrad[] = "ROIAlignGrad";
+constexpr const char kNameRandomChoiceWithMask[] = "RandomChoiceWithMask";
+constexpr const char kNameAbs[] = "Abs";
+constexpr const char kNameAbsGrad[] = "AbsGrad";
+constexpr const char kNameBinaryCrossEntropy[] = "BinaryCrossEntropy";
+constexpr const char kNameBinaryCrossEntropyGrad[] = "BinaryCrossEntropyGrad";
+constexpr const char kNameSparseApplyAdagrad[] = "SparseApplyAdagrad";
+constexpr const char kNameSparseApplyFtrlD[] = "SparseApplyFtrlD";
+constexpr const char kNameApplyProximalAdagrad[] = "ApplyProximalAdagrad";
+constexpr const char kNameAcosh[] = "Acosh";
+constexpr const char kNameAcoshGrad[] = "AcoshGrad";
+constexpr const char kNameFloorMod[] = "FloorMod";
+constexpr const char kNameSpaceToDepth[] = "SpaceToDepth";
+constexpr const char kNameDepthToSpace[] = "DepthToSpace";
+constexpr const char kNameSign[] = "Sign";
+constexpr const char kNameLARSUpdate[] = "LARSUpdate";
+constexpr const char kNameRound[] = "Round";
+constexpr const char kNamePrint[] = "Print";
+constexpr const char kNameApplyFtrl[] = "ApplyFtrl";
+constexpr const char kNameDiag[] = "Diag";
+constexpr const char kNameDiagPart[] = "DiagPart";
+constexpr const char kNameSpaceToBatch[] = "SpaceToBatch";
+constexpr const char kNameBatchToSpace[] = "BatchToSpace";
+constexpr const char kNameAtan2[] = "Atan2";
+constexpr const char kNameApplyRMSProp[] = "ApplyRMSProp";
+constexpr const char kNameApplyCenteredRMSProp[] = "ApplyCenteredRMSProp";
+constexpr const char kNameBasicLSTMCell[] = "BasicLSTMCell";
+constexpr const char kNameBasicLSTMCellInputGrad[] = "BasicLSTMCellInputGrad";
+constexpr const char kNameBasicLSTMCellWeightGrad[] = "BasicLSTMCellWeightGrad";
+constexpr const char kNameBasicLSTMCellCStateGrad[] = "BasicLSTMCellCStateGrad";
+constexpr const char kNameL2Loss[] = "L2Loss";
+constexpr const char kNameCTCLoss[] = "CTCLoss";
+constexpr const char kNameRange[] = "Range";
+constexpr const char kNameSquareSumAll[] = "SquareSumAll";
+constexpr const char kNameAscendQuant[] = "Quant";
+constexpr const char kNameAscendDequant[] = "Dequant";
+constexpr const char kNameReverseSequence[] = "ReverseSequence";
+constexpr const char kNameCase[] = "Case";
+
+class OpAdapterMap {
+ public:
+  static std::unordered_map<std::string, OpAdapterDescPtr> &get();
+
+ private:
+  static std::unordered_map<std::string, OpAdapterDescPtr> adpt_map_;
+};
+
+}  // namespace transform
+}  // namespace mindspore
+#endif  // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_ADAPTER_MAP_H_
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare.cc b/mindspore/ccsrc/transform/graph_ir/op_declare.cc
deleted file mode 100644
index
3632d62651e..00000000000 --- a/mindspore/ccsrc/transform/graph_ir/op_declare.cc +++ /dev/null @@ -1,1370 +0,0 @@ -/** - * Copyright 2019 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "transform/graph_ir/op_declare.h" - -#include - -#include "transform/graph_ir/all_ops.h" -#include "utils/utils.h" - -namespace mindspore { -namespace transform { -#define INPUT_MAP(T) \ - template <> \ - const std::unordered_map OpAdapter::input_map_ -#define EMPTY_INPUT_MAP std::unordered_map() -#define INPUT_DESC(name) \ - { \ -#name, \ - [](const OperatorPtr op, const OperatorPtr input) { \ - auto p = std::static_pointer_cast(op); \ - (void)p->set_input_##name(*input); \ - }, \ - [](const OperatorPtr op, const OutHandler& handle) { \ - auto p = std::static_pointer_cast(op); \ - (void)p->set_input_##name(*(handle.op), handle.out); \ - }, \ - [](const OperatorPtr op, const GeTensorDesc desc) { \ - auto p = std::static_pointer_cast(op); \ - (void)p->update_input_desc_##name(desc); \ - } \ - } - -#define DYN_INPUT_MAP(T) \ - template <> \ - const std::unordered_map OpAdapter::dyn_input_map_ -#define DYN_INPUT_DESC(name) \ - { \ -#name, \ - [](const OperatorPtr op, unsigned int num) { \ - auto p = std::static_pointer_cast(op); \ - (void)p->create_dynamic_input_##name(num); \ - }, \ - [](const OperatorPtr op, unsigned int index, const OperatorPtr input) { \ - auto p = std::static_pointer_cast(op); \ - (void)p->set_dynamic_input_##name(index, *input); \ - }, \ - [](const OperatorPtr op, unsigned int index, const OutHandler& handle) { \ - auto p = std::static_pointer_cast(op); \ - (void)p->set_dynamic_input_##name(index, *(handle.op), handle.out); \ - } \ - } - -#define DYN_SUBGRAPH_MAP(T) \ - template <> \ - const std::unordered_map OpAdapter::dyn_subgraph_map_ -#define DYN_SUBGRAPH_DESC(name) \ - { \ -#name, \ - [](const OperatorPtr op, unsigned int num) { \ - auto p = std::static_pointer_cast(op); \ - (void)p->create_dynamic_subgraph_##name(num); \ - }, \ - [](const OperatorPtr op, unsigned int index, const DfGraphPtr graph) { \ - auto p = std::static_pointer_cast(op); \ - (void)p->set_dynamic_subgraph_builder_##name(index, [graph](){return *graph;}); \ - } \ - } - -#define ATTR_MAP(T) \ - template <> \ - const std::unordered_map OpAdapter::attr_map_ -#define EMPTY_ATTR_MAP std::unordered_map() -#define ATTR_DESC(name, ...) 
\ - { \ -#name, \ - [](const OperatorPtr op, const ValuePtr& value) { \ - auto p = std::static_pointer_cast(op); \ - (void)p->set_attr_##name(ConvertAny(value, __VA_ARGS__)); \ - } \ - } - -#define INPUT_ATTR_MAP(T) \ - template <> \ - const std::unordered_map OpAdapter::input_attr_map_ - -#define OUTPUT_MAP(T) \ - template <> \ - const std::unordered_map OpAdapter::output_map_ -#define OUTPUT_DESC(name) \ - { \ -#name, \ - [](const OperatorPtr op, const GeTensorDesc desc) { \ - auto p = std::static_pointer_cast(op); \ - (void)p->update_output_desc_##name(desc); \ - } \ - } - -#define DYN_OUTPUT_MAP(T) \ - template <> \ - const std::unordered_map OpAdapter::dyn_output_map_ - -#define DYN_OUTPUT_DESC(name) \ - { \ -#name, \ - [](const OperatorPtr op, unsigned int num) { \ - auto p = std::static_pointer_cast(op); \ - (void)p->create_dynamic_output_##name(num); \ - } \ - } - -template <> -std::unordered_map> OpAdapter::cus_input_map_{}; -template <> -std::unordered_map> OpAdapter::cus_output_map_{}; - -// --------------specialization for each operator---------- -// const -INPUT_MAP(Const) = EMPTY_INPUT_MAP; -ATTR_MAP(Const) = {{"value", ATTR_DESC(value, AnyTraits())}}; -OUTPUT_MAP(Const) = {{0, OUTPUT_DESC(y)}}; - -// Assign -INPUT_MAP(Assign) = {{1, INPUT_DESC(ref)}, {2, INPUT_DESC(value)}}; -ATTR_MAP(Assign) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Assign) = {{0, OUTPUT_DESC(ref)}}; - -// Constant -INPUT_MAP(Constant) = EMPTY_INPUT_MAP; -ATTR_MAP(Constant) = {{"value", ATTR_DESC(value, AnyTraits())}}; -OUTPUT_MAP(Constant) = {{0, OUTPUT_DESC(y)}}; - -// ApplyMomentum -INPUT_MAP(ApplyMomentum) = { - {1, INPUT_DESC(var)}, {2, INPUT_DESC(accum)}, {3, INPUT_DESC(lr)}, {4, INPUT_DESC(grad)}, {5, INPUT_DESC(momentum)}}; -ATTR_MAP(ApplyMomentum) = {{"use_nesterov", ATTR_DESC(use_nesterov, AnyTraits())}, - {"use_locking", ATTR_DESC(use_locking, AnyTraits())}}; -OUTPUT_MAP(ApplyMomentum) = {{0, OUTPUT_DESC(var)}}; - -// ScalarSummary -INPUT_MAP(Summary) = {{2, INPUT_DESC(x)}}; -ATTR_MAP(Summary) = EMPTY_ATTR_MAP; - -// Data -INPUT_MAP(Data) = EMPTY_INPUT_MAP; -ATTR_MAP(Data) = EMPTY_ATTR_MAP; - -// BatchNorm -INPUT_MAP(BatchNorm) = {{1, INPUT_DESC(x)}, - {2, INPUT_DESC(scale)}, - {3, INPUT_DESC(offset)}, - {4, INPUT_DESC(mean)}, - {5, INPUT_DESC(variance)}}; -ATTR_MAP(BatchNorm) = {{"data_format", ATTR_DESC(data_format, AnyTraits())}, - {"epsilon", ATTR_DESC(epsilon, AnyTraits())}, - {"is_training", ATTR_DESC(is_training, AnyTraits())}}; -OUTPUT_MAP(BatchNorm) = {{0, OUTPUT_DESC(y)}, - {1, OUTPUT_DESC(batch_mean)}, - {2, OUTPUT_DESC(batch_variance)}, - {3, OUTPUT_DESC(reserve_space_1)}, - {4, OUTPUT_DESC(reserve_space_2)}}; - -// BatchNormGrad -INPUT_MAP(BatchNormGrad) = {{1, INPUT_DESC(y_backprop)}, - {2, INPUT_DESC(x)}, - {3, INPUT_DESC(scale)}, - {4, INPUT_DESC(reserve_space_1)}, - {5, INPUT_DESC(reserve_space_2)}}; -ATTR_MAP(BatchNormGrad) = {{"data_format", ATTR_DESC(data_format, AnyTraits())}, - {"epsilon", ATTR_DESC(epsilon, AnyTraits())}, - {"is_training", ATTR_DESC(is_training, AnyTraits())}}; -OUTPUT_MAP(BatchNormGrad) = {{0, OUTPUT_DESC(x_backprop)}, - {1, OUTPUT_DESC(scale_backprop)}, - {2, OUTPUT_DESC(offset_backprop)}, - {3, OUTPUT_DESC(reserve_space_4)}, - {4, OUTPUT_DESC(reserve_space_5)}}; - -// Relu -INPUT_MAP(Relu) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(Relu) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Relu) = {{0, OUTPUT_DESC(y)}}; - -// Elu -INPUT_MAP(Elu) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(Elu) = {{"alpha", ATTR_DESC(alpha, AnyTraits())}}; -OUTPUT_MAP(Elu) = {{0, OUTPUT_DESC(y)}}; - -// EluGrad 
-INPUT_MAP(EluGrad) = {{1, INPUT_DESC(grads)}, {2, INPUT_DESC(activations)}}; -ATTR_MAP(EluGrad) = EMPTY_ATTR_MAP; -OUTPUT_MAP(EluGrad) = {{0, OUTPUT_DESC(y)}}; - -// PRelu -INPUT_MAP(PRelu) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(weight)}}; -ATTR_MAP(PRelu) = EMPTY_ATTR_MAP; -OUTPUT_MAP(PRelu) = {{0, OUTPUT_DESC(y)}}; - -// PReluGrad -INPUT_MAP(PReluGrad) = {{1, INPUT_DESC(grads)}, {2, INPUT_DESC(features)}, {3, INPUT_DESC(weights)}}; -ATTR_MAP(PReluGrad) = EMPTY_ATTR_MAP; -OUTPUT_MAP(PReluGrad) = {{0, OUTPUT_DESC(dx)}, {1, OUTPUT_DESC(da)}}; - -// Sigmoid -INPUT_MAP(Sigmoid) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(Sigmoid) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Sigmoid) = {{0, OUTPUT_DESC(y)}}; - -// SigmoidGrad -INPUT_MAP(SigmoidGrad) = {{1, INPUT_DESC(y)}, {2, INPUT_DESC(dy)}}; -ATTR_MAP(SigmoidGrad) = EMPTY_ATTR_MAP; -OUTPUT_MAP(SigmoidGrad) = {{0, OUTPUT_DESC(z)}}; - -// L2NormalizeGrad -INPUT_MAP(L2NormalizeGrad) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(y)}, {3, INPUT_DESC(dy)}}; -ATTR_MAP(L2NormalizeGrad) = { - {"axis", ATTR_DESC(dim, AnyTraits>(), AnyTraits>())}, - {"epsilon", ATTR_DESC(eps, AnyTraits())}}; -OUTPUT_MAP(L2NormalizeGrad) = {{0, OUTPUT_DESC(dx)}}; - -// LarsV2Update -INPUT_MAP(LarsV2Update) = {{1, INPUT_DESC(w)}, - {2, INPUT_DESC(g)}, - {3, INPUT_DESC(w_square_sum)}, - {4, INPUT_DESC(g_square_sum)}, - {5, INPUT_DESC(weight_decay)}, - {6, INPUT_DESC(learning_rate)}}; -ATTR_MAP(LarsV2Update) = {{"epsilon", ATTR_DESC(epsilon, AnyTraits())}, - {"hyperpara", ATTR_DESC(hyperpara, AnyTraits())}, - {"use_clip", ATTR_DESC(use_clip, AnyTraits())}}; -OUTPUT_MAP(LarsV2Update) = {{0, OUTPUT_DESC(g_new)}}; - -// L2Normalize -INPUT_MAP(L2Normalize) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(L2Normalize) = { - {"axis", ATTR_DESC(axis, AnyTraits>(), AnyTraits>())}, - {"epsilon", ATTR_DESC(eps, AnyTraits())}}; -OUTPUT_MAP(L2Normalize) = {{0, OUTPUT_DESC(y)}}; - -// CumsumD -INPUT_MAP(CumsumD) = {{1, INPUT_DESC(x)}}; -INPUT_ATTR_MAP(CumsumD) = {{2, ATTR_DESC(axis, AnyTraits())}}; -ATTR_MAP(CumsumD) = {{"exclusive", ATTR_DESC(exclusive, AnyTraits())}, - {"reverse", ATTR_DESC(reverse, AnyTraits())}}; -OUTPUT_MAP(CumsumD) = {{0, OUTPUT_DESC(y)}}; - -// SoftmaxV2 -INPUT_MAP(SoftmaxV2) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(SoftmaxV2) = { - {"axis", ATTR_DESC(axes, AnyTraits>(), AnyTraits>())}, -}; -OUTPUT_MAP(SoftmaxV2) = {{0, OUTPUT_DESC(y)}}; - -// SoftmaxGrad -INPUT_MAP(SoftmaxGrad) = {{1, INPUT_DESC(softmax)}, {2, INPUT_DESC(grad_softmax)}}; -OUTPUT_MAP(SoftmaxGrad) = {{0, OUTPUT_DESC(grad_x)}}; -ATTR_MAP(SoftmaxGrad) = EMPTY_ATTR_MAP; - -// Flatten -INPUT_MAP(Flatten) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(Flatten) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Flatten) = {{0, OUTPUT_DESC(y)}}; - -// add -INPUT_MAP(Add) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; -ATTR_MAP(Add) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Add) = {{0, OUTPUT_DESC(y)}}; - -// GatherV2 -INPUT_MAP(GatherV2) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(indices)}, {3, INPUT_DESC(axis)}}; -ATTR_MAP(GatherV2) = EMPTY_ATTR_MAP; -OUTPUT_MAP(GatherV2) = {{0, OUTPUT_DESC(y)}}; - -// ReduceSumD -INPUT_MAP(ReduceSumD) = {{1, INPUT_DESC(x)}}; -INPUT_ATTR_MAP(ReduceSumD) = { - {2, ATTR_DESC(axes, AnyTraits>(), AnyTraits>())}}; -ATTR_MAP(ReduceSumD) = {{"keep_dims", ATTR_DESC(keep_dims, AnyTraits())}}; -OUTPUT_MAP(ReduceSumD) = {{0, OUTPUT_DESC(y)}}; - -// ReduceProdD -INPUT_MAP(ReduceProdD) = {{1, INPUT_DESC(x)}}; -INPUT_ATTR_MAP(ReduceProdD) = { - {2, ATTR_DESC(axes, AnyTraits>(), AnyTraits>())}}; -ATTR_MAP(ReduceProdD) = {{"keep_dims", ATTR_DESC(keep_dims, AnyTraits())}}; 
-OUTPUT_MAP(ReduceProdD) = {{0, OUTPUT_DESC(y)}}; - -// CumprodD -INPUT_MAP(CumprodD) = {{1, INPUT_DESC(x)}}; -INPUT_ATTR_MAP(CumprodD) = {{2, ATTR_DESC(axis, AnyTraits())}}; -ATTR_MAP(CumprodD) = {{"exclusive", ATTR_DESC(exclusive, AnyTraits())}, - {"reverse", ATTR_DESC(reverse, AnyTraits())}}; -OUTPUT_MAP(CumprodD) = {{0, OUTPUT_DESC(y)}}; - -// SoftmaxCrossEntropyWithLogits -INPUT_MAP(SoftmaxCrossEntropyWithLogits) = {{1, INPUT_DESC(features)}, {2, INPUT_DESC(labels)}}; -ATTR_MAP(SoftmaxCrossEntropyWithLogits) = EMPTY_ATTR_MAP; -OUTPUT_MAP(SoftmaxCrossEntropyWithLogits) = {{0, OUTPUT_DESC(loss)}, {1, OUTPUT_DESC(backprop)}}; - -INPUT_MAP(SliceD) = {{1, INPUT_DESC(x)}}; -INPUT_ATTR_MAP(SliceD) = {{2, ATTR_DESC(offsets, AnyTraits(), AnyTraits>())}, - {3, ATTR_DESC(size, AnyTraits(), AnyTraits>())}}; -ATTR_MAP(SliceD) = EMPTY_ATTR_MAP; -OUTPUT_MAP(SliceD) = {{0, OUTPUT_DESC(y)}}; - -// MaxPool -INPUT_MAP(MaxPool) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(MaxPool) = {{"ksize", ATTR_DESC(ksize, AnyTraits(), AnyTraits>())}, - {"strides", ATTR_DESC(strides, AnyTraits(), AnyTraits>())}, - {"padding", ATTR_DESC(padding, AnyTraits())}, - {"data_format", ATTR_DESC(data_format, AnyTraits())}}; -OUTPUT_MAP(MaxPool) = {{0, OUTPUT_DESC(y)}}; - -// AvgPool -INPUT_MAP(AvgPool) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(AvgPool) = {{"ksize", ATTR_DESC(ksize, AnyTraits(), AnyTraits>())}, - {"strides", ATTR_DESC(strides, AnyTraits(), AnyTraits>())}, - {"padding", ATTR_DESC(padding, AnyTraits())}, - {"data_format", ATTR_DESC(data_format, AnyTraits())}}; -OUTPUT_MAP(AvgPool) = {{0, OUTPUT_DESC(y)}}; - -// GreaterEqual -INPUT_MAP(GreaterEqual) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; -ATTR_MAP(GreaterEqual) = EMPTY_ATTR_MAP; -OUTPUT_MAP(GreaterEqual) = {{0, OUTPUT_DESC(y)}}; - -// AssignAdd -INPUT_MAP(AssignAdd) = {{1, INPUT_DESC(ref)}, {2, INPUT_DESC(value)}}; -ATTR_MAP(AssignAdd) = EMPTY_ATTR_MAP; -OUTPUT_MAP(AssignAdd) = {{0, OUTPUT_DESC(ref)}}; - -// AssignSub -INPUT_MAP(AssignSub) = {{1, INPUT_DESC(var)}, {2, INPUT_DESC(value)}}; -ATTR_MAP(AssignSub) = EMPTY_ATTR_MAP; -OUTPUT_MAP(AssignSub) = {{0, OUTPUT_DESC(var)}}; - -// Cos -INPUT_MAP(Cos) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(Cos) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Cos) = {{0, OUTPUT_DESC(y)}}; - -// Acos -INPUT_MAP(Acos) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(Acos) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Acos) = {{0, OUTPUT_DESC(y)}}; - -// AcosGrad -INPUT_MAP(AcosGrad) = {{1, INPUT_DESC(y)}, {2, INPUT_DESC(dy)}}; -ATTR_MAP(AcosGrad) = EMPTY_ATTR_MAP; -OUTPUT_MAP(AcosGrad) = {{0, OUTPUT_DESC(z)}}; - -// Acosh -INPUT_MAP(Acosh) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(Acosh) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Acosh) = {{0, OUTPUT_DESC(y)}}; - -// AcoshGrad -INPUT_MAP(AcoshGrad) = {{1, INPUT_DESC(y)}, {2, INPUT_DESC(dy)}}; -ATTR_MAP(AcoshGrad) = EMPTY_ATTR_MAP; -OUTPUT_MAP(AcoshGrad) = {{0, OUTPUT_DESC(z)}}; - -// Floor -INPUT_MAP(Floor) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(Floor) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Floor) = {{0, OUTPUT_DESC(y)}}; - -// FloorDiv -INPUT_MAP(FloorDiv) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; -ATTR_MAP(FloorDiv) = EMPTY_ATTR_MAP; -OUTPUT_MAP(FloorDiv) = {{0, OUTPUT_DESC(y)}}; - -// FloorMod -INPUT_MAP(FloorMod) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; -ATTR_MAP(FloorMod) = EMPTY_ATTR_MAP; -OUTPUT_MAP(FloorMod) = {{0, OUTPUT_DESC(y)}}; - -// Sin -INPUT_MAP(Sin) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(Sin) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Sin) = {{0, OUTPUT_DESC(y)}}; - -// Exp -INPUT_MAP(Exp) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(Exp) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Exp) = {{0, 
OUTPUT_DESC(y)}}; - -// BoundingBoxEncode -INPUT_MAP(BoundingBoxEncode) = { - {1, INPUT_DESC(anchor_box)}, - {2, INPUT_DESC(ground_truth_box)}, -}; -ATTR_MAP(BoundingBoxEncode) = { - {"means", ATTR_DESC(means, AnyTraits>(), AnyTraits())}, - {"stds", ATTR_DESC(stds, AnyTraits>(), AnyTraits())}, -}; -OUTPUT_MAP(BoundingBoxEncode) = {{0, OUTPUT_DESC(delats)}}; - -// BoundingBoxDecode -INPUT_MAP(BoundingBoxDecode) = { - {1, INPUT_DESC(rois)}, - {2, INPUT_DESC(deltas)}, -}; -ATTR_MAP(BoundingBoxDecode) = { - {"means", ATTR_DESC(means, AnyTraits>(), AnyTraits())}, - {"stds", ATTR_DESC(stds, AnyTraits>(), AnyTraits())}, - {"max_shape", ATTR_DESC(max_shape, AnyTraits>(), AnyTraits>())}, - {"wh_ratio_clip", ATTR_DESC(wh_ratio_clip, AnyTraits())}, -}; -OUTPUT_MAP(BoundingBoxDecode) = {{0, OUTPUT_DESC(bboxes)}}; - -// TopK -INPUT_MAP(TopK) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(k)}}; -ATTR_MAP(TopK) = {{"sorted", ATTR_DESC(sorted, AnyTraits())}}; -OUTPUT_MAP(TopK) = {{0, OUTPUT_DESC(values)}, {1, OUTPUT_DESC(indices)}}; - -// TileD -INPUT_MAP(TileD) = {{1, INPUT_DESC(x)}}; -INPUT_ATTR_MAP(TileD) = {{2, ATTR_DESC(multiples, AnyTraits(), AnyTraits>())}}; -ATTR_MAP(TileD) = EMPTY_ATTR_MAP; -OUTPUT_MAP(TileD) = {{0, OUTPUT_DESC(y)}}; - -// OneHot -INPUT_MAP(OneHot) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(depth)}, {3, INPUT_DESC(on_value)}, {4, INPUT_DESC(off_value)}}; -ATTR_MAP(OneHot) = {{"axis", ATTR_DESC(axis, AnyTraits())}}; -OUTPUT_MAP(OneHot) = {{0, OUTPUT_DESC(y)}}; - -// GatherV2D -INPUT_MAP(GatherV2D) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(indices)}}; -INPUT_ATTR_MAP(GatherV2D) = {{3, ATTR_DESC(axis, AnyTraits())}}; -ATTR_MAP(GatherV2D) = EMPTY_ATTR_MAP; -OUTPUT_MAP(GatherV2D) = {{0, OUTPUT_DESC(y)}}; - -// Reshape -INPUT_MAP(Reshape) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(shape)}}; -ATTR_MAP(Reshape) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Reshape) = {{0, OUTPUT_DESC(y)}}; - -// TransShape -INPUT_MAP(TransShape) = {{1, INPUT_DESC(x)}}; -INPUT_ATTR_MAP(TransShape) = {{2, ATTR_DESC(outShape, AnyTraits(), AnyTraits>())}}; -ATTR_MAP(TransShape) = EMPTY_ATTR_MAP; -OUTPUT_MAP(TransShape) = {{0, OUTPUT_DESC(y)}}; - -// BiasAdd -INPUT_MAP(BiasAdd) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(bias)}}; -ATTR_MAP(BiasAdd) = {{"data_format", ATTR_DESC(data_format, AnyTraits())}}; -OUTPUT_MAP(BiasAdd) = {{0, OUTPUT_DESC(y)}}; - -// Iou -INPUT_MAP(Iou) = {{1, INPUT_DESC(bboxes)}, {2, INPUT_DESC(gtboxes)}}; -ATTR_MAP(Iou) = {{"mode", ATTR_DESC(mode, AnyTraits())}}; -OUTPUT_MAP(Iou) = {{0, OUTPUT_DESC(overlap)}}; - -// ResizeNearestNeighborV2D -INPUT_MAP(ResizeNearestNeighborV2D) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(ResizeNearestNeighborV2D) = { - {"size", ATTR_DESC(size, AnyTraits>(), AnyTraits>())}, - {"align_corners", ATTR_DESC(align_corners, AnyTraits())}}; -OUTPUT_MAP(ResizeNearestNeighborV2D) = {{0, OUTPUT_DESC(y)}}; - -// ResizeNearestNeighborV2Grad -INPUT_MAP(ResizeNearestNeighborV2Grad) = {{1, INPUT_DESC(grads)}, {2, INPUT_DESC(size)}}; -ATTR_MAP(ResizeNearestNeighborV2Grad) = {{"align_corners", ATTR_DESC(align_corners, AnyTraits())}}; -OUTPUT_MAP(ResizeNearestNeighborV2Grad) = {{0, OUTPUT_DESC(y)}}; - -// ApplyAdam -INPUT_MAP(ApplyAdam) = {{1, INPUT_DESC(var)}, {2, INPUT_DESC(m)}, {3, INPUT_DESC(v)}, - {4, INPUT_DESC(beta1_power)}, {5, INPUT_DESC(beta2_power)}, {6, INPUT_DESC(lr)}, - {7, INPUT_DESC(beta1)}, {8, INPUT_DESC(beta2)}, {9, INPUT_DESC(epsilon)}, - {10, INPUT_DESC(grad)}}; -ATTR_MAP(ApplyAdam) = {{"use_locking", ATTR_DESC(use_locking, AnyTraits())}, - {"use_nesterov", ATTR_DESC(use_nesterov, AnyTraits())}}; 
-OUTPUT_MAP(ApplyAdam) = {{0, OUTPUT_DESC(var)}}; - -// ApplyAdamD -INPUT_MAP(ApplyAdamD) = {{1, INPUT_DESC(var)}, {2, INPUT_DESC(m)}, {3, INPUT_DESC(v)}, - {4, INPUT_DESC(beta1_power)}, {5, INPUT_DESC(beta2_power)}, {6, INPUT_DESC(lr)}, - {7, INPUT_DESC(beta1)}, {8, INPUT_DESC(beta2)}, {9, INPUT_DESC(epsilon)}, - {10, INPUT_DESC(grad)}}; -ATTR_MAP(ApplyAdamD) = {{"use_locking", ATTR_DESC(use_locking, AnyTraits())}, - {"use_nesterov", ATTR_DESC(use_nesterov, AnyTraits())}}; -OUTPUT_MAP(ApplyAdamD) = {{0, OUTPUT_DESC(var)}, {1, OUTPUT_DESC(m)}, {2, OUTPUT_DESC(v)}}; - -// Relu6 -INPUT_MAP(Relu6) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(Relu6) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Relu6) = {{0, OUTPUT_DESC(y)}}; - -// Relu6Grad -INPUT_MAP(Relu6Grad) = {{1, INPUT_DESC(gradients)}, {2, INPUT_DESC(features)}}; -ATTR_MAP(Relu6Grad) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Relu6Grad) = {{0, OUTPUT_DESC(backprops)}}; - -// ResizeBilinearV2Grad -INPUT_MAP(ResizeBilinearV2Grad) = {{1, INPUT_DESC(grads)}, {2, INPUT_DESC(original_image)}}; -ATTR_MAP(ResizeBilinearV2Grad) = {{"align_corners", ATTR_DESC(align_corners, AnyTraits())}}; -OUTPUT_MAP(ResizeBilinearV2Grad) = {{0, OUTPUT_DESC(y)}}; - -// ResizeBilinearV2D -INPUT_MAP(ResizeBilinearV2D) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(ResizeBilinearV2D) = { - {"size", ATTR_DESC(size, AnyTraits>(), AnyTraits>())}, - {"align_corners", ATTR_DESC(align_corners, AnyTraits())}}; -OUTPUT_MAP(ResizeBilinearV2D) = {{0, OUTPUT_DESC(y)}}; - -// ZerosLike -INPUT_MAP(ZerosLike) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(ZerosLike) = EMPTY_ATTR_MAP; -OUTPUT_MAP(ZerosLike) = {{0, OUTPUT_DESC(y)}}; - -// OnesLike -INPUT_MAP(OnesLike) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(OnesLike) = EMPTY_ATTR_MAP; -OUTPUT_MAP(OnesLike) = {{0, OUTPUT_DESC(y)}}; - -// NMSWithMask -INPUT_MAP(NMSWithMask) = {{1, INPUT_DESC(box_scores)}}; -ATTR_MAP(NMSWithMask) = {{"iou_threshold", ATTR_DESC(iou_threshold, AnyTraits())}}; -OUTPUT_MAP(NMSWithMask) = { - {0, OUTPUT_DESC(selected_boxes)}, {1, OUTPUT_DESC(selected_idx)}, {2, OUTPUT_DESC(selected_mask)}}; - -// Unpack -INPUT_MAP(Unpack) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(Unpack) = {{"axis", ATTR_DESC(axis, AnyTraits())}, {"num", ATTR_DESC(num, AnyTraits())}}; -DYN_OUTPUT_MAP(Unpack) = {{0, DYN_OUTPUT_DESC(y)}}; - -// TensorScatterUpdate -INPUT_MAP(TensorScatterUpdate) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(indices)}, {3, INPUT_DESC(updates)}}; -ATTR_MAP(TensorScatterUpdate) = EMPTY_ATTR_MAP; -OUTPUT_MAP(TensorScatterUpdate) = {{0, OUTPUT_DESC(y)}}; - -// ScatterUpdate -INPUT_MAP(ScatterUpdate) = {{1, INPUT_DESC(var)}, {2, INPUT_DESC(indices)}, {3, INPUT_DESC(updates)}}; -ATTR_MAP(ScatterUpdate) = {{"use_locking", ATTR_DESC(use_locking, AnyTraits())}}; -OUTPUT_MAP(ScatterUpdate) = {{0, OUTPUT_DESC(var)}}; - -// ScatterNdUpdate -INPUT_MAP(ScatterNdUpdate) = {{1, INPUT_DESC(var)}, {2, INPUT_DESC(indices)}, {3, INPUT_DESC(updates)}}; -ATTR_MAP(ScatterNdUpdate) = {{"use_locking", ATTR_DESC(use_locking, AnyTraits())}}; -OUTPUT_MAP(ScatterNdUpdate) = {{0, OUTPUT_DESC(var)}}; - -// ScatterMax -INPUT_MAP(ScatterMax) = {{1, INPUT_DESC(var)}, {2, INPUT_DESC(indices)}, {3, INPUT_DESC(updates)}}; -ATTR_MAP(ScatterMax) = {{"use_locking", ATTR_DESC(use_locking, AnyTraits())}}; -OUTPUT_MAP(ScatterMax) = {{0, OUTPUT_DESC(var)}}; - -// CheckValid -INPUT_MAP(CheckValid) = {{1, INPUT_DESC(bbox_tensor)}, {2, INPUT_DESC(img_metas)}}; -ATTR_MAP(CheckValid) = EMPTY_ATTR_MAP; -OUTPUT_MAP(CheckValid) = {{0, OUTPUT_DESC(valid_tensor)}}; - -// SmoothL1Loss -INPUT_MAP(SmoothL1Loss) = {{1, INPUT_DESC(predict)}, {2, 
INPUT_DESC(label)}}; -ATTR_MAP(SmoothL1Loss) = {{"sigma", ATTR_DESC(sigma, AnyTraits())}}; -OUTPUT_MAP(SmoothL1Loss) = {{0, OUTPUT_DESC(loss)}}; - -// SmoothL1LossGrad -INPUT_MAP(SmoothL1LossGrad) = {{1, INPUT_DESC(predict)}, {2, INPUT_DESC(label)}, {3, INPUT_DESC(dout)}}; -ATTR_MAP(SmoothL1LossGrad) = {{"sigma", ATTR_DESC(sigma, AnyTraits())}}; -OUTPUT_MAP(SmoothL1LossGrad) = {{0, OUTPUT_DESC(gradient)}}; - -// SigmoidCrossEntropyWithLogits -INPUT_MAP(SigmoidCrossEntropyWithLogits) = {{1, INPUT_DESC(predict)}, {2, INPUT_DESC(target)}}; -ATTR_MAP(SigmoidCrossEntropyWithLogits) = EMPTY_ATTR_MAP; -OUTPUT_MAP(SigmoidCrossEntropyWithLogits) = {{0, OUTPUT_DESC(loss)}}; - -// SigmoidCrossEntropyWithLogitsGrad -INPUT_MAP(SigmoidCrossEntropyWithLogitsGrad) = { - {1, INPUT_DESC(predict)}, {2, INPUT_DESC(target)}, {3, INPUT_DESC(dout)}}; -ATTR_MAP(SigmoidCrossEntropyWithLogitsGrad) = EMPTY_ATTR_MAP; -OUTPUT_MAP(SigmoidCrossEntropyWithLogitsGrad) = {{0, OUTPUT_DESC(gradient)}}; - -// ScatterNdD -INPUT_MAP(ScatterNdD) = {{1, INPUT_DESC(indices)}, {2, INPUT_DESC(x)}}; -INPUT_ATTR_MAP(ScatterNdD) = { - {3, ATTR_DESC(shape, AnyTraits>(), AnyTraits>())}}; -ATTR_MAP(ScatterNdD) = EMPTY_ATTR_MAP; -OUTPUT_MAP(ScatterNdD) = {{0, OUTPUT_DESC(y)}}; - -// PadD -INPUT_MAP(PadD) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(PadD) = {{"paddings", ATTR_DESC(paddings, AnyTraits>>())}}; -OUTPUT_MAP(PadD) = {{0, OUTPUT_DESC(y)}}; - -// MirrorPad -INPUT_MAP(MirrorPad) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(paddings)}}; -ATTR_MAP(MirrorPad) = {{"mode", ATTR_DESC(mode, AnyTraits())}}; -OUTPUT_MAP(MirrorPad) = {{0, OUTPUT_DESC(y)}}; - -// MirrorPadGrad -INPUT_MAP(MirrorPadGrad) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(paddings)}}; -ATTR_MAP(MirrorPadGrad) = {{"mode", ATTR_DESC(mode, AnyTraits())}}; -OUTPUT_MAP(MirrorPadGrad) = {{0, OUTPUT_DESC(y)}}; - -// GatherNd -INPUT_MAP(GatherNd) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(indices)}}; -ATTR_MAP(GatherNd) = EMPTY_ATTR_MAP; -OUTPUT_MAP(GatherNd) = {{0, OUTPUT_DESC(y)}}; - -// ROIAlign -INPUT_MAP(ROIAlign) = {{1, INPUT_DESC(features)}, {2, INPUT_DESC(rois)}}; -OUTPUT_MAP(ROIAlign) = {{0, OUTPUT_DESC(y)}}; -ATTR_MAP(ROIAlign) = {{"pooled_height", ATTR_DESC(pooled_height, AnyTraits())}, - {"pooled_width", ATTR_DESC(pooled_width, AnyTraits())}, - {"spatial_scale", ATTR_DESC(spatial_scale, AnyTraits())}, - {"sample_num", ATTR_DESC(sample_num, AnyTraits())}, - {"roi_end_mode", ATTR_DESC(roi_end_mode, AnyTraits())}}; - -// ROIAlignGrad -INPUT_MAP(ROIAlignGrad) = {{1, INPUT_DESC(ydiff)}, {2, INPUT_DESC(rois)}}; -OUTPUT_MAP(ROIAlignGrad) = {{0, OUTPUT_DESC(xdiff)}}; -ATTR_MAP(ROIAlignGrad) = { - {"xdiff_shape", ATTR_DESC(xdiff_shape, AnyTraits>(), AnyTraits>())}, - {"pooled_height", ATTR_DESC(pooled_height, AnyTraits())}, - {"pooled_width", ATTR_DESC(pooled_width, AnyTraits())}, - {"spatial_scale", ATTR_DESC(spatial_scale, AnyTraits())}, - {"sample_num", ATTR_DESC(sample_num, AnyTraits())}}; - -// ArgMaxD -INPUT_MAP(ArgMaxD) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(ArgMaxD) = {{"axis", ATTR_DESC(dimension, AnyTraits())}, - {"output_type", ATTR_DESC(dtype, AnyTraits())}}; -OUTPUT_MAP(ArgMaxD) = {{0, OUTPUT_DESC(y)}}; - -// ArgMinD -INPUT_MAP(ArgMinD) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(ArgMinD) = {{"axis", ATTR_DESC(dimension, AnyTraits())}, - {"output_type", ATTR_DESC(dtype, AnyTraits())}}; -OUTPUT_MAP(ArgMinD) = {{0, OUTPUT_DESC(y)}}; - -// ArgMaxWithValue -INPUT_MAP(ArgMaxWithValue) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(ArgMaxWithValue) = {{"axis", ATTR_DESC(dimension, AnyTraits())}, - {"keep_dims", 
ATTR_DESC(keep_dims, AnyTraits())}}; -OUTPUT_MAP(ArgMaxWithValue) = {{0, OUTPUT_DESC(indice)}, {1, OUTPUT_DESC(values)}}; - -// ArgMinWithValue -INPUT_MAP(ArgMinWithValue) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(ArgMinWithValue) = {{"axis", ATTR_DESC(dimension, AnyTraits())}, - {"keep_dims", ATTR_DESC(keep_dims, AnyTraits())}}; -OUTPUT_MAP(ArgMinWithValue) = {{0, OUTPUT_DESC(indice)}, {1, OUTPUT_DESC(values)}}; - -// ReduceAllD -INPUT_MAP(ReduceAllD) = {{1, INPUT_DESC(x)}}; -INPUT_ATTR_MAP(ReduceAllD) = { - {2, ATTR_DESC(axes, AnyTraits>(), AnyTraits>())}}; -ATTR_MAP(ReduceAllD) = {{"keep_dims", ATTR_DESC(keep_dims, AnyTraits())}}; -OUTPUT_MAP(ReduceAllD) = {{0, OUTPUT_DESC(y)}}; - -// ReduceMeanD -INPUT_MAP(ReduceMeanD) = {{1, INPUT_DESC(x)}}; -INPUT_ATTR_MAP(ReduceMeanD) = { - {2, ATTR_DESC(axes, AnyTraits>(), AnyTraits>())}}; -ATTR_MAP(ReduceMeanD) = {{"keep_dims", ATTR_DESC(keep_dims, AnyTraits())}}; -OUTPUT_MAP(ReduceMeanD) = {{0, OUTPUT_DESC(y)}}; - -// HCOMAllreduce -INPUT_MAP(HcomAllReduce) = {{1, INPUT_DESC(x)}}; -OUTPUT_MAP(HcomAllReduce) = {{0, OUTPUT_DESC(y)}}; -ATTR_MAP(HcomAllReduce) = {{"op", ATTR_DESC(reduction, AnyTraits())}, - {"group", ATTR_DESC(group, AnyTraits())}, - {"fusion", ATTR_DESC(fusion, AnyTraits())}}; - -// HCOMBraodcast -INPUT_MAP(HcomBroadcast) = EMPTY_INPUT_MAP; -DYN_INPUT_MAP(HcomBroadcast) = {{1, DYN_INPUT_DESC(x)}}; -DYN_OUTPUT_MAP(HcomBroadcast) = {{0, DYN_OUTPUT_DESC(y)}}; -ATTR_MAP(HcomBroadcast) = {{"root_rank", ATTR_DESC(root_rank, AnyTraits())}, - {"group", ATTR_DESC(group, AnyTraits())}}; - -// HCOMAllreduce -INPUT_MAP(HcomAllGather) = {{1, INPUT_DESC(x)}}; -OUTPUT_MAP(HcomAllGather) = {{0, OUTPUT_DESC(y)}}; -ATTR_MAP(HcomAllGather) = {{"group", ATTR_DESC(group, AnyTraits())}, - {"rank_size", ATTR_DESC(rank_size, AnyTraits())}}; - -// HCOMReduceScatter -INPUT_MAP(HcomReduceScatter) = {{1, INPUT_DESC(x)}}; -OUTPUT_MAP(HcomReduceScatter) = {{0, OUTPUT_DESC(y)}}; -ATTR_MAP(HcomReduceScatter) = {{"group", ATTR_DESC(group, AnyTraits())}, - {"op", ATTR_DESC(reduction, AnyTraits())}, - {"rank_size", ATTR_DESC(rank_size, AnyTraits())}}; - -// Variable -INPUT_MAP(Variable) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(Variable) = EMPTY_ATTR_MAP; - -// ReluGrad -INPUT_MAP(ReluGrad) = {{1, INPUT_DESC(gradients)}, {2, INPUT_DESC(features)}}; -ATTR_MAP(ReluGrad) = EMPTY_ATTR_MAP; -OUTPUT_MAP(ReluGrad) = {{0, OUTPUT_DESC(backprops)}}; - -// BiasAddGrad -INPUT_MAP(BiasAddGrad) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(BiasAddGrad) = {{"data_format", ATTR_DESC(data_format, AnyTraits())}}; -OUTPUT_MAP(BiasAddGrad) = {{0, OUTPUT_DESC(y)}}; - -// MaxPoolGrad -INPUT_MAP(MaxPoolGrad) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}, {3, INPUT_DESC(grad)}}; -ATTR_MAP(MaxPoolGrad) = {{"ksize", ATTR_DESC(ksize, AnyTraits(), AnyTraits>())}, - {"strides", ATTR_DESC(strides, AnyTraits(), AnyTraits>())}, - {"padding", ATTR_DESC(padding, AnyTraits())}, - {"data_format", ATTR_DESC(data_format, AnyTraits())}}; -OUTPUT_MAP(MaxPoolGrad) = {{0, OUTPUT_DESC(y)}}; - -// RsqrtGrad -INPUT_MAP(RsqrtGrad) = {{1, INPUT_DESC(y)}, {2, INPUT_DESC(dy)}}; -ATTR_MAP(RsqrtGrad) = EMPTY_ATTR_MAP; -OUTPUT_MAP(RsqrtGrad) = {{0, OUTPUT_DESC(z)}}; - -// SqrtGrad -INPUT_MAP(SqrtGrad) = {{1, INPUT_DESC(y)}, {2, INPUT_DESC(dy)}}; -ATTR_MAP(SqrtGrad) = EMPTY_ATTR_MAP; -OUTPUT_MAP(SqrtGrad) = {{0, OUTPUT_DESC(z)}}; - -// ReciprocalGrad -INPUT_MAP(ReciprocalGrad) = {{1, INPUT_DESC(y)}, {2, INPUT_DESC(dy)}}; -ATTR_MAP(ReciprocalGrad) = EMPTY_ATTR_MAP; -OUTPUT_MAP(ReciprocalGrad) = {{0, OUTPUT_DESC(z)}}; - -// avgpoolgrad 
-INPUT_MAP(AvgPoolGrad) = {{1, INPUT_DESC(orig_input_shape)}, {2, INPUT_DESC(input_grad)}}; -ATTR_MAP(AvgPoolGrad) = {{"ksize", ATTR_DESC(ksize, AnyTraits(), AnyTraits>())}, - {"strides", ATTR_DESC(strides, AnyTraits(), AnyTraits>())}, - {"padding", ATTR_DESC(padding, AnyTraits())}, - {"data_format", ATTR_DESC(data_format, AnyTraits())}}; -OUTPUT_MAP(AvgPoolGrad) = {{0, OUTPUT_DESC(out_grad)}}; - -// MaxPoolWithArgmax -INPUT_MAP(MaxPoolWithArgmax) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(MaxPoolWithArgmax) = {{"ksize", ATTR_DESC(ksize, AnyTraits(), AnyTraits>())}, - {"strides", ATTR_DESC(strides, AnyTraits(), AnyTraits>())}, - {"padding", ATTR_DESC(padding, AnyTraits())}}; -OUTPUT_MAP(MaxPoolWithArgmax) = {{0, OUTPUT_DESC(y)}, {1, OUTPUT_DESC(argmax)}}; - -// MaxPoolGradWithArgmax -INPUT_MAP(MaxPoolGradWithArgmax) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(grad)}, {3, INPUT_DESC(argmax)}}; -ATTR_MAP(MaxPoolGradWithArgmax) = {{"ksize", ATTR_DESC(ksize, AnyTraits(), AnyTraits>())}, - {"strides", ATTR_DESC(strides, AnyTraits(), AnyTraits>())}, - {"padding", ATTR_DESC(padding, AnyTraits())}}; -OUTPUT_MAP(MaxPoolGradWithArgmax) = {{0, OUTPUT_DESC(y)}}; - -// ExtractImagePatches -INPUT_MAP(ExtractImagePatches) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(ExtractImagePatches) = {{"ksizes", ATTR_DESC(ksizes, AnyTraits(), AnyTraits>())}, - {"strides", ATTR_DESC(strides, AnyTraits(), AnyTraits>())}, - {"rates", ATTR_DESC(rates, AnyTraits(), AnyTraits>())}, - {"padding", ATTR_DESC(padding, AnyTraits())}}; -OUTPUT_MAP(ExtractImagePatches) = {{0, OUTPUT_DESC(y)}}; - -// Conv2D -INPUT_MAP(Conv2D) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(filter)}, {3, INPUT_DESC(bias)}}; -ATTR_MAP(Conv2D) = { - {"stride", ATTR_DESC(strides, AnyTraits>(), AnyTraits>())}, - {"pad_list", ATTR_DESC(pads, AnyTraits>(), AnyTraits>())}, - {"dilation", ATTR_DESC(dilations, AnyTraits>(), AnyTraits>())}, - {"data_format", ATTR_DESC(data_format, AnyTraits())}, - {"group", ATTR_DESC(groups, AnyTraits())}, -}; -OUTPUT_MAP(Conv2D) = {{0, OUTPUT_DESC(y)}}; - -// Conv2DBackpropInputD -INPUT_MAP(Conv2DBackpropInputD) = {{1, INPUT_DESC(out_backprop)}, {2, INPUT_DESC(filter)}}; -INPUT_ATTR_MAP(Conv2DBackpropInputD) = { - {3, ATTR_DESC(input_size, AnyTraits>(), AnyTraits>())}}; -ATTR_MAP(Conv2DBackpropInputD) = { - {"pad_list", ATTR_DESC(pads, AnyTraits>(), AnyTraits>())}, - {"stride", ATTR_DESC(strides, "pad", AnyTraits>())}, - {"dilation", ATTR_DESC(dilations, AnyTraits>(), AnyTraits>())}, - {"data_format", ATTR_DESC(data_format, AnyTraits())}, - {"group", ATTR_DESC(groups, AnyTraits())}, -}; -OUTPUT_MAP(Conv2DBackpropInputD) = {{0, OUTPUT_DESC(y)}}; - -// Conv2DBackpropFilterD -INPUT_MAP(Conv2DBackpropFilterD) = {{1, INPUT_DESC(out_backprop)}, {2, INPUT_DESC(x)}}; -INPUT_ATTR_MAP(Conv2DBackpropFilterD) = { - {3, ATTR_DESC(filter_size, AnyTraits>(), AnyTraits>())}}; -ATTR_MAP(Conv2DBackpropFilterD) = { - {"pad_list", ATTR_DESC(pads, AnyTraits>(), AnyTraits>())}, - {"stride", ATTR_DESC(strides, "pad", AnyTraits>())}, - {"dilation", ATTR_DESC(dilations, AnyTraits>(), AnyTraits>())}, - {"data_format", ATTR_DESC(data_format, AnyTraits())}, - {"group", ATTR_DESC(groups, AnyTraits())}, -}; -OUTPUT_MAP(Conv2DBackpropFilterD) = {{0, OUTPUT_DESC(y)}}; - -// DepthwiseConv2D -INPUT_MAP(DepthwiseConv2D) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(filter)}, {3, INPUT_DESC(bias)}}; -ATTR_MAP(DepthwiseConv2D) = { - {"stride", ATTR_DESC(strides, AnyTraits>(), AnyTraits>())}, - {"pads", ATTR_DESC(pads, AnyTraits>(), AnyTraits>())}, - {"dilation", ATTR_DESC(dilations, AnyTraits>(), 
AnyTraits>())}, - {"data_format", ATTR_DESC(data_format, AnyTraits())}, -}; -OUTPUT_MAP(DepthwiseConv2D) = {{0, OUTPUT_DESC(y)}}; - -// DepthwiseConv2DBackpropInputD -INPUT_MAP(DepthwiseConv2DBackpropInputD) = {{2, INPUT_DESC(filter)}, {3, INPUT_DESC(out_backprop)}}; -INPUT_ATTR_MAP(DepthwiseConv2DBackpropInputD) = { - {1, ATTR_DESC(input_size, AnyTraits>(), AnyTraits>())}}; -ATTR_MAP(DepthwiseConv2DBackpropInputD) = { - {"stride", ATTR_DESC(strides, AnyTraits>(), AnyTraits>())}, - {"pads", ATTR_DESC(pads, AnyTraits>(), AnyTraits>())}, - {"dilation", ATTR_DESC(dilations, AnyTraits>(), AnyTraits>())}, -}; -OUTPUT_MAP(DepthwiseConv2DBackpropInputD) = {{0, OUTPUT_DESC(input_grad)}}; - -// DepthwiseConv2DBackpropFilterD -INPUT_MAP(DepthwiseConv2DBackpropFilterD) = {{1, INPUT_DESC(input)}, {3, INPUT_DESC(out_backprop)}}; -INPUT_ATTR_MAP(DepthwiseConv2DBackpropFilterD) = { - {2, ATTR_DESC(filter_size, AnyTraits>(), AnyTraits>())}}; -ATTR_MAP(DepthwiseConv2DBackpropFilterD) = { - {"stride", ATTR_DESC(strides, AnyTraits>(), AnyTraits>())}, - {"pads", ATTR_DESC(pads, AnyTraits>(), AnyTraits>())}, - {"dilation", ATTR_DESC(dilations, AnyTraits>(), AnyTraits>())}, -}; -OUTPUT_MAP(DepthwiseConv2DBackpropFilterD) = {{0, OUTPUT_DESC(filter_grad)}}; - -// MatMulV2 -INPUT_MAP(MatMulV2) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}, {3, INPUT_DESC(bias)}}; -ATTR_MAP(MatMulV2) = {{"transpose_a", ATTR_DESC(transpose_x1, AnyTraits())}, - {"transpose_b", ATTR_DESC(transpose_x2, AnyTraits())}}; -OUTPUT_MAP(MatMulV2) = {{0, OUTPUT_DESC(y)}}; - -// Merge -INPUT_MAP(Merge) = EMPTY_INPUT_MAP; -DYN_INPUT_MAP(Merge) = {{1, DYN_INPUT_DESC(x)}}; -ATTR_MAP(Merge) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Merge) = {{0, OUTPUT_DESC(y)}, {1, OUTPUT_DESC(value_index)}}; - -// Switch -INPUT_MAP(Switch) = {{1, INPUT_DESC(data)}, {2, INPUT_DESC(pred)}}; -OUTPUT_MAP(Switch) = {{0, OUTPUT_DESC(output_false)}, {1, OUTPUT_DESC(output_true)}}; -ATTR_MAP(Switch) = EMPTY_ATTR_MAP; - -// AddN -INPUT_MAP(AddN) = EMPTY_INPUT_MAP; -DYN_INPUT_MAP(AddN) = {{1, DYN_INPUT_DESC(x)}}; -ATTR_MAP(AddN) = {{"n", ATTR_DESC(N, AnyTraits())}}; -OUTPUT_MAP(AddN) = {{0, OUTPUT_DESC(y)}}; - -// Mul -INPUT_MAP(Mul) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; -ATTR_MAP(Mul) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Mul) = {{0, OUTPUT_DESC(y)}}; - -// RealDiv -INPUT_MAP(RealDiv) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; -ATTR_MAP(RealDiv) = EMPTY_ATTR_MAP; -OUTPUT_MAP(RealDiv) = {{0, OUTPUT_DESC(y)}}; - -// Cast -INPUT_MAP(Cast) = {{1, INPUT_DESC(x)}}; -INPUT_ATTR_MAP(Cast) = {{2, ATTR_DESC(dst_type, AnyTraits())}}; -ATTR_MAP(Cast) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Cast) = {{0, OUTPUT_DESC(y)}}; - -// Case -INPUT_MAP(Case) = {{1, INPUT_DESC(branch_index)}}; -DYN_INPUT_MAP(Case) = {{2, DYN_INPUT_DESC(input)}}; -ATTR_MAP(Case) = EMPTY_ATTR_MAP; -DYN_OUTPUT_MAP(Case) = {{0, DYN_OUTPUT_DESC(output)}}; -DYN_SUBGRAPH_MAP(Case) = {{0, DYN_SUBGRAPH_DESC(branches)}}; - -// Reciprocal -INPUT_MAP(Reciprocal) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(Reciprocal) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Reciprocal) = {{0, OUTPUT_DESC(y)}}; - -// Sub -INPUT_MAP(Sub) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; -ATTR_MAP(Sub) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Sub) = {{0, OUTPUT_DESC(y)}}; - -// SplitD -INPUT_MAP(SplitD) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(SplitD) = {{"axis", ATTR_DESC(split_dim, AnyTraits())}, - {"output_num", ATTR_DESC(num_split, AnyTraits())}}; -DYN_OUTPUT_MAP(SplitD) = {{0, DYN_OUTPUT_DESC(y)}}; - -// Range -INPUT_MAP(RangeD) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(RangeD) = {{"start", ATTR_DESC(start, 
AnyTraits())}, - {"limit", ATTR_DESC(limit, AnyTraits())}, - {"delta", ATTR_DESC(delta, AnyTraits())}}; -OUTPUT_MAP(RangeD) = {{0, OUTPUT_DESC(y)}}; - -// Neg -INPUT_MAP(Neg) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(Neg) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Neg) = {{0, OUTPUT_DESC(y)}}; - -// Transpose -INPUT_MAP(TransposeD) = {{1, INPUT_DESC(x)}}; -INPUT_ATTR_MAP(TransposeD) = {{2, ATTR_DESC(perm, AnyTraits(), AnyTraits>())}}; -ATTR_MAP(TransposeD) = EMPTY_ATTR_MAP; -// Do not set Transpose operator output descriptor - -// DropOutGenMask -INPUT_MAP(DropOutGenMask) = {{1, INPUT_DESC(shape)}, {2, INPUT_DESC(prob)}}; -ATTR_MAP(DropOutGenMask) = {{"Seed0", ATTR_DESC(seed, AnyTraits())}, - {"Seed1", ATTR_DESC(seed2, AnyTraits())}}; -OUTPUT_MAP(DropOutGenMask) = {{0, OUTPUT_DESC(y)}}; - -// Pack -INPUT_MAP(Pack) = EMPTY_INPUT_MAP; -DYN_INPUT_MAP(Pack) = {{1, DYN_INPUT_DESC(x)}}; -ATTR_MAP(Pack) = {{"num", ATTR_DESC(N, AnyTraits())}, {"axis", ATTR_DESC(axis, AnyTraits())}}; -OUTPUT_MAP(Pack) = {{0, OUTPUT_DESC(y)}}; - -// ConcatD -INPUT_MAP(ConcatD) = EMPTY_INPUT_MAP; -DYN_INPUT_MAP(ConcatD) = {{1, DYN_INPUT_DESC(x)}}; -ATTR_MAP(ConcatD) = { - {"axis", ATTR_DESC(concat_dim, AnyTraits())}, - {"inputNums", ATTR_DESC(N, AnyTraits())}, -}; -OUTPUT_MAP(ConcatD) = {{0, OUTPUT_DESC(y)}}; - -// Less -INPUT_MAP(Less) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; -ATTR_MAP(Less) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Less) = {{0, OUTPUT_DESC(y)}}; - -// Rsqrt -INPUT_MAP(Rsqrt) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(Rsqrt) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Rsqrt) = {{0, OUTPUT_DESC(y)}}; - -// Sqrt -INPUT_MAP(Sqrt) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(Sqrt) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Sqrt) = {{0, OUTPUT_DESC(y)}}; - -// Square -INPUT_MAP(Square) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(Square) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Square) = {{0, OUTPUT_DESC(y)}}; - -// SquareSumAll -INPUT_MAP(SquareSumAll) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; -ATTR_MAP(SquareSumAll) = EMPTY_ATTR_MAP; -OUTPUT_MAP(SquareSumAll) = {{0, OUTPUT_DESC(y1)}, {1, OUTPUT_DESC(y2)}}; - -// Tanh -INPUT_MAP(Tanh) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(Tanh) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Tanh) = {{0, OUTPUT_DESC(y)}}; - -// TanhGrad -INPUT_MAP(TanhGrad) = {{1, INPUT_DESC(y)}, {2, INPUT_DESC(dy)}}; -ATTR_MAP(TanhGrad) = EMPTY_ATTR_MAP; -OUTPUT_MAP(TanhGrad) = {{0, OUTPUT_DESC(z)}}; - -// ReduceMinD -INPUT_MAP(ReduceMinD) = {{1, INPUT_DESC(x)}}; -INPUT_ATTR_MAP(ReduceMinD) = { - {2, ATTR_DESC(axes, AnyTraits>(), AnyTraits>())}}; -ATTR_MAP(ReduceMinD) = {{"keep_dims", ATTR_DESC(keep_dims, AnyTraits())}}; -OUTPUT_MAP(ReduceMinD) = {{0, OUTPUT_DESC(y)}}; - -// ReduceMaxD -INPUT_MAP(ReduceMaxD) = {{1, INPUT_DESC(x)}}; -INPUT_ATTR_MAP(ReduceMaxD) = { - {2, ATTR_DESC(axes, AnyTraits>(), AnyTraits>())}}; -ATTR_MAP(ReduceMaxD) = {{"keep_dims", ATTR_DESC(keep_dims, AnyTraits())}}; -OUTPUT_MAP(ReduceMaxD) = {{0, OUTPUT_DESC(y)}}; - -// Maximum -INPUT_MAP(Maximum) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; -ATTR_MAP(Maximum) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Maximum) = {{0, OUTPUT_DESC(y)}}; - -// Minimum -INPUT_MAP(Minimum) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; -ATTR_MAP(Minimum) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Minimum) = {{0, OUTPUT_DESC(y)}}; - -// MaximumGrad -INPUT_MAP(MaximumGrad) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}, {3, INPUT_DESC(grads)}}; -ATTR_MAP(MaximumGrad) = {{"grad_x", ATTR_DESC(grad_x, AnyTraits())}, - {"grad_y", ATTR_DESC(grad_y, AnyTraits())}}; -OUTPUT_MAP(MaximumGrad) = {{0, OUTPUT_DESC(y1)}, {1, OUTPUT_DESC(y2)}}; - -// MinimumGrad -INPUT_MAP(MinimumGrad) = 
{{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}, {3, INPUT_DESC(grads)}}; -ATTR_MAP(MinimumGrad) = {{"grad_x", ATTR_DESC(grad_x, AnyTraits())}, - {"grad_y", ATTR_DESC(grad_y, AnyTraits())}}; -OUTPUT_MAP(MinimumGrad) = {{0, OUTPUT_DESC(y1)}, {1, OUTPUT_DESC(y2)}}; - -// Pow -INPUT_MAP(Pow) = { - {1, INPUT_DESC(x1)}, - {2, INPUT_DESC(x2)}, -}; -ATTR_MAP(Pow) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Pow) = {{0, OUTPUT_DESC(y)}}; - -// Equal -INPUT_MAP(Equal) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; -ATTR_MAP(Equal) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Equal) = {{0, OUTPUT_DESC(y)}}; - -// NotEqual -INPUT_MAP(NotEqual) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; -ATTR_MAP(NotEqual) = EMPTY_ATTR_MAP; -OUTPUT_MAP(NotEqual) = {{0, OUTPUT_DESC(y)}}; - -// Log -INPUT_MAP(Log) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(Log) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Log) = {{0, OUTPUT_DESC(y)}}; - -// LogicalAnd -INPUT_MAP(LogicalAnd) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; -ATTR_MAP(LogicalAnd) = EMPTY_ATTR_MAP; -OUTPUT_MAP(LogicalAnd) = {{0, OUTPUT_DESC(y)}}; - -// LogicalOr -INPUT_MAP(LogicalOr) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; -ATTR_MAP(LogicalOr) = EMPTY_ATTR_MAP; -OUTPUT_MAP(LogicalOr) = {{0, OUTPUT_DESC(y)}}; - -// LogicalNot -INPUT_MAP(LogicalNot) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(LogicalNot) = EMPTY_ATTR_MAP; -OUTPUT_MAP(LogicalNot) = {{0, OUTPUT_DESC(y)}}; - -// Greater -INPUT_MAP(Greater) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; -ATTR_MAP(Greater) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Greater) = {{0, OUTPUT_DESC(y)}}; - -// LogSoftmaxGrad -INPUT_MAP(LogSoftmaxGrad) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(grad)}}; -ATTR_MAP(LogSoftmaxGrad) = { - {"axis", ATTR_DESC(axis, AnyTraits>(), AnyTraits>())}}; -OUTPUT_MAP(LogSoftmaxGrad) = {{0, OUTPUT_DESC(y)}}; - -// Select -INPUT_MAP(Select) = {{1, INPUT_DESC(condition)}, {2, INPUT_DESC(x1)}, {3, INPUT_DESC(x2)}}; -ATTR_MAP(Select) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Select) = {{0, OUTPUT_DESC(y)}}; - -// LessEqual -INPUT_MAP(LessEqual) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; -ATTR_MAP(LessEqual) = EMPTY_ATTR_MAP; -OUTPUT_MAP(LessEqual) = {{0, OUTPUT_DESC(y)}}; - -// LogSoftmaxV2 -INPUT_MAP(LogSoftmaxV2) = {{1, INPUT_DESC(logits)}}; -ATTR_MAP(LogSoftmaxV2) = { - {"axis", ATTR_DESC(axes, AnyTraits>(), AnyTraits>())}}; -OUTPUT_MAP(LogSoftmaxV2) = {{0, OUTPUT_DESC(logsoftmax)}}; - -// RandomChoiceWithMask -INPUT_MAP(RandomChoiceWithMask) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(RandomChoiceWithMask) = {{"count", ATTR_DESC(count, AnyTraits())}, - {"seed", ATTR_DESC(seed, AnyTraits())}, - {"seed2", ATTR_DESC(seed2, AnyTraits())}}; -OUTPUT_MAP(RandomChoiceWithMask) = {{0, OUTPUT_DESC(y)}, {1, OUTPUT_DESC(mask)}}; - -// TruncatedNormal -INPUT_MAP(TruncatedNormal) = {{1, INPUT_DESC(shape)}}; -ATTR_MAP(TruncatedNormal) = {{"seed", ATTR_DESC(seed, AnyTraits())}, - {"seed2", ATTR_DESC(seed2, AnyTraits())}}; -OUTPUT_MAP(TruncatedNormal) = {{0, OUTPUT_DESC(y)}}; - -// StridedSliceGrad -INPUT_MAP(StridedSliceGrad) = { - {1, INPUT_DESC(dy)}, {2, INPUT_DESC(shape)}, {3, INPUT_DESC(begin)}, {4, INPUT_DESC(end)}, {5, INPUT_DESC(strides)}}; -ATTR_MAP(StridedSliceGrad) = {{"begin_mask", ATTR_DESC(begin_mask, AnyTraits())}, - {"end_mask", ATTR_DESC(end_mask, AnyTraits())}, - {"ellipsis_mask", ATTR_DESC(ellipsis_mask, AnyTraits())}, - {"new_axis_mask", ATTR_DESC(new_axis_mask, AnyTraits())}, - {"shrink_axis_mask", ATTR_DESC(shrink_axis_mask, AnyTraits())}}; -OUTPUT_MAP(StridedSliceGrad) = {{0, OUTPUT_DESC(output)}}; - -// Gelu -INPUT_MAP(Gelu) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(Gelu) = EMPTY_ATTR_MAP; 
-OUTPUT_MAP(Gelu) = {{0, OUTPUT_DESC(y)}}; - -// GeluGrad -INPUT_MAP(GeluGrad) = {{1, INPUT_DESC(dy)}, {2, INPUT_DESC(x)}, {3, INPUT_DESC(y)}}; -ATTR_MAP(GeluGrad) = EMPTY_ATTR_MAP; -OUTPUT_MAP(GeluGrad) = {{0, OUTPUT_DESC(z)}}; - -// StridedSlice -INPUT_MAP(StridedSlice) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(begin)}, {3, INPUT_DESC(end)}, {4, INPUT_DESC(strides)}}; -ATTR_MAP(StridedSlice) = {{"begin_mask", ATTR_DESC(begin_mask, AnyTraits())}, - {"end_mask", ATTR_DESC(end_mask, AnyTraits())}, - {"ellipsis_mask", ATTR_DESC(ellipsis_mask, AnyTraits())}, - {"new_axis_mask", ATTR_DESC(new_axis_mask, AnyTraits())}, - {"shrink_axis_mask", ATTR_DESC(shrink_axis_mask, AnyTraits())}}; -OUTPUT_MAP(StridedSlice) = {{0, OUTPUT_DESC(y)}}; - -// UnsortedSegmentSum -INPUT_MAP(UnsortedSegmentSumD) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(segment_ids)}}; -INPUT_ATTR_MAP(UnsortedSegmentSumD) = {{3, ATTR_DESC(num_segments, AnyTraits())}}; -ATTR_MAP(UnsortedSegmentSumD) = EMPTY_ATTR_MAP; -OUTPUT_MAP(UnsortedSegmentSumD) = {{0, OUTPUT_DESC(y)}}; - -// UnsortedSegmentMin -INPUT_MAP(UnsortedSegmentMin) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(segment_ids)}, {3, INPUT_DESC(num_segments)}}; -ATTR_MAP(UnsortedSegmentMin) = EMPTY_ATTR_MAP; -OUTPUT_MAP(UnsortedSegmentMin) = {{0, OUTPUT_DESC(y)}}; - -// ExpandDims -INPUT_MAP(ExpandDims) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(axis)}}; -ATTR_MAP(ExpandDims) = EMPTY_ATTR_MAP; -OUTPUT_MAP(ExpandDims) = {{0, OUTPUT_DESC(y)}}; - -// Squeeze -INPUT_MAP(Squeeze) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(Squeeze) = {{"axis", ATTR_DESC(axis, AnyTraits(), AnyTraits>())}}; -OUTPUT_MAP(Squeeze) = {{0, OUTPUT_DESC(y)}}; - -// SGD -INPUT_MAP(SGD) = {{1, INPUT_DESC(parameters)}, {2, INPUT_DESC(gradient)}, {3, INPUT_DESC(learning_rate)}, - {4, INPUT_DESC(accum)}, {5, INPUT_DESC(momentum)}, {6, INPUT_DESC(stat)}}; -ATTR_MAP(SGD) = {{"dampening", ATTR_DESC(dampening, AnyTraits())}, - {"weight_decay", ATTR_DESC(weight_decay, AnyTraits())}, - {"nesterov", ATTR_DESC(nesterov, AnyTraits())}}; -OUTPUT_MAP(SGD) = {{0, OUTPUT_DESC(parameters)}}; - -// LayerNorm -INPUT_MAP(LayerNorm) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(gamma)}, {3, INPUT_DESC(beta)}}; -ATTR_MAP(LayerNorm) = {{"begin_norm_axis", ATTR_DESC(begin_norm_axis, AnyTraits())}, - {"begin_params_axis", ATTR_DESC(begin_params_axis, AnyTraits())}, - {"epsilon", ATTR_DESC(epsilon, AnyTraits())}}; -OUTPUT_MAP(LayerNorm) = {{0, OUTPUT_DESC(y)}, {1, OUTPUT_DESC(mean)}, {2, OUTPUT_DESC(variance)}}; - -// LayerNormGrad -INPUT_MAP(LayerNormGrad) = { - {1, INPUT_DESC(x)}, {2, INPUT_DESC(dy)}, {3, INPUT_DESC(variance)}, {4, INPUT_DESC(mean)}, {5, INPUT_DESC(gamma)}}; -ATTR_MAP(LayerNormGrad) = EMPTY_ATTR_MAP; -OUTPUT_MAP(LayerNormGrad) = {{0, OUTPUT_DESC(pd_x)}, {1, OUTPUT_DESC(pd_gamma)}, {2, OUTPUT_DESC(pd_beta)}}; - -// BatchMatMul -INPUT_MAP(BatchMatMul) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; -ATTR_MAP(BatchMatMul) = {{"transpose_x1", ATTR_DESC(adj_x1, AnyTraits())}, - {"transpose_x2", ATTR_DESC(adj_x2, AnyTraits())}}; -OUTPUT_MAP(BatchMatMul) = {{0, OUTPUT_DESC(y)}}; - -// DropoutDoMask -INPUT_MAP(DropOutDoMask) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(mask)}, {3, INPUT_DESC(keep_prob)}}; -ATTR_MAP(DropOutDoMask) = EMPTY_ATTR_MAP; -OUTPUT_MAP(DropOutDoMask) = {{0, OUTPUT_DESC(y)}}; - -// NPUGetFloatStatus -INPUT_MAP(NPUGetFloatStatus) = {{1, INPUT_DESC(addr)}}; -OUTPUT_MAP(NPUGetFloatStatus) = {{0, OUTPUT_DESC(data)}}; -ATTR_MAP(NPUGetFloatStatus) = EMPTY_ATTR_MAP; - -// NPUAllocFloatStatus -INPUT_MAP(NPUAllocFloatStatus) = EMPTY_INPUT_MAP; 
-ATTR_MAP(NPUAllocFloatStatus) = EMPTY_ATTR_MAP; -OUTPUT_MAP(NPUAllocFloatStatus) = {{0, OUTPUT_DESC(data)}}; - -// NPUClearFloatStatus -INPUT_MAP(NPUClearFloatStatus) = {{1, INPUT_DESC(addr)}}; -OUTPUT_MAP(NPUClearFloatStatus) = {{0, OUTPUT_DESC(data)}}; -ATTR_MAP(NPUClearFloatStatus) = EMPTY_ATTR_MAP; - -// Abs -INPUT_MAP(Abs) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(Abs) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Abs) = {{0, OUTPUT_DESC(y)}}; - -// AbsGrad -INPUT_MAP(AbsGrad) = {{1, INPUT_DESC(y)}, {2, INPUT_DESC(dy)}}; -ATTR_MAP(AbsGrad) = EMPTY_ATTR_MAP; -OUTPUT_MAP(AbsGrad) = {{0, OUTPUT_DESC(z)}}; - -// BinaryCrossEntropy -INPUT_MAP(BinaryCrossEntropy) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(y)}, {3, INPUT_DESC(weight)}}; -ATTR_MAP(BinaryCrossEntropy) = {{"reduction", ATTR_DESC(reduction, AnyTraits())}}; -OUTPUT_MAP(BinaryCrossEntropy) = {{0, OUTPUT_DESC(output)}}; - -// BinaryCrossEntropyGrad -INPUT_MAP(BinaryCrossEntropyGrad) = { - {1, INPUT_DESC(x)}, {2, INPUT_DESC(y)}, {3, INPUT_DESC(grad_output)}, {4, INPUT_DESC(weight)}}; -ATTR_MAP(BinaryCrossEntropyGrad) = {{"reduction", ATTR_DESC(reduction, AnyTraits())}}; -OUTPUT_MAP(BinaryCrossEntropyGrad) = {{0, OUTPUT_DESC(output)}}; - -// SparseApplyAdagradD -INPUT_MAP(SparseApplyAdagradD) = { - {1, INPUT_DESC(var)}, {2, INPUT_DESC(accum)}, {3, INPUT_DESC(grad)}, {4, INPUT_DESC(indices)}}; -ATTR_MAP(SparseApplyAdagradD) = {{"lr", ATTR_DESC(lr, AnyTraits())}, - {"use_locking", ATTR_DESC(use_locking, AnyTraits())}}; -OUTPUT_MAP(SparseApplyAdagradD) = {{0, OUTPUT_DESC(var)}}; - -// ApplyProximalAdagradD -INPUT_MAP(ApplyProximalAdagradD) = {{1, INPUT_DESC(var)}, {2, INPUT_DESC(accum)}, {3, INPUT_DESC(lr)}, - {4, INPUT_DESC(l1)}, {5, INPUT_DESC(l2)}, {6, INPUT_DESC(grad)}}; -ATTR_MAP(ApplyProximalAdagradD) = {{"use_locking", ATTR_DESC(use_locking, AnyTraits())}}; -OUTPUT_MAP(ApplyProximalAdagradD) = {{0, OUTPUT_DESC(var)}, {1, OUTPUT_DESC(accum)}}; - -// SparseApplyFtrlD -INPUT_MAP(SparseApplyFtrlD) = {{1, INPUT_DESC(var)}, - {2, INPUT_DESC(accum)}, - {3, INPUT_DESC(linear)}, - {4, INPUT_DESC(grad)}, - {5, INPUT_DESC(indices)}}; -ATTR_MAP(SparseApplyFtrlD) = {{"use_locking", ATTR_DESC(use_locking, AnyTraits())}, - {"lr", ATTR_DESC(lr, AnyTraits())}, - {"l1", ATTR_DESC(l1, AnyTraits())}, - {"l2", ATTR_DESC(l2, AnyTraits())}, - {"lr_power", ATTR_DESC(lr_power, AnyTraits())}}; -OUTPUT_MAP(SparseApplyFtrlD) = {{0, OUTPUT_DESC(var)}}; - -// SpaceToDepth -INPUT_MAP(SpaceToDepth) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(SpaceToDepth) = {{"block_size", ATTR_DESC(block_size, AnyTraits())}}; -OUTPUT_MAP(SpaceToDepth) = {{0, OUTPUT_DESC(y)}}; - -// DepthToSpace -INPUT_MAP(DepthToSpace) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(DepthToSpace) = {{"block_size", ATTR_DESC(block_size, AnyTraits())}}; -OUTPUT_MAP(DepthToSpace) = {{0, OUTPUT_DESC(y)}}; - -// Sign -INPUT_MAP(Sign) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(Sign) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Sign) = {{0, OUTPUT_DESC(y)}}; - -// Round -INPUT_MAP(Round) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(Round) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Round) = {{0, OUTPUT_DESC(y)}}; - -// ApplyFtrlD -INPUT_MAP(ApplyFtrlD) = {{1, INPUT_DESC(var)}, {2, INPUT_DESC(accum)}, {3, INPUT_DESC(linear)}, - {4, INPUT_DESC(grad)}, {5, INPUT_DESC(lr)}, {6, INPUT_DESC(l1)}, - {7, INPUT_DESC(l2)}, {8, INPUT_DESC(lr_power)}}; -ATTR_MAP(ApplyFtrlD) = {{"use_locking", ATTR_DESC(use_locking, AnyTraits())}}; -OUTPUT_MAP(ApplyFtrlD) = {{0, OUTPUT_DESC(var)}, {1, OUTPUT_DESC(accum)}, {2, OUTPUT_DESC(linear)}}; - -// Diag -INPUT_MAP(Diag) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(Diag) = 
EMPTY_ATTR_MAP; -OUTPUT_MAP(Diag) = {{0, OUTPUT_DESC(y)}}; - -// DiagPart -INPUT_MAP(DiagPart) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(DiagPart) = EMPTY_ATTR_MAP; -OUTPUT_MAP(DiagPart) = {{0, OUTPUT_DESC(y)}}; - -// SpaceToBatchD -INPUT_MAP(SpaceToBatchD) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(SpaceToBatchD) = { - {"block_size", ATTR_DESC(block_size, AnyTraits())}, - {"paddings", ATTR_DESC(paddings, AnyTraits>>(), AnyTraits>())}}; -OUTPUT_MAP(SpaceToBatchD) = {{0, OUTPUT_DESC(y)}}; - -// BatchToSpaceD -INPUT_MAP(BatchToSpaceD) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(BatchToSpaceD) = { - {"block_size", ATTR_DESC(block_size, AnyTraits())}, - {"crops", ATTR_DESC(crops, AnyTraits>>(), AnyTraits>())}}; -OUTPUT_MAP(BatchToSpaceD) = {{0, OUTPUT_DESC(y)}}; - -// Atan2 -INPUT_MAP(Atan2) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; -ATTR_MAP(Atan2) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Atan2) = {{0, OUTPUT_DESC(y)}}; - -// ApplyRMSPropD -INPUT_MAP(ApplyRMSPropD) = { - {1, INPUT_DESC(var)}, {2, INPUT_DESC(ms)}, {3, INPUT_DESC(mom)}, {4, INPUT_DESC(lr)}, {5, INPUT_DESC(grad)}}; -INPUT_ATTR_MAP(ApplyRMSPropD) = {{6, ATTR_DESC(rho, AnyTraits())}, - {7, ATTR_DESC(momentum, AnyTraits())}, - {8, ATTR_DESC(epsilon, AnyTraits())}}; -ATTR_MAP(ApplyRMSPropD) = {{"use_locking", ATTR_DESC(use_locking, AnyTraits())}}; -OUTPUT_MAP(ApplyRMSPropD) = {{0, OUTPUT_DESC(var)}}; - -// ApplyCenteredRMSPropD -INPUT_MAP(ApplyCenteredRMSPropD) = {{1, INPUT_DESC(var)}, {2, INPUT_DESC(mg)}, {3, INPUT_DESC(ms)}, - {4, INPUT_DESC(mom)}, {5, INPUT_DESC(grad)}, {6, INPUT_DESC(lr)}, - {7, INPUT_DESC(rho)}, {8, INPUT_DESC(momentum)}, {9, INPUT_DESC(epsilon)}}; -ATTR_MAP(ApplyCenteredRMSPropD) = {{"use_locking", ATTR_DESC(use_locking, AnyTraits())}}; -OUTPUT_MAP(ApplyCenteredRMSPropD) = { - {0, OUTPUT_DESC(var)}, {1, OUTPUT_DESC(mg)}, {2, OUTPUT_DESC(ms)}, {3, OUTPUT_DESC(mom)}}; - -// BasicLSTMCell -INPUT_MAP(BasicLSTMCell) = { - {1, INPUT_DESC(x)}, {2, INPUT_DESC(h)}, {3, INPUT_DESC(c)}, {4, INPUT_DESC(w)}, {5, INPUT_DESC(b)}}; -ATTR_MAP(BasicLSTMCell) = {{"keep_prob", ATTR_DESC(keep_prob, AnyTraits())}, - {"forget_bias", ATTR_DESC(forget_bias, AnyTraits())}, - {"state_is_tuple", ATTR_DESC(state_is_tuple, AnyTraits())}, - {"activation", ATTR_DESC(activation, AnyTraits())}}; -OUTPUT_MAP(BasicLSTMCell) = {{0, OUTPUT_DESC(ct)}, {1, OUTPUT_DESC(ht)}, {2, OUTPUT_DESC(it)}, {3, OUTPUT_DESC(jt)}, - {4, OUTPUT_DESC(ft)}, {5, OUTPUT_DESC(ot)}, {7, OUTPUT_DESC(tanhct)}}; - -// BasicLSTMCellInputGrad -INPUT_MAP(BasicLSTMCellInputGrad) = {{1, INPUT_DESC(dgate)}, {2, INPUT_DESC(w)}}; -ATTR_MAP(BasicLSTMCellInputGrad) = {{"keep_prob", ATTR_DESC(keep_prob, AnyTraits())}}; -OUTPUT_MAP(BasicLSTMCellInputGrad) = {{0, OUTPUT_DESC(dxt)}, {1, OUTPUT_DESC(dht)}}; - -// BasicLSTMCellWeightGrad -INPUT_MAP(BasicLSTMCellWeightGrad) = {{1, INPUT_DESC(h)}, {2, INPUT_DESC(x)}, {3, INPUT_DESC(dgate)}}; -ATTR_MAP(BasicLSTMCellWeightGrad) = EMPTY_ATTR_MAP; -OUTPUT_MAP(BasicLSTMCellWeightGrad) = {{0, OUTPUT_DESC(dw)}, {1, OUTPUT_DESC(db)}}; - -// BasicLSTMCellCStateGrad -INPUT_MAP(BasicLSTMCellCStateGrad) = {{1, INPUT_DESC(c)}, {2, INPUT_DESC(dht)}, {3, INPUT_DESC(dct)}, - {4, INPUT_DESC(it)}, {5, INPUT_DESC(jt)}, {6, INPUT_DESC(ft)}, - {7, INPUT_DESC(ot)}, {8, INPUT_DESC(tanhct)}}; -ATTR_MAP(BasicLSTMCellCStateGrad) = {{"forget_bias", ATTR_DESC(forget_bias, AnyTraits())}, - {"activation", ATTR_DESC(activation, AnyTraits())}}; -OUTPUT_MAP(BasicLSTMCellCStateGrad) = {{0, OUTPUT_DESC(dgate)}, {1, OUTPUT_DESC(dct_1)}}; - -// L2Loss -INPUT_MAP(L2Loss) = {{1, INPUT_DESC(x)}}; 
-ATTR_MAP(L2Loss) = EMPTY_ATTR_MAP; -OUTPUT_MAP(L2Loss) = {{0, OUTPUT_DESC(y)}}; - -// CTCLoss -INPUT_MAP(CTCLoss) = {{1, INPUT_DESC(inputs)}, - {2, INPUT_DESC(labels_indices)}, - {3, INPUT_DESC(labels_values)}, - {4, INPUT_DESC(sequence_length)}}; -ATTR_MAP(CTCLoss) = { - {"preprocess_collapse_repeated", ATTR_DESC(preprocess_collapse_repeated, AnyTraits())}, - {"ctc_merge_repeated", ATTR_DESC(ctc_merge_repeated, AnyTraits())}, - {"ignore_longer_outputs_than_inputs", ATTR_DESC(ignore_longer_outputs_than_inputs, AnyTraits())}}; -OUTPUT_MAP(CTCLoss) = {{0, OUTPUT_DESC(loss)}, {1, OUTPUT_DESC(gradient)}}; - -// ReverseSequence -INPUT_MAP(ReverseSequence) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(seq_lengths)}}; -ATTR_MAP(ReverseSequence) = {{"seq_dim", ATTR_DESC(seq_dim, AnyTraits())}, - {"batch_dim", ATTR_DESC(batch_dim, AnyTraits())}}; -OUTPUT_MAP(ReverseSequence) = {{0, OUTPUT_DESC(y)}}; - -// AscendQuant -INPUT_MAP(AscendQuant) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(AscendQuant) = {{"scale", ATTR_DESC(scale, AnyTraits())}, - {"offset", ATTR_DESC(offset, AnyTraits())}, - {"sqrt_mode", ATTR_DESC(sqrt_mode, AnyTraits())}, - {"round_mode", ATTR_DESC(round_mode, AnyTraits())}}; -OUTPUT_MAP(AscendQuant) = {{0, OUTPUT_DESC(y)}}; - -// AscendDequant -INPUT_MAP(AscendDequant) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(deq_scale)}}; -ATTR_MAP(AscendDequant) = {{"sqrt_mode", ATTR_DESC(sqrt_mode, AnyTraits())}, - {"relu_flag", ATTR_DESC(relu_flag, AnyTraits())}, - {"dtype", ATTR_DESC(dtype, AnyTraits())}}; -OUTPUT_MAP(AscendDequant) = {{0, OUTPUT_DESC(y)}}; -#ifdef ENABLE_GE -// Print -INPUT_MAP(Print) = EMPTY_INPUT_MAP; -DYN_INPUT_MAP(Print) = {{1, DYN_INPUT_DESC(x)}}; -ATTR_MAP(Print) = EMPTY_ATTR_MAP; -#endif -} // namespace transform -} // namespace mindspore diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare.h deleted file mode 100755 index 9da23b49dad..00000000000 --- a/mindspore/ccsrc/transform/graph_ir/op_declare.h +++ /dev/null @@ -1,516 +0,0 @@ -/** - * Copyright 2019 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_H_ -#define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_H_ - -#include -#include -#include "transform/graph_ir/op_adapter.h" - -namespace mindspore { -namespace transform { -#define DECLARE_OP_ADAPTER(T) \ - using T = ge::op::T; \ - template <> \ - const std::unordered_map OpAdapter::input_map_; \ - template <> \ - const std::unordered_map OpAdapter::attr_map_; - -#define DECLARE_OP_USE_OUTPUT(T) \ - template <> \ - const std::unordered_map OpAdapter::output_map_; - -#define DECLARE_OP_USE_ENUM(T) \ - template <> \ - const std::unordered_map OpAdapter::enum_map_; - -#define DECLARE_OP_USE_INPUT_ATTR(T) \ - template <> \ - const std::unordered_map OpAdapter::input_attr_map_; - -#define DECLARE_OP_USE_DYN_INPUT(T) \ - template <> \ - const std::unordered_map OpAdapter::dyn_input_map_; - -#define DECLARE_OP_USE_DYN_SUBGRAPH(T) \ - template <> \ - const std::unordered_map OpAdapter::dyn_subgraph_map_; - -#define DECLARE_OP_USE_DYN_OUTPUT(T) \ - template <> \ - const std::unordered_map OpAdapter::dyn_output_map_; - -template <> -std::unordered_map> OpAdapter::cus_input_map_; -template <> -std::unordered_map> OpAdapter::cus_output_map_; - -DECLARE_OP_ADAPTER(GreaterEqual) -DECLARE_OP_USE_OUTPUT(GreaterEqual) -DECLARE_OP_ADAPTER(SliceD) -DECLARE_OP_USE_INPUT_ATTR(SliceD) -DECLARE_OP_USE_OUTPUT(SliceD) -DECLARE_OP_ADAPTER(AssignAdd) -DECLARE_OP_USE_OUTPUT(AssignAdd) -DECLARE_OP_ADAPTER(AssignSub) -DECLARE_OP_USE_OUTPUT(AssignSub) - -DECLARE_OP_ADAPTER(ReduceMean) - -// ** Distributed Operations ** -DECLARE_OP_ADAPTER(HcomReduceScatter) -DECLARE_OP_USE_OUTPUT(HcomReduceScatter) -DECLARE_OP_ADAPTER(HcomBroadcast) -DECLARE_OP_USE_DYN_INPUT(HcomBroadcast) -DECLARE_OP_USE_DYN_OUTPUT(HcomBroadcast) -DECLARE_OP_ADAPTER(HcomAllReduce) -DECLARE_OP_USE_OUTPUT(HcomAllReduce) -DECLARE_OP_ADAPTER(HcomAllGather) -DECLARE_OP_USE_OUTPUT(HcomAllGather) -DECLARE_OP_ADAPTER(Variable) -DECLARE_OP_ADAPTER(ReluGrad) -DECLARE_OP_USE_OUTPUT(ReluGrad) -DECLARE_OP_ADAPTER(BiasAddGrad) -DECLARE_OP_USE_OUTPUT(BiasAddGrad) -DECLARE_OP_ADAPTER(MaxPoolWithArgmax) -DECLARE_OP_USE_OUTPUT(MaxPoolWithArgmax) -DECLARE_OP_ADAPTER(MaxPoolGradWithArgmax) -DECLARE_OP_USE_OUTPUT(MaxPoolGradWithArgmax) -DECLARE_OP_ADAPTER(Conv2D) -DECLARE_OP_USE_ENUM(Conv2D) -DECLARE_OP_USE_OUTPUT(Conv2D) -DECLARE_OP_ADAPTER(ExtractImagePatches) -DECLARE_OP_USE_OUTPUT(ExtractImagePatches) -DECLARE_OP_ADAPTER(Conv2DBackpropInputD) -DECLARE_OP_USE_ENUM(Conv2DBackpropInputD) -DECLARE_OP_USE_INPUT_ATTR(Conv2DBackpropInputD) -DECLARE_OP_USE_OUTPUT(Conv2DBackpropInputD) -DECLARE_OP_ADAPTER(Conv2DBackpropFilterD) -DECLARE_OP_USE_ENUM(Conv2DBackpropFilterD) -DECLARE_OP_USE_INPUT_ATTR(Conv2DBackpropFilterD) -DECLARE_OP_USE_OUTPUT(Conv2DBackpropFilterD) -DECLARE_OP_ADAPTER(DepthwiseConv2D) -DECLARE_OP_USE_ENUM(DepthwiseConv2D) -DECLARE_OP_USE_OUTPUT(DepthwiseConv2D) -DECLARE_OP_ADAPTER(DepthwiseConv2DBackpropFilterD) -DECLARE_OP_USE_INPUT_ATTR(DepthwiseConv2DBackpropFilterD) -DECLARE_OP_USE_OUTPUT(DepthwiseConv2DBackpropFilterD) -DECLARE_OP_ADAPTER(DepthwiseConv2DBackpropInputD) -DECLARE_OP_USE_INPUT_ATTR(DepthwiseConv2DBackpropInputD) -DECLARE_OP_USE_OUTPUT(DepthwiseConv2DBackpropInputD) -DECLARE_OP_ADAPTER(Reshape) -DECLARE_OP_USE_OUTPUT(Reshape) -DECLARE_OP_ADAPTER(TransShape) -DECLARE_OP_USE_INPUT_ATTR(TransShape) -DECLARE_OP_USE_OUTPUT(TransShape) -DECLARE_OP_ADAPTER(Iou) -DECLARE_OP_USE_OUTPUT(Iou) -DECLARE_OP_ADAPTER(ResizeNearestNeighborV2D) -DECLARE_OP_USE_OUTPUT(ResizeNearestNeighborV2D) 
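The DECLARE_OP_* macros above only declare explicit specializations of the adapter's static lookup tables; the matching INPUT_MAP/ATTR_MAP/OUTPUT_MAP assignments in a .cc file supply the definitions, which is what keeps each including translation unit cheap to compile. A minimal, self-contained sketch of that declare/define split follows; MiniAdapter, InputDesc and the macro name are simplified stand-ins for the real MindSpore types, whose template arguments were lost when this patch was extracted:

    #include <iostream>
    #include <string>
    #include <unordered_map>

    struct InputDesc {
      std::string name;  // GE input name that a framework input index maps to
    };

    template <typename T>
    class MiniAdapter {
     public:
      static const std::unordered_map<int, InputDesc> input_map_;
    };

    // What a DECLARE_OP_ADAPTER-style macro could expand to in a header:
    // a declaration (no initializer) of the specialized static member.
    #define MINI_DECLARE_OP_ADAPTER(T) \
      template <>                      \
      const std::unordered_map<int, InputDesc> MiniAdapter<T>::input_map_;

    struct Reshape {};  // tag type standing in for ge::op::Reshape
    MINI_DECLARE_OP_ADAPTER(Reshape)

    // ...and the matching INPUT_MAP-style definition, placed in one .cc file:
    template <>
    const std::unordered_map<int, InputDesc> MiniAdapter<Reshape>::input_map_ = {
        {1, {"x"}}, {2, {"shape"}}};

    int main() {
      // Iteration order of an unordered_map is unspecified; this is just a demo.
      for (const auto &kv : MiniAdapter<Reshape>::input_map_) {
        std::cout << "input " << kv.first << " -> " << kv.second.name << "\n";
      }
      return 0;
    }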
-DECLARE_OP_ADAPTER(ResizeNearestNeighborV2Grad) -DECLARE_OP_USE_OUTPUT(ResizeNearestNeighborV2Grad) -DECLARE_OP_ADAPTER(ApplyAdam) -DECLARE_OP_USE_OUTPUT(ApplyAdam) -DECLARE_OP_ADAPTER(ApplyAdamD) -DECLARE_OP_USE_OUTPUT(ApplyAdamD) -DECLARE_OP_ADAPTER(Relu6) -DECLARE_OP_USE_OUTPUT(Relu6) -DECLARE_OP_ADAPTER(Relu6Grad) -DECLARE_OP_USE_OUTPUT(Relu6Grad) -DECLARE_OP_ADAPTER(ResizeBilinearV2D) -DECLARE_OP_USE_OUTPUT(ResizeBilinearV2D) -DECLARE_OP_ADAPTER(ResizeBilinearV2Grad) -DECLARE_OP_USE_OUTPUT(ResizeBilinearV2Grad) -DECLARE_OP_ADAPTER(ZerosLike) -DECLARE_OP_USE_OUTPUT(ZerosLike) -DECLARE_OP_ADAPTER(OnesLike) -DECLARE_OP_USE_OUTPUT(OnesLike) -DECLARE_OP_ADAPTER(TensorScatterUpdate) -DECLARE_OP_USE_OUTPUT(TensorScatterUpdate) -DECLARE_OP_ADAPTER(ScatterUpdate) -DECLARE_OP_USE_OUTPUT(ScatterUpdate) -DECLARE_OP_ADAPTER(ScatterNdUpdate) -DECLARE_OP_USE_OUTPUT(ScatterNdUpdate) -DECLARE_OP_ADAPTER(ScatterMax) -DECLARE_OP_USE_OUTPUT(ScatterMax) -DECLARE_OP_ADAPTER(NMSWithMask) -DECLARE_OP_USE_OUTPUT(NMSWithMask) -DECLARE_OP_ADAPTER(Unpack) -DECLARE_OP_USE_DYN_OUTPUT(Unpack) -DECLARE_OP_ADAPTER(CheckValid) -DECLARE_OP_USE_OUTPUT(CheckValid) -DECLARE_OP_ADAPTER(SmoothL1Loss) -DECLARE_OP_USE_OUTPUT(SmoothL1Loss) -DECLARE_OP_ADAPTER(SmoothL1LossGrad) -DECLARE_OP_USE_OUTPUT(SmoothL1LossGrad) -DECLARE_OP_ADAPTER(SigmoidCrossEntropyWithLogits) -DECLARE_OP_USE_OUTPUT(SigmoidCrossEntropyWithLogits) -DECLARE_OP_ADAPTER(SigmoidCrossEntropyWithLogitsGrad) -DECLARE_OP_USE_OUTPUT(SigmoidCrossEntropyWithLogitsGrad) -DECLARE_OP_ADAPTER(ScatterNdD) -DECLARE_OP_USE_INPUT_ATTR(ScatterNdD) -DECLARE_OP_USE_OUTPUT(ScatterNdD) -DECLARE_OP_ADAPTER(PadD) -DECLARE_OP_USE_OUTPUT(PadD) -DECLARE_OP_ADAPTER(MirrorPad) -DECLARE_OP_USE_OUTPUT(MirrorPad) -DECLARE_OP_ADAPTER(MirrorPadGrad) -DECLARE_OP_USE_OUTPUT(MirrorPadGrad) -DECLARE_OP_ADAPTER(BoundingBoxEncode) -DECLARE_OP_USE_OUTPUT(BoundingBoxEncode) -DECLARE_OP_ADAPTER(BoundingBoxDecode) -DECLARE_OP_USE_OUTPUT(BoundingBoxDecode) -DECLARE_OP_ADAPTER(GatherNd) -DECLARE_OP_USE_OUTPUT(GatherNd) -DECLARE_OP_ADAPTER(ArgMaxD) -DECLARE_OP_USE_OUTPUT(ArgMaxD) -DECLARE_OP_ADAPTER(ArgMinD) -DECLARE_OP_USE_OUTPUT(ArgMinD) -DECLARE_OP_ADAPTER(ArgMaxWithValue) -DECLARE_OP_USE_OUTPUT(ArgMaxWithValue) -DECLARE_OP_ADAPTER(ArgMinWithValue) -DECLARE_OP_USE_OUTPUT(ArgMinWithValue) -DECLARE_OP_ADAPTER(Mul) -DECLARE_OP_USE_OUTPUT(Mul) -DECLARE_OP_ADAPTER(AddN) -DECLARE_OP_USE_DYN_INPUT(AddN) -DECLARE_OP_USE_OUTPUT(AddN) -DECLARE_OP_ADAPTER(Less) -DECLARE_OP_USE_OUTPUT(Less) -DECLARE_OP_ADAPTER(Rsqrt) -DECLARE_OP_USE_OUTPUT(Rsqrt) -DECLARE_OP_ADAPTER(Sqrt) -DECLARE_OP_USE_OUTPUT(Sqrt) -DECLARE_OP_ADAPTER(Square) -DECLARE_OP_USE_OUTPUT(Square) -DECLARE_OP_ADAPTER(SplitD) -DECLARE_OP_USE_DYN_OUTPUT(SplitD) -DECLARE_OP_ADAPTER(SGD) -DECLARE_OP_USE_OUTPUT(SGD) -DECLARE_OP_ADAPTER(SquareSumAll) -DECLARE_OP_USE_OUTPUT(SquareSumAll) - -DECLARE_OP_ADAPTER(Tanh) -DECLARE_OP_USE_OUTPUT(Tanh) -DECLARE_OP_ADAPTER(TanhGrad) -DECLARE_OP_USE_OUTPUT(TanhGrad) -DECLARE_OP_ADAPTER(Maximum) -DECLARE_OP_USE_OUTPUT(Maximum) -DECLARE_OP_ADAPTER(Minimum) -DECLARE_OP_USE_OUTPUT(Minimum) -DECLARE_OP_ADAPTER(MaximumGrad) -DECLARE_OP_USE_OUTPUT(MaximumGrad) -DECLARE_OP_ADAPTER(MinimumGrad) -DECLARE_OP_USE_OUTPUT(MinimumGrad) -DECLARE_OP_ADAPTER(ReduceMinD) -DECLARE_OP_USE_INPUT_ATTR(ReduceMinD) -DECLARE_OP_USE_OUTPUT(ReduceMinD) -DECLARE_OP_ADAPTER(ReduceMaxD) -DECLARE_OP_USE_INPUT_ATTR(ReduceMaxD) -DECLARE_OP_USE_OUTPUT(ReduceMaxD) -DECLARE_OP_ADAPTER(Merge) -DECLARE_OP_USE_DYN_INPUT(Merge) -DECLARE_OP_USE_OUTPUT(Merge) 
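Adapters declared with DECLARE_OP_USE_DYN_INPUT (Merge above; also AddN, Pack, ConcatD and HcomBroadcast) take a variable number of operands, so the table records a single dynamic slot rather than one entry per input index. A rough sketch, assuming a converter that sizes the slot from the node's actual operand count; DynInputDesc and BindDynamicInputs are illustrative names, not the real API:

    #include <iostream>
    #include <string>
    #include <vector>

    struct DynInputDesc {
      std::string name;  // logical slot name, e.g. the "x" slot of AddN
    };

    // At conversion time the adapter would size the dynamic slot from the
    // node's actual operand count, conceptually like this:
    void BindDynamicInputs(const DynInputDesc &slot,
                           const std::vector<std::string> &operands) {
      std::cout << "create_dynamic_input_" << slot.name << "("
                << operands.size() << ")\n";
      for (size_t i = 0; i < operands.size(); ++i) {
        std::cout << "  " << slot.name << i << " <- " << operands[i] << "\n";
      }
    }

    int main() {
      // AddN with three summands: the one dynamic slot "x" expands to x0..x2.
      BindDynamicInputs({"x"}, {"a", "b", "c"});
      return 0;
    }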
-DECLARE_OP_ADAPTER(Switch) -DECLARE_OP_USE_OUTPUT(Switch) - -DECLARE_OP_ADAPTER(TopK) -DECLARE_OP_USE_OUTPUT(TopK) - -DECLARE_OP_ADAPTER(RealDiv) -DECLARE_OP_USE_OUTPUT(RealDiv) - -DECLARE_OP_ADAPTER(Cast) -DECLARE_OP_USE_INPUT_ATTR(Cast) -DECLARE_OP_USE_OUTPUT(Cast) -DECLARE_OP_ADAPTER(Case) -DECLARE_OP_USE_DYN_INPUT(Case) -DECLARE_OP_USE_DYN_SUBGRAPH(Case) -DECLARE_OP_USE_DYN_OUTPUT(Case) -DECLARE_OP_ADAPTER(Reciprocal) -DECLARE_OP_USE_OUTPUT(Reciprocal) -DECLARE_OP_ADAPTER(Neg) -DECLARE_OP_USE_OUTPUT(Neg) -DECLARE_OP_ADAPTER(TransposeD) -DECLARE_OP_USE_INPUT_ATTR(TransposeD) -// Do not set Transpose operator output descriptor -DECLARE_OP_ADAPTER(Sub) -DECLARE_OP_USE_OUTPUT(Sub) -DECLARE_OP_ADAPTER(DropOutGenMask) -DECLARE_OP_USE_OUTPUT(DropOutGenMask) -DECLARE_OP_ADAPTER(ConcatD) -DECLARE_OP_USE_DYN_INPUT(ConcatD) -DECLARE_OP_USE_OUTPUT(ConcatD) -DECLARE_OP_ADAPTER(Pack) -DECLARE_OP_USE_DYN_INPUT(Pack) -DECLARE_OP_USE_OUTPUT(Pack) - -DECLARE_OP_ADAPTER(Pow) -DECLARE_OP_USE_OUTPUT(Pow) -DECLARE_OP_ADAPTER(Equal) -DECLARE_OP_USE_OUTPUT(Equal) -DECLARE_OP_ADAPTER(NotEqual) -DECLARE_OP_USE_OUTPUT(NotEqual) -DECLARE_OP_ADAPTER(Log) -DECLARE_OP_USE_OUTPUT(Log) -DECLARE_OP_ADAPTER(LogicalAnd) -DECLARE_OP_USE_OUTPUT(LogicalAnd) -DECLARE_OP_ADAPTER(LogicalOr) -DECLARE_OP_USE_OUTPUT(LogicalOr) -DECLARE_OP_ADAPTER(LogicalNot) -DECLARE_OP_USE_OUTPUT(LogicalNot) -DECLARE_OP_ADAPTER(LogSoftmaxGrad) -DECLARE_OP_USE_OUTPUT(LogSoftmaxGrad) - -DECLARE_OP_ADAPTER(RandomChoiceWithMask) -DECLARE_OP_USE_OUTPUT(RandomChoiceWithMask) - -DECLARE_OP_ADAPTER(Select) -DECLARE_OP_USE_OUTPUT(Select) -DECLARE_OP_ADAPTER(LessEqual) -DECLARE_OP_USE_OUTPUT(LessEqual) -DECLARE_OP_ADAPTER(LogSoftmaxV2) -DECLARE_OP_USE_OUTPUT(LogSoftmaxV2) -DECLARE_OP_ADAPTER(TruncatedNormal) -DECLARE_OP_USE_OUTPUT(TruncatedNormal) -DECLARE_OP_ADAPTER(StridedSliceGrad) -DECLARE_OP_USE_OUTPUT(StridedSliceGrad) -DECLARE_OP_ADAPTER(Gelu) -DECLARE_OP_USE_OUTPUT(Gelu) -DECLARE_OP_ADAPTER(GeluGrad) -DECLARE_OP_USE_OUTPUT(GeluGrad) -DECLARE_OP_ADAPTER(StridedSlice) -DECLARE_OP_USE_OUTPUT(StridedSlice) -DECLARE_OP_ADAPTER(UnsortedSegmentSumD) -DECLARE_OP_USE_INPUT_ATTR(UnsortedSegmentSumD) -DECLARE_OP_USE_OUTPUT(UnsortedSegmentSumD) -DECLARE_OP_ADAPTER(UnsortedSegmentMin) -DECLARE_OP_USE_OUTPUT(UnsortedSegmentMin) -DECLARE_OP_ADAPTER(ExpandDims) -DECLARE_OP_USE_OUTPUT(ExpandDims) -DECLARE_OP_ADAPTER(Squeeze) -DECLARE_OP_USE_OUTPUT(Squeeze) -DECLARE_OP_ADAPTER(LayerNorm) -DECLARE_OP_USE_OUTPUT(LayerNorm) -DECLARE_OP_ADAPTER(LayerNormGrad) -DECLARE_OP_USE_OUTPUT(LayerNormGrad) -DECLARE_OP_ADAPTER(BatchMatMul) -DECLARE_OP_USE_OUTPUT(BatchMatMul) -DECLARE_OP_ADAPTER(DropOutDoMask) -DECLARE_OP_USE_OUTPUT(DropOutDoMask) -// ** Mix-precision Operations ** -DECLARE_OP_ADAPTER(NPUGetFloatStatus) -DECLARE_OP_USE_OUTPUT(NPUGetFloatStatus) -DECLARE_OP_ADAPTER(NPUAllocFloatStatus) -DECLARE_OP_USE_OUTPUT(NPUAllocFloatStatus) -DECLARE_OP_ADAPTER(NPUClearFloatStatus) -DECLARE_OP_USE_OUTPUT(NPUClearFloatStatus) -DECLARE_OP_ADAPTER(MatMulV2) -DECLARE_OP_USE_OUTPUT(MatMulV2) - -DECLARE_OP_ADAPTER(SoftmaxCrossEntropyWithLogits) -DECLARE_OP_USE_OUTPUT(SoftmaxCrossEntropyWithLogits) - -DECLARE_OP_ADAPTER(Assign) -DECLARE_OP_USE_OUTPUT(Assign) -DECLARE_OP_ADAPTER(Constant) -DECLARE_OP_USE_OUTPUT(Constant) -DECLARE_OP_ADAPTER(ApplyMomentum) -DECLARE_OP_USE_OUTPUT(ApplyMomentum) -// ** Summary Operations ** -DECLARE_OP_ADAPTER(Summary) - -// fully supported -DECLARE_OP_ADAPTER(Add) -DECLARE_OP_USE_OUTPUT(Add) -DECLARE_OP_ADAPTER(Const) -DECLARE_OP_USE_OUTPUT(Const) 
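An INPUT_ATTR_MAP (Cast and TransposeD above, and the various *D ops) redirects a positional input whose value is a compile-time constant into a GE attribute instead of a graph edge; Cast's second input, for example, becomes the dst_type attribute. A simplified stand-in for that input-to-attribute step, with hypothetical types:

    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <string>
    #include <variant>

    using AttrValue = std::variant<int64_t, std::string>;

    struct MiniOp {
      std::map<std::string, AttrValue> attrs;  // attributes handed to GE
    };

    // Moral equivalent of one INPUT_ATTR_MAP entry: take the constant found
    // at a given input position and store it under the GE attribute name.
    void MapInputToAttr(MiniOp *op, const std::string &attr_name,
                        const AttrValue &constant_input) {
      op->attrs[attr_name] = constant_input;
    }

    int main() {
      MiniOp cast;
      // Cast's input 2 carries the destination type; GE wants it as "dst_type".
      MapInputToAttr(&cast, "dst_type", AttrValue{int64_t{3}});
      std::cout << "dst_type = " << std::get<int64_t>(cast.attrs["dst_type"])
                << "\n";
      return 0;
    }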
-DECLARE_OP_ADAPTER(Cos) -DECLARE_OP_USE_OUTPUT(Cos) - -DECLARE_OP_ADAPTER(Acos) -DECLARE_OP_USE_OUTPUT(Acos) -DECLARE_OP_ADAPTER(AcosGrad) -DECLARE_OP_USE_OUTPUT(AcosGrad) -DECLARE_OP_ADAPTER(Acosh) -DECLARE_OP_USE_OUTPUT(Acosh) -DECLARE_OP_ADAPTER(AcoshGrad) -DECLARE_OP_USE_OUTPUT(AcoshGrad) - -DECLARE_OP_ADAPTER(Floor) -DECLARE_OP_USE_OUTPUT(Floor) -DECLARE_OP_ADAPTER(FloorDiv) -DECLARE_OP_USE_OUTPUT(FloorDiv) -DECLARE_OP_ADAPTER(FloorMod) -DECLARE_OP_USE_OUTPUT(FloorMod) -DECLARE_OP_ADAPTER(Sin) -DECLARE_OP_USE_OUTPUT(Sin) -DECLARE_OP_ADAPTER(Exp) -DECLARE_OP_USE_OUTPUT(Exp) - -DECLARE_OP_ADAPTER(ReduceAllD) -DECLARE_OP_USE_INPUT_ATTR(ReduceAllD) -DECLARE_OP_USE_OUTPUT(ReduceAllD) -DECLARE_OP_ADAPTER(ReduceSumD) -DECLARE_OP_USE_INPUT_ATTR(ReduceSumD) -DECLARE_OP_USE_OUTPUT(ReduceSumD) -DECLARE_OP_ADAPTER(ReduceMeanD) -DECLARE_OP_USE_INPUT_ATTR(ReduceMeanD) -DECLARE_OP_USE_OUTPUT(ReduceMeanD) -DECLARE_OP_ADAPTER(ReduceProdD) -DECLARE_OP_USE_INPUT_ATTR(ReduceProdD) -DECLARE_OP_USE_OUTPUT(ReduceProdD) -DECLARE_OP_ADAPTER(CumprodD) -DECLARE_OP_USE_INPUT_ATTR(CumprodD) -DECLARE_OP_USE_OUTPUT(CumprodD) - -DECLARE_OP_ADAPTER(TileD) -DECLARE_OP_USE_INPUT_ATTR(TileD) -DECLARE_OP_USE_OUTPUT(TileD) -DECLARE_OP_ADAPTER(OneHot) -DECLARE_OP_USE_OUTPUT(OneHot) -DECLARE_OP_ADAPTER(GatherV2D) -DECLARE_OP_USE_INPUT_ATTR(GatherV2D) -DECLARE_OP_USE_OUTPUT(GatherV2D) -DECLARE_OP_ADAPTER(RangeD) -DECLARE_OP_USE_OUTPUT(RangeD) - -DECLARE_OP_ADAPTER(Data) -DECLARE_OP_ADAPTER(BiasAdd) -DECLARE_OP_USE_OUTPUT(BiasAdd) -DECLARE_OP_ADAPTER(BatchNorm) -DECLARE_OP_USE_OUTPUT(BatchNorm) -DECLARE_OP_ADAPTER(BatchNormGrad) -DECLARE_OP_USE_OUTPUT(BatchNormGrad) -DECLARE_OP_ADAPTER(Relu) -DECLARE_OP_USE_OUTPUT(Relu) -DECLARE_OP_ADAPTER(PRelu) -DECLARE_OP_USE_OUTPUT(PRelu) -DECLARE_OP_ADAPTER(Elu) -DECLARE_OP_USE_OUTPUT(Elu) - -DECLARE_OP_ADAPTER(EluGrad) -DECLARE_OP_USE_OUTPUT(EluGrad) -DECLARE_OP_ADAPTER(PReluGrad) -DECLARE_OP_USE_OUTPUT(PReluGrad) - -DECLARE_OP_ADAPTER(L2Normalize) -DECLARE_OP_USE_OUTPUT(L2Normalize) - -DECLARE_OP_ADAPTER(CumsumD) -DECLARE_OP_USE_INPUT_ATTR(CumsumD) -DECLARE_OP_USE_OUTPUT(CumsumD) -DECLARE_OP_ADAPTER(L2NormalizeGrad) -DECLARE_OP_USE_OUTPUT(L2NormalizeGrad) -DECLARE_OP_ADAPTER(Sigmoid) -DECLARE_OP_USE_OUTPUT(Sigmoid) -DECLARE_OP_ADAPTER(SigmoidGrad) -DECLARE_OP_USE_OUTPUT(SigmoidGrad) -DECLARE_OP_ADAPTER(SoftmaxV2) -DECLARE_OP_USE_OUTPUT(SoftmaxV2) -DECLARE_OP_ADAPTER(SoftmaxGrad) -DECLARE_OP_USE_OUTPUT(SoftmaxGrad) -DECLARE_OP_ADAPTER(Greater) -DECLARE_OP_USE_OUTPUT(Greater) -DECLARE_OP_ADAPTER(Flatten) -DECLARE_OP_USE_OUTPUT(Flatten) -DECLARE_OP_ADAPTER(GatherV2) -DECLARE_OP_USE_OUTPUT(GatherV2) -DECLARE_OP_ADAPTER(MaxPool) -DECLARE_OP_USE_OUTPUT(MaxPool) -DECLARE_OP_ADAPTER(MaxPoolGrad) -DECLARE_OP_USE_OUTPUT(MaxPoolGrad) -DECLARE_OP_ADAPTER(SqrtGrad) -DECLARE_OP_USE_OUTPUT(SqrtGrad) -DECLARE_OP_ADAPTER(ReciprocalGrad) -DECLARE_OP_USE_OUTPUT(ReciprocalGrad) -DECLARE_OP_ADAPTER(RsqrtGrad) -DECLARE_OP_USE_OUTPUT(RsqrtGrad) -DECLARE_OP_ADAPTER(AvgPool) -DECLARE_OP_USE_OUTPUT(AvgPool) -DECLARE_OP_ADAPTER(AvgPoolGrad) -DECLARE_OP_USE_OUTPUT(AvgPoolGrad) -DECLARE_OP_ADAPTER(ROIAlign) -DECLARE_OP_USE_OUTPUT(ROIAlign) -DECLARE_OP_ADAPTER(ROIAlignGrad) -DECLARE_OP_USE_OUTPUT(ROIAlignGrad) -DECLARE_OP_ADAPTER(Abs) -DECLARE_OP_USE_OUTPUT(Abs) -DECLARE_OP_ADAPTER(AbsGrad) -DECLARE_OP_USE_OUTPUT(AbsGrad) -DECLARE_OP_ADAPTER(BinaryCrossEntropy) -DECLARE_OP_USE_OUTPUT(BinaryCrossEntropy) -DECLARE_OP_ADAPTER(BinaryCrossEntropyGrad) -DECLARE_OP_USE_OUTPUT(BinaryCrossEntropyGrad) 
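ATTR_DESC entries throughout this file carry one or two AnyTraits converters; the angle-bracketed template arguments did not survive extraction, but the two-converter form seen on attributes such as paddings and crops points to a nested-to-flat list conversion on the way into GE. A hedged sketch of that kind of conversion, with assumed element types:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Flatten a {{top, bottom}, {left, right}}-style table into the flat
    // int64 list a GE attribute typically stores.
    std::vector<int64_t> FlattenPaddings(
        const std::vector<std::vector<int64_t>> &nested) {
      std::vector<int64_t> flat;
      for (const auto &row : nested) {
        flat.insert(flat.end(), row.begin(), row.end());
      }
      return flat;
    }

    int main() {
      std::vector<std::vector<int64_t>> paddings{{1, 1}, {2, 2}};
      for (int64_t v : FlattenPaddings(paddings)) {
        std::cout << v << " ";
      }
      std::cout << "\n";
      return 0;
    }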
-DECLARE_OP_ADAPTER(SparseApplyAdagradD) -DECLARE_OP_USE_OUTPUT(SparseApplyAdagradD) -DECLARE_OP_ADAPTER(ApplyProximalAdagradD) -DECLARE_OP_USE_OUTPUT(ApplyProximalAdagradD) -DECLARE_OP_ADAPTER(SpaceToDepth) -DECLARE_OP_USE_OUTPUT(SpaceToDepth) -DECLARE_OP_ADAPTER(DepthToSpace) -DECLARE_OP_USE_OUTPUT(DepthToSpace) -DECLARE_OP_ADAPTER(Sign) -DECLARE_OP_USE_OUTPUT(Sign) -DECLARE_OP_ADAPTER(LarsV2Update) -DECLARE_OP_USE_OUTPUT(LarsV2Update) -DECLARE_OP_ADAPTER(Round) -DECLARE_OP_USE_OUTPUT(Round) -DECLARE_OP_ADAPTER(ApplyFtrlD) -DECLARE_OP_USE_OUTPUT(ApplyFtrlD) -DECLARE_OP_ADAPTER(SparseApplyFtrlD) -DECLARE_OP_USE_OUTPUT(SparseApplyFtrlD) -DECLARE_OP_ADAPTER(Diag) -DECLARE_OP_USE_OUTPUT(Diag) -DECLARE_OP_ADAPTER(DiagPart) -DECLARE_OP_USE_OUTPUT(DiagPart) -DECLARE_OP_ADAPTER(SpaceToBatchD) -DECLARE_OP_USE_OUTPUT(SpaceToBatchD) -DECLARE_OP_ADAPTER(BatchToSpaceD) -DECLARE_OP_USE_OUTPUT(BatchToSpaceD) -DECLARE_OP_ADAPTER(Atan2) -DECLARE_OP_USE_OUTPUT(Atan2) -DECLARE_OP_ADAPTER(ApplyRMSPropD) -DECLARE_OP_USE_INPUT_ATTR(ApplyRMSPropD) -DECLARE_OP_USE_OUTPUT(ApplyRMSPropD) -DECLARE_OP_ADAPTER(ApplyCenteredRMSPropD) -DECLARE_OP_USE_OUTPUT(ApplyCenteredRMSPropD) -DECLARE_OP_ADAPTER(BasicLSTMCell) -DECLARE_OP_USE_OUTPUT(BasicLSTMCell) -DECLARE_OP_ADAPTER(BasicLSTMCellInputGrad) -DECLARE_OP_USE_OUTPUT(BasicLSTMCellInputGrad) -DECLARE_OP_ADAPTER(BasicLSTMCellWeightGrad) -DECLARE_OP_USE_OUTPUT(BasicLSTMCellWeightGrad) -DECLARE_OP_ADAPTER(BasicLSTMCellCStateGrad) -DECLARE_OP_USE_OUTPUT(BasicLSTMCellCStateGrad) -DECLARE_OP_ADAPTER(L2Loss) -DECLARE_OP_USE_OUTPUT(L2Loss) -DECLARE_OP_ADAPTER(CTCLoss) -DECLARE_OP_USE_OUTPUT(CTCLoss) -DECLARE_OP_ADAPTER(ReverseSequence) -DECLARE_OP_USE_OUTPUT(ReverseSequence) -DECLARE_OP_ADAPTER(AscendQuant) -DECLARE_OP_USE_OUTPUT(AscendQuant) -DECLARE_OP_ADAPTER(AscendDequant) -DECLARE_OP_USE_OUTPUT(AscendDequant) -#ifdef ENABLE_GE -DECLARE_OP_ADAPTER(Print) -DECLARE_OP_USE_DYN_INPUT(Print) -#endif -} // namespace transform -} // namespace mindspore -#endif // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_H_ diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/array_ops_declare.cc b/mindspore/ccsrc/transform/graph_ir/op_declare/array_ops_declare.cc new file mode 100644 index 00000000000..63f55c218c3 --- /dev/null +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/array_ops_declare.cc @@ -0,0 +1,90 @@ +/** + * Copyright 2019 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "transform/graph_ir/op_declare/array_ops_declare.h" +#include + +namespace mindspore::transform { +// const +INPUT_MAP(Const) = EMPTY_INPUT_MAP; +ATTR_MAP(Const) = {{"value", ATTR_DESC(value, AnyTraits())}}; +OUTPUT_MAP(Const) = {{0, OUTPUT_DESC(y)}}; + +// Constant +INPUT_MAP(Constant) = EMPTY_INPUT_MAP; +ATTR_MAP(Constant) = {{"value", ATTR_DESC(value, AnyTraits())}}; +OUTPUT_MAP(Constant) = {{0, OUTPUT_DESC(y)}}; +REG_ADPT_DESC(Constant, kNameConst, ADPT_DESC(Constant, Const)) + +// ScalarSummary +INPUT_MAP(Summary) = {{2, INPUT_DESC(x)}}; +ATTR_MAP(Summary) = EMPTY_ATTR_MAP; +REG_ADPT_DESC(ScalarSummary, prim::kPrimScalarSummary->name(), ADPT_DESC(Summary)) +REG_ADPT_DESC(ImageSummary, prim::kPrimImageSummary->name(), ADPT_DESC(Summary)) +REG_ADPT_DESC(TensorSummary, prim::kPrimTensorSummary->name(), ADPT_DESC(Summary)) +REG_ADPT_DESC(HistogramSummary, prim::kPrimHistogramSummary->name(), ADPT_DESC(Summary)) +REG_ADPT_DESC(Debug, prim::kPrimDebug->name(), ADPT_DESC(Summary)) + +// Data +INPUT_MAP(Data) = EMPTY_INPUT_MAP; +ATTR_MAP(Data) = EMPTY_ATTR_MAP; +REG_ADPT_DESC(Data, kNameParam, ADPT_DESC(Data)) + +// Reshape +INPUT_MAP(Reshape) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(shape)}}; +ATTR_MAP(Reshape) = EMPTY_ATTR_MAP; +OUTPUT_MAP(Reshape) = {{0, OUTPUT_DESC(y)}}; +REG_ADPT_DESC(Reshape, kNameReshape, ADPT_DESC(Reshape)) +REG_ADPT_DESC(FlattenGrad, kNameFlattenGrad, ADPT_DESC(Reshape)) + +// TransShape +INPUT_MAP(TransShape) = {{1, INPUT_DESC(x)}}; +INPUT_ATTR_MAP(TransShape) = {{2, ATTR_DESC(outShape, AnyTraits(), AnyTraits>())}}; +ATTR_MAP(TransShape) = EMPTY_ATTR_MAP; +OUTPUT_MAP(TransShape) = {{0, OUTPUT_DESC(y)}}; +REG_ADPT_DESC(TransShape, kNameTransShape, ADPT_DESC(TransShape)) + +// MirrorPad +INPUT_MAP(MirrorPad) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(paddings)}}; +ATTR_MAP(MirrorPad) = {{"mode", ATTR_DESC(mode, AnyTraits())}}; +OUTPUT_MAP(MirrorPad) = {{0, OUTPUT_DESC(y)}}; +REG_ADPT_DESC(MirrorPad, kNameMirrorPad, ADPT_DESC(MirrorPad)) + +// MirrorPadGrad +INPUT_MAP(MirrorPadGrad) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(paddings)}}; +ATTR_MAP(MirrorPadGrad) = {{"mode", ATTR_DESC(mode, AnyTraits())}}; +OUTPUT_MAP(MirrorPadGrad) = {{0, OUTPUT_DESC(y)}}; +REG_ADPT_DESC(MirrorPadGrad, kNameMirrorPadGrad, ADPT_DESC(MirrorPadGrad)) + +// ExpandDims +INPUT_MAP(ExpandDims) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(axis)}}; +ATTR_MAP(ExpandDims) = EMPTY_ATTR_MAP; +OUTPUT_MAP(ExpandDims) = {{0, OUTPUT_DESC(y)}}; +REG_ADPT_DESC(ExpandDims, kNameExpandDims, ADPT_DESC(ExpandDims)) + +// Squeeze +INPUT_MAP(Squeeze) = {{1, INPUT_DESC(x)}}; +ATTR_MAP(Squeeze) = {{"axis", ATTR_DESC(axis, AnyTraits(), AnyTraits>())}}; +OUTPUT_MAP(Squeeze) = {{0, OUTPUT_DESC(y)}}; +REG_ADPT_DESC(Squeeze, prim::kPrimSqueeze->name(), ADPT_DESC(Squeeze)) + +// ReverseSequence +INPUT_MAP(ReverseSequence) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(seq_lengths)}}; +ATTR_MAP(ReverseSequence) = {{"seq_dim", ATTR_DESC(seq_dim, AnyTraits())}, + {"batch_dim", ATTR_DESC(batch_dim, AnyTraits())}}; +OUTPUT_MAP(ReverseSequence) = {{0, OUTPUT_DESC(y)}}; +REG_ADPT_DESC(ReverseSequence, kNameReverseSequence, ADPT_DESC(ReverseSequence)) +} // namespace mindspore::transform diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/array_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/array_ops_declare.h new file mode 100644 index 00000000000..c00f33c67ab --- /dev/null +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/array_ops_declare.h @@ -0,0 +1,58 @@ +/** + * Copyright 2019 Huawei Technologies Co., Ltd + 
* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_ARRAY_OPS_DECLARE_H_ +#define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_ARRAY_OPS_DECLARE_H_ + +#include +#include +#include "transform/graph_ir/op_declare/op_declare_macro.h" +#include "ops/array_ops.h" + +namespace mindspore::transform { +DECLARE_OP_ADAPTER(Reshape) +DECLARE_OP_USE_OUTPUT(Reshape) + +DECLARE_OP_ADAPTER(TransShape) +DECLARE_OP_USE_INPUT_ATTR(TransShape) +DECLARE_OP_USE_OUTPUT(TransShape) + +DECLARE_OP_ADAPTER(MirrorPad) +DECLARE_OP_USE_OUTPUT(MirrorPad) + +DECLARE_OP_ADAPTER(MirrorPadGrad) +DECLARE_OP_USE_OUTPUT(MirrorPadGrad) + +DECLARE_OP_ADAPTER(ExpandDims) +DECLARE_OP_USE_OUTPUT(ExpandDims) + +DECLARE_OP_ADAPTER(Squeeze) +DECLARE_OP_USE_OUTPUT(Squeeze) + +DECLARE_OP_ADAPTER(Constant) +DECLARE_OP_USE_OUTPUT(Constant) + +DECLARE_OP_ADAPTER(Summary) + +DECLARE_OP_ADAPTER(Const) +DECLARE_OP_USE_OUTPUT(Const) + +DECLARE_OP_ADAPTER(Data) + +DECLARE_OP_ADAPTER(ReverseSequence) +DECLARE_OP_USE_OUTPUT(ReverseSequence) +} // namespace mindspore::transform +#endif // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_ARRAY_OPS_DECLARE_H_ diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/control_flow_ops_declare.cc b/mindspore/ccsrc/transform/graph_ir/op_declare/control_flow_ops_declare.cc new file mode 100644 index 00000000000..c4903570972 --- /dev/null +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/control_flow_ops_declare.cc @@ -0,0 +1,32 @@ +/** + * Copyright 2019 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "transform/graph_ir/op_declare/control_flow_ops_declare.h" + +namespace mindspore::transform { +// Merge +INPUT_MAP(Merge) = EMPTY_INPUT_MAP; +DYN_INPUT_MAP(Merge) = {{1, DYN_INPUT_DESC(x)}}; +ATTR_MAP(Merge) = EMPTY_ATTR_MAP; +OUTPUT_MAP(Merge) = {{0, OUTPUT_DESC(y)}, {1, OUTPUT_DESC(value_index)}}; +REG_ADPT_DESC(Merge, kNameMerge, ADPT_DESC(Merge)) + +// Switch +INPUT_MAP(Switch) = {{1, INPUT_DESC(data)}, {2, INPUT_DESC(pred)}}; +OUTPUT_MAP(Switch) = {{0, OUTPUT_DESC(output_false)}, {1, OUTPUT_DESC(output_true)}}; +ATTR_MAP(Switch) = EMPTY_ATTR_MAP; +REG_ADPT_DESC(Switch, kNameGeSwitch, ADPT_DESC(Switch)) +} // namespace mindspore::transform diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/control_flow_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/control_flow_ops_declare.h new file mode 100644 index 00000000000..bbbe81d96cb --- /dev/null +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/control_flow_ops_declare.h @@ -0,0 +1,33 @@ +/** + * Copyright 2019 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_CONTROL_FLOW_OPS_DECLARE_H_ +#define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_CONTROL_FLOW_OPS_DECLARE_H_ + +#include +#include +#include "transform/graph_ir/op_declare/op_declare_macro.h" +#include "ops/control_flow_ops.h" + +namespace mindspore::transform { +DECLARE_OP_ADAPTER(Merge) +DECLARE_OP_USE_DYN_INPUT(Merge) +DECLARE_OP_USE_OUTPUT(Merge) + +DECLARE_OP_ADAPTER(Switch) +DECLARE_OP_USE_OUTPUT(Switch) +} // namespace mindspore::transform +#endif // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_CONTROL_FLOW_OPS_DECLARE_H_ diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/ctc_ops_declare.cc b/mindspore/ccsrc/transform/graph_ir/op_declare/ctc_ops_declare.cc new file mode 100644 index 00000000000..e28c25da299 --- /dev/null +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/ctc_ops_declare.cc @@ -0,0 +1,31 @@ +/** + * Copyright 2019 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "transform/graph_ir/op_declare/ctc_ops_declare.h" + +namespace mindspore::transform { +// CTCLoss +INPUT_MAP(CTCLoss) = {{1, INPUT_DESC(inputs)}, + {2, INPUT_DESC(labels_indices)}, + {3, INPUT_DESC(labels_values)}, + {4, INPUT_DESC(sequence_length)}}; +ATTR_MAP(CTCLoss) = { + {"preprocess_collapse_repeated", ATTR_DESC(preprocess_collapse_repeated, AnyTraits())}, + {"ctc_merge_repeated", ATTR_DESC(ctc_merge_repeated, AnyTraits())}, + {"ignore_longer_outputs_than_inputs", ATTR_DESC(ignore_longer_outputs_than_inputs, AnyTraits())}}; +OUTPUT_MAP(CTCLoss) = {{0, OUTPUT_DESC(loss)}, {1, OUTPUT_DESC(gradient)}}; +REG_ADPT_DESC(CTCLoss, kNameCTCLoss, ADPT_DESC(CTCLoss)) +} // namespace mindspore::transform diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/ctc_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/ctc_ops_declare.h new file mode 100644 index 00000000000..5a7bc6b6d96 --- /dev/null +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/ctc_ops_declare.h @@ -0,0 +1,29 @@ +/** + * Copyright 2019 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_CTC_OPS_DECLARE_H_ +#define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_CTC_OPS_DECLARE_H_ + +#include +#include +#include "transform/graph_ir/op_declare/op_declare_macro.h" +#include "ops/ctc_ops.h" + +namespace mindspore::transform { +DECLARE_OP_ADAPTER(CTCLoss) +DECLARE_OP_USE_OUTPUT(CTCLoss) +} // namespace mindspore::transform +#endif // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_CTC_OPS_DECLARE_H_ diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/elewise_calculation_ops_declare.cc b/mindspore/ccsrc/transform/graph_ir/op_declare/elewise_calculation_ops_declare.cc new file mode 100644 index 00000000000..d5c581c406e --- /dev/null +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/elewise_calculation_ops_declare.cc @@ -0,0 +1,364 @@ +/** + * Copyright 2019 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+
+#include "transform/graph_ir/op_declare/elewise_calculation_ops_declare.h"
+#include <string>
+
+namespace mindspore::transform {
+// Assign
+INPUT_MAP(Assign) = {{1, INPUT_DESC(ref)}, {2, INPUT_DESC(value)}};
+ATTR_MAP(Assign) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(Assign) = {{0, OUTPUT_DESC(ref)}};
+REG_ADPT_DESC(Assign, prim::kPrimAssign->name(), ADPT_DESC(Assign))
+REG_ADPT_DESC(StateSetItem, prim::kPrimStateSetItem->name(), ADPT_DESC(Assign))
+
+// Add
+INPUT_MAP(Add) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}};
+ATTR_MAP(Add) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(Add) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(Add, prim::kPrimTensorAdd->name(),
+              std::make_shared<OpAdapterDesc>(std::make_shared<OpAdapter<Add>>(ExtraAttr({{"mode", MakeValue(1)}})),
+                                              std::make_shared<OpAdapter<Add>>(ExtraAttr({{"mode", MakeValue(1)}}))))
+
+// GreaterEqual
+INPUT_MAP(GreaterEqual) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}};
+ATTR_MAP(GreaterEqual) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(GreaterEqual) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(GreaterEqual, kNameGreaterEqual, ADPT_DESC(GreaterEqual))
+
+// AssignAdd
+INPUT_MAP(AssignAdd) = {{1, INPUT_DESC(ref)}, {2, INPUT_DESC(value)}};
+ATTR_MAP(AssignAdd) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(AssignAdd) = {{0, OUTPUT_DESC(ref)}};
+REG_ADPT_DESC(AssignAdd, kNameAssignAdd, ADPT_DESC(AssignAdd))
+
+// AssignSub
+INPUT_MAP(AssignSub) = {{1, INPUT_DESC(var)}, {2, INPUT_DESC(value)}};
+ATTR_MAP(AssignSub) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(AssignSub) = {{0, OUTPUT_DESC(var)}};
+REG_ADPT_DESC(AssignSub, kNameAssignSub, ADPT_DESC(AssignSub))
+
+// Cos
+INPUT_MAP(Cos) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(Cos) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(Cos) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(Cos, kNameCos, ADPT_DESC(Cos))
+
+// Acos
+INPUT_MAP(Acos) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(Acos) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(Acos) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(Acos, kNameACos, ADPT_DESC(Acos))
+
+// AcosGrad
+INPUT_MAP(AcosGrad) = {{1, INPUT_DESC(y)}, {2, INPUT_DESC(dy)}};
+ATTR_MAP(AcosGrad) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(AcosGrad) = {{0, OUTPUT_DESC(z)}};
+REG_ADPT_DESC(AcosGrad, kNameACosGrad, ADPT_DESC(AcosGrad))
+
+// Acosh
+INPUT_MAP(Acosh) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(Acosh) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(Acosh) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(Acosh, kNameAcosh, ADPT_DESC(Acosh))
+
+// AcoshGrad
+INPUT_MAP(AcoshGrad) = {{1, INPUT_DESC(y)}, {2, INPUT_DESC(dy)}};
+ATTR_MAP(AcoshGrad) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(AcoshGrad) = {{0, OUTPUT_DESC(z)}};
+REG_ADPT_DESC(AcoshGrad, kNameAcoshGrad, ADPT_DESC(AcoshGrad))
+
+// Floor
+INPUT_MAP(Floor) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(Floor) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(Floor) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(Floor, kNameFloor, ADPT_DESC(Floor))
+
+// FloorDiv
+INPUT_MAP(FloorDiv) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}};
+ATTR_MAP(FloorDiv) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(FloorDiv) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(FloorDiv, kNameFloorDiv, ADPT_DESC(FloorDiv))
+
+// FloorMod
+INPUT_MAP(FloorMod) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}};
+ATTR_MAP(FloorMod) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(FloorMod) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(FloorMod, kNameFloorMod, ADPT_DESC(FloorMod))
+
+// Sin
+INPUT_MAP(Sin) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(Sin) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(Sin) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(Sin, kNameSin, ADPT_DESC(Sin))
+
+// Exp
+INPUT_MAP(Exp) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(Exp) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(Exp) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(Exp, kNameExp, ADPT_DESC(Exp))
+
+// BiasAdd
+INPUT_MAP(BiasAdd) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(bias)}};
+ATTR_MAP(BiasAdd) = {{"data_format", ATTR_DESC(data_format, AnyTraits<std::string>())}};
+OUTPUT_MAP(BiasAdd) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(BiasAdd, kNameBiasAdd, ADPT_DESC(BiasAdd))
+
+// ZerosLike
+INPUT_MAP(ZerosLike) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(ZerosLike) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(ZerosLike) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(ZerosLike, kNameZerosLike, ADPT_DESC(ZerosLike))
+
+// OnesLike
+INPUT_MAP(OnesLike) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(OnesLike) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(OnesLike) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(OnesLike, kNameOnesLike, ADPT_DESC(OnesLike))
+
+// ArgMaxD
+INPUT_MAP(ArgMaxD) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(ArgMaxD) = {{"axis", ATTR_DESC(dimension, AnyTraits<int64_t>())},
+                     {"output_type", ATTR_DESC(dtype, AnyTraits<GEType>())}};
+OUTPUT_MAP(ArgMaxD) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(ArgMaxD, kNameArgmax, ADPT_DESC(ArgMaxD))
+
+// ArgMinD
+INPUT_MAP(ArgMinD) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(ArgMinD) = {{"axis", ATTR_DESC(dimension, AnyTraits<int64_t>())},
+                     {"output_type", ATTR_DESC(dtype, AnyTraits<GEType>())}};
+OUTPUT_MAP(ArgMinD) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(ArgMinD, kNameArgmin, ADPT_DESC(ArgMinD))
+
+// ArgMaxWithValue
+INPUT_MAP(ArgMaxWithValue) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(ArgMaxWithValue) = {{"axis", ATTR_DESC(dimension, AnyTraits<int64_t>())},
+                             {"keep_dims", ATTR_DESC(keep_dims, AnyTraits<bool>())}};
+OUTPUT_MAP(ArgMaxWithValue) = {{0, OUTPUT_DESC(indice)}, {1, OUTPUT_DESC(values)}};
+REG_ADPT_DESC(ArgMaxWithValue, kNameArgMaxWithValue, ADPT_DESC(ArgMaxWithValue))
+
+// ArgMinWithValue
+INPUT_MAP(ArgMinWithValue) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(ArgMinWithValue) = {{"axis", ATTR_DESC(dimension, AnyTraits<int64_t>())},
+                             {"keep_dims", ATTR_DESC(keep_dims, AnyTraits<bool>())}};
+OUTPUT_MAP(ArgMinWithValue) = {{0, OUTPUT_DESC(indice)}, {1, OUTPUT_DESC(values)}};
+REG_ADPT_DESC(ArgMinWithValue, kNameArgMinWithValue, ADPT_DESC(ArgMinWithValue))
+
+// RsqrtGrad
+INPUT_MAP(RsqrtGrad) = {{1, INPUT_DESC(y)}, {2, INPUT_DESC(dy)}};
+ATTR_MAP(RsqrtGrad) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(RsqrtGrad) = {{0, OUTPUT_DESC(z)}};
+REG_ADPT_DESC(RsqrtGrad, kNameRsqrtGrad, ADPT_DESC(RsqrtGrad))
+
+// SqrtGrad
+INPUT_MAP(SqrtGrad) = {{1, INPUT_DESC(y)}, {2, INPUT_DESC(dy)}};
+ATTR_MAP(SqrtGrad) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(SqrtGrad) = {{0, OUTPUT_DESC(z)}};
+REG_ADPT_DESC(SqrtGrad, kNameSqrtGrad, ADPT_DESC(SqrtGrad))
+
+// ReciprocalGrad
+INPUT_MAP(ReciprocalGrad) = {{1, INPUT_DESC(y)}, {2, INPUT_DESC(dy)}};
+ATTR_MAP(ReciprocalGrad) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(ReciprocalGrad) = {{0, OUTPUT_DESC(z)}};
+REG_ADPT_DESC(ReciprocalGrad, kNameReciprocalGrad, ADPT_DESC(ReciprocalGrad))
+
+// AddN
+INPUT_MAP(AddN) = EMPTY_INPUT_MAP;
+DYN_INPUT_MAP(AddN) = {{1, DYN_INPUT_DESC(x)}};
+ATTR_MAP(AddN) = {{"n", ATTR_DESC(N, AnyTraits<int64_t>())}};
+OUTPUT_MAP(AddN) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(AddN, kNameAddN, ADPT_DESC(AddN))
+
+// Mul
+INPUT_MAP(Mul) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}};
+ATTR_MAP(Mul) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(Mul) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(Mul, prim::kPrimMul->name(), ADPT_DESC(Mul))
+
+// RealDiv
+INPUT_MAP(RealDiv) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}};
+ATTR_MAP(RealDiv) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(RealDiv) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(RealDiv, kNameRealDiv, ADPT_DESC(RealDiv))
+
+// Cast
+INPUT_MAP(Cast) = {{1, INPUT_DESC(x)}};
+INPUT_ATTR_MAP(Cast) = {{2, ATTR_DESC(dst_type, AnyTraits<GEType>())}};
+ATTR_MAP(Cast) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(Cast) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(Cast, prim::kPrimCast->name(), ADPT_DESC(Cast))
+
+// Reciprocal
+INPUT_MAP(Reciprocal) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(Reciprocal) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(Reciprocal) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(Reciprocal, kNameReciprocal, ADPT_DESC(Reciprocal))
+
+// Sub
+INPUT_MAP(Sub) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}};
+ATTR_MAP(Sub) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(Sub) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(Sub, prim::kPrimSub->name(), ADPT_DESC(Sub))
+
+// Neg
+INPUT_MAP(Neg) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(Neg) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(Neg) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(Neg, prim::kPrimNeg->name(), ADPT_DESC(Neg))
+
+// Less
+INPUT_MAP(Less) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}};
+ATTR_MAP(Less) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(Less) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(Less, kNameLess, ADPT_DESC(Less))
+
+// Rsqrt
+INPUT_MAP(Rsqrt) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(Rsqrt) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(Rsqrt) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(Rsqrt, kNameRsqrt, ADPT_DESC(Rsqrt))
+
+// Sqrt
+INPUT_MAP(Sqrt) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(Sqrt) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(Sqrt) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(Sqrt, kNameSqrt, ADPT_DESC(Sqrt))
+
+// Square
+INPUT_MAP(Square) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(Square) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(Square) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(Square, kNameSquare, ADPT_DESC(Square))
+
+// SquareSumAll
+INPUT_MAP(SquareSumAll) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}};
+ATTR_MAP(SquareSumAll) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(SquareSumAll) = {{0, OUTPUT_DESC(y1)}, {1, OUTPUT_DESC(y2)}};
+REG_ADPT_DESC(SquareSumAll, kNameSquareSumAll, ADPT_DESC(SquareSumAll))
+
+// Maximum
+INPUT_MAP(Maximum) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}};
+ATTR_MAP(Maximum) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(Maximum) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(Maximum, prim::kPrimMaximum->name(), ADPT_DESC(Maximum))
+
+// Minimum
+INPUT_MAP(Minimum) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}};
+ATTR_MAP(Minimum) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(Minimum) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(Minimum, prim::kPrimMinimum->name(), ADPT_DESC(Minimum))
+
+// MaximumGrad
+INPUT_MAP(MaximumGrad) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}, {3, INPUT_DESC(grads)}};
+ATTR_MAP(MaximumGrad) = {{"grad_x", ATTR_DESC(grad_x, AnyTraits<bool>())},
+                         {"grad_y", ATTR_DESC(grad_y, AnyTraits<bool>())}};
+OUTPUT_MAP(MaximumGrad) = {{0, OUTPUT_DESC(y1)}, {1, OUTPUT_DESC(y2)}};
+REG_ADPT_DESC(MaximumGrad, prim::kPrimMaximumGrad->name(), ADPT_DESC(MaximumGrad))
+
+// MinimumGrad
+INPUT_MAP(MinimumGrad) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}, {3, INPUT_DESC(grads)}};
+ATTR_MAP(MinimumGrad) = {{"grad_x", ATTR_DESC(grad_x, AnyTraits<bool>())},
+                         {"grad_y", ATTR_DESC(grad_y, AnyTraits<bool>())}};
+OUTPUT_MAP(MinimumGrad) = {{0, OUTPUT_DESC(y1)}, {1, OUTPUT_DESC(y2)}};
+REG_ADPT_DESC(MinimumGrad, prim::kPrimMinimumGrad->name(), ADPT_DESC(MinimumGrad))
+
+// Pow
+INPUT_MAP(Pow) = {
+  {1, INPUT_DESC(x1)},
+  {2, INPUT_DESC(x2)},
+};
+ATTR_MAP(Pow) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(Pow) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(Pow, kNamePow, ADPT_DESC(Pow))
+
+// Equal
+INPUT_MAP(Equal) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}};
+ATTR_MAP(Equal) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(Equal) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(Equal, kNameEqual, ADPT_DESC(Equal))
+
+// NotEqual
+INPUT_MAP(NotEqual) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}};
+ATTR_MAP(NotEqual) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(NotEqual) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(NotEqual, kNameNotEqual,
ADPT_DESC(NotEqual)) + +// Log +INPUT_MAP(Log) = {{1, INPUT_DESC(x)}}; +ATTR_MAP(Log) = EMPTY_ATTR_MAP; +OUTPUT_MAP(Log) = {{0, OUTPUT_DESC(y)}}; +REG_ADPT_DESC(Log, kNameLog, ADPT_DESC(Log)) + +// LogicalAnd +INPUT_MAP(LogicalAnd) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; +ATTR_MAP(LogicalAnd) = EMPTY_ATTR_MAP; +OUTPUT_MAP(LogicalAnd) = {{0, OUTPUT_DESC(y)}}; +REG_ADPT_DESC(LogicalAnd, kNameLogicalAnd, ADPT_DESC(LogicalAnd)) + +// LogicalOr +INPUT_MAP(LogicalOr) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; +ATTR_MAP(LogicalOr) = EMPTY_ATTR_MAP; +OUTPUT_MAP(LogicalOr) = {{0, OUTPUT_DESC(y)}}; +REG_ADPT_DESC(LogicalOr, kNameLogicalOr, ADPT_DESC(LogicalOr)) + +// LogicalNot +INPUT_MAP(LogicalNot) = {{1, INPUT_DESC(x)}}; +ATTR_MAP(LogicalNot) = EMPTY_ATTR_MAP; +OUTPUT_MAP(LogicalNot) = {{0, OUTPUT_DESC(y)}}; +REG_ADPT_DESC(LogicalNot, kNameLogicalNot, ADPT_DESC(LogicalNot)) + +// Greater +INPUT_MAP(Greater) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; +ATTR_MAP(Greater) = EMPTY_ATTR_MAP; +OUTPUT_MAP(Greater) = {{0, OUTPUT_DESC(y)}}; +REG_ADPT_DESC(Greater, kNameGreater, ADPT_DESC(Greater)) + +// LessEqual +INPUT_MAP(LessEqual) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; +ATTR_MAP(LessEqual) = EMPTY_ATTR_MAP; +OUTPUT_MAP(LessEqual) = {{0, OUTPUT_DESC(y)}}; +REG_ADPT_DESC(LessEqual, kNameLessEqual, ADPT_DESC(LessEqual)) + +// Abs +INPUT_MAP(Abs) = {{1, INPUT_DESC(x)}}; +ATTR_MAP(Abs) = EMPTY_ATTR_MAP; +OUTPUT_MAP(Abs) = {{0, OUTPUT_DESC(y)}}; +REG_ADPT_DESC(Abs, kNameAbs, ADPT_DESC(Abs)) + +// AbsGrad +INPUT_MAP(AbsGrad) = {{1, INPUT_DESC(y)}, {2, INPUT_DESC(dy)}}; +ATTR_MAP(AbsGrad) = EMPTY_ATTR_MAP; +OUTPUT_MAP(AbsGrad) = {{0, OUTPUT_DESC(z)}}; +REG_ADPT_DESC(AbsGrad, kNameAbsGrad, ADPT_DESC(AbsGrad)) + +// Sign +INPUT_MAP(Sign) = {{1, INPUT_DESC(x)}}; +ATTR_MAP(Sign) = EMPTY_ATTR_MAP; +OUTPUT_MAP(Sign) = {{0, OUTPUT_DESC(y)}}; +REG_ADPT_DESC(Sign, kNameSign, ADPT_DESC(Sign)) + +// Round +INPUT_MAP(Round) = {{1, INPUT_DESC(x)}}; +ATTR_MAP(Round) = EMPTY_ATTR_MAP; +OUTPUT_MAP(Round) = {{0, OUTPUT_DESC(y)}}; +REG_ADPT_DESC(Round, kNameRound, ADPT_DESC(Round)) + +// Atan2 +INPUT_MAP(Atan2) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; +ATTR_MAP(Atan2) = EMPTY_ATTR_MAP; +OUTPUT_MAP(Atan2) = {{0, OUTPUT_DESC(y)}}; +REG_ADPT_DESC(Atan2, kNameAtan2, ADPT_DESC(Atan2)) +} // namespace mindspore::transform diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/elewise_calculation_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/elewise_calculation_ops_declare.h new file mode 100644 index 00000000000..27a1407d3b6 --- /dev/null +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/elewise_calculation_ops_declare.h @@ -0,0 +1,193 @@ +/** + * Copyright 2019 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#ifndef MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_ELEWISE_CALCULATION_OPS_DECLARE_H_
+#define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_ELEWISE_CALCULATION_OPS_DECLARE_H_
+
+#include <string>
+#include <unordered_map>
+#include "transform/graph_ir/op_declare/op_declare_macro.h"
+#include "ops/elewise_calculation_ops.h"
+
+namespace mindspore::transform {
+DECLARE_OP_ADAPTER(GreaterEqual)
+DECLARE_OP_USE_OUTPUT(GreaterEqual)
+
+DECLARE_OP_ADAPTER(AssignAdd)
+DECLARE_OP_USE_OUTPUT(AssignAdd)
+
+DECLARE_OP_ADAPTER(AssignSub)
+DECLARE_OP_USE_OUTPUT(AssignSub)
+
+DECLARE_OP_ADAPTER(ZerosLike)
+DECLARE_OP_USE_OUTPUT(ZerosLike)
+
+DECLARE_OP_ADAPTER(OnesLike)
+DECLARE_OP_USE_OUTPUT(OnesLike)
+
+DECLARE_OP_ADAPTER(ArgMaxD)
+DECLARE_OP_USE_OUTPUT(ArgMaxD)
+
+DECLARE_OP_ADAPTER(ArgMinD)
+DECLARE_OP_USE_OUTPUT(ArgMinD)
+
+DECLARE_OP_ADAPTER(ArgMaxWithValue)
+DECLARE_OP_USE_OUTPUT(ArgMaxWithValue)
+
+DECLARE_OP_ADAPTER(ArgMinWithValue)
+DECLARE_OP_USE_OUTPUT(ArgMinWithValue)
+
+DECLARE_OP_ADAPTER(Mul)
+DECLARE_OP_USE_OUTPUT(Mul)
+
+DECLARE_OP_ADAPTER(AddN)
+DECLARE_OP_USE_DYN_INPUT(AddN)
+DECLARE_OP_USE_OUTPUT(AddN)
+
+DECLARE_OP_ADAPTER(Less)
+DECLARE_OP_USE_OUTPUT(Less)
+
+DECLARE_OP_ADAPTER(Rsqrt)
+DECLARE_OP_USE_OUTPUT(Rsqrt)
+
+DECLARE_OP_ADAPTER(Sqrt)
+DECLARE_OP_USE_OUTPUT(Sqrt)
+
+DECLARE_OP_ADAPTER(Square)
+DECLARE_OP_USE_OUTPUT(Square)
+
+DECLARE_OP_ADAPTER(SquareSumAll)
+DECLARE_OP_USE_OUTPUT(SquareSumAll)
+
+DECLARE_OP_ADAPTER(Maximum)
+DECLARE_OP_USE_OUTPUT(Maximum)
+
+DECLARE_OP_ADAPTER(Minimum)
+DECLARE_OP_USE_OUTPUT(Minimum)
+
+DECLARE_OP_ADAPTER(MaximumGrad)
+DECLARE_OP_USE_OUTPUT(MaximumGrad)
+
+DECLARE_OP_ADAPTER(MinimumGrad)
+DECLARE_OP_USE_OUTPUT(MinimumGrad)
+
+DECLARE_OP_ADAPTER(RealDiv)
+DECLARE_OP_USE_OUTPUT(RealDiv)
+
+DECLARE_OP_ADAPTER(Cast)
+DECLARE_OP_USE_INPUT_ATTR(Cast)
+DECLARE_OP_USE_OUTPUT(Cast)
+
+DECLARE_OP_ADAPTER(Reciprocal)
+DECLARE_OP_USE_OUTPUT(Reciprocal)
+
+DECLARE_OP_ADAPTER(Neg)
+DECLARE_OP_USE_OUTPUT(Neg)
+
+DECLARE_OP_ADAPTER(Sub)
+DECLARE_OP_USE_OUTPUT(Sub)
+
+DECLARE_OP_ADAPTER(Pow)
+DECLARE_OP_USE_OUTPUT(Pow)
+
+DECLARE_OP_ADAPTER(Equal)
+DECLARE_OP_USE_OUTPUT(Equal)
+
+DECLARE_OP_ADAPTER(NotEqual)
+DECLARE_OP_USE_OUTPUT(NotEqual)
+
+DECLARE_OP_ADAPTER(Log)
+DECLARE_OP_USE_OUTPUT(Log)
+
+DECLARE_OP_ADAPTER(LogicalAnd)
+DECLARE_OP_USE_OUTPUT(LogicalAnd)
+
+DECLARE_OP_ADAPTER(LogicalOr)
+DECLARE_OP_USE_OUTPUT(LogicalOr)
+
+DECLARE_OP_ADAPTER(LogicalNot)
+DECLARE_OP_USE_OUTPUT(LogicalNot)
+
+DECLARE_OP_ADAPTER(LessEqual)
+DECLARE_OP_USE_OUTPUT(LessEqual)
+
+DECLARE_OP_ADAPTER(Assign)
+DECLARE_OP_USE_OUTPUT(Assign)
+
+DECLARE_OP_ADAPTER(Add)
+DECLARE_OP_USE_OUTPUT(Add)
+
+DECLARE_OP_ADAPTER(Cos)
+DECLARE_OP_USE_OUTPUT(Cos)
+
+DECLARE_OP_ADAPTER(Acos)
+DECLARE_OP_USE_OUTPUT(Acos)
+
+DECLARE_OP_ADAPTER(AcosGrad)
+DECLARE_OP_USE_OUTPUT(AcosGrad)
+
+DECLARE_OP_ADAPTER(Acosh)
+DECLARE_OP_USE_OUTPUT(Acosh)
+
+DECLARE_OP_ADAPTER(AcoshGrad)
+DECLARE_OP_USE_OUTPUT(AcoshGrad)
+
+DECLARE_OP_ADAPTER(Floor)
+DECLARE_OP_USE_OUTPUT(Floor)
+
+DECLARE_OP_ADAPTER(FloorDiv)
+DECLARE_OP_USE_OUTPUT(FloorDiv)
+
+DECLARE_OP_ADAPTER(FloorMod)
+DECLARE_OP_USE_OUTPUT(FloorMod)
+
+DECLARE_OP_ADAPTER(Sin)
+DECLARE_OP_USE_OUTPUT(Sin)
+
+DECLARE_OP_ADAPTER(Exp)
+DECLARE_OP_USE_OUTPUT(Exp)
+
+DECLARE_OP_ADAPTER(BiasAdd)
+DECLARE_OP_USE_OUTPUT(BiasAdd)
+
+DECLARE_OP_ADAPTER(Greater)
+DECLARE_OP_USE_OUTPUT(Greater)
+
+DECLARE_OP_ADAPTER(SqrtGrad)
+DECLARE_OP_USE_OUTPUT(SqrtGrad)
+
+DECLARE_OP_ADAPTER(ReciprocalGrad)
+DECLARE_OP_USE_OUTPUT(ReciprocalGrad)
+
+DECLARE_OP_ADAPTER(RsqrtGrad)
+DECLARE_OP_USE_OUTPUT(RsqrtGrad) + +DECLARE_OP_ADAPTER(Abs) +DECLARE_OP_USE_OUTPUT(Abs) + +DECLARE_OP_ADAPTER(AbsGrad) +DECLARE_OP_USE_OUTPUT(AbsGrad) + +DECLARE_OP_ADAPTER(Sign) +DECLARE_OP_USE_OUTPUT(Sign) + +DECLARE_OP_ADAPTER(Round) +DECLARE_OP_USE_OUTPUT(Round) + +DECLARE_OP_ADAPTER(Atan2) +DECLARE_OP_USE_OUTPUT(Atan2) +} // namespace mindspore::transform +#endif // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_ELEWISE_CALCULATION_OPS_DECLARE_H_ diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/functional_ops_declare.cc b/mindspore/ccsrc/transform/graph_ir/op_declare/functional_ops_declare.cc new file mode 100644 index 00000000000..9508728a2f8 --- /dev/null +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/functional_ops_declare.cc @@ -0,0 +1,27 @@ +/** + * Copyright 2019 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "transform/graph_ir/op_declare/functional_ops_declare.h" + +namespace mindspore::transform { +// Case +INPUT_MAP(Case) = {{1, INPUT_DESC(branch_index)}}; +DYN_INPUT_MAP(Case) = {{2, DYN_INPUT_DESC(input)}}; +ATTR_MAP(Case) = EMPTY_ATTR_MAP; +DYN_OUTPUT_MAP(Case) = {{0, DYN_OUTPUT_DESC(output)}}; +DYN_SUBGRAPH_MAP(Case) = {{0, DYN_SUBGRAPH_DESC(branches)}}; +REG_ADPT_DESC(Case, kNameCase, ADPT_DESC(Case)) +} // namespace mindspore::transform diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/functional_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/functional_ops_declare.h new file mode 100644 index 00000000000..025b0d0c60b --- /dev/null +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/functional_ops_declare.h @@ -0,0 +1,31 @@ +/** + * Copyright 2019 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#ifndef MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_FUNCTIONAL_OPS_DECLARE_H_
+#define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_FUNCTIONAL_OPS_DECLARE_H_
+
+#include <string>
+#include <unordered_map>
+#include "transform/graph_ir/op_declare/op_declare_macro.h"
+#include "ops/functional_ops.h"
+
+namespace mindspore::transform {
+DECLARE_OP_ADAPTER(Case)
+DECLARE_OP_USE_DYN_INPUT(Case)
+DECLARE_OP_USE_DYN_SUBGRAPH(Case)
+DECLARE_OP_USE_DYN_OUTPUT(Case)
+}  // namespace mindspore::transform
+#endif  // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_FUNCTIONAL_OPS_DECLARE_H_
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/hcom_ops_declare.cc b/mindspore/ccsrc/transform/graph_ir/op_declare/hcom_ops_declare.cc
new file mode 100644
index 00000000000..890e3815736
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/hcom_ops_declare.cc
@@ -0,0 +1,50 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "transform/graph_ir/op_declare/hcom_ops_declare.h"
+
+namespace mindspore::transform {
+// HcomAllReduce
+INPUT_MAP(HcomAllReduce) = {{1, INPUT_DESC(x)}};
+OUTPUT_MAP(HcomAllReduce) = {{0, OUTPUT_DESC(y)}};
+ATTR_MAP(HcomAllReduce) = {{"op", ATTR_DESC(reduction, AnyTraits<std::string>())},
+                           {"group", ATTR_DESC(group, AnyTraits<std::string>())},
+                           {"fusion", ATTR_DESC(fusion, AnyTraits<int64_t>())}};
+REG_ADPT_DESC(HcomAllReduce, kNameAllReduce, ADPT_DESC(HcomAllReduce))
+
+// HcomBroadcast
+INPUT_MAP(HcomBroadcast) = EMPTY_INPUT_MAP;
+DYN_INPUT_MAP(HcomBroadcast) = {{1, DYN_INPUT_DESC(x)}};
+DYN_OUTPUT_MAP(HcomBroadcast) = {{0, DYN_OUTPUT_DESC(y)}};
+ATTR_MAP(HcomBroadcast) = {{"root_rank", ATTR_DESC(root_rank, AnyTraits<int64_t>())},
+                           {"group", ATTR_DESC(group, AnyTraits<std::string>())}};
+REG_ADPT_DESC(HcomBroadcast, kNameBroadcast, ADPT_DESC(HcomBroadcast))
+
+// HcomAllGather
+INPUT_MAP(HcomAllGather) = {{1, INPUT_DESC(x)}};
+OUTPUT_MAP(HcomAllGather) = {{0, OUTPUT_DESC(y)}};
+ATTR_MAP(HcomAllGather) = {{"group", ATTR_DESC(group, AnyTraits<std::string>())},
+                           {"rank_size", ATTR_DESC(rank_size, AnyTraits<int64_t>())}};
+REG_ADPT_DESC(HcomAllGather, kNameAllgather, ADPT_DESC(HcomAllGather))
+
+// HcomReduceScatter
+INPUT_MAP(HcomReduceScatter) = {{1, INPUT_DESC(x)}};
+OUTPUT_MAP(HcomReduceScatter) = {{0, OUTPUT_DESC(y)}};
+ATTR_MAP(HcomReduceScatter) = {{"group", ATTR_DESC(group, AnyTraits<std::string>())},
+                               {"op", ATTR_DESC(reduction, AnyTraits<std::string>())},
+                               {"rank_size", ATTR_DESC(rank_size, AnyTraits<int64_t>())}};
+REG_ADPT_DESC(HcomReduceScatter, kNameReduceScatter, ADPT_DESC(HcomReduceScatter))
+}  // namespace mindspore::transform
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/hcom_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/hcom_ops_declare.h
new file mode 100644
index 00000000000..635b7b1e22f
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/hcom_ops_declare.h
@@ -0,0 +1,39 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_HCOM_OPS_DECLARE_H_
+#define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_HCOM_OPS_DECLARE_H_
+
+#include <string>
+#include <unordered_map>
+#include "transform/graph_ir/op_declare/op_declare_macro.h"
+#include "ops/hcom_ops.h"
+
+namespace mindspore::transform {
+DECLARE_OP_ADAPTER(HcomReduceScatter)
+DECLARE_OP_USE_OUTPUT(HcomReduceScatter)
+
+DECLARE_OP_ADAPTER(HcomBroadcast)
+DECLARE_OP_USE_DYN_INPUT(HcomBroadcast)
+DECLARE_OP_USE_DYN_OUTPUT(HcomBroadcast)
+
+DECLARE_OP_ADAPTER(HcomAllReduce)
+DECLARE_OP_USE_OUTPUT(HcomAllReduce)
+
+DECLARE_OP_ADAPTER(HcomAllGather)
+DECLARE_OP_USE_OUTPUT(HcomAllGather)
+}  // namespace mindspore::transform
+#endif  // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_HCOM_OPS_DECLARE_H_
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/image_ops_declare.cc b/mindspore/ccsrc/transform/graph_ir/op_declare/image_ops_declare.cc
new file mode 100644
index 00000000000..b485a3f3628
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/image_ops_declare.cc
@@ -0,0 +1,48 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
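+
+// Note: ResizeNearestNeighborV2D and ResizeBilinearV2D carry `size` as a
+// compile-time attribute, while ResizeNearestNeighborV2Grad receives it as a
+// second tensor input.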
+
+#include "transform/graph_ir/op_declare/image_ops_declare.h"
+#include <vector>
+
+namespace mindspore::transform {
+// ResizeNearestNeighborV2D
+INPUT_MAP(ResizeNearestNeighborV2D) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(ResizeNearestNeighborV2D) = {
+  {"size", ATTR_DESC(size, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
+  {"align_corners", ATTR_DESC(align_corners, AnyTraits<bool>())}};
+OUTPUT_MAP(ResizeNearestNeighborV2D) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(ResizeNearestNeighborV2D, kNameResizeNearestNeighborD, ADPT_DESC(ResizeNearestNeighborV2D))
+
+// ResizeNearestNeighborV2Grad
+INPUT_MAP(ResizeNearestNeighborV2Grad) = {{1, INPUT_DESC(grads)}, {2, INPUT_DESC(size)}};
+ATTR_MAP(ResizeNearestNeighborV2Grad) = {{"align_corners", ATTR_DESC(align_corners, AnyTraits<bool>())}};
+OUTPUT_MAP(ResizeNearestNeighborV2Grad) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(ResizeNearestNeighborV2Grad, kNameResizeNearestNeighborGrad, ADPT_DESC(ResizeNearestNeighborV2Grad))
+
+// ResizeBilinearV2Grad
+INPUT_MAP(ResizeBilinearV2Grad) = {{1, INPUT_DESC(grads)}, {2, INPUT_DESC(original_image)}};
+ATTR_MAP(ResizeBilinearV2Grad) = {{"align_corners", ATTR_DESC(align_corners, AnyTraits<bool>())}};
+OUTPUT_MAP(ResizeBilinearV2Grad) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(ResizeBilinearV2Grad, kNameResizeBilinearGrad, ADPT_DESC(ResizeBilinearV2Grad))
+
+// ResizeBilinearV2D
+INPUT_MAP(ResizeBilinearV2D) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(ResizeBilinearV2D) = {
+  {"size", ATTR_DESC(size, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
+  {"align_corners", ATTR_DESC(align_corners, AnyTraits<bool>())}};
+OUTPUT_MAP(ResizeBilinearV2D) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(ResizeBilinearV2D, kNameResizeBilinear, ADPT_DESC(ResizeBilinearV2D))
+}  // namespace mindspore::transform
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/image_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/image_ops_declare.h
new file mode 100644
index 00000000000..9511212d208
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/image_ops_declare.h
@@ -0,0 +1,38 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
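+// Declarations matching the map definitions in image_ops_declare.cc:
+// DECLARE_OP_ADAPTER declares the adapter specialization for a GE op type,
+// and DECLARE_OP_USE_OUTPUT declares the output map the .cc file defines.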
+#ifndef MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_IMAGE_OPS_DECLARE_H_
+#define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_IMAGE_OPS_DECLARE_H_
+
+#include <string>
+#include <unordered_map>
+#include "transform/graph_ir/op_declare/op_declare_macro.h"
+#include "ops/image_ops.h"
+
+namespace mindspore::transform {
+DECLARE_OP_ADAPTER(ResizeNearestNeighborV2D)
+DECLARE_OP_USE_OUTPUT(ResizeNearestNeighborV2D)
+
+DECLARE_OP_ADAPTER(ResizeNearestNeighborV2Grad)
+DECLARE_OP_USE_OUTPUT(ResizeNearestNeighborV2Grad)
+
+DECLARE_OP_ADAPTER(ResizeBilinearV2D)
+DECLARE_OP_USE_OUTPUT(ResizeBilinearV2D)
+
+DECLARE_OP_ADAPTER(ResizeBilinearV2Grad)
+DECLARE_OP_USE_OUTPUT(ResizeBilinearV2Grad)
+}  // namespace mindspore::transform
+#endif  // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_IMAGE_OPS_DECLARE_H_
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/logging_ops_declare.cc b/mindspore/ccsrc/transform/graph_ir/op_declare/logging_ops_declare.cc
new file mode 100644
index 00000000000..e73a127ca21
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/logging_ops_declare.cc
@@ -0,0 +1,27 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "transform/graph_ir/op_declare/logging_ops_declare.h"
+
+namespace mindspore::transform {
+#ifdef ENABLE_GE
+// Print
+INPUT_MAP(Print) = EMPTY_INPUT_MAP;
+DYN_INPUT_MAP(Print) = {{1, DYN_INPUT_DESC(x)}};
+ATTR_MAP(Print) = EMPTY_ATTR_MAP;
+REG_ADPT_DESC(Print, kNamePrint, ADPT_DESC(Print))
+#endif
+}  // namespace mindspore::transform
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/logging_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/logging_ops_declare.h
new file mode 100644
index 00000000000..524da0079b4
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/logging_ops_declare.h
@@ -0,0 +1,31 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
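+// Print is adapted only in GE builds: both this header and
+// logging_ops_declare.cc wrap the Print adapter in an ENABLE_GE guard.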
+#ifndef MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_LOGGING_OPS_DECLARE_H_
+#define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_LOGGING_OPS_DECLARE_H_
+
+#include <string>
+#include <unordered_map>
+#include "transform/graph_ir/op_declare/op_declare_macro.h"
+#include "ops/logging_ops.h"
+
+namespace mindspore::transform {
+#ifdef ENABLE_GE
+DECLARE_OP_ADAPTER(Print)
+DECLARE_OP_USE_DYN_INPUT(Print)
+#endif
+}  // namespace mindspore::transform
+#endif  // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_LOGGING_OPS_DECLARE_H_
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/matrix_calculation_ops_declare.cc b/mindspore/ccsrc/transform/graph_ir/op_declare/matrix_calculation_ops_declare.cc
new file mode 100644
index 00000000000..e376202520f
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/matrix_calculation_ops_declare.cc
@@ -0,0 +1,69 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "transform/graph_ir/op_declare/matrix_calculation_ops_declare.h"
+
+namespace mindspore::transform {
+// TensorScatterUpdate
+INPUT_MAP(TensorScatterUpdate) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(indices)}, {3, INPUT_DESC(updates)}};
+ATTR_MAP(TensorScatterUpdate) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(TensorScatterUpdate) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(TensorScatterUpdate, kNameTensorScatterUpdate, ADPT_DESC(TensorScatterUpdate))
+
+// ScatterUpdate
+INPUT_MAP(ScatterUpdate) = {{1, INPUT_DESC(var)}, {2, INPUT_DESC(indices)}, {3, INPUT_DESC(updates)}};
+ATTR_MAP(ScatterUpdate) = {{"use_locking", ATTR_DESC(use_locking, AnyTraits<bool>())}};
+OUTPUT_MAP(ScatterUpdate) = {{0, OUTPUT_DESC(var)}};
+REG_ADPT_DESC(ScatterUpdate, kNameScatterUpdate, ADPT_DESC(ScatterUpdate))
+
+// ScatterNdUpdate
+INPUT_MAP(ScatterNdUpdate) = {{1, INPUT_DESC(var)}, {2, INPUT_DESC(indices)}, {3, INPUT_DESC(updates)}};
+ATTR_MAP(ScatterNdUpdate) = {{"use_locking", ATTR_DESC(use_locking, AnyTraits<bool>())}};
+OUTPUT_MAP(ScatterNdUpdate) = {{0, OUTPUT_DESC(var)}};
+REG_ADPT_DESC(ScatterNdUpdate, kNameScatterNdUpdate, ADPT_DESC(ScatterNdUpdate))
+
+// ScatterMax
+INPUT_MAP(ScatterMax) = {{1, INPUT_DESC(var)}, {2, INPUT_DESC(indices)}, {3, INPUT_DESC(updates)}};
+ATTR_MAP(ScatterMax) = {{"use_locking", ATTR_DESC(use_locking, AnyTraits<bool>())}};
+OUTPUT_MAP(ScatterMax) = {{0, OUTPUT_DESC(var)}};
+REG_ADPT_DESC(ScatterMax, kNameScatterMax, ADPT_DESC(ScatterMax))
+
+// MatMulV2
+INPUT_MAP(MatMulV2) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}, {3, INPUT_DESC(bias)}};
+ATTR_MAP(MatMulV2) = {{"transpose_a", ATTR_DESC(transpose_x1, AnyTraits<bool>())},
+                      {"transpose_b", ATTR_DESC(transpose_x2, AnyTraits<bool>())}};
+OUTPUT_MAP(MatMulV2) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(MatMulV2, prim::kPrimMatMul->name(), ADPT_DESC(MatMulV2))
+
+// DiagPart
+INPUT_MAP(DiagPart) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(DiagPart) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(DiagPart) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(DiagPart, kNameDiagPart, ADPT_DESC(DiagPart))
+
+// BatchMatMul
+INPUT_MAP(BatchMatMul) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}};
+ATTR_MAP(BatchMatMul) = {{"transpose_x1", ATTR_DESC(adj_x1, AnyTraits<bool>())},
+                         {"transpose_x2", ATTR_DESC(adj_x2, AnyTraits<bool>())}};
+OUTPUT_MAP(BatchMatMul) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(BatchMatMul, kNameBatchMatMul, ADPT_DESC(BatchMatMul))
+
+// L2Loss
+INPUT_MAP(L2Loss) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(L2Loss) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(L2Loss) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(L2Loss, kNameL2Loss, ADPT_DESC(L2Loss))
+}  // namespace mindspore::transform
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/matrix_calculation_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/matrix_calculation_ops_declare.h
new file mode 100644
index 00000000000..4b21f825e88
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/matrix_calculation_ops_declare.h
@@ -0,0 +1,50 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_MATRIX_CALCULATION_OPS_DECLARE_H_
+#define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_MATRIX_CALCULATION_OPS_DECLARE_H_
+
+#include <string>
+#include <unordered_map>
+#include "transform/graph_ir/op_declare/op_declare_macro.h"
+#include "ops/matrix_calculation_ops.h"
+
+namespace mindspore::transform {
+DECLARE_OP_ADAPTER(TensorScatterUpdate)
+DECLARE_OP_USE_OUTPUT(TensorScatterUpdate)
+
+DECLARE_OP_ADAPTER(ScatterUpdate)
+DECLARE_OP_USE_OUTPUT(ScatterUpdate)
+
+DECLARE_OP_ADAPTER(ScatterNdUpdate)
+DECLARE_OP_USE_OUTPUT(ScatterNdUpdate)
+
+DECLARE_OP_ADAPTER(ScatterMax)
+DECLARE_OP_USE_OUTPUT(ScatterMax)
+
+DECLARE_OP_ADAPTER(BatchMatMul)
+DECLARE_OP_USE_OUTPUT(BatchMatMul)
+
+DECLARE_OP_ADAPTER(MatMulV2)
+DECLARE_OP_USE_OUTPUT(MatMulV2)
+
+DECLARE_OP_ADAPTER(DiagPart)
+DECLARE_OP_USE_OUTPUT(DiagPart)
+
+DECLARE_OP_ADAPTER(L2Loss)
+DECLARE_OP_USE_OUTPUT(L2Loss)
+}  // namespace mindspore::transform
+#endif  // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_MATRIX_CALCULATION_OPS_DECLARE_H_
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/nn_batch_norm_ops_declare.cc b/mindspore/ccsrc/transform/graph_ir/op_declare/nn_batch_norm_ops_declare.cc
new file mode 100644
index 00000000000..72f935cb96d
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/nn_batch_norm_ops_declare.cc
@@ -0,0 +1,68 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
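+
+// BatchNorm is mapped with all five training outputs (y, batch_mean,
+// batch_variance and the two reserve spaces); BatchNormGrad consumes the
+// reserve spaces and produces the x/scale/offset backprops.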
+
+#include "transform/graph_ir/op_declare/nn_batch_norm_ops_declare.h"
+#include <string>
+
+namespace mindspore::transform {
+// BatchNorm
+INPUT_MAP(BatchNorm) = {{1, INPUT_DESC(x)},
+                        {2, INPUT_DESC(scale)},
+                        {3, INPUT_DESC(offset)},
+                        {4, INPUT_DESC(mean)},
+                        {5, INPUT_DESC(variance)}};
+ATTR_MAP(BatchNorm) = {{"data_format", ATTR_DESC(data_format, AnyTraits<std::string>())},
+                       {"epsilon", ATTR_DESC(epsilon, AnyTraits<float>())},
+                       {"is_training", ATTR_DESC(is_training, AnyTraits<bool>())}};
+OUTPUT_MAP(BatchNorm) = {{0, OUTPUT_DESC(y)},
+                         {1, OUTPUT_DESC(batch_mean)},
+                         {2, OUTPUT_DESC(batch_variance)},
+                         {3, OUTPUT_DESC(reserve_space_1)},
+                         {4, OUTPUT_DESC(reserve_space_2)}};
+REG_ADPT_DESC(BatchNorm, kNameBatchNorm, ADPT_DESC(BatchNorm))
+
+// BatchNormGrad
+INPUT_MAP(BatchNormGrad) = {{1, INPUT_DESC(y_backprop)},
+                            {2, INPUT_DESC(x)},
+                            {3, INPUT_DESC(scale)},
+                            {4, INPUT_DESC(reserve_space_1)},
+                            {5, INPUT_DESC(reserve_space_2)}};
+ATTR_MAP(BatchNormGrad) = {{"data_format", ATTR_DESC(data_format, AnyTraits<std::string>())},
+                           {"epsilon", ATTR_DESC(epsilon, AnyTraits<float>())},
+                           {"is_training", ATTR_DESC(is_training, AnyTraits<bool>())}};
+OUTPUT_MAP(BatchNormGrad) = {{0, OUTPUT_DESC(x_backprop)},
+                             {1, OUTPUT_DESC(scale_backprop)},
+                             {2, OUTPUT_DESC(offset_backprop)},
+                             {3, OUTPUT_DESC(reserve_space_4)},
+                             {4, OUTPUT_DESC(reserve_space_5)}};
+REG_ADPT_DESC(BatchNormGrad, kNameBatchNormGrad, ADPT_DESC(BatchNormGrad))
+
+// L2NormalizeGrad
+INPUT_MAP(L2NormalizeGrad) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(y)}, {3, INPUT_DESC(dy)}};
+ATTR_MAP(L2NormalizeGrad) = {
+  {"axis", ATTR_DESC(dim, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
+  {"epsilon", ATTR_DESC(eps, AnyTraits<float>())}};
+OUTPUT_MAP(L2NormalizeGrad) = {{0, OUTPUT_DESC(dx)}};
+REG_ADPT_DESC(L2NormalizeGrad, kNameL2NormalizeGrad, ADPT_DESC(L2NormalizeGrad))
+
+// L2Normalize
+INPUT_MAP(L2Normalize) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(L2Normalize) = {
+  {"axis", ATTR_DESC(axis, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
+  {"epsilon", ATTR_DESC(eps, AnyTraits<float>())}};
+OUTPUT_MAP(L2Normalize) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(L2Normalize, kNameL2Normalize, ADPT_DESC(L2Normalize))
+}  // namespace mindspore::transform
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/nn_batch_norm_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/nn_batch_norm_ops_declare.h
new file mode 100644
index 00000000000..fa2db24279d
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/nn_batch_norm_ops_declare.h
@@ -0,0 +1,38 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_NN_BATCH_NORM_OPS_DECLARE_H_
+#define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_NN_BATCH_NORM_OPS_DECLARE_H_
+
+#include <string>
+#include <unordered_map>
+#include "transform/graph_ir/op_declare/op_declare_macro.h"
+#include "ops/nn_batch_norm_ops.h"
+
+namespace mindspore::transform {
+DECLARE_OP_ADAPTER(BatchNorm)
+DECLARE_OP_USE_OUTPUT(BatchNorm)
+
+DECLARE_OP_ADAPTER(BatchNormGrad)
+DECLARE_OP_USE_OUTPUT(BatchNormGrad)
+
+DECLARE_OP_ADAPTER(L2Normalize)
+DECLARE_OP_USE_OUTPUT(L2Normalize)
+
+DECLARE_OP_ADAPTER(L2NormalizeGrad)
+DECLARE_OP_USE_OUTPUT(L2NormalizeGrad)
+}  // namespace mindspore::transform
+#endif  // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_NN_BATCH_NORM_OPS_DECLARE_H_
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/nn_calculation_ops_declare.cc b/mindspore/ccsrc/transform/graph_ir/op_declare/nn_calculation_ops_declare.cc
new file mode 100644
index 00000000000..28410f6f6bc
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/nn_calculation_ops_declare.cc
@@ -0,0 +1,103 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "transform/graph_ir/op_declare/nn_calculation_ops_declare.h"
+#include <string>
+
+namespace mindspore::transform {
+// BiasAddGrad
+INPUT_MAP(BiasAddGrad) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(BiasAddGrad) = {{"data_format", ATTR_DESC(data_format, AnyTraits<std::string>())}};
+OUTPUT_MAP(BiasAddGrad) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(BiasAddGrad, prim::kPrimBiasAddGrad->name(), ADPT_DESC(BiasAddGrad))
+
+// Conv2D
+INPUT_MAP(Conv2D) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(filter)}, {3, INPUT_DESC(bias)}};
+ATTR_MAP(Conv2D) = {
+  {"stride", ATTR_DESC(strides, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
+  {"pad_list", ATTR_DESC(pads, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
+  {"dilation", ATTR_DESC(dilations, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
+  {"data_format", ATTR_DESC(data_format, AnyTraits<std::string>())},
+  {"group", ATTR_DESC(groups, AnyTraits<int64_t>())},
+};
+OUTPUT_MAP(Conv2D) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(Conv2D, prim::kPrimConv2D->name(), ADPT_DESC(Conv2D))
+
+// Conv2DBackpropInputD
+INPUT_MAP(Conv2DBackpropInputD) = {{1, INPUT_DESC(out_backprop)}, {2, INPUT_DESC(filter)}};
+INPUT_ATTR_MAP(Conv2DBackpropInputD) = {
+  {3, ATTR_DESC(input_size, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())}};
+ATTR_MAP(Conv2DBackpropInputD) = {
+  {"pad_list", ATTR_DESC(pads, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
+  {"stride", ATTR_DESC(strides, "pad", AnyTraits<std::vector<int64_t>>())},
+  {"dilation", ATTR_DESC(dilations, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
+  {"data_format", ATTR_DESC(data_format, AnyTraits<std::string>())},
+  {"group", ATTR_DESC(groups, AnyTraits<int64_t>())},
+};
+OUTPUT_MAP(Conv2DBackpropInputD) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(Conv2DBackpropInputD, prim::kPrimConv2DBackpropInput->name(), ADPT_DESC(Conv2DBackpropInputD))
+
+// Conv2DBackpropFilterD
+INPUT_MAP(Conv2DBackpropFilterD) = {{1, INPUT_DESC(out_backprop)}, {2, INPUT_DESC(x)}};
+INPUT_ATTR_MAP(Conv2DBackpropFilterD) = {
+  {3, ATTR_DESC(filter_size, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())}};
+ATTR_MAP(Conv2DBackpropFilterD) = {
+  {"pad_list", ATTR_DESC(pads, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
+  {"stride", ATTR_DESC(strides, "pad", AnyTraits<std::vector<int64_t>>())},
+  {"dilation", ATTR_DESC(dilations, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
+  {"data_format", ATTR_DESC(data_format, AnyTraits<std::string>())},
+  {"group", ATTR_DESC(groups, AnyTraits<int64_t>())},
+};
+OUTPUT_MAP(Conv2DBackpropFilterD) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(Conv2DBackpropFilterD, prim::kPrimConv2DBackpropFilter->name(), ADPT_DESC(Conv2DBackpropFilterD))
+
+// DepthwiseConv2D
+INPUT_MAP(DepthwiseConv2D) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(filter)}, {3, INPUT_DESC(bias)}};
+ATTR_MAP(DepthwiseConv2D) = {
+  {"stride", ATTR_DESC(strides, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
+  {"pads", ATTR_DESC(pads, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
+  {"dilation", ATTR_DESC(dilations, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
+  {"data_format", ATTR_DESC(data_format, AnyTraits<std::string>())},
+};
+OUTPUT_MAP(DepthwiseConv2D) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(DepthwiseConv2D, prim::kPrimDepthwiseConv2dNative->name(), ADPT_DESC(DepthwiseConv2D))
+
+// DepthwiseConv2DBackpropInputD
+INPUT_MAP(DepthwiseConv2DBackpropInputD) = {{2, INPUT_DESC(filter)}, {3, INPUT_DESC(out_backprop)}};
+INPUT_ATTR_MAP(DepthwiseConv2DBackpropInputD) = {
+  {1, ATTR_DESC(input_size, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())}};
+ATTR_MAP(DepthwiseConv2DBackpropInputD) = {
+  {"stride", ATTR_DESC(strides, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
+  {"pads", ATTR_DESC(pads, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
+  {"dilation", ATTR_DESC(dilations, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
+};
+OUTPUT_MAP(DepthwiseConv2DBackpropInputD) = {{0, OUTPUT_DESC(input_grad)}};
+REG_ADPT_DESC(DepthwiseConv2DBackpropInputD, prim::kPrimDepthwiseConv2dNativeBackpropInput->name(),
+              ADPT_DESC(DepthwiseConv2DBackpropInputD))
+
+// DepthwiseConv2DBackpropFilterD
+INPUT_MAP(DepthwiseConv2DBackpropFilterD) = {{1, INPUT_DESC(input)}, {3, INPUT_DESC(out_backprop)}};
+INPUT_ATTR_MAP(DepthwiseConv2DBackpropFilterD) = {
+  {2, ATTR_DESC(filter_size, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())}};
+ATTR_MAP(DepthwiseConv2DBackpropFilterD) = {
+  {"stride", ATTR_DESC(strides, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
+  {"pads", ATTR_DESC(pads, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
+  {"dilation", ATTR_DESC(dilations, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
+};
+OUTPUT_MAP(DepthwiseConv2DBackpropFilterD) = {{0, OUTPUT_DESC(filter_grad)}};
+REG_ADPT_DESC(DepthwiseConv2DBackpropFilterD, prim::kPrimDepthwiseConv2dNativeBackpropFilter->name(),
+              ADPT_DESC(DepthwiseConv2DBackpropFilterD))
+}  // namespace mindspore::transform
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/nn_calculation_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/nn_calculation_ops_declare.h
new file mode 100644
index 00000000000..e93ecce0986
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/nn_calculation_ops_declare.h
@@ -0,0 +1,55 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
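+// DECLARE_OP_USE_INPUT_ATTR marks adapters whose constant inputs (e.g.
+// input_size / filter_size) are folded into GE attributes via the
+// INPUT_ATTR_MAP definitions in nn_calculation_ops_declare.cc.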
+#ifndef MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_NN_CALCULATION_OPS_DECLARE_H_
+#define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_NN_CALCULATION_OPS_DECLARE_H_
+
+#include <string>
+#include <unordered_map>
+#include "transform/graph_ir/op_declare/op_declare_macro.h"
+#include "ops/nn_calculation_ops.h"
+
+namespace mindspore::transform {
+DECLARE_OP_ADAPTER(BiasAddGrad)
+DECLARE_OP_USE_OUTPUT(BiasAddGrad)
+
+DECLARE_OP_ADAPTER(Conv2D)
+DECLARE_OP_USE_ENUM(Conv2D)
+DECLARE_OP_USE_OUTPUT(Conv2D)
+
+DECLARE_OP_ADAPTER(Conv2DBackpropInputD)
+DECLARE_OP_USE_ENUM(Conv2DBackpropInputD)
+DECLARE_OP_USE_INPUT_ATTR(Conv2DBackpropInputD)
+DECLARE_OP_USE_OUTPUT(Conv2DBackpropInputD)
+
+DECLARE_OP_ADAPTER(Conv2DBackpropFilterD)
+DECLARE_OP_USE_ENUM(Conv2DBackpropFilterD)
+DECLARE_OP_USE_INPUT_ATTR(Conv2DBackpropFilterD)
+DECLARE_OP_USE_OUTPUT(Conv2DBackpropFilterD)
+
+DECLARE_OP_ADAPTER(DepthwiseConv2D)
+DECLARE_OP_USE_ENUM(DepthwiseConv2D)
+DECLARE_OP_USE_OUTPUT(DepthwiseConv2D)
+
+DECLARE_OP_ADAPTER(DepthwiseConv2DBackpropFilterD)
+DECLARE_OP_USE_INPUT_ATTR(DepthwiseConv2DBackpropFilterD)
+DECLARE_OP_USE_OUTPUT(DepthwiseConv2DBackpropFilterD)
+
+DECLARE_OP_ADAPTER(DepthwiseConv2DBackpropInputD)
+DECLARE_OP_USE_INPUT_ATTR(DepthwiseConv2DBackpropInputD)
+DECLARE_OP_USE_OUTPUT(DepthwiseConv2DBackpropInputD)
+}  // namespace mindspore::transform
+#endif  // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_NN_CALCULATION_OPS_DECLARE_H_
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/nn_detect_ops_declare.cc b/mindspore/ccsrc/transform/graph_ir/op_declare/nn_detect_ops_declare.cc
new file mode 100644
index 00000000000..7416d439d8d
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/nn_detect_ops_declare.cc
@@ -0,0 +1,79 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
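+
+// The box codec attributes (means/stds) below are converted as
+// std::vector<float>; BoundingBoxDecode additionally carries max_shape and
+// wh_ratio_clip.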
+
+#include "transform/graph_ir/op_declare/nn_detect_ops_declare.h"
+#include <vector>
+
+namespace mindspore::transform {
+// BoundingBoxEncode
+INPUT_MAP(BoundingBoxEncode) = {
+  {1, INPUT_DESC(anchor_box)},
+  {2, INPUT_DESC(ground_truth_box)},
+};
+ATTR_MAP(BoundingBoxEncode) = {
+  {"means", ATTR_DESC(means, AnyTraits<std::vector<float>>(), AnyTraits<float>())},
+  {"stds", ATTR_DESC(stds, AnyTraits<std::vector<float>>(), AnyTraits<float>())},
+};
+OUTPUT_MAP(BoundingBoxEncode) = {{0, OUTPUT_DESC(delats)}};
+REG_ADPT_DESC(BoundingBoxEncode, kNameBoundingBoxEncode, ADPT_DESC(BoundingBoxEncode))
+
+// BoundingBoxDecode
+INPUT_MAP(BoundingBoxDecode) = {
+  {1, INPUT_DESC(rois)},
+  {2, INPUT_DESC(deltas)},
+};
+ATTR_MAP(BoundingBoxDecode) = {
+  {"means", ATTR_DESC(means, AnyTraits<std::vector<float>>(), AnyTraits<float>())},
+  {"stds", ATTR_DESC(stds, AnyTraits<std::vector<float>>(), AnyTraits<float>())},
+  {"max_shape", ATTR_DESC(max_shape, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
+  {"wh_ratio_clip", ATTR_DESC(wh_ratio_clip, AnyTraits<float>())},
+};
+OUTPUT_MAP(BoundingBoxDecode) = {{0, OUTPUT_DESC(bboxes)}};
+REG_ADPT_DESC(BoundingBoxDecode, kNameBoundingBoxDecode, ADPT_DESC(BoundingBoxDecode))
+
+// Iou
+INPUT_MAP(Iou) = {{1, INPUT_DESC(bboxes)}, {2, INPUT_DESC(gtboxes)}};
+ATTR_MAP(Iou) = {{"mode", ATTR_DESC(mode, AnyTraits<std::string>())}};
+OUTPUT_MAP(Iou) = {{0, OUTPUT_DESC(overlap)}};
+REG_ADPT_DESC(Iou, kNameIOU, ADPT_DESC(Iou))
+
+// CheckValid
+INPUT_MAP(CheckValid) = {{1, INPUT_DESC(bbox_tensor)}, {2, INPUT_DESC(img_metas)}};
+ATTR_MAP(CheckValid) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(CheckValid) = {{0, OUTPUT_DESC(valid_tensor)}};
+REG_ADPT_DESC(CheckValid, kNameCheckValid, ADPT_DESC(CheckValid))
+
+// ROIAlign
+INPUT_MAP(ROIAlign) = {{1, INPUT_DESC(features)}, {2, INPUT_DESC(rois)}};
+OUTPUT_MAP(ROIAlign) = {{0, OUTPUT_DESC(y)}};
+ATTR_MAP(ROIAlign) = {{"pooled_height", ATTR_DESC(pooled_height, AnyTraits<int64_t>())},
+                      {"pooled_width", ATTR_DESC(pooled_width, AnyTraits<int64_t>())},
+                      {"spatial_scale", ATTR_DESC(spatial_scale, AnyTraits<float>())},
+                      {"sample_num", ATTR_DESC(sample_num, AnyTraits<int64_t>())},
+                      {"roi_end_mode", ATTR_DESC(roi_end_mode, AnyTraits<int64_t>())}};
+REG_ADPT_DESC(ROIAlign, kNameROIAlign, ADPT_DESC(ROIAlign))
+
+// ROIAlignGrad
+INPUT_MAP(ROIAlignGrad) = {{1, INPUT_DESC(ydiff)}, {2, INPUT_DESC(rois)}};
+OUTPUT_MAP(ROIAlignGrad) = {{0, OUTPUT_DESC(xdiff)}};
+ATTR_MAP(ROIAlignGrad) = {
+  {"xdiff_shape", ATTR_DESC(xdiff_shape, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
+  {"pooled_height", ATTR_DESC(pooled_height, AnyTraits<int64_t>())},
+  {"pooled_width", ATTR_DESC(pooled_width, AnyTraits<int64_t>())},
+  {"spatial_scale", ATTR_DESC(spatial_scale, AnyTraits<float>())},
+  {"sample_num", ATTR_DESC(sample_num, AnyTraits<int64_t>())}};
+REG_ADPT_DESC(ROIAlignGrad, kNameROIAlignGrad, ADPT_DESC(ROIAlignGrad))
+}  // namespace mindspore::transform
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/nn_detect_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/nn_detect_ops_declare.h
new file mode 100644
index 00000000000..21bcfab2a4d
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/nn_detect_ops_declare.h
@@ -0,0 +1,44 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_NN_DETECT_OPS_DECLARE_H_
+#define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_NN_DETECT_OPS_DECLARE_H_
+
+#include <string>
+#include <unordered_map>
+#include "transform/graph_ir/op_declare/op_declare_macro.h"
+#include "ops/nn_detect_ops.h"
+
+namespace mindspore::transform {
+DECLARE_OP_ADAPTER(Iou)
+DECLARE_OP_USE_OUTPUT(Iou)
+
+DECLARE_OP_ADAPTER(CheckValid)
+DECLARE_OP_USE_OUTPUT(CheckValid)
+
+DECLARE_OP_ADAPTER(BoundingBoxEncode)
+DECLARE_OP_USE_OUTPUT(BoundingBoxEncode)
+
+DECLARE_OP_ADAPTER(BoundingBoxDecode)
+DECLARE_OP_USE_OUTPUT(BoundingBoxDecode)
+
+DECLARE_OP_ADAPTER(ROIAlign)
+DECLARE_OP_USE_OUTPUT(ROIAlign)
+
+DECLARE_OP_ADAPTER(ROIAlignGrad)
+DECLARE_OP_USE_OUTPUT(ROIAlignGrad)
+}  // namespace mindspore::transform
+#endif  // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_NN_DETECT_OPS_DECLARE_H_
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/nn_norm_ops_declare.cc b/mindspore/ccsrc/transform/graph_ir/op_declare/nn_norm_ops_declare.cc
new file mode 100644
index 00000000000..42f1e1f6203
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/nn_norm_ops_declare.cc
@@ -0,0 +1,116 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
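+
+// The softmax-family `axis` attributes below are normalized to
+// std::vector<int64_t> to match the GE `axes`/`axis` attribute signatures.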
+ */ + +#include "transform/graph_ir/op_declare/nn_norm_ops_declare.h" +#include + +namespace mindspore::transform { +// SoftmaxV2 +INPUT_MAP(SoftmaxV2) = {{1, INPUT_DESC(x)}}; +ATTR_MAP(SoftmaxV2) = { + {"axis", ATTR_DESC(axes, AnyTraits>(), AnyTraits>())}, +}; +OUTPUT_MAP(SoftmaxV2) = {{0, OUTPUT_DESC(y)}}; +REG_ADPT_DESC(SoftmaxV2, kNameSoftmax, ADPT_DESC(SoftmaxV2)) + +// SoftmaxGrad +INPUT_MAP(SoftmaxGrad) = {{1, INPUT_DESC(softmax)}, {2, INPUT_DESC(grad_softmax)}}; +OUTPUT_MAP(SoftmaxGrad) = {{0, OUTPUT_DESC(grad_x)}}; +ATTR_MAP(SoftmaxGrad) = EMPTY_ATTR_MAP; +REG_ADPT_DESC(SoftmaxGrad, kNameSoftmaxGrad, ADPT_DESC(SoftmaxGrad)) + +// SoftmaxCrossEntropyWithLogits +INPUT_MAP(SoftmaxCrossEntropyWithLogits) = {{1, INPUT_DESC(features)}, {2, INPUT_DESC(labels)}}; +ATTR_MAP(SoftmaxCrossEntropyWithLogits) = EMPTY_ATTR_MAP; +OUTPUT_MAP(SoftmaxCrossEntropyWithLogits) = {{0, OUTPUT_DESC(loss)}, {1, OUTPUT_DESC(backprop)}}; +REG_ADPT_DESC(SoftmaxCrossEntropyWithLogits, prim::kPrimSoftmaxCrossEntropyWithLogits->name(), + ADPT_DESC(SoftmaxCrossEntropyWithLogits)) + +// SmoothL1Loss +INPUT_MAP(SmoothL1Loss) = {{1, INPUT_DESC(predict)}, {2, INPUT_DESC(label)}}; +ATTR_MAP(SmoothL1Loss) = {{"sigma", ATTR_DESC(sigma, AnyTraits())}}; +OUTPUT_MAP(SmoothL1Loss) = {{0, OUTPUT_DESC(loss)}}; +REG_ADPT_DESC(SmoothL1Loss, kNameSmoothL1Loss, ADPT_DESC(SmoothL1Loss)) + +// SmoothL1LossGrad +INPUT_MAP(SmoothL1LossGrad) = {{1, INPUT_DESC(predict)}, {2, INPUT_DESC(label)}, {3, INPUT_DESC(dout)}}; +ATTR_MAP(SmoothL1LossGrad) = {{"sigma", ATTR_DESC(sigma, AnyTraits())}}; +OUTPUT_MAP(SmoothL1LossGrad) = {{0, OUTPUT_DESC(gradient)}}; +REG_ADPT_DESC(SmoothL1LossGrad, kNameSmoothL1LossGrad, ADPT_DESC(SmoothL1LossGrad)) + +// SigmoidCrossEntropyWithLogits +INPUT_MAP(SigmoidCrossEntropyWithLogits) = {{1, INPUT_DESC(predict)}, {2, INPUT_DESC(target)}}; +ATTR_MAP(SigmoidCrossEntropyWithLogits) = EMPTY_ATTR_MAP; +OUTPUT_MAP(SigmoidCrossEntropyWithLogits) = {{0, OUTPUT_DESC(loss)}}; +REG_ADPT_DESC(SigmoidCrossEntropyWithLogits, kNameSigmoidCrossEntropyWithLogits, + ADPT_DESC(SigmoidCrossEntropyWithLogits)) + +// SigmoidCrossEntropyWithLogitsGrad +INPUT_MAP(SigmoidCrossEntropyWithLogitsGrad) = { + {1, INPUT_DESC(predict)}, {2, INPUT_DESC(target)}, {3, INPUT_DESC(dout)}}; +ATTR_MAP(SigmoidCrossEntropyWithLogitsGrad) = EMPTY_ATTR_MAP; +OUTPUT_MAP(SigmoidCrossEntropyWithLogitsGrad) = {{0, OUTPUT_DESC(gradient)}}; +REG_ADPT_DESC(SigmoidCrossEntropyWithLogitsGrad, kNameSigmoidCrossEntropyWithLogitsGrad, + ADPT_DESC(SigmoidCrossEntropyWithLogitsGrad)) + +// LogSoftmaxGrad +INPUT_MAP(LogSoftmaxGrad) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(grad)}}; +ATTR_MAP(LogSoftmaxGrad) = { + {"axis", ATTR_DESC(axis, AnyTraits>(), AnyTraits>())}}; +OUTPUT_MAP(LogSoftmaxGrad) = {{0, OUTPUT_DESC(y)}}; +REG_ADPT_DESC(LogSoftmaxGrad, prim::kPrimLogSoftmaxGrad->name(), ADPT_DESC(LogSoftmaxGrad)) + +// LogSoftmaxV2 +INPUT_MAP(LogSoftmaxV2) = {{1, INPUT_DESC(logits)}}; +ATTR_MAP(LogSoftmaxV2) = { + {"axis", ATTR_DESC(axes, AnyTraits>(), AnyTraits>())}}; +OUTPUT_MAP(LogSoftmaxV2) = {{0, OUTPUT_DESC(logsoftmax)}}; +REG_ADPT_DESC(LogSoftmaxV2, prim::kPrimLogSoftmax->name(), ADPT_DESC(LogSoftmaxV2)) + +// LayerNorm +INPUT_MAP(LayerNorm) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(gamma)}, {3, INPUT_DESC(beta)}}; +ATTR_MAP(LayerNorm) = {{"begin_norm_axis", ATTR_DESC(begin_norm_axis, AnyTraits())}, + {"begin_params_axis", ATTR_DESC(begin_params_axis, AnyTraits())}, + {"epsilon", ATTR_DESC(epsilon, AnyTraits())}}; +OUTPUT_MAP(LayerNorm) = {{0, OUTPUT_DESC(y)}, {1, 
+REG_ADPT_DESC(LayerNorm, prim::kPrimLayerNorm->name(), ADPT_DESC(LayerNorm))
+
+// LayerNormGrad
+INPUT_MAP(LayerNormGrad) = {
+  {1, INPUT_DESC(x)}, {2, INPUT_DESC(dy)}, {3, INPUT_DESC(variance)}, {4, INPUT_DESC(mean)}, {5, INPUT_DESC(gamma)}};
+ATTR_MAP(LayerNormGrad) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(LayerNormGrad) = {{0, OUTPUT_DESC(pd_x)}, {1, OUTPUT_DESC(pd_gamma)}, {2, OUTPUT_DESC(pd_beta)}};
+REG_ADPT_DESC(LayerNormGrad, prim::kPrimLayerNormGrad->name(), ADPT_DESC(LayerNormGrad))
+
+// DropOutDoMask
+INPUT_MAP(DropOutDoMask) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(mask)}, {3, INPUT_DESC(keep_prob)}};
+ATTR_MAP(DropOutDoMask) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(DropOutDoMask) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(DropOutDoMask, kNameDropoutDoMask, ADPT_DESC(DropOutDoMask))
+
+// BinaryCrossEntropy
+INPUT_MAP(BinaryCrossEntropy) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(y)}, {3, INPUT_DESC(weight)}};
+ATTR_MAP(BinaryCrossEntropy) = {{"reduction", ATTR_DESC(reduction, AnyTraits<std::string>())}};
+OUTPUT_MAP(BinaryCrossEntropy) = {{0, OUTPUT_DESC(output)}};
+REG_ADPT_DESC(BinaryCrossEntropy, kNameBinaryCrossEntropy, ADPT_DESC(BinaryCrossEntropy))
+
+// BinaryCrossEntropyGrad
+INPUT_MAP(BinaryCrossEntropyGrad) = {
+  {1, INPUT_DESC(x)}, {2, INPUT_DESC(y)}, {3, INPUT_DESC(grad_output)}, {4, INPUT_DESC(weight)}};
+ATTR_MAP(BinaryCrossEntropyGrad) = {{"reduction", ATTR_DESC(reduction, AnyTraits<std::string>())}};
+OUTPUT_MAP(BinaryCrossEntropyGrad) = {{0, OUTPUT_DESC(output)}};
+REG_ADPT_DESC(BinaryCrossEntropyGrad, kNameBinaryCrossEntropyGrad, ADPT_DESC(BinaryCrossEntropyGrad))
+} // namespace mindspore::transform
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/nn_norm_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/nn_norm_ops_declare.h
new file mode 100644
index 00000000000..dda9c406581
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/nn_norm_ops_declare.h
@@ -0,0 +1,68 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_NN_NORM_OPS_DECLARE_H_
+#define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_NN_NORM_OPS_DECLARE_H_
+
+#include <string>
+#include <unordered_map>
+#include "transform/graph_ir/op_declare/op_declare_macro.h"
+#include "ops/nn_norm_ops.h"
+
+namespace mindspore::transform {
+DECLARE_OP_ADAPTER(SmoothL1Loss)
+DECLARE_OP_USE_OUTPUT(SmoothL1Loss)
+
+DECLARE_OP_ADAPTER(SmoothL1LossGrad)
+DECLARE_OP_USE_OUTPUT(SmoothL1LossGrad)
+
+DECLARE_OP_ADAPTER(SigmoidCrossEntropyWithLogits)
+DECLARE_OP_USE_OUTPUT(SigmoidCrossEntropyWithLogits)
+
+DECLARE_OP_ADAPTER(SigmoidCrossEntropyWithLogitsGrad)
+DECLARE_OP_USE_OUTPUT(SigmoidCrossEntropyWithLogitsGrad)
+
+DECLARE_OP_ADAPTER(LogSoftmaxGrad)
+DECLARE_OP_USE_OUTPUT(LogSoftmaxGrad)
+
+DECLARE_OP_ADAPTER(LogSoftmaxV2)
+DECLARE_OP_USE_OUTPUT(LogSoftmaxV2)
+
+DECLARE_OP_ADAPTER(LayerNorm)
+DECLARE_OP_USE_OUTPUT(LayerNorm)
+
+DECLARE_OP_ADAPTER(LayerNormGrad)
+DECLARE_OP_USE_OUTPUT(LayerNormGrad)
+
+DECLARE_OP_ADAPTER(DropOutDoMask)
+DECLARE_OP_USE_OUTPUT(DropOutDoMask)
+
+DECLARE_OP_ADAPTER(SoftmaxCrossEntropyWithLogits)
+DECLARE_OP_USE_OUTPUT(SoftmaxCrossEntropyWithLogits)
+
+DECLARE_OP_ADAPTER(SoftmaxV2)
+DECLARE_OP_USE_OUTPUT(SoftmaxV2)
+
+DECLARE_OP_ADAPTER(SoftmaxGrad)
+DECLARE_OP_USE_OUTPUT(SoftmaxGrad)
+
+DECLARE_OP_ADAPTER(BinaryCrossEntropy)
+DECLARE_OP_USE_OUTPUT(BinaryCrossEntropy)
+
+DECLARE_OP_ADAPTER(BinaryCrossEntropyGrad)
+DECLARE_OP_USE_OUTPUT(BinaryCrossEntropyGrad)
+} // namespace mindspore::transform
+#endif // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_NN_NORM_OPS_DECLARE_H_
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/nn_pooling_ops_declare.cc b/mindspore/ccsrc/transform/graph_ir/op_declare/nn_pooling_ops_declare.cc
new file mode 100644
index 00000000000..d7af8a35fcb
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/nn_pooling_ops_declare.cc
@@ -0,0 +1,72 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "transform/graph_ir/op_declare/nn_pooling_ops_declare.h"
+#include <vector>
+
+namespace mindspore::transform {
+// MaxPool
+INPUT_MAP(MaxPool) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(MaxPool) = {{"ksize", ATTR_DESC(ksize, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
+                     {"strides", ATTR_DESC(strides, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
+                     {"padding", ATTR_DESC(padding, AnyTraits<std::string>())},
+                     {"data_format", ATTR_DESC(data_format, AnyTraits<std::string>())}};
+OUTPUT_MAP(MaxPool) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(MaxPool, kNameMaxPool, ADPT_DESC(MaxPool))
+
+// AvgPool
+INPUT_MAP(AvgPool) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(AvgPool) = {{"ksize", ATTR_DESC(ksize, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
+                     {"strides", ATTR_DESC(strides, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
+                     {"padding", ATTR_DESC(padding, AnyTraits<std::string>())},
+                     {"data_format", ATTR_DESC(data_format, AnyTraits<std::string>())}};
+OUTPUT_MAP(AvgPool) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(AvgPool, kNameAvgPool, ADPT_DESC(AvgPool))
+
+// MaxPoolGrad
+INPUT_MAP(MaxPoolGrad) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}, {3, INPUT_DESC(grad)}};
+ATTR_MAP(MaxPoolGrad) = {{"ksize", ATTR_DESC(ksize, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
+                         {"strides", ATTR_DESC(strides, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
+                         {"padding", ATTR_DESC(padding, AnyTraits<std::string>())},
+                         {"data_format", ATTR_DESC(data_format, AnyTraits<std::string>())}};
+OUTPUT_MAP(MaxPoolGrad) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(MaxPoolGrad, kNameMaxPoolGrad, ADPT_DESC(MaxPoolGrad))
+
+// AvgPoolGrad
+INPUT_MAP(AvgPoolGrad) = {{1, INPUT_DESC(orig_input_shape)}, {2, INPUT_DESC(input_grad)}};
+ATTR_MAP(AvgPoolGrad) = {{"ksize", ATTR_DESC(ksize, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
+                         {"strides", ATTR_DESC(strides, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
+                         {"padding", ATTR_DESC(padding, AnyTraits<std::string>())},
+                         {"data_format", ATTR_DESC(data_format, AnyTraits<std::string>())}};
+OUTPUT_MAP(AvgPoolGrad) = {{0, OUTPUT_DESC(out_grad)}};
+REG_ADPT_DESC(AvgPoolGrad, kNameAvgPoolGrad, ADPT_DESC(AvgPoolGrad))
+
+// MaxPoolWithArgmax
+INPUT_MAP(MaxPoolWithArgmax) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(MaxPoolWithArgmax) = {{"ksize", ATTR_DESC(ksize, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
+                               {"strides", ATTR_DESC(strides, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
+                               {"padding", ATTR_DESC(padding, AnyTraits<std::string>())}};
+OUTPUT_MAP(MaxPoolWithArgmax) = {{0, OUTPUT_DESC(y)}, {1, OUTPUT_DESC(argmax)}};
+REG_ADPT_DESC(MaxPoolWithArgmax, kNameMaxPoolWithArgmax, ADPT_DESC(MaxPoolWithArgmax))
+
+// MaxPoolGradWithArgmax
+INPUT_MAP(MaxPoolGradWithArgmax) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(grad)}, {3, INPUT_DESC(argmax)}};
+ATTR_MAP(MaxPoolGradWithArgmax) = {
+  {"ksize", ATTR_DESC(ksize, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
+  {"strides", ATTR_DESC(strides, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
+  {"padding", ATTR_DESC(padding, AnyTraits<std::string>())}};
+OUTPUT_MAP(MaxPoolGradWithArgmax) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(MaxPoolGradWithArgmax, kNameMaxPoolGradWithArgmax, ADPT_DESC(MaxPoolGradWithArgmax))
+} // namespace mindspore::transform
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/nn_pooling_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/nn_pooling_ops_declare.h
new file mode 100644
index 00000000000..6b423bdff79
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/nn_pooling_ops_declare.h
@@ -0,0 +1,44 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_NN_POOLING_OPS_DECLARE_H_
+#define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_NN_POOLING_OPS_DECLARE_H_
+
+#include <string>
+#include <unordered_map>
+#include "transform/graph_ir/op_declare/op_declare_macro.h"
+#include "ops/nn_ops.h"
+
+namespace mindspore::transform {
+DECLARE_OP_ADAPTER(MaxPoolWithArgmax)
+DECLARE_OP_USE_OUTPUT(MaxPoolWithArgmax)
+
+DECLARE_OP_ADAPTER(MaxPoolGradWithArgmax)
+DECLARE_OP_USE_OUTPUT(MaxPoolGradWithArgmax)
+
+DECLARE_OP_ADAPTER(MaxPool)
+DECLARE_OP_USE_OUTPUT(MaxPool)
+
+DECLARE_OP_ADAPTER(MaxPoolGrad)
+DECLARE_OP_USE_OUTPUT(MaxPoolGrad)
+
+DECLARE_OP_ADAPTER(AvgPool)
+DECLARE_OP_USE_OUTPUT(AvgPool)
+
+DECLARE_OP_ADAPTER(AvgPoolGrad)
+DECLARE_OP_USE_OUTPUT(AvgPoolGrad)
+} // namespace mindspore::transform
+#endif // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_NN_POOLING_OPS_DECLARE_H_
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/nn_training_ops_declare.cc b/mindspore/ccsrc/transform/graph_ir/op_declare/nn_training_ops_declare.cc
new file mode 100644
index 00000000000..e7f486c6e72
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/nn_training_ops_declare.cc
@@ -0,0 +1,128 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "transform/graph_ir/op_declare/nn_training_ops_declare.h"
+
+namespace mindspore::transform {
+// ApplyMomentum
+INPUT_MAP(ApplyMomentum) = {
+  {1, INPUT_DESC(var)}, {2, INPUT_DESC(accum)}, {3, INPUT_DESC(lr)}, {4, INPUT_DESC(grad)}, {5, INPUT_DESC(momentum)}};
+ATTR_MAP(ApplyMomentum) = {{"use_nesterov", ATTR_DESC(use_nesterov, AnyTraits<bool>())},
+                           {"use_locking", ATTR_DESC(use_locking, AnyTraits<bool>())}};
+OUTPUT_MAP(ApplyMomentum) = {{0, OUTPUT_DESC(var)}};
+REG_ADPT_DESC(ApplyMomentum, kNameApplyMomentum, ADPT_DESC(ApplyMomentum))
+
+// LarsV2Update
+INPUT_MAP(LarsV2Update) = {{1, INPUT_DESC(w)},
+                           {2, INPUT_DESC(g)},
+                           {3, INPUT_DESC(w_square_sum)},
+                           {4, INPUT_DESC(g_square_sum)},
+                           {5, INPUT_DESC(weight_decay)},
+                           {6, INPUT_DESC(learning_rate)}};
+ATTR_MAP(LarsV2Update) = {{"epsilon", ATTR_DESC(epsilon, AnyTraits<float>())},
+                          {"hyperpara", ATTR_DESC(hyperpara, AnyTraits<float>())},
+                          {"use_clip", ATTR_DESC(use_clip, AnyTraits<bool>())}};
+OUTPUT_MAP(LarsV2Update) = {{0, OUTPUT_DESC(g_new)}};
+REG_ADPT_DESC(LarsV2Update, kNameLARSUpdate, ADPT_DESC(LarsV2Update))
+
+// ApplyAdam
+INPUT_MAP(ApplyAdam) = {{1, INPUT_DESC(var)},         {2, INPUT_DESC(m)},           {3, INPUT_DESC(v)},
+                        {4, INPUT_DESC(beta1_power)}, {5, INPUT_DESC(beta2_power)}, {6, INPUT_DESC(lr)},
+                        {7, INPUT_DESC(beta1)},       {8, INPUT_DESC(beta2)},       {9, INPUT_DESC(epsilon)},
+                        {10, INPUT_DESC(grad)}};
+ATTR_MAP(ApplyAdam) = {{"use_locking", ATTR_DESC(use_locking, AnyTraits<bool>())},
+                       {"use_nesterov", ATTR_DESC(use_nesterov, AnyTraits<bool>())}};
+OUTPUT_MAP(ApplyAdam) = {{0, OUTPUT_DESC(var)}};
+
+// ApplyAdamD
+INPUT_MAP(ApplyAdamD) = {{1, INPUT_DESC(var)},         {2, INPUT_DESC(m)},           {3, INPUT_DESC(v)},
+                         {4, INPUT_DESC(beta1_power)}, {5, INPUT_DESC(beta2_power)}, {6, INPUT_DESC(lr)},
+                         {7, INPUT_DESC(beta1)},       {8, INPUT_DESC(beta2)},       {9, INPUT_DESC(epsilon)},
+                         {10, INPUT_DESC(grad)}};
+ATTR_MAP(ApplyAdamD) = {{"use_locking", ATTR_DESC(use_locking, AnyTraits<bool>())},
+                        {"use_nesterov", ATTR_DESC(use_nesterov, AnyTraits<bool>())}};
+OUTPUT_MAP(ApplyAdamD) = {{0, OUTPUT_DESC(var)}, {1, OUTPUT_DESC(m)}, {2, OUTPUT_DESC(v)}};
+#ifdef ENABLE_GE
+REG_ADPT_DESC(ApplyAdamD, kNameApplyAdam, ADPT_DESC(ApplyAdamD))
+#else
+REG_ADPT_DESC(ApplyAdam, kNameApplyAdam, ADPT_DESC(ApplyAdam))
+#endif
+
+// SGD
+INPUT_MAP(SGD) = {{1, INPUT_DESC(parameters)}, {2, INPUT_DESC(gradient)}, {3, INPUT_DESC(learning_rate)},
+                  {4, INPUT_DESC(accum)},      {5, INPUT_DESC(momentum)}, {6, INPUT_DESC(stat)}};
+ATTR_MAP(SGD) = {{"dampening", ATTR_DESC(dampening, AnyTraits<float>())},
+                 {"weight_decay", ATTR_DESC(weight_decay, AnyTraits<float>())},
+                 {"nesterov", ATTR_DESC(nesterov, AnyTraits<bool>())}};
+OUTPUT_MAP(SGD) = {{0, OUTPUT_DESC(parameters)}};
+REG_ADPT_DESC(SGD, kNameSGD, ADPT_DESC(SGD))
+
+// SparseApplyAdagradD
+INPUT_MAP(SparseApplyAdagradD) = {
+  {1, INPUT_DESC(var)}, {2, INPUT_DESC(accum)}, {3, INPUT_DESC(grad)}, {4, INPUT_DESC(indices)}};
+ATTR_MAP(SparseApplyAdagradD) = {{"lr", ATTR_DESC(lr, AnyTraits<float>())},
+                                 {"use_locking", ATTR_DESC(use_locking, AnyTraits<bool>())}};
+OUTPUT_MAP(SparseApplyAdagradD) = {{0, OUTPUT_DESC(var)}};
+REG_ADPT_DESC(SparseApplyAdagradD, kNameSparseApplyAdagrad, ADPT_DESC(SparseApplyAdagradD))
+
+// ApplyProximalAdagradD
+INPUT_MAP(ApplyProximalAdagradD) = {{1, INPUT_DESC(var)}, {2, INPUT_DESC(accum)}, {3, INPUT_DESC(lr)},
+                                    {4, INPUT_DESC(l1)},  {5, INPUT_DESC(l2)},    {6, INPUT_DESC(grad)}};
+ATTR_MAP(ApplyProximalAdagradD) = {{"use_locking", ATTR_DESC(use_locking, AnyTraits<bool>())}};
+OUTPUT_MAP(ApplyProximalAdagradD) = {{0, OUTPUT_DESC(var)}, {1, OUTPUT_DESC(accum)}};
+REG_ADPT_DESC(ApplyProximalAdagradD, kNameApplyProximalAdagrad, ADPT_DESC(ApplyProximalAdagradD))
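+
+// Note how hyper-parameters move between tensor inputs and attributes from kernel to kernel:
+// SparseApplyFtrlD keeps lr/l1/l2/lr_power in ATTR_MAP, ApplyFtrlD receives them as regular
+// inputs, and ApplyRMSPropD folds input positions 6-8 into attributes via INPUT_ATTR_MAP.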
+// SparseApplyFtrlD
+INPUT_MAP(SparseApplyFtrlD) = {{1, INPUT_DESC(var)},
+                               {2, INPUT_DESC(accum)},
+                               {3, INPUT_DESC(linear)},
+                               {4, INPUT_DESC(grad)},
+                               {5, INPUT_DESC(indices)}};
+ATTR_MAP(SparseApplyFtrlD) = {{"use_locking", ATTR_DESC(use_locking, AnyTraits<bool>())},
+                              {"lr", ATTR_DESC(lr, AnyTraits<float>())},
+                              {"l1", ATTR_DESC(l1, AnyTraits<float>())},
+                              {"l2", ATTR_DESC(l2, AnyTraits<float>())},
+                              {"lr_power", ATTR_DESC(lr_power, AnyTraits<float>())}};
+OUTPUT_MAP(SparseApplyFtrlD) = {{0, OUTPUT_DESC(var)}};
+REG_ADPT_DESC(SparseApplyFtrlD, kNameSparseApplyFtrlD, ADPT_DESC(SparseApplyFtrlD))
+
+// ApplyFtrlD
+INPUT_MAP(ApplyFtrlD) = {{1, INPUT_DESC(var)},  {2, INPUT_DESC(accum)}, {3, INPUT_DESC(linear)},
+                         {4, INPUT_DESC(grad)}, {5, INPUT_DESC(lr)},    {6, INPUT_DESC(l1)},
+                         {7, INPUT_DESC(l2)},   {8, INPUT_DESC(lr_power)}};
+ATTR_MAP(ApplyFtrlD) = {{"use_locking", ATTR_DESC(use_locking, AnyTraits<bool>())}};
+OUTPUT_MAP(ApplyFtrlD) = {{0, OUTPUT_DESC(var)}, {1, OUTPUT_DESC(accum)}, {2, OUTPUT_DESC(linear)}};
+REG_ADPT_DESC(ApplyFtrlD, kNameApplyFtrl, ADPT_DESC(ApplyFtrlD))
+
+// ApplyRMSPropD
+INPUT_MAP(ApplyRMSPropD) = {
+  {1, INPUT_DESC(var)}, {2, INPUT_DESC(ms)}, {3, INPUT_DESC(mom)}, {4, INPUT_DESC(lr)}, {5, INPUT_DESC(grad)}};
+INPUT_ATTR_MAP(ApplyRMSPropD) = {{6, ATTR_DESC(rho, AnyTraits<float>())},
+                                 {7, ATTR_DESC(momentum, AnyTraits<float>())},
+                                 {8, ATTR_DESC(epsilon, AnyTraits<float>())}};
+ATTR_MAP(ApplyRMSPropD) = {{"use_locking", ATTR_DESC(use_locking, AnyTraits<bool>())}};
+OUTPUT_MAP(ApplyRMSPropD) = {{0, OUTPUT_DESC(var)}};
+REG_ADPT_DESC(ApplyRMSPropD, kNameApplyRMSProp, ADPT_DESC(ApplyRMSPropD))
+
+// ApplyCenteredRMSPropD
+INPUT_MAP(ApplyCenteredRMSPropD) = {{1, INPUT_DESC(var)}, {2, INPUT_DESC(mg)},       {3, INPUT_DESC(ms)},
+                                    {4, INPUT_DESC(mom)}, {5, INPUT_DESC(grad)},     {6, INPUT_DESC(lr)},
+                                    {7, INPUT_DESC(rho)}, {8, INPUT_DESC(momentum)}, {9, INPUT_DESC(epsilon)}};
+ATTR_MAP(ApplyCenteredRMSPropD) = {{"use_locking", ATTR_DESC(use_locking, AnyTraits<bool>())}};
+OUTPUT_MAP(ApplyCenteredRMSPropD) = {
+  {0, OUTPUT_DESC(var)}, {1, OUTPUT_DESC(mg)}, {2, OUTPUT_DESC(ms)}, {3, OUTPUT_DESC(mom)}};
+REG_ADPT_DESC(ApplyCenteredRMSPropD, kNameApplyCenteredRMSProp, ADPT_DESC(ApplyCenteredRMSPropD))
+} // namespace mindspore::transform
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/nn_training_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/nn_training_ops_declare.h
new file mode 100644
index 00000000000..122b1850e49
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/nn_training_ops_declare.h
@@ -0,0 +1,60 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_NN_TRAINING_OPS_DECLARE_H_
+#define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_NN_TRAINING_OPS_DECLARE_H_
+
+#include <string>
+#include <unordered_map>
+#include "transform/graph_ir/op_declare/op_declare_macro.h"
+#include "ops/nn_training_ops.h"
+
+namespace mindspore::transform {
+DECLARE_OP_ADAPTER(ApplyAdam)
+DECLARE_OP_USE_OUTPUT(ApplyAdam)
+
+DECLARE_OP_ADAPTER(ApplyAdamD)
+DECLARE_OP_USE_OUTPUT(ApplyAdamD)
+
+DECLARE_OP_ADAPTER(SGD)
+DECLARE_OP_USE_OUTPUT(SGD)
+
+DECLARE_OP_ADAPTER(ApplyMomentum)
+DECLARE_OP_USE_OUTPUT(ApplyMomentum)
+
+DECLARE_OP_ADAPTER(SparseApplyAdagradD)
+DECLARE_OP_USE_OUTPUT(SparseApplyAdagradD)
+
+DECLARE_OP_ADAPTER(ApplyProximalAdagradD)
+DECLARE_OP_USE_OUTPUT(ApplyProximalAdagradD)
+
+DECLARE_OP_ADAPTER(LarsV2Update)
+DECLARE_OP_USE_OUTPUT(LarsV2Update)
+
+DECLARE_OP_ADAPTER(ApplyFtrlD)
+DECLARE_OP_USE_OUTPUT(ApplyFtrlD)
+
+DECLARE_OP_ADAPTER(SparseApplyFtrlD)
+DECLARE_OP_USE_OUTPUT(SparseApplyFtrlD)
+
+DECLARE_OP_ADAPTER(ApplyRMSPropD)
+DECLARE_OP_USE_INPUT_ATTR(ApplyRMSPropD)
+DECLARE_OP_USE_OUTPUT(ApplyRMSPropD)
+
+DECLARE_OP_ADAPTER(ApplyCenteredRMSPropD)
+DECLARE_OP_USE_OUTPUT(ApplyCenteredRMSPropD)
+} // namespace mindspore::transform
+#endif // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_NN_TRAINING_OPS_DECLARE_H_
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/nonlinear_fuc_ops_declare.cc b/mindspore/ccsrc/transform/graph_ir/op_declare/nonlinear_fuc_ops_declare.cc
new file mode 100644
index 00000000000..1273bf76563
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/nonlinear_fuc_ops_declare.cc
@@ -0,0 +1,103 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "transform/graph_ir/op_declare/nonlinear_fuc_ops_declare.h"
+
+namespace mindspore::transform {
+// Relu
+INPUT_MAP(Relu) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(Relu) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(Relu) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(Relu, prim::kPrimRelu->name(), ADPT_DESC(Relu))
+
+// Elu
+INPUT_MAP(Elu) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(Elu) = {{"alpha", ATTR_DESC(alpha, AnyTraits<float>())}};
+OUTPUT_MAP(Elu) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(Elu, kNameElu, ADPT_DESC(Elu))
+
+// EluGrad
+INPUT_MAP(EluGrad) = {{1, INPUT_DESC(grads)}, {2, INPUT_DESC(activations)}};
+ATTR_MAP(EluGrad) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(EluGrad) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(EluGrad, kNameEluGrad, ADPT_DESC(EluGrad))
+
+// PRelu
+INPUT_MAP(PRelu) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(weight)}};
+ATTR_MAP(PRelu) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(PRelu) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(PRelu, kNamePrelu, ADPT_DESC(PRelu))
+
+// PReluGrad
+INPUT_MAP(PReluGrad) = {{1, INPUT_DESC(grads)}, {2, INPUT_DESC(features)}, {3, INPUT_DESC(weights)}};
+ATTR_MAP(PReluGrad) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(PReluGrad) = {{0, OUTPUT_DESC(dx)}, {1, OUTPUT_DESC(da)}};
+REG_ADPT_DESC(PReluGrad, kNamePreluGrad, ADPT_DESC(PReluGrad))
+
+// Sigmoid
+INPUT_MAP(Sigmoid) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(Sigmoid) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(Sigmoid) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(Sigmoid, kNameSigmoid, ADPT_DESC(Sigmoid))
+
+// SigmoidGrad
+INPUT_MAP(SigmoidGrad) = {{1, INPUT_DESC(y)}, {2, INPUT_DESC(dy)}};
+ATTR_MAP(SigmoidGrad) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(SigmoidGrad) = {{0, OUTPUT_DESC(z)}};
+REG_ADPT_DESC(SigmoidGrad, kNameSigmoidGrad, ADPT_DESC(SigmoidGrad))
+
+// Relu6
+INPUT_MAP(Relu6) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(Relu6) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(Relu6) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(Relu6, kNameReLU6, ADPT_DESC(Relu6))
+
+// Relu6Grad
+INPUT_MAP(Relu6Grad) = {{1, INPUT_DESC(gradients)}, {2, INPUT_DESC(features)}};
+ATTR_MAP(Relu6Grad) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(Relu6Grad) = {{0, OUTPUT_DESC(backprops)}};
+REG_ADPT_DESC(Relu6Grad, kNameReLU6Grad, ADPT_DESC(Relu6Grad))
+
+// ReluGrad
+INPUT_MAP(ReluGrad) = {{1, INPUT_DESC(gradients)}, {2, INPUT_DESC(features)}};
+ATTR_MAP(ReluGrad) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(ReluGrad) = {{0, OUTPUT_DESC(backprops)}};
+REG_ADPT_DESC(ReluGrad, prim::kPrimReluGrad->name(), ADPT_DESC(ReluGrad))
+
+// Tanh
+INPUT_MAP(Tanh) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(Tanh) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(Tanh) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(Tanh, prim::kPrimTanh->name(), ADPT_DESC(Tanh))
+
+// TanhGrad
+INPUT_MAP(TanhGrad) = {{1, INPUT_DESC(y)}, {2, INPUT_DESC(dy)}};
+ATTR_MAP(TanhGrad) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(TanhGrad) = {{0, OUTPUT_DESC(z)}};
+REG_ADPT_DESC(TanhGrad, prim::kPrimTanhGrad->name(), ADPT_DESC(TanhGrad))
+
+// Gelu
+INPUT_MAP(Gelu) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(Gelu) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(Gelu) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(Gelu, prim::kPrimGelu->name(), ADPT_DESC(Gelu))
+
+// GeluGrad
+INPUT_MAP(GeluGrad) = {{1, INPUT_DESC(dy)}, {2, INPUT_DESC(x)}, {3, INPUT_DESC(y)}};
+ATTR_MAP(GeluGrad) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(GeluGrad) = {{0, OUTPUT_DESC(z)}};
+REG_ADPT_DESC(GeluGrad, prim::kPrimGeluGrad->name(), ADPT_DESC(GeluGrad))
+} // namespace mindspore::transform
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/nonlinear_fuc_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/nonlinear_fuc_ops_declare.h
new file mode 100644
index 00000000000..71b81f450b2
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/nonlinear_fuc_ops_declare.h
@@ -0,0 +1,68 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_NONLINEAR_FUC_OPS_DECLARE_H_
+#define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_NONLINEAR_FUC_OPS_DECLARE_H_
+
+#include <string>
+#include <unordered_map>
+#include "transform/graph_ir/op_declare/op_declare_macro.h"
+#include "ops/nonlinear_fuc_ops.h"
+
+namespace mindspore::transform {
+DECLARE_OP_ADAPTER(ReluGrad)
+DECLARE_OP_USE_OUTPUT(ReluGrad)
+
+DECLARE_OP_ADAPTER(Relu6)
+DECLARE_OP_USE_OUTPUT(Relu6)
+
+DECLARE_OP_ADAPTER(Relu6Grad)
+DECLARE_OP_USE_OUTPUT(Relu6Grad)
+
+DECLARE_OP_ADAPTER(Tanh)
+DECLARE_OP_USE_OUTPUT(Tanh)
+
+DECLARE_OP_ADAPTER(TanhGrad)
+DECLARE_OP_USE_OUTPUT(TanhGrad)
+
+DECLARE_OP_ADAPTER(Gelu)
+DECLARE_OP_USE_OUTPUT(Gelu)
+
+DECLARE_OP_ADAPTER(GeluGrad)
+DECLARE_OP_USE_OUTPUT(GeluGrad)
+
+DECLARE_OP_ADAPTER(Relu)
+DECLARE_OP_USE_OUTPUT(Relu)
+
+DECLARE_OP_ADAPTER(PRelu)
+DECLARE_OP_USE_OUTPUT(PRelu)
+
+DECLARE_OP_ADAPTER(Elu)
+DECLARE_OP_USE_OUTPUT(Elu)
+
+DECLARE_OP_ADAPTER(EluGrad)
+DECLARE_OP_USE_OUTPUT(EluGrad)
+
+DECLARE_OP_ADAPTER(PReluGrad)
+DECLARE_OP_USE_OUTPUT(PReluGrad)
+
+DECLARE_OP_ADAPTER(Sigmoid)
+DECLARE_OP_USE_OUTPUT(Sigmoid)
+
+DECLARE_OP_ADAPTER(SigmoidGrad)
+DECLARE_OP_USE_OUTPUT(SigmoidGrad)
+} // namespace mindspore::transform
+#endif // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_NONLINEAR_FUC_OPS_DECLARE_H_
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/npu_loss_scale_ops_declare.cc b/mindspore/ccsrc/transform/graph_ir/op_declare/npu_loss_scale_ops_declare.cc
new file mode 100644
index 00000000000..3f5098ba7b3
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/npu_loss_scale_ops_declare.cc
@@ -0,0 +1,37 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "transform/graph_ir/op_declare/npu_loss_scale_ops_declare.h"
+
+namespace mindspore::transform {
+// NPUGetFloatStatus
+INPUT_MAP(NPUGetFloatStatus) = {{1, INPUT_DESC(addr)}};
+OUTPUT_MAP(NPUGetFloatStatus) = {{0, OUTPUT_DESC(data)}};
+ATTR_MAP(NPUGetFloatStatus) = EMPTY_ATTR_MAP;
+REG_ADPT_DESC(NPUGetFloatStatus, kNameNPUGetFloatStatus, ADPT_DESC(NPUGetFloatStatus))
+
+// NPUAllocFloatStatus
+INPUT_MAP(NPUAllocFloatStatus) = EMPTY_INPUT_MAP;
+ATTR_MAP(NPUAllocFloatStatus) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(NPUAllocFloatStatus) = {{0, OUTPUT_DESC(data)}};
+REG_ADPT_DESC(NPUAllocFloatStatus, kNameNPUAllocFloatStatus, ADPT_DESC(NPUAllocFloatStatus))
+
+// NPUClearFloatStatus
+INPUT_MAP(NPUClearFloatStatus) = {{1, INPUT_DESC(addr)}};
+OUTPUT_MAP(NPUClearFloatStatus) = {{0, OUTPUT_DESC(data)}};
+ATTR_MAP(NPUClearFloatStatus) = EMPTY_ATTR_MAP;
+REG_ADPT_DESC(NPUClearFloatStatus, kNameNPUClearFloatStatus, ADPT_DESC(NPUClearFloatStatus))
+} // namespace mindspore::transform
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/npu_loss_scale_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/npu_loss_scale_ops_declare.h
new file mode 100644
index 00000000000..7bf5986c5cc
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/npu_loss_scale_ops_declare.h
@@ -0,0 +1,35 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_NPU_LOSS_SCALE_OPS_DECLARE_H_
+#define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_NPU_LOSS_SCALE_OPS_DECLARE_H_
+
+#include <string>
+#include <unordered_map>
+#include "transform/graph_ir/op_declare/op_declare_macro.h"
+#include "ops/npu_loss_scale_ops.h"
+
+namespace mindspore::transform {
+DECLARE_OP_ADAPTER(NPUGetFloatStatus)
+DECLARE_OP_USE_OUTPUT(NPUGetFloatStatus)
+
+DECLARE_OP_ADAPTER(NPUAllocFloatStatus)
+DECLARE_OP_USE_OUTPUT(NPUAllocFloatStatus)
+
+DECLARE_OP_ADAPTER(NPUClearFloatStatus)
+DECLARE_OP_USE_OUTPUT(NPUClearFloatStatus)
+} // namespace mindspore::transform
+#endif // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_NPU_LOSS_SCALE_OPS_DECLARE_H_
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/op_declare_macro.h b/mindspore/ccsrc/transform/graph_ir/op_declare/op_declare_macro.h
new file mode 100644
index 00000000000..6cb1b453756
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/op_declare_macro.h
@@ -0,0 +1,169 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_MACRO_H_
+#define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_MACRO_H_
+
+#include <string>
+#include <memory>
+#include <unordered_map>
+#include "transform/graph_ir/op_adapter.h"
+#include "transform/graph_ir/op_adapter_map.h"
+#include "mindspore/core/base/core_ops.h"
+
+namespace mindspore::transform {
+#define DECLARE_OP_ADAPTER(T)                                        \
+  using T = ge::op::T;                                               \
+  template <>                                                        \
+  const std::unordered_map<int, InputDesc> OpAdapter<T>::input_map_; \
+  template <>                                                        \
+  const std::unordered_map<std::string, AttrDesc> OpAdapter<T>::attr_map_;
+
+#define DECLARE_OP_USE_OUTPUT(T) \
+  template <>                    \
+  const std::unordered_map<int, OutputDesc> OpAdapter<T>::output_map_;
+
+#define DECLARE_OP_USE_ENUM(T) \
+  template <>                  \
+  const std::unordered_map<std::string, int> OpAdapter<T>::enum_map_{};
+
+#define DECLARE_OP_USE_INPUT_ATTR(T) \
+  template <>                        \
+  const std::unordered_map<unsigned int, AttrDesc> OpAdapter<T>::input_attr_map_;
+
+#define DECLARE_OP_USE_DYN_INPUT(T) \
+  template <>                       \
+  const std::unordered_map<int, DynInputDesc> OpAdapter<T>::dyn_input_map_;
+
+#define DECLARE_OP_USE_DYN_SUBGRAPH(T) \
+  template <>                          \
+  const std::unordered_map<int, DynSubGraphDesc> OpAdapter<T>::dyn_subgraph_map_;
+
+#define DECLARE_OP_USE_DYN_OUTPUT(T) \
+  template <>                        \
+  const std::unordered_map<int, DynOutputDesc> OpAdapter<T>::dyn_output_map_;
+
+#define INPUT_MAP(T) \
+  template <>        \
+  const std::unordered_map<int, InputDesc> OpAdapter<T>::input_map_
+#define EMPTY_INPUT_MAP std::unordered_map<int, InputDesc>()
+#define INPUT_DESC(name)                                      \
+  {                                                           \
+    #name,                                                    \
+      [](const OperatorPtr op, const OperatorPtr input) {     \
+        auto p = std::static_pointer_cast<OpType>(op);        \
+        (void)p->set_input_##name(*input);                    \
+      },                                                      \
+      [](const OperatorPtr op, const OutHandler& handle) {    \
+        auto p = std::static_pointer_cast<OpType>(op);        \
+        (void)p->set_input_##name(*(handle.op), handle.out);  \
+      },                                                      \
+      [](const OperatorPtr op, const GeTensorDesc desc) {     \
+        auto p = std::static_pointer_cast<OpType>(op);        \
+        (void)p->update_input_desc_##name(desc);              \
+      }                                                       \
+  }
+
+#define DYN_INPUT_MAP(T) \
+  template <>            \
+  const std::unordered_map<int, DynInputDesc> OpAdapter<T>::dyn_input_map_
+#define DYN_INPUT_DESC(name)                                                    \
+  {                                                                             \
+    #name,                                                                      \
+      [](const OperatorPtr op, unsigned int num) {                              \
+        auto p = std::static_pointer_cast<OpType>(op);                          \
+        (void)p->create_dynamic_input_##name(num);                              \
+      },                                                                        \
+      [](const OperatorPtr op, unsigned int index, const OperatorPtr input) {   \
+        auto p = std::static_pointer_cast<OpType>(op);                          \
+        (void)p->set_dynamic_input_##name(index, *input);                       \
+      },                                                                        \
+      [](const OperatorPtr op, unsigned int index, const OutHandler& handle) {  \
+        auto p = std::static_pointer_cast<OpType>(op);                          \
+        (void)p->set_dynamic_input_##name(index, *(handle.op), handle.out);     \
+      }                                                                         \
+  }
+
+#define DYN_SUBGRAPH_MAP(T) \
+  template <>               \
+  const std::unordered_map<int, DynSubGraphDesc> OpAdapter<T>::dyn_subgraph_map_
+#define DYN_SUBGRAPH_DESC(name)                                                               \
+  {                                                                                           \
+    #name,                                                                                    \
+      [](const OperatorPtr op, unsigned int num) {                                            \
+        auto p = std::static_pointer_cast<OpType>(op);                                        \
+        (void)p->create_dynamic_subgraph_##name(num);                                         \
+      },                                                                                      \
+      [](const OperatorPtr op, unsigned int index, const DfGraphPtr graph) {                  \
+        auto p = std::static_pointer_cast<OpType>(op);                                        \
+        (void)p->set_dynamic_subgraph_builder_##name(index, [graph]() { return *graph; });    \
+      }                                                                                       \
+  }
+
+#define ATTR_MAP(T) \
+  template <>       \
+  const std::unordered_map<std::string, AttrDesc> OpAdapter<T>::attr_map_
+#define EMPTY_ATTR_MAP std::unordered_map<std::string, AttrDesc>()
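+
+// ATTR_DESC forwards the stored MindSpore value through ConvertAny: a single AnyTraits<T>()
+// names the target value type directly, while the paired form used throughout the declare
+// files (e.g. AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>()) selects a ConvertAny
+// overload that coerces the source value into the GE attribute type.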
+#define ATTR_DESC(name, ...)                                        \
+  {                                                                 \
+    #name,                                                          \
+      [](const OperatorPtr op, const ValuePtr& value) {             \
+        auto p = std::static_pointer_cast<OpType>(op);              \
+        (void)p->set_attr_##name(ConvertAny(value, __VA_ARGS__));   \
+      }                                                             \
+  }
+
+#define INPUT_ATTR_MAP(T) \
+  template <>             \
+  const std::unordered_map<unsigned int, AttrDesc> OpAdapter<T>::input_attr_map_
+
+#define OUTPUT_MAP(T) \
+  template <>         \
+  const std::unordered_map<int, OutputDesc> OpAdapter<T>::output_map_
+#define OUTPUT_DESC(name)                                   \
+  {                                                         \
+    #name,                                                  \
+      [](const OperatorPtr op, const GeTensorDesc desc) {   \
+        auto p = std::static_pointer_cast<OpType>(op);      \
+        (void)p->update_output_desc_##name(desc);           \
+      }                                                     \
+  }
+
+#define DYN_OUTPUT_MAP(T) \
+  template <>             \
+  const std::unordered_map<int, DynOutputDesc> OpAdapter<T>::dyn_output_map_
+
+#define DYN_OUTPUT_DESC(name)                             \
+  {                                                       \
+    #name,                                                \
+      [](const OperatorPtr op, unsigned int num) {        \
+        auto p = std::static_pointer_cast<OpType>(op);    \
+        (void)p->create_dynamic_output_##name(num);       \
+      }                                                   \
+  }
+
+#define ADPT_DESC_ONE(T) std::make_shared<OpAdapterDesc>(std::make_shared<OpAdapter<T>>())
+#define ADPT_DESC_TWO(T, I) \
+  std::make_shared<OpAdapterDesc>(std::make_shared<OpAdapter<T>>(), std::make_shared<OpAdapter<I>>())
+#define GET_MACRO(_1, _2, DESC, ...) DESC
+#define ADPT_DESC(...) GET_MACRO(__VA_ARGS__, ADPT_DESC_TWO, ADPT_DESC_ONE, ...)(__VA_ARGS__)
+#define REG_ADPT_DESC(name, name_str, adpt_desc)                        \
+  static struct RegAdptDesc##name {                                     \
+   public:                                                              \
+    RegAdptDesc##name() { OpAdapterMap::get()[name_str] = adpt_desc; }  \
+  } g_reg_adpt_desc_##name;
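+
+// For reference, REG_ADPT_DESC(X, kNameX, ADPT_DESC(X)) expands to a file-local struct whose
+// constructor runs during static initialization and stores the OpAdapterDesc into
+// OpAdapterMap::get() under the key kNameX; graph conversion later looks adapters up by name.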
+} // namespace mindspore::transform
+#endif // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_MACRO_H_
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/pad_ops_declare.cc b/mindspore/ccsrc/transform/graph_ir/op_declare/pad_ops_declare.cc
new file mode 100644
index 00000000000..dcbcd6e739d
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/pad_ops_declare.cc
@@ -0,0 +1,32 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "transform/graph_ir/op_declare/pad_ops_declare.h"
+#include <vector>
+
+namespace mindspore::transform {
+// PadD
+INPUT_MAP(PadD) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(PadD) = {{"paddings", ATTR_DESC(paddings, AnyTraits<std::vector<std::vector<int64_t>>>())}};
+OUTPUT_MAP(PadD) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(PadD, kNamePadD, ADPT_DESC(PadD))
+
+// Diag
+INPUT_MAP(Diag) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(Diag) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(Diag) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(Diag, kNameDiag, ADPT_DESC(Diag))
+} // namespace mindspore::transform
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/pad_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/pad_ops_declare.h
new file mode 100644
index 00000000000..43420d6bb9c
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/pad_ops_declare.h
@@ -0,0 +1,32 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_PAD_OPS_DECLARE_H_
+#define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_PAD_OPS_DECLARE_H_
+
+#include <string>
+#include <unordered_map>
+#include "transform/graph_ir/op_declare/op_declare_macro.h"
+#include "ops/pad_ops.h"
+
+namespace mindspore::transform {
+DECLARE_OP_ADAPTER(PadD)
+DECLARE_OP_USE_OUTPUT(PadD)
+
+DECLARE_OP_ADAPTER(Diag)
+DECLARE_OP_USE_OUTPUT(Diag)
+} // namespace mindspore::transform
+#endif // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_PAD_OPS_DECLARE_H_
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/quantize_ops_declare.cc b/mindspore/ccsrc/transform/graph_ir/op_declare/quantize_ops_declare.cc
new file mode 100644
index 00000000000..864f4de7e83
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/quantize_ops_declare.cc
@@ -0,0 +1,36 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "transform/graph_ir/op_declare/quantize_ops_declare.h"
+
+namespace mindspore::transform {
+// AscendQuant
+INPUT_MAP(AscendQuant) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(AscendQuant) = {{"scale", ATTR_DESC(scale, AnyTraits<float>())},
+                         {"offset", ATTR_DESC(offset, AnyTraits<float>())},
+                         {"sqrt_mode", ATTR_DESC(sqrt_mode, AnyTraits<bool>())},
+                         {"round_mode", ATTR_DESC(round_mode, AnyTraits<std::string>())}};
+OUTPUT_MAP(AscendQuant) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(AscendQuant, kNameAscendQuant, ADPT_DESC(AscendQuant))
+
+// AscendDequant
+INPUT_MAP(AscendDequant) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(deq_scale)}};
+ATTR_MAP(AscendDequant) = {{"sqrt_mode", ATTR_DESC(sqrt_mode, AnyTraits<bool>())},
+                           {"relu_flag", ATTR_DESC(relu_flag, AnyTraits<bool>())},
+                           {"dtype", ATTR_DESC(dtype, AnyTraits<GEType>())}};
+OUTPUT_MAP(AscendDequant) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(AscendDequant, kNameAscendDequant, ADPT_DESC(AscendDequant))
+} // namespace mindspore::transform
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/quantize_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/quantize_ops_declare.h
new file mode 100644
index 00000000000..60cb9a84c86
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/quantize_ops_declare.h
@@ -0,0 +1,32 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_QUANTIZE_OPS_DECLARE_H_
+#define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_QUANTIZE_OPS_DECLARE_H_
+
+#include <string>
+#include <unordered_map>
+#include "transform/graph_ir/op_declare/op_declare_macro.h"
+#include "ops/quantize_ops.h"
+
+namespace mindspore::transform {
+DECLARE_OP_ADAPTER(AscendQuant)
+DECLARE_OP_USE_OUTPUT(AscendQuant)
+
+DECLARE_OP_ADAPTER(AscendDequant)
+DECLARE_OP_USE_OUTPUT(AscendDequant)
+} // namespace mindspore::transform
+#endif // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_QUANTIZE_OPS_DECLARE_H_
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/random_ops_declare.cc b/mindspore/ccsrc/transform/graph_ir/op_declare/random_ops_declare.cc
new file mode 100644
index 00000000000..a407d8c66a2
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/random_ops_declare.cc
@@ -0,0 +1,41 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "transform/graph_ir/op_declare/random_ops_declare.h"
+
+namespace mindspore::transform {
+// DropOutGenMask
+INPUT_MAP(DropOutGenMask) = {{1, INPUT_DESC(shape)}, {2, INPUT_DESC(prob)}};
+ATTR_MAP(DropOutGenMask) = {{"Seed0", ATTR_DESC(seed, AnyTraits<int64_t>())},
+                            {"Seed1", ATTR_DESC(seed2, AnyTraits<int64_t>())}};
+OUTPUT_MAP(DropOutGenMask) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(DropOutGenMask, prim::kPrimDropoutGenMask->name(), ADPT_DESC(DropOutGenMask))
+
+// RandomChoiceWithMask
+INPUT_MAP(RandomChoiceWithMask) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(RandomChoiceWithMask) = {{"count", ATTR_DESC(count, AnyTraits<int64_t>())},
+                                  {"seed", ATTR_DESC(seed, AnyTraits<int64_t>())},
+                                  {"seed2", ATTR_DESC(seed2, AnyTraits<int64_t>())}};
+OUTPUT_MAP(RandomChoiceWithMask) = {{0, OUTPUT_DESC(y)}, {1, OUTPUT_DESC(mask)}};
+REG_ADPT_DESC(RandomChoiceWithMask, kNameRandomChoiceWithMask, ADPT_DESC(RandomChoiceWithMask))
+
+// TruncatedNormal
+INPUT_MAP(TruncatedNormal) = {{1, INPUT_DESC(shape)}};
+ATTR_MAP(TruncatedNormal) = {{"seed", ATTR_DESC(seed, AnyTraits<int64_t>())},
+                             {"seed2", ATTR_DESC(seed2, AnyTraits<int64_t>())}};
+OUTPUT_MAP(TruncatedNormal) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(TruncatedNormal, kNameTruncatedNormal, ADPT_DESC(TruncatedNormal))
+} // namespace mindspore::transform
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/random_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/random_ops_declare.h
new file mode 100644
index 00000000000..65f3f6b2806
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/random_ops_declare.h
@@ -0,0 +1,35 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_RANDOM_OPS_DECLARE_H_
+#define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_RANDOM_OPS_DECLARE_H_
+
+#include <string>
+#include <unordered_map>
+#include "transform/graph_ir/op_declare/op_declare_macro.h"
+#include "ops/random_ops.h"
+
+namespace mindspore::transform {
+DECLARE_OP_ADAPTER(DropOutGenMask)
+DECLARE_OP_USE_OUTPUT(DropOutGenMask)
+
+DECLARE_OP_ADAPTER(RandomChoiceWithMask)
+DECLARE_OP_USE_OUTPUT(RandomChoiceWithMask)
+
+DECLARE_OP_ADAPTER(TruncatedNormal)
+DECLARE_OP_USE_OUTPUT(TruncatedNormal)
+} // namespace mindspore::transform
+#endif // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_RANDOM_OPS_DECLARE_H_
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/reduce_ops_declare.cc b/mindspore/ccsrc/transform/graph_ir/op_declare/reduce_ops_declare.cc
new file mode 100644
index 00000000000..e641b5d4650
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/reduce_ops_declare.cc
@@ -0,0 +1,68 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "transform/graph_ir/op_declare/reduce_ops_declare.h"
+#include <vector>
+
+namespace mindspore::transform {
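+// All Reduce*D adapters below share one shape: the reduction axes arrive as MindSpore input 2
+// and are folded into the GE "axes" attribute through INPUT_ATTR_MAP, while keep_dims stays a
+// plain attribute in ATTR_MAP.
+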
+// ReduceSumD
+INPUT_MAP(ReduceSumD) = {{1, INPUT_DESC(x)}};
+INPUT_ATTR_MAP(ReduceSumD) = {
+  {2, ATTR_DESC(axes, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())}};
+ATTR_MAP(ReduceSumD) = {{"keep_dims", ATTR_DESC(keep_dims, AnyTraits<bool>())}};
+OUTPUT_MAP(ReduceSumD) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(ReduceSumD, prim::kPrimReduceSum->name(), ADPT_DESC(ReduceSumD))
+
+// ReduceProdD
+INPUT_MAP(ReduceProdD) = {{1, INPUT_DESC(x)}};
+INPUT_ATTR_MAP(ReduceProdD) = {
+  {2, ATTR_DESC(axes, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())}};
+ATTR_MAP(ReduceProdD) = {{"keep_dims", ATTR_DESC(keep_dims, AnyTraits<bool>())}};
+OUTPUT_MAP(ReduceProdD) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(ReduceProdD, kNameReduceProd, ADPT_DESC(ReduceProdD))
+
+// ReduceAllD
+INPUT_MAP(ReduceAllD) = {{1, INPUT_DESC(x)}};
+INPUT_ATTR_MAP(ReduceAllD) = {
+  {2, ATTR_DESC(axes, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())}};
+ATTR_MAP(ReduceAllD) = {{"keep_dims", ATTR_DESC(keep_dims, AnyTraits<bool>())}};
+OUTPUT_MAP(ReduceAllD) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(ReduceAllD, prim::kPrimReduceAll->name(), ADPT_DESC(ReduceAllD))
+
+// ReduceMeanD
+INPUT_MAP(ReduceMeanD) = {{1, INPUT_DESC(x)}};
+INPUT_ATTR_MAP(ReduceMeanD) = {
+  {2, ATTR_DESC(axes, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())}};
+ATTR_MAP(ReduceMeanD) = {{"keep_dims", ATTR_DESC(keep_dims, AnyTraits<bool>())}};
+OUTPUT_MAP(ReduceMeanD) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(ReduceMeanD, prim::kPrimReduceMean->name(), ADPT_DESC(ReduceMeanD))
+
+// ReduceMinD
+INPUT_MAP(ReduceMinD) = {{1, INPUT_DESC(x)}};
+INPUT_ATTR_MAP(ReduceMinD) = {
+  {2, ATTR_DESC(axes, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())}};
+ATTR_MAP(ReduceMinD) = {{"keep_dims", ATTR_DESC(keep_dims, AnyTraits<bool>())}};
+OUTPUT_MAP(ReduceMinD) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(ReduceMinD, prim::kPrimReduceMin->name(), ADPT_DESC(ReduceMinD))
+
+// ReduceMaxD
+INPUT_MAP(ReduceMaxD) = {{1, INPUT_DESC(x)}};
+INPUT_ATTR_MAP(ReduceMaxD) = {
+  {2, ATTR_DESC(axes, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())}};
+ATTR_MAP(ReduceMaxD) = {{"keep_dims", ATTR_DESC(keep_dims, AnyTraits<bool>())}};
+OUTPUT_MAP(ReduceMaxD) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(ReduceMaxD, prim::kPrimReduceMax->name(), ADPT_DESC(ReduceMaxD))
+} // namespace mindspore::transform
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/reduce_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/reduce_ops_declare.h
new file mode 100644
index 00000000000..6bb0e703ae9
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/reduce_ops_declare.h
@@ -0,0 +1,52 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_REDUCE_OPS_DECLARE_H_
+#define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_REDUCE_OPS_DECLARE_H_
+
+#include <string>
+#include <unordered_map>
+#include "transform/graph_ir/op_declare/op_declare_macro.h"
+#include "ops/reduce_ops.h"
+
+namespace mindspore::transform {
+DECLARE_OP_ADAPTER(ReduceMean)
+
+DECLARE_OP_ADAPTER(ReduceMinD)
+DECLARE_OP_USE_INPUT_ATTR(ReduceMinD)
+DECLARE_OP_USE_OUTPUT(ReduceMinD)
+
+DECLARE_OP_ADAPTER(ReduceMaxD)
+DECLARE_OP_USE_INPUT_ATTR(ReduceMaxD)
+DECLARE_OP_USE_OUTPUT(ReduceMaxD)
+
+DECLARE_OP_ADAPTER(ReduceAllD)
+DECLARE_OP_USE_INPUT_ATTR(ReduceAllD)
+DECLARE_OP_USE_OUTPUT(ReduceAllD)
+
+DECLARE_OP_ADAPTER(ReduceSumD)
+DECLARE_OP_USE_INPUT_ATTR(ReduceSumD)
+DECLARE_OP_USE_OUTPUT(ReduceSumD)
+
+DECLARE_OP_ADAPTER(ReduceMeanD)
+DECLARE_OP_USE_INPUT_ATTR(ReduceMeanD)
+DECLARE_OP_USE_OUTPUT(ReduceMeanD)
+
+DECLARE_OP_ADAPTER(ReduceProdD)
+DECLARE_OP_USE_INPUT_ATTR(ReduceProdD)
+DECLARE_OP_USE_OUTPUT(ReduceProdD)
+} // namespace mindspore::transform
+#endif // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_REDUCE_OPS_DECLARE_H_
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/rnn_declare.cc b/mindspore/ccsrc/transform/graph_ir/op_declare/rnn_declare.cc
new file mode 100644
index 00000000000..a6275f8df52
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/rnn_declare.cc
@@ -0,0 +1,51 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "transform/graph_ir/op_declare/rnn_declare.h"
+
+namespace mindspore::transform {
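+// BasicLSTMCell exposes every gate tensor (it/jt/ft/ot/tanhct) as an output so the cell-state
+// backward kernel below (BasicLSTMCellCStateGrad) can consume them without recomputation.
+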
+// BasicLSTMCell
+INPUT_MAP(BasicLSTMCell) = {
+  {1, INPUT_DESC(x)}, {2, INPUT_DESC(h)}, {3, INPUT_DESC(c)}, {4, INPUT_DESC(w)}, {5, INPUT_DESC(b)}};
+ATTR_MAP(BasicLSTMCell) = {{"keep_prob", ATTR_DESC(keep_prob, AnyTraits<float>())},
+                           {"forget_bias", ATTR_DESC(forget_bias, AnyTraits<float>())},
+                           {"state_is_tuple", ATTR_DESC(state_is_tuple, AnyTraits<bool>())},
+                           {"activation", ATTR_DESC(activation, AnyTraits<std::string>())}};
+OUTPUT_MAP(BasicLSTMCell) = {{0, OUTPUT_DESC(ct)}, {1, OUTPUT_DESC(ht)}, {2, OUTPUT_DESC(it)}, {3, OUTPUT_DESC(jt)},
+                             {4, OUTPUT_DESC(ft)}, {5, OUTPUT_DESC(ot)}, {6, OUTPUT_DESC(tanhct)}};
+REG_ADPT_DESC(BasicLSTMCell, kNameBasicLSTMCell, ADPT_DESC(BasicLSTMCell))
+
+// BasicLSTMCellInputGrad
+INPUT_MAP(BasicLSTMCellInputGrad) = {{1, INPUT_DESC(dgate)}, {2, INPUT_DESC(w)}};
+ATTR_MAP(BasicLSTMCellInputGrad) = {{"keep_prob", ATTR_DESC(keep_prob, AnyTraits<float>())}};
+OUTPUT_MAP(BasicLSTMCellInputGrad) = {{0, OUTPUT_DESC(dxt)}, {1, OUTPUT_DESC(dht)}};
+REG_ADPT_DESC(BasicLSTMCellInputGrad, kNameBasicLSTMCellInputGrad, ADPT_DESC(BasicLSTMCellInputGrad))
+
+// BasicLSTMCellWeightGrad
+INPUT_MAP(BasicLSTMCellWeightGrad) = {{1, INPUT_DESC(h)}, {2, INPUT_DESC(x)}, {3, INPUT_DESC(dgate)}};
+ATTR_MAP(BasicLSTMCellWeightGrad) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(BasicLSTMCellWeightGrad) = {{0, OUTPUT_DESC(dw)}, {1, OUTPUT_DESC(db)}};
+REG_ADPT_DESC(BasicLSTMCellWeightGrad, kNameBasicLSTMCellWeightGrad, ADPT_DESC(BasicLSTMCellWeightGrad))
+
+// BasicLSTMCellCStateGrad
+INPUT_MAP(BasicLSTMCellCStateGrad) = {{1, INPUT_DESC(c)},  {2, INPUT_DESC(dht)}, {3, INPUT_DESC(dct)},
+                                      {4, INPUT_DESC(it)}, {5, INPUT_DESC(jt)},  {6, INPUT_DESC(ft)},
+                                      {7, INPUT_DESC(ot)}, {8, INPUT_DESC(tanhct)}};
+ATTR_MAP(BasicLSTMCellCStateGrad) = {{"forget_bias", ATTR_DESC(forget_bias, AnyTraits<float>())},
+                                     {"activation", ATTR_DESC(activation, AnyTraits<std::string>())}};
+OUTPUT_MAP(BasicLSTMCellCStateGrad) = {{0, OUTPUT_DESC(dgate)}, {1, OUTPUT_DESC(dct_1)}};
+REG_ADPT_DESC(BasicLSTMCellCStateGrad, kNameBasicLSTMCellCStateGrad, ADPT_DESC(BasicLSTMCellCStateGrad))
+} // namespace mindspore::transform
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/rnn_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/rnn_declare.h
new file mode 100644
index 00000000000..247c6a1a144
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/rnn_declare.h
@@ -0,0 +1,38 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_RNN_DECLARE_H_
+#define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_RNN_DECLARE_H_
+
+#include <string>
+#include <unordered_map>
+#include "transform/graph_ir/op_declare/op_declare_macro.h"
+#include "ops/rnn.h"
+
+namespace mindspore::transform {
+DECLARE_OP_ADAPTER(BasicLSTMCell)
+DECLARE_OP_USE_OUTPUT(BasicLSTMCell)
+
+DECLARE_OP_ADAPTER(BasicLSTMCellInputGrad)
+DECLARE_OP_USE_OUTPUT(BasicLSTMCellInputGrad)
+
+DECLARE_OP_ADAPTER(BasicLSTMCellWeightGrad)
+DECLARE_OP_USE_OUTPUT(BasicLSTMCellWeightGrad)
+
+DECLARE_OP_ADAPTER(BasicLSTMCellCStateGrad)
+DECLARE_OP_USE_OUTPUT(BasicLSTMCellCStateGrad)
+} // namespace mindspore::transform
+#endif // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_RNN_DECLARE_H_
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/rpn_ops_declare.cc b/mindspore/ccsrc/transform/graph_ir/op_declare/rpn_ops_declare.cc
new file mode 100644
index 00000000000..00f1eacf4d2
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/rpn_ops_declare.cc
@@ -0,0 +1,26 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "transform/graph_ir/op_declare/rpn_ops_declare.h"
+
+namespace mindspore::transform {
+// NMSWithMask
+INPUT_MAP(NMSWithMask) = {{1, INPUT_DESC(box_scores)}};
+ATTR_MAP(NMSWithMask) = {{"iou_threshold", ATTR_DESC(iou_threshold, AnyTraits<float>())}};
+OUTPUT_MAP(NMSWithMask) = {
+  {0, OUTPUT_DESC(selected_boxes)}, {1, OUTPUT_DESC(selected_idx)}, {2, OUTPUT_DESC(selected_mask)}};
+REG_ADPT_DESC(NMSWithMask, kNameNMSWithMask, ADPT_DESC(NMSWithMask))
+} // namespace mindspore::transform
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/rpn_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/rpn_ops_declare.h
new file mode 100644
index 00000000000..97928a4cad4
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/rpn_ops_declare.h
@@ -0,0 +1,29 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_RPN_OPS_DECLARE_H_
+#define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_RPN_OPS_DECLARE_H_
+
+#include <string>
+#include <unordered_map>
+#include "transform/graph_ir/op_declare/op_declare_macro.h"
+#include "ops/rpn_ops.h"
+
+namespace mindspore::transform {
+DECLARE_OP_ADAPTER(NMSWithMask)
+DECLARE_OP_USE_OUTPUT(NMSWithMask)
+} // namespace mindspore::transform
+#endif // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_RPN_OPS_DECLARE_H_
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/selection_ops_declare.cc b/mindspore/ccsrc/transform/graph_ir/op_declare/selection_ops_declare.cc
new file mode 100644
index 00000000000..27dfe612b65
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/selection_ops_declare.cc
@@ -0,0 +1,136 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "transform/graph_ir/op_declare/selection_ops_declare.h"
+#include <vector>
+
+namespace mindspore::transform {
+// CumsumD
+INPUT_MAP(CumsumD) = {{1, INPUT_DESC(x)}};
+INPUT_ATTR_MAP(CumsumD) = {{2, ATTR_DESC(axis, AnyTraits<int64_t>())}};
+ATTR_MAP(CumsumD) = {{"exclusive", ATTR_DESC(exclusive, AnyTraits<bool>())},
+                     {"reverse", ATTR_DESC(reverse, AnyTraits<bool>())}};
+OUTPUT_MAP(CumsumD) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(CumsumD, kNameCumSum, ADPT_DESC(CumsumD))
+
+// GatherV2
+INPUT_MAP(GatherV2) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(indices)}, {3, INPUT_DESC(axis)}};
+ATTR_MAP(GatherV2) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(GatherV2) = {{0, OUTPUT_DESC(y)}};
+
+// CumprodD
+INPUT_MAP(CumprodD) = {{1, INPUT_DESC(x)}};
+INPUT_ATTR_MAP(CumprodD) = {{2, ATTR_DESC(axis, AnyTraits<int64_t>())}};
+ATTR_MAP(CumprodD) = {{"exclusive", ATTR_DESC(exclusive, AnyTraits<bool>())},
+                      {"reverse", ATTR_DESC(reverse, AnyTraits<bool>())}};
+OUTPUT_MAP(CumprodD) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(CumprodD, kNameCumProd, ADPT_DESC(CumprodD))
+
+INPUT_MAP(SliceD) = {{1, INPUT_DESC(x)}};
+INPUT_ATTR_MAP(SliceD) = {{2, ATTR_DESC(offsets, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
+                          {3, ATTR_DESC(size, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())}};
+ATTR_MAP(SliceD) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(SliceD) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(SliceD, kNameSlice, ADPT_DESC(SliceD))
+
+// TopK
+INPUT_MAP(TopK) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(k)}};
+ATTR_MAP(TopK) = {{"sorted", ATTR_DESC(sorted, AnyTraits<bool>())}};
+OUTPUT_MAP(TopK) = {{0, OUTPUT_DESC(values)}, {1, OUTPUT_DESC(indices)}};
+REG_ADPT_DESC(TopK, kNameTopK, ADPT_DESC(TopK))
+
+// TileD
+INPUT_MAP(TileD) = {{1, INPUT_DESC(x)}};
+INPUT_ATTR_MAP(TileD) = {{2, ATTR_DESC(multiples, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())}};
+ATTR_MAP(TileD) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(TileD) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(TileD, kNameTile, ADPT_DESC(TileD))
+
+// OneHot
+INPUT_MAP(OneHot) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(depth)}, {3, INPUT_DESC(on_value)}, {4, INPUT_DESC(off_value)}};
+ATTR_MAP(OneHot) = {{"axis", ATTR_DESC(axis, AnyTraits<int64_t>())}};
+OUTPUT_MAP(OneHot) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(OneHot, prim::kPrimOneHot->name(), ADPT_DESC(OneHot))
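REG_ADPT_DESC is the other half of the mechanism: it plants a static object whose constructor enters an adapter factory into a name-keyed registry, so the converter can look adapters up by primitive name. A standalone sketch of that idiom follows; the names Registry, RegisterHelper and BaseAdapter are illustrative stand-ins, not the real transform classes.

    #include <functional>
    #include <iostream>
    #include <memory>
    #include <string>
    #include <unordered_map>

    struct BaseAdapter {
      virtual ~BaseAdapter() = default;
      virtual std::string Describe() const = 0;
    };
    using AdapterFactory = std::function<std::shared_ptr<BaseAdapter>()>;

    // Meyers-singleton registry keyed by operator name.
    std::unordered_map<std::string, AdapterFactory> &Registry() {
      static std::unordered_map<std::string, AdapterFactory> registry;
      return registry;
    }

    // A static instance of this type performs the registration at program start,
    // which is what a REG_ADPT_DESC(...) expansion boils down to.
    struct RegisterHelper {
      RegisterHelper(const std::string &name, AdapterFactory factory) { Registry()[name] = std::move(factory); }
    };

    template <typename T>
    struct OpAdapter : BaseAdapter {
      std::string Describe() const override { return "adapter instance"; }
    };

    struct OneHot {};  // stand-in for the generated GE operator class

    static RegisterHelper g_one_hot_reg("OneHot", [] { return std::make_shared<OpAdapter<OneHot>>(); });

    int main() {
      auto adapter = Registry().at("OneHot")();  // converter resolves adapters by op name
      std::cout << adapter->Describe() << "\n";
      return 0;
    }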
+
+// GatherV2D
+INPUT_MAP(GatherV2D) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(indices)}};
+INPUT_ATTR_MAP(GatherV2D) = {{3, ATTR_DESC(axis, AnyTraits<int64_t>())}};
+ATTR_MAP(GatherV2D) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(GatherV2D) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(GatherV2D, prim::kPrimGatherV2->name(), ADPT_DESC(GatherV2D))
+
+// ScatterNdD
+INPUT_MAP(ScatterNdD) = {{1, INPUT_DESC(indices)}, {2, INPUT_DESC(x)}};
+INPUT_ATTR_MAP(ScatterNdD) = {
+  {3, ATTR_DESC(shape, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())}};
+ATTR_MAP(ScatterNdD) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(ScatterNdD) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(ScatterNdD, kNameScatterNdD, ADPT_DESC(ScatterNdD))
+
+// GatherNd
+INPUT_MAP(GatherNd) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(indices)}};
+ATTR_MAP(GatherNd) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(GatherNd) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(GatherNd, kNameGatherNd, ADPT_DESC(GatherNd))
+
+// Range
+INPUT_MAP(RangeD) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(RangeD) = {{"start", ATTR_DESC(start, AnyTraits<float>())},
+                    {"limit", ATTR_DESC(limit, AnyTraits<float>())},
+                    {"delta", ATTR_DESC(delta, AnyTraits<float>())}};
+OUTPUT_MAP(RangeD) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(RangeD, kNameRange, ADPT_DESC(RangeD))
+
+// Select
+INPUT_MAP(Select) = {{1, INPUT_DESC(condition)}, {2, INPUT_DESC(x1)}, {3, INPUT_DESC(x2)}};
+ATTR_MAP(Select) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(Select) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(Select, prim::kPrimSelect->name(), ADPT_DESC(Select))
+
+// StridedSliceGrad
+INPUT_MAP(StridedSliceGrad) = {
+  {1, INPUT_DESC(dy)}, {2, INPUT_DESC(shape)}, {3, INPUT_DESC(begin)}, {4, INPUT_DESC(end)}, {5, INPUT_DESC(strides)}};
+ATTR_MAP(StridedSliceGrad) = {{"begin_mask", ATTR_DESC(begin_mask, AnyTraits<int64_t>())},
+                              {"end_mask", ATTR_DESC(end_mask, AnyTraits<int64_t>())},
+                              {"ellipsis_mask", ATTR_DESC(ellipsis_mask, AnyTraits<int64_t>())},
+                              {"new_axis_mask", ATTR_DESC(new_axis_mask, AnyTraits<int64_t>())},
+                              {"shrink_axis_mask", ATTR_DESC(shrink_axis_mask, AnyTraits<int64_t>())}};
+OUTPUT_MAP(StridedSliceGrad) = {{0, OUTPUT_DESC(output)}};
+REG_ADPT_DESC(StridedSliceGrad, kNameStridedSliceGrad, ADPT_DESC(StridedSliceGrad))
+
+// StridedSlice
+INPUT_MAP(StridedSlice) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(begin)}, {3, INPUT_DESC(end)}, {4, INPUT_DESC(strides)}};
+ATTR_MAP(StridedSlice) = {{"begin_mask", ATTR_DESC(begin_mask, AnyTraits<int64_t>())},
+                          {"end_mask", ATTR_DESC(end_mask, AnyTraits<int64_t>())},
+                          {"ellipsis_mask", ATTR_DESC(ellipsis_mask, AnyTraits<int64_t>())},
+                          {"new_axis_mask", ATTR_DESC(new_axis_mask, AnyTraits<int64_t>())},
+                          {"shrink_axis_mask", ATTR_DESC(shrink_axis_mask, AnyTraits<int64_t>())}};
+OUTPUT_MAP(StridedSlice) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(StridedSlice, kNameStridedSlice, ADPT_DESC(StridedSlice))
+
+// UnsortedSegmentSum
+INPUT_MAP(UnsortedSegmentSumD) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(segment_ids)}};
+INPUT_ATTR_MAP(UnsortedSegmentSumD) = {{3, ATTR_DESC(num_segments, AnyTraits<int64_t>())}};
+ATTR_MAP(UnsortedSegmentSumD) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(UnsortedSegmentSumD) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(UnsortedSegmentSumD, prim::kPrimUnsortedSegmentSum->name(), ADPT_DESC(UnsortedSegmentSumD))
+
+// UnsortedSegmentMin
+INPUT_MAP(UnsortedSegmentMin) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(segment_ids)}, {3, INPUT_DESC(num_segments)}};
+ATTR_MAP(UnsortedSegmentMin) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(UnsortedSegmentMin) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(UnsortedSegmentMin, prim::kPrimUnsortedSegmentMin->name(), ADPT_DESC(UnsortedSegmentMin))
+} // namespace mindspore::transform
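The D-suffixed adapters above (GatherV2D, ScatterNdD, UnsortedSegmentSumD) use INPUT_ATTR_MAP to record that a given MindSpore input position carries a compile-time constant that must be folded into a GE attribute rather than wired as a tensor edge. A standalone sketch of that conversion step, with toy types; ConvertAny here is a simplified stand-in for the real trait-driven converter:

    #include <cstdint>
    #include <functional>
    #include <iostream>
    #include <unordered_map>
    #include <variant>
    #include <vector>

    // Toy front-end value and a trait-driven converter in the spirit of AnyTraits/ConvertAny.
    using Value = std::variant<int64_t, std::vector<int64_t>>;

    template <typename T>
    struct AnyTraits {
      using type = T;
    };

    // Pull the requested C++ type out of the generic value.
    template <typename T>
    T ConvertAny(const Value &v, const AnyTraits<T> &) {
      return std::get<T>(v);
    }

    struct GeOp {  // toy generated operator with an attribute setter
      int64_t axis = 0;
      void set_attr_axis(int64_t v) { axis = v; }
    };

    int main() {
      // Mirrors INPUT_ATTR_MAP(GatherV2D) = {{3, ATTR_DESC(axis, AnyTraits<int64_t>())}}:
      // input slot 3 is a constant that becomes the "axis" attribute of the GE op.
      std::unordered_map<unsigned int, std::function<void(GeOp &, const Value &)>> input_attr_map = {
        {3, [](GeOp &op, const Value &v) { op.set_attr_axis(ConvertAny(v, AnyTraits<int64_t>())); }}};

      GeOp op;
      input_attr_map.at(3)(op, Value(int64_t{1}));  // constant axis=1 folded into an attribute
      std::cout << "axis attr = " << op.axis << "\n";
      return 0;
    }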
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/selection_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/selection_ops_declare.h
new file mode 100644
index 00000000000..c2b30977380
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/selection_ops_declare.h
@@ -0,0 +1,81 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_SELECTION_OPS_DECLARE_H_
+#define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_SELECTION_OPS_DECLARE_H_
+
+#include <string>
+#include <unordered_map>
+#include "transform/graph_ir/op_declare/op_declare_macro.h"
+#include "ops/selection_ops.h"
+
+namespace mindspore::transform {
+DECLARE_OP_ADAPTER(SliceD)
+DECLARE_OP_USE_INPUT_ATTR(SliceD)
+DECLARE_OP_USE_OUTPUT(SliceD)
+
+DECLARE_OP_ADAPTER(ScatterNdD)
+DECLARE_OP_USE_INPUT_ATTR(ScatterNdD)
+DECLARE_OP_USE_OUTPUT(ScatterNdD)
+
+DECLARE_OP_ADAPTER(GatherNd)
+DECLARE_OP_USE_OUTPUT(GatherNd)
+
+DECLARE_OP_ADAPTER(TopK)
+DECLARE_OP_USE_OUTPUT(TopK)
+
+DECLARE_OP_ADAPTER(Select)
+DECLARE_OP_USE_OUTPUT(Select)
+
+DECLARE_OP_ADAPTER(StridedSliceGrad)
+DECLARE_OP_USE_OUTPUT(StridedSliceGrad)
+
+DECLARE_OP_ADAPTER(StridedSlice)
+DECLARE_OP_USE_OUTPUT(StridedSlice)
+
+DECLARE_OP_ADAPTER(UnsortedSegmentSumD)
+DECLARE_OP_USE_INPUT_ATTR(UnsortedSegmentSumD)
+DECLARE_OP_USE_OUTPUT(UnsortedSegmentSumD)
+
+DECLARE_OP_ADAPTER(UnsortedSegmentMin)
+DECLARE_OP_USE_OUTPUT(UnsortedSegmentMin)
+
+DECLARE_OP_ADAPTER(CumprodD)
+DECLARE_OP_USE_INPUT_ATTR(CumprodD)
+DECLARE_OP_USE_OUTPUT(CumprodD)
+
+DECLARE_OP_ADAPTER(TileD)
+DECLARE_OP_USE_INPUT_ATTR(TileD)
+DECLARE_OP_USE_OUTPUT(TileD)
+
+DECLARE_OP_ADAPTER(OneHot)
+DECLARE_OP_USE_OUTPUT(OneHot)
+
+DECLARE_OP_ADAPTER(GatherV2D)
+DECLARE_OP_USE_INPUT_ATTR(GatherV2D)
+DECLARE_OP_USE_OUTPUT(GatherV2D)
+
+DECLARE_OP_ADAPTER(RangeD)
+DECLARE_OP_USE_OUTPUT(RangeD)
+
+DECLARE_OP_ADAPTER(CumsumD)
+DECLARE_OP_USE_INPUT_ATTR(CumsumD)
+DECLARE_OP_USE_OUTPUT(CumsumD)
+
+DECLARE_OP_ADAPTER(GatherV2)
+DECLARE_OP_USE_OUTPUT(GatherV2)
+} // namespace mindspore::transform
+#endif // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_SELECTION_OPS_DECLARE_H_
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/split_combination_ops_declare.cc b/mindspore/ccsrc/transform/graph_ir/op_declare/split_combination_ops_declare.cc
new file mode 100644
index 00000000000..61a778ac24e
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/split_combination_ops_declare.cc
@@ -0,0 +1,43 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "transform/graph_ir/op_declare/split_combination_ops_declare.h"
+
+namespace mindspore::transform {
+// SplitD
+INPUT_MAP(SplitD) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(SplitD) = {{"axis", ATTR_DESC(split_dim, AnyTraits<int64_t>())},
+                    {"output_num", ATTR_DESC(num_split, AnyTraits<int64_t>())}};
+DYN_OUTPUT_MAP(SplitD) = {{0, DYN_OUTPUT_DESC(y)}};
+REG_ADPT_DESC(SplitD, kNameSplitD, ADPT_DESC(SplitD))
+
+// Pack
+INPUT_MAP(Pack) = EMPTY_INPUT_MAP;
+DYN_INPUT_MAP(Pack) = {{1, DYN_INPUT_DESC(x)}};
+ATTR_MAP(Pack) = {{"num", ATTR_DESC(N, AnyTraits<int64_t>())}, {"axis", ATTR_DESC(axis, AnyTraits<int64_t>())}};
+OUTPUT_MAP(Pack) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(Pack, kNamePack, ADPT_DESC(Pack))
+
+// ConcatD
+INPUT_MAP(ConcatD) = EMPTY_INPUT_MAP;
+DYN_INPUT_MAP(ConcatD) = {{1, DYN_INPUT_DESC(x)}};
+ATTR_MAP(ConcatD) = {
+  {"axis", ATTR_DESC(concat_dim, AnyTraits<int64_t>())},
+  {"inputNums", ATTR_DESC(N, AnyTraits<int64_t>())},
+};
+OUTPUT_MAP(ConcatD) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(ConcatD, prim::kPrimConcat->name(), ADPT_DESC(ConcatD))
+} // namespace mindspore::transform
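SplitD, Pack and ConcatD use the DYN_INPUT_MAP/DYN_OUTPUT_MAP variants because their arity is only known per node: the descriptor first reserves the dynamic port for N operands, then wires each index. The toy program below sketches that two-step shape; ConcatOp, DynInputDesc and the method names are illustrative stand-ins for the generated GE classes.

    #include <functional>
    #include <iostream>
    #include <string>
    #include <vector>

    // Toy operator with a dynamic input port, in the spirit of
    // create_dynamic_input_x / set_dynamic_input_x on the generated classes.
    struct ConcatOp {
      std::vector<std::string> x;
      void create_dynamic_input_x(unsigned int num) { x.resize(num); }
      void set_dynamic_input_x(unsigned int index, const std::string &v) { x.at(index) = v; }
    };

    // Mirrors the two-step shape of DYN_INPUT_DESC(name): one functor to size
    // the port, one to attach the operand at a given index.
    struct DynInputDesc {
      std::function<void(ConcatOp &, unsigned int)> create;
      std::function<void(ConcatOp &, unsigned int, const std::string &)> set;
    };

    int main() {
      DynInputDesc dyn_x{
        [](ConcatOp &op, unsigned int num) { op.create_dynamic_input_x(num); },
        [](ConcatOp &op, unsigned int i, const std::string &v) { op.set_dynamic_input_x(i, v); }};

      ConcatOp op;
      dyn_x.create(op, 3);  // ConcatD with N = 3 inputs
      for (unsigned int i = 0; i < 3; ++i) {
        dyn_x.set(op, i, "operand" + std::to_string(i));
      }
      std::cout << op.x.size() << " dynamic inputs wired\n";
      return 0;
    }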
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/split_combination_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/split_combination_ops_declare.h
new file mode 100644
index 00000000000..79cad8093b7
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/split_combination_ops_declare.h
@@ -0,0 +1,37 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_SPLIT_COMBINATION_OPS_DECLARE_H_
+#define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_SPLIT_COMBINATION_OPS_DECLARE_H_
+
+#include <string>
+#include <unordered_map>
+#include "transform/graph_ir/op_declare/op_declare_macro.h"
+#include "ops/split_combination_ops.h"
+
+namespace mindspore::transform {
+DECLARE_OP_ADAPTER(SplitD)
+DECLARE_OP_USE_DYN_OUTPUT(SplitD)
+
+DECLARE_OP_ADAPTER(ConcatD)
+DECLARE_OP_USE_DYN_INPUT(ConcatD)
+DECLARE_OP_USE_OUTPUT(ConcatD)
+
+DECLARE_OP_ADAPTER(Pack)
+DECLARE_OP_USE_DYN_INPUT(Pack)
+DECLARE_OP_USE_OUTPUT(Pack)
+} // namespace mindspore::transform
+#endif // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_SPLIT_COMBINATION_OPS_DECLARE_H_
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/state_ops_declare.cc b/mindspore/ccsrc/transform/graph_ir/op_declare/state_ops_declare.cc
new file mode 100644
index 00000000000..3286fcb0d16
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/state_ops_declare.cc
@@ -0,0 +1,23 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "transform/graph_ir/op_declare/state_ops_declare.h"
+
+namespace mindspore::transform {
+// Variable
+INPUT_MAP(Variable) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(Variable) = EMPTY_ATTR_MAP;
+} // namespace mindspore::transform
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/state_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/state_ops_declare.h
new file mode 100644
index 00000000000..6c7c0c639f0
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/state_ops_declare.h
@@ -0,0 +1,28 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_STATE_OPS_DECLARE_H_
+#define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_STATE_OPS_DECLARE_H_
+
+#include <string>
+#include <unordered_map>
+#include "transform/graph_ir/op_declare/op_declare_macro.h"
+#include "ops/state_ops.h"
+
+namespace mindspore::transform {
+DECLARE_OP_ADAPTER(Variable)
+} // namespace mindspore::transform
+#endif // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_STATE_OPS_DECLARE_H_
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/transformation_ops_declare.cc b/mindspore/ccsrc/transform/graph_ir/op_declare/transformation_ops_declare.cc
new file mode 100644
index 00000000000..81fd2cd4e0e
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/transformation_ops_declare.cc
@@ -0,0 +1,76 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "transform/graph_ir/op_declare/transformation_ops_declare.h"
+#include <vector>
+
+namespace mindspore::transform {
+// Flatten
+INPUT_MAP(Flatten) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(Flatten) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(Flatten) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(Flatten, prim::kPrimFlatten->name(), ADPT_DESC(Flatten))
+
+// Unpack
+INPUT_MAP(Unpack) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(Unpack) = {{"axis", ATTR_DESC(axis, AnyTraits<int64_t>())}, {"num", ATTR_DESC(num, AnyTraits<int64_t>())}};
+DYN_OUTPUT_MAP(Unpack) = {{0, DYN_OUTPUT_DESC(y)}};
+REG_ADPT_DESC(Unpack, kNameUnpack, ADPT_DESC(Unpack))
+
+// ExtractImagePatches
+INPUT_MAP(ExtractImagePatches) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(ExtractImagePatches) = {
+  {"ksizes", ATTR_DESC(ksizes, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
+  {"strides", ATTR_DESC(strides, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
+  {"rates", ATTR_DESC(rates, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
+  {"padding", ATTR_DESC(padding, AnyTraits<std::string>())}};
+OUTPUT_MAP(ExtractImagePatches) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(ExtractImagePatches, kNameExtractImagePatches, ADPT_DESC(ExtractImagePatches))
+
+// Transpose
+INPUT_MAP(TransposeD) = {{1, INPUT_DESC(x)}};
+INPUT_ATTR_MAP(TransposeD) = {{2, ATTR_DESC(perm, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())}};
+ATTR_MAP(TransposeD) = EMPTY_ATTR_MAP;
+// Do not set Transpose operator output descriptor
+REG_ADPT_DESC(TransposeD, prim::kPrimTranspose->name(), ADPT_DESC(TransposeD))
+
+// SpaceToDepth
+INPUT_MAP(SpaceToDepth) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(SpaceToDepth) = {{"block_size", ATTR_DESC(block_size, AnyTraits<int64_t>())}};
+OUTPUT_MAP(SpaceToDepth) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(SpaceToDepth, kNameSpaceToDepth, ADPT_DESC(SpaceToDepth))
+
+// DepthToSpace
+INPUT_MAP(DepthToSpace) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(DepthToSpace) = {{"block_size", ATTR_DESC(block_size, AnyTraits<int64_t>())}};
+OUTPUT_MAP(DepthToSpace) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(DepthToSpace, kNameDepthToSpace, ADPT_DESC(DepthToSpace))
+
+// SpaceToBatchD
+INPUT_MAP(SpaceToBatchD) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(SpaceToBatchD) = {
+  {"block_size", ATTR_DESC(block_size, AnyTraits<int64_t>())},
+  {"paddings", ATTR_DESC(paddings, AnyTraits<std::vector<std::vector<int64_t>>>(), AnyTraits<std::vector<int64_t>>())}};
+OUTPUT_MAP(SpaceToBatchD) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(SpaceToBatchD, kNameSpaceToBatch, ADPT_DESC(SpaceToBatchD))
+
+// BatchToSpaceD
+INPUT_MAP(BatchToSpaceD) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(BatchToSpaceD) = {
+  {"block_size", ATTR_DESC(block_size, AnyTraits<int64_t>())},
+  {"crops", ATTR_DESC(crops, AnyTraits<std::vector<std::vector<int64_t>>>(), AnyTraits<std::vector<int64_t>>())}};
+OUTPUT_MAP(BatchToSpaceD) = {{0, OUTPUT_DESC(y)}};
+REG_ADPT_DESC(BatchToSpaceD, kNameBatchToSpace, ADPT_DESC(BatchToSpaceD))
+} // namespace mindspore::transform
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/transformation_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/transformation_ops_declare.h
new file mode 100644
index 00000000000..ea2571c4258
--- /dev/null
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/transformation_ops_declare.h
@@ -0,0 +1,50 @@
+/**
+ * Copyright 2019 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_TRANSFORMATION_OPS_DECLARE_H_
+#define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_TRANSFORMATION_OPS_DECLARE_H_
+
+#include <string>
+#include <unordered_map>
+#include "transform/graph_ir/op_declare/op_declare_macro.h"
+#include "ops/transformation_ops.h"
+
+namespace mindspore::transform {
+DECLARE_OP_ADAPTER(ExtractImagePatches)
+DECLARE_OP_USE_OUTPUT(ExtractImagePatches)
+
+DECLARE_OP_ADAPTER(Unpack)
+DECLARE_OP_USE_DYN_OUTPUT(Unpack)
+
+DECLARE_OP_ADAPTER(TransposeD)
+DECLARE_OP_USE_INPUT_ATTR(TransposeD)
+
+DECLARE_OP_ADAPTER(Flatten)
+DECLARE_OP_USE_OUTPUT(Flatten)
+
+DECLARE_OP_ADAPTER(SpaceToDepth)
+DECLARE_OP_USE_OUTPUT(SpaceToDepth)
+
+DECLARE_OP_ADAPTER(DepthToSpace)
+DECLARE_OP_USE_OUTPUT(DepthToSpace)
+
+DECLARE_OP_ADAPTER(SpaceToBatchD)
+DECLARE_OP_USE_OUTPUT(SpaceToBatchD)
+
+DECLARE_OP_ADAPTER(BatchToSpaceD)
+DECLARE_OP_USE_OUTPUT(BatchToSpaceD)
+} // namespace mindspore::transform
+#endif // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_TRANSFORMATION_OPS_DECLARE_H_
diff --git a/mindspore/ccsrc/transform/graph_ir/util.cc b/mindspore/ccsrc/transform/graph_ir/util.cc
index 6ae665d69ff..4c653b3c80d 100644
--- a/mindspore/ccsrc/transform/graph_ir/util.cc
+++ b/mindspore/ccsrc/transform/graph_ir/util.cc
@@ -17,7 +17,6 @@
 #include "transform/graph_ir/util.h"
 #include
-#include
 #include
 #include "securec/include/securec.h"
diff --git a/mindspore/ccsrc/transform/onnx/ir_exporter.cc b/mindspore/ccsrc/transform/onnx/ir_exporter.cc
index f1967a7abf5..337cf2f3d88 100644
--- a/mindspore/ccsrc/transform/onnx/ir_exporter.cc
+++ b/mindspore/ccsrc/transform/onnx/ir_exporter.cc
@@ -14,11 +14,9 @@
 * limitations under the License.
 */
-#include
 #include
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/mindspore/ccsrc/transform/onnx/onnx_exporter.cc b/mindspore/ccsrc/transform/onnx/onnx_exporter.cc
index 041c2e69e8c..629c03849fc 100644
--- a/mindspore/ccsrc/transform/onnx/onnx_exporter.cc
+++ b/mindspore/ccsrc/transform/onnx/onnx_exporter.cc
@@ -14,13 +14,10 @@
 * limitations under the License.
 */
-#include
 #include
 #include
 #include
-#include
 #include
-#include
 #include
 #include "debug/anf_ir_utils.h"
diff --git a/mindspore/ccsrc/transform/op_declare.cc b/mindspore/ccsrc/transform/op_declare.cc
deleted file mode 100644
index fc0ac095abd..00000000000
--- a/mindspore/ccsrc/transform/op_declare.cc
+++ /dev/null
@@ -1,1295 +0,0 @@
-/**
- * Copyright 2019 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "transform/op_declare.h"
-
-#include
-
-#include "transform/all_ops.h"
-#include "utils/utils.h"
-
-namespace mindspore {
-namespace transform {
-#define INPUT_MAP(T) \
-  template <>        \
-  const std::unordered_map<int, InputDesc> OpAdapter<T>::input_map_
-#define EMPTY_INPUT_MAP std::unordered_map<int, InputDesc>()
-#define INPUT_DESC(name) \
-  { \
-#name, \
-      [](const OperatorPtr op, const OperatorPtr input) { \
-        auto p = std::static_pointer_cast<OpType>(op); \
-        (void)p->set_input_##name(*input); \
-      }, \
-      [](const OperatorPtr op, const OutHandler& handle) { \
-        auto p = std::static_pointer_cast<OpType>(op); \
-        (void)p->set_input_##name(*(handle.op), handle.out); \
-      }, \
-      [](const OperatorPtr op, const GeTensorDesc desc) { \
-        auto p = std::static_pointer_cast<OpType>(op); \
-        (void)p->update_input_desc_##name(desc); \
-      } \
-  }
-
-#define DYN_INPUT_MAP(T) \
-  template <>            \
-  const std::unordered_map<int, DynInputDesc> OpAdapter<T>::dyn_input_map_
-#define DYN_INPUT_DESC(name) \
-  { \
-#name, \
-      [](const OperatorPtr op, unsigned int num) { \
-        auto p = std::static_pointer_cast<OpType>(op); \
-        (void)p->create_dynamic_input_##name(num); \
-      }, \
-      [](const OperatorPtr op, unsigned int index, const OperatorPtr input) { \
-        auto p = std::static_pointer_cast<OpType>(op); \
-        (void)p->set_dynamic_input_##name(index, *input); \
-      }, \
-      [](const OperatorPtr op, unsigned int index, const OutHandler& handle) { \
-        auto p = std::static_pointer_cast<OpType>(op); \
-        (void)p->set_dynamic_input_##name(index, *(handle.op), handle.out); \
-      } \
-  }
-
-#define ATTR_MAP(T) \
-  template <>       \
-  const std::unordered_map<std::string, AttrDesc> OpAdapter<T>::attr_map_
-#define EMPTY_ATTR_MAP std::unordered_map<std::string, AttrDesc>()
-#define ATTR_DESC(name, ...) \
-  { \
-#name, \
-      [](const OperatorPtr op, const ValuePtr& value) { \
-        auto p = std::static_pointer_cast<OpType>(op); \
-        (void)p->set_attr_##name(ConvertAny(value, __VA_ARGS__)); \
-      } \
-  }
-
-#define INPUT_ATTR_MAP(T) \
-  template <>             \
-  const std::unordered_map<unsigned int, AttrDesc> OpAdapter<T>::input_attr_map_
-
-#define OUTPUT_MAP(T) \
-  template <>         \
-  const std::unordered_map<int, OutputDesc> OpAdapter<T>::output_map_
-#define OUTPUT_DESC(name) \
-  { \
-#name, \
-      [](const OperatorPtr op, const GeTensorDesc desc) { \
-        auto p = std::static_pointer_cast<OpType>(op); \
-        (void)p->update_output_desc_##name(desc); \
-      } \
-  }
-
-#define DYN_OUTPUT_MAP(T) \
-  template <>             \
-  const std::unordered_map<int, DynOutputDesc> OpAdapter<T>::dyn_output_map_
-
-#define DYN_OUTPUT_DESC(name) \
-  { \
-#name, \
-      [](const OperatorPtr op, unsigned int num) { \
-        auto p = std::static_pointer_cast<OpType>(op); \
-        (void)p->create_dynamic_output_##name(num); \
-      } \
-  }
-
-template <>
-std::unordered_map<std::string, std::unordered_map<int, std::string>> OpAdapter<CusOperatorPtr>::cus_input_map_{};
-template <>
-std::unordered_map<std::string, std::unordered_map<int, std::string>> OpAdapter<CusOperatorPtr>::cus_output_map_{};
-
-// --------------specialization for each operator----------
-// const
-INPUT_MAP(Const) = EMPTY_INPUT_MAP;
-ATTR_MAP(Const) = {{"value", ATTR_DESC(value, AnyTraits<AnyValue>())}};
-OUTPUT_MAP(Const) = {{0, OUTPUT_DESC(y)}};
-
-// Assign
-INPUT_MAP(Assign) = {{1, INPUT_DESC(ref)}, {2, INPUT_DESC(value)}};
-ATTR_MAP(Assign) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(Assign) = {{0, OUTPUT_DESC(ref)}};
-
-// Constant
-INPUT_MAP(Constant) = EMPTY_INPUT_MAP;
-ATTR_MAP(Constant) = {{"value", ATTR_DESC(value, AnyTraits<AnyValue>())}};
-OUTPUT_MAP(Constant) = {{0, OUTPUT_DESC(y)}};
-
-// ApplyMomentumD
-INPUT_MAP(ApplyMomentumD) = {
-  {1, INPUT_DESC(var)}, {2, INPUT_DESC(accum)}, {3, INPUT_DESC(lr)}, {4, INPUT_DESC(grad)}, {5, INPUT_DESC(momentum)}};
-ATTR_MAP(ApplyMomentumD) = {{"use_nesterov", ATTR_DESC(use_nesterov, AnyTraits<bool>())},
-                            {"use_locking", ATTR_DESC(use_locking, AnyTraits<bool>())}};
-OUTPUT_MAP(ApplyMomentumD) = {{0, OUTPUT_DESC(var)}, {1, OUTPUT_DESC(accum)}};
-
-// ScalarSummary
-INPUT_MAP(Summary) = {{2, INPUT_DESC(x)}};
-ATTR_MAP(Summary) = EMPTY_ATTR_MAP;
-
-// Data
-INPUT_MAP(Data) = EMPTY_INPUT_MAP;
-ATTR_MAP(Data) = EMPTY_ATTR_MAP;
-
-// BatchNorm
-INPUT_MAP(BatchNorm) = {{1, INPUT_DESC(x)},
-                        {2, INPUT_DESC(scale)},
-                        {3, INPUT_DESC(offset)},
-                        {4, INPUT_DESC(mean)},
-                        {5, INPUT_DESC(variance)}};
-ATTR_MAP(BatchNorm) = {{"data_format", ATTR_DESC(data_format, AnyTraits<std::string>())},
-                       {"epsilon", ATTR_DESC(epsilon, AnyTraits<float>())},
-                       {"is_training", ATTR_DESC(is_training, AnyTraits<bool>())}};
-OUTPUT_MAP(BatchNorm) = {{0, OUTPUT_DESC(y)},
-                         {1, OUTPUT_DESC(batch_mean)},
-                         {2, OUTPUT_DESC(batch_variance)},
-                         {3, OUTPUT_DESC(reserve_space_1)},
-                         {4, OUTPUT_DESC(reserve_space_2)}};
-
-// BatchNormGrad
-INPUT_MAP(BatchNormGrad) = {{1, INPUT_DESC(y_backprop)},
-                            {2, INPUT_DESC(x)},
-                            {3, INPUT_DESC(scale)},
-                            {4, INPUT_DESC(reserve_space_1)},
-                            {5, INPUT_DESC(reserve_space_2)}};
-ATTR_MAP(BatchNormGrad) = {{"data_format", ATTR_DESC(data_format, AnyTraits<std::string>())},
-                           {"epsilon", ATTR_DESC(epsilon, AnyTraits<float>())},
-                           {"is_training", ATTR_DESC(is_training, AnyTraits<bool>())}};
-OUTPUT_MAP(BatchNormGrad) = {{0, OUTPUT_DESC(x_backprop)},
-                             {1, OUTPUT_DESC(scale_backprop)},
-                             {2, OUTPUT_DESC(offset_backprop)},
-                             {3, OUTPUT_DESC(reserve_space_4)},
-                             {4, OUTPUT_DESC(reserve_space_5)}};
-
-// Relu
-INPUT_MAP(Relu) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(Relu) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(Relu) = {{0, OUTPUT_DESC(y)}};
-
-// Elu
-INPUT_MAP(Elu) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(Elu) = {{"alpha", ATTR_DESC(alpha, AnyTraits<float>())}};
-OUTPUT_MAP(Elu) = {{0, OUTPUT_DESC(y)}};
-
-// EluGrad
-INPUT_MAP(EluGrad) = {{1, INPUT_DESC(grads)}, {2, INPUT_DESC(activations)}};
-ATTR_MAP(EluGrad) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(EluGrad) = {{0, OUTPUT_DESC(y)}};
-
-// PRelu
-INPUT_MAP(PRelu) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(weight)}};
-ATTR_MAP(PRelu) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(PRelu) = {{0, OUTPUT_DESC(y)}};
-
-// PReluGrad
-INPUT_MAP(PReluGrad) = {{1, INPUT_DESC(grads)}, {2, INPUT_DESC(features)}, {3, INPUT_DESC(weights)}};
-ATTR_MAP(PReluGrad) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(PReluGrad) = {{0, OUTPUT_DESC(dx)}, {1, OUTPUT_DESC(da)}};
-
-// Sigmoid
-INPUT_MAP(Sigmoid) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(Sigmoid) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(Sigmoid) = {{0, OUTPUT_DESC(y)}};
-
-// SigmoidGrad
-INPUT_MAP(SigmoidGrad) = {{1, INPUT_DESC(y)}, {2, INPUT_DESC(dy)}};
-ATTR_MAP(SigmoidGrad) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(SigmoidGrad) = {{0, OUTPUT_DESC(z)}};
-
-// L2NormalizeGrad
-INPUT_MAP(L2NormalizeGrad) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(y)}, {3, INPUT_DESC(dy)}};
-ATTR_MAP(L2NormalizeGrad) = {
-  {"axis", ATTR_DESC(dim, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
-  {"epsilon", ATTR_DESC(eps, AnyTraits<float>())}};
-OUTPUT_MAP(L2NormalizeGrad) = {{0, OUTPUT_DESC(dx)}};
-
-// LarsV2Update
-INPUT_MAP(LarsV2Update) = {{1, INPUT_DESC(w)},
-                           {2, INPUT_DESC(g)},
-                           {3, INPUT_DESC(w_square_sum)},
-                           {4, INPUT_DESC(g_square_sum)},
-                           {5, INPUT_DESC(weight_decay)},
-                           {6, INPUT_DESC(learning_rate)}};
-ATTR_MAP(LarsV2Update) = {{"epsilon", ATTR_DESC(epsilon, AnyTraits<float>())},
-                          {"hyperpara", ATTR_DESC(hyperpara, AnyTraits<float>())},
-                          {"use_clip", ATTR_DESC(use_clip, AnyTraits<bool>())}};
-OUTPUT_MAP(LarsV2Update) = {{0, OUTPUT_DESC(g_new)}};
-
-// L2Normalize
-INPUT_MAP(L2Normalize) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(L2Normalize) = {
-  {"axis", ATTR_DESC(axis, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
-  {"epsilon", ATTR_DESC(eps, AnyTraits<float>())}};
-OUTPUT_MAP(L2Normalize) = {{0, OUTPUT_DESC(y)}};
-
-// CumsumD
-INPUT_MAP(CumsumD) = {{1, INPUT_DESC(x)}};
-INPUT_ATTR_MAP(CumsumD) = {{2, ATTR_DESC(axis, AnyTraits<int64_t>())}};
-ATTR_MAP(CumsumD) = {{"exclusive", ATTR_DESC(exclusive, AnyTraits<bool>())},
-                     {"reverse", ATTR_DESC(reverse, AnyTraits<bool>())}};
-OUTPUT_MAP(CumsumD) = {{0, OUTPUT_DESC(y)}};
-
-// SoftmaxV2
-INPUT_MAP(SoftmaxV2) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(SoftmaxV2) = {
-  {"axis", ATTR_DESC(axes, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
-};
-OUTPUT_MAP(SoftmaxV2) = {{0, OUTPUT_DESC(y)}};
-
-// SoftmaxGrad
-INPUT_MAP(SoftmaxGrad) = {{1, INPUT_DESC(softmax)}, {2, INPUT_DESC(grad_softmax)}};
-OUTPUT_MAP(SoftmaxGrad) = {{0, OUTPUT_DESC(grad_x)}};
-ATTR_MAP(SoftmaxGrad) = EMPTY_ATTR_MAP;
-
-// Flatten
-INPUT_MAP(Flatten) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(Flatten) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(Flatten) = {{0, OUTPUT_DESC(y)}};
-
-// add
-INPUT_MAP(Add) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}};
-ATTR_MAP(Add) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(Add) = {{0, OUTPUT_DESC(y)}};
-
-// GatherV2
-INPUT_MAP(GatherV2) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(indices)}, {3, INPUT_DESC(axis)}};
-ATTR_MAP(GatherV2) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(GatherV2) = {{0, OUTPUT_DESC(y)}};
-
-// ReduceSumD
-INPUT_MAP(ReduceSumD) = {{1, INPUT_DESC(x)}};
-INPUT_ATTR_MAP(ReduceSumD) = {
-  {2, ATTR_DESC(axes, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())}};
-ATTR_MAP(ReduceSumD) = {{"keep_dims", ATTR_DESC(keep_dims, AnyTraits<bool>())}};
-OUTPUT_MAP(ReduceSumD) = {{0, OUTPUT_DESC(y)}};
-
-// ReduceProdD
-INPUT_MAP(ReduceProdD) = {{1, INPUT_DESC(x)}};
-INPUT_ATTR_MAP(ReduceProdD) = {
-  {2, ATTR_DESC(axes, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())}};
-ATTR_MAP(ReduceProdD) = {{"keep_dims", ATTR_DESC(keep_dims, AnyTraits<bool>())}};
-OUTPUT_MAP(ReduceProdD) = {{0, OUTPUT_DESC(y)}};
-
-// CumprodD
-INPUT_MAP(CumprodD) = {{1, INPUT_DESC(x)}};
-INPUT_ATTR_MAP(CumprodD) = {{2, ATTR_DESC(axis, AnyTraits<int64_t>())}};
-ATTR_MAP(CumprodD) = {{"exclusive", ATTR_DESC(exclusive, AnyTraits<bool>())},
-                      {"reverse", ATTR_DESC(reverse, AnyTraits<bool>())}};
-OUTPUT_MAP(CumprodD) = {{0, OUTPUT_DESC(y)}};
-
-// SoftmaxCrossEntropyWithLogits
-INPUT_MAP(SoftmaxCrossEntropyWithLogits) = {{1, INPUT_DESC(features)}, {2, INPUT_DESC(labels)}};
-ATTR_MAP(SoftmaxCrossEntropyWithLogits) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(SoftmaxCrossEntropyWithLogits) = {{0, OUTPUT_DESC(loss)}, {1, OUTPUT_DESC(backprop)}};
-
-INPUT_MAP(SliceD) = {{1, INPUT_DESC(x)}};
-INPUT_ATTR_MAP(SliceD) = {{2, ATTR_DESC(offsets, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
-                          {3, ATTR_DESC(size, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())}};
-ATTR_MAP(SliceD) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(SliceD) = {{0, OUTPUT_DESC(y)}};
-
-// MaxPool
-INPUT_MAP(MaxPool) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(MaxPool) = {{"ksize", ATTR_DESC(ksize, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
-                     {"strides", ATTR_DESC(strides, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
-                     {"padding", ATTR_DESC(padding, AnyTraits<std::string>())},
-                     {"data_format", ATTR_DESC(data_format, AnyTraits<std::string>())}};
-OUTPUT_MAP(MaxPool) = {{0, OUTPUT_DESC(y)}};
-
-// AvgPool
-INPUT_MAP(AvgPool) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(AvgPool) = {{"ksize", ATTR_DESC(ksize, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
-                     {"strides", ATTR_DESC(strides, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
-                     {"padding", ATTR_DESC(padding, AnyTraits<std::string>())},
-                     {"data_format", ATTR_DESC(data_format, AnyTraits<std::string>())}};
-OUTPUT_MAP(AvgPool) = {{0, OUTPUT_DESC(y)}};
-
-// GreaterEqual
-INPUT_MAP(GreaterEqual) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}};
-ATTR_MAP(GreaterEqual) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(GreaterEqual) = {{0, OUTPUT_DESC(y)}};
-
-// AssignAdd
-INPUT_MAP(AssignAdd) = {{1, INPUT_DESC(ref)}, {2, INPUT_DESC(value)}};
-ATTR_MAP(AssignAdd) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(AssignAdd) = {{0, OUTPUT_DESC(ref)}};
-
-// AssignSub
-INPUT_MAP(AssignSub) = {{1, INPUT_DESC(var)}, {2, INPUT_DESC(value)}};
-ATTR_MAP(AssignSub) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(AssignSub) = {{0, OUTPUT_DESC(var)}};
-
-// Cos
-INPUT_MAP(Cos) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(Cos) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(Cos) = {{0, OUTPUT_DESC(y)}};
-
-// Acos
-INPUT_MAP(Acos) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(Acos) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(Acos) = {{0, OUTPUT_DESC(y)}};
-
-// AcosGrad
-INPUT_MAP(AcosGrad) = {{1, INPUT_DESC(y)}, {2, INPUT_DESC(dy)}};
-ATTR_MAP(AcosGrad) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(AcosGrad) = {{0, OUTPUT_DESC(z)}};
-
-// Acosh
-INPUT_MAP(Acosh) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(Acosh) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(Acosh) = {{0, OUTPUT_DESC(y)}};
-
-// AcoshGrad
-INPUT_MAP(AcoshGrad) = {{1, INPUT_DESC(y)}, {2, INPUT_DESC(dy)}};
-ATTR_MAP(AcoshGrad) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(AcoshGrad) = {{0, OUTPUT_DESC(z)}};
-
-// Floor
-INPUT_MAP(Floor) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(Floor) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(Floor) = {{0, OUTPUT_DESC(y)}};
-
-// FloorDiv
-INPUT_MAP(FloorDiv) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}};
-ATTR_MAP(FloorDiv) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(FloorDiv) = {{0, OUTPUT_DESC(y)}};
-
-// FloorMod
-INPUT_MAP(FloorMod) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}};
-ATTR_MAP(FloorMod) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(FloorMod) = {{0, OUTPUT_DESC(y)}};
-
-// Sin
-INPUT_MAP(Sin) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(Sin) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(Sin) = {{0, OUTPUT_DESC(y)}};
-
-// Exp
-INPUT_MAP(Exp) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(Exp) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(Exp) = {{0, OUTPUT_DESC(y)}};
-
-// BoundingBoxEncode
-INPUT_MAP(BoundingBoxEncode) = {
-  {1, INPUT_DESC(anchor_box)},
-  {2, INPUT_DESC(ground_truth_box)},
-};
-ATTR_MAP(BoundingBoxEncode) = {
-  {"means", ATTR_DESC(means, AnyTraits<std::vector<float>>(), AnyTraits<float>())},
-  {"stds", ATTR_DESC(stds, AnyTraits<std::vector<float>>(), AnyTraits<float>())},
-};
-OUTPUT_MAP(BoundingBoxEncode) = {{0, OUTPUT_DESC(delats)}};
-
-// BoundingBoxDecode
-INPUT_MAP(BoundingBoxDecode) = {
-  {1, INPUT_DESC(rois)},
-  {2, INPUT_DESC(deltas)},
-};
-ATTR_MAP(BoundingBoxDecode) = {
-  {"means", ATTR_DESC(means, AnyTraits<std::vector<float>>(), AnyTraits<float>())},
-  {"stds", ATTR_DESC(stds, AnyTraits<std::vector<float>>(), AnyTraits<float>())},
-  {"max_shape", ATTR_DESC(max_shape, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
-  {"wh_ratio_clip", ATTR_DESC(wh_ratio_clip, AnyTraits<float>())},
-};
-OUTPUT_MAP(BoundingBoxDecode) = {{0, OUTPUT_DESC(bboxes)}};
-
-// TopK
-INPUT_MAP(TopK) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(k)}};
-ATTR_MAP(TopK) = {{"sorted", ATTR_DESC(sorted, AnyTraits<bool>())}};
-OUTPUT_MAP(TopK) = {{0, OUTPUT_DESC(values)}, {1, OUTPUT_DESC(indices)}};
-
-// TileD
-INPUT_MAP(TileD) = {{1, INPUT_DESC(x)}};
-INPUT_ATTR_MAP(TileD) = {{2, ATTR_DESC(multiples, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())}};
-ATTR_MAP(TileD) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(TileD) = {{0, OUTPUT_DESC(y)}};
-
-// OneHot
-INPUT_MAP(OneHot) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(depth)}, {3, INPUT_DESC(on_value)}, {4, INPUT_DESC(off_value)}};
-ATTR_MAP(OneHot) = {{"axis", ATTR_DESC(axis, AnyTraits<int64_t>())}};
-OUTPUT_MAP(OneHot) = {{0, OUTPUT_DESC(y)}};
-
-// GatherV2D
-INPUT_MAP(GatherV2D) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(indices)}};
-INPUT_ATTR_MAP(GatherV2D) = {{3, ATTR_DESC(axis, AnyTraits<int64_t>())}};
-ATTR_MAP(GatherV2D) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(GatherV2D) = {{0, OUTPUT_DESC(y)}};
-
-// Reshape
-INPUT_MAP(Reshape) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(shape)}};
-ATTR_MAP(Reshape) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(Reshape) = {{0, OUTPUT_DESC(y)}};
-
-// TransShape
-INPUT_MAP(TransShape) = {{1, INPUT_DESC(x)}};
-INPUT_ATTR_MAP(TransShape) = {{2, ATTR_DESC(outShape, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())}};
-ATTR_MAP(TransShape) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(TransShape) = {{0, OUTPUT_DESC(y)}};
-
-// BiasAdd
-INPUT_MAP(BiasAdd) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(bias)}};
-ATTR_MAP(BiasAdd) = {{"data_format", ATTR_DESC(data_format, AnyTraits<std::string>())}};
-OUTPUT_MAP(BiasAdd) = {{0, OUTPUT_DESC(y)}};
-
-// Iou
-INPUT_MAP(Iou) = {{1, INPUT_DESC(bboxes)}, {2, INPUT_DESC(gtboxes)}};
-ATTR_MAP(Iou) = {{"mode", ATTR_DESC(mode, AnyTraits<std::string>())}};
-OUTPUT_MAP(Iou) = {{0, OUTPUT_DESC(overlap)}};
-
-// ResizeNearestNeighborV2D
-INPUT_MAP(ResizeNearestNeighborV2D) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(ResizeNearestNeighborV2D) = {
-  {"size", ATTR_DESC(size, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
-  {"align_corners", ATTR_DESC(align_corners, AnyTraits<bool>())}};
-OUTPUT_MAP(ResizeNearestNeighborV2D) = {{0, OUTPUT_DESC(y)}};
-
-// ResizeNearestNeighborV2Grad
-INPUT_MAP(ResizeNearestNeighborV2Grad) = {{1, INPUT_DESC(grads)}, {2, INPUT_DESC(size)}};
-ATTR_MAP(ResizeNearestNeighborV2Grad) = {{"align_corners", ATTR_DESC(align_corners, AnyTraits<bool>())}};
-OUTPUT_MAP(ResizeNearestNeighborV2Grad) = {{0, OUTPUT_DESC(y)}};
-
-// ApplyAdam
-INPUT_MAP(ApplyAdam) = {{1, INPUT_DESC(var)},         {2, INPUT_DESC(m)},           {3, INPUT_DESC(v)},
-                        {4, INPUT_DESC(beta1_power)}, {5, INPUT_DESC(beta2_power)}, {6, INPUT_DESC(lr)},
-                        {7, INPUT_DESC(beta1)},       {8, INPUT_DESC(beta2)},       {9, INPUT_DESC(epsilon)},
-                        {10, INPUT_DESC(grad)}};
-ATTR_MAP(ApplyAdam) = {{"use_locking", ATTR_DESC(use_locking, AnyTraits<bool>())},
-                       {"use_nesterov", ATTR_DESC(use_nesterov, AnyTraits<bool>())}};
-OUTPUT_MAP(ApplyAdam) = {{0, OUTPUT_DESC(var)}};
-
-// ApplyAdamD
-INPUT_MAP(ApplyAdamD) = {{1, INPUT_DESC(var)},         {2, INPUT_DESC(m)},           {3, INPUT_DESC(v)},
-                         {4, INPUT_DESC(beta1_power)}, {5, INPUT_DESC(beta2_power)}, {6, INPUT_DESC(lr)},
-                         {7, INPUT_DESC(beta1)},       {8, INPUT_DESC(beta2)},       {9, INPUT_DESC(epsilon)},
-                         {10, INPUT_DESC(grad)}};
-ATTR_MAP(ApplyAdamD) = {{"use_locking", ATTR_DESC(use_locking, AnyTraits<bool>())},
-                        {"use_nesterov", ATTR_DESC(use_nesterov, AnyTraits<bool>())}};
-OUTPUT_MAP(ApplyAdamD) = {{0, OUTPUT_DESC(var)}, {1, OUTPUT_DESC(m)}, {2, OUTPUT_DESC(v)}};
-
-// Relu6
-INPUT_MAP(Relu6) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(Relu6) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(Relu6) = {{0, OUTPUT_DESC(y)}};
-
-// Relu6Grad
-INPUT_MAP(Relu6Grad) = {{1, INPUT_DESC(gradients)}, {2, INPUT_DESC(features)}};
-ATTR_MAP(Relu6Grad) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(Relu6Grad) = {{0, OUTPUT_DESC(backprops)}};
-
-// ResizeBilinearV2Grad
-INPUT_MAP(ResizeBilinearV2Grad) = {{1, INPUT_DESC(grads)}, {2, INPUT_DESC(original_image)}};
-ATTR_MAP(ResizeBilinearV2Grad) = {{"align_corners", ATTR_DESC(align_corners, AnyTraits<bool>())}};
-OUTPUT_MAP(ResizeBilinearV2Grad) = {{0, OUTPUT_DESC(y)}};
-
-// ResizeBilinearV2D
-INPUT_MAP(ResizeBilinearV2D) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(ResizeBilinearV2D) = {
-  {"size", ATTR_DESC(size, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
-  {"align_corners", ATTR_DESC(align_corners, AnyTraits<bool>())}};
-OUTPUT_MAP(ResizeBilinearV2D) = {{0, OUTPUT_DESC(y)}};
-
-// ZerosLike
-INPUT_MAP(ZerosLike) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(ZerosLike) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(ZerosLike) = {{0, OUTPUT_DESC(y)}};
-
-// OnesLike
-INPUT_MAP(OnesLike) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(OnesLike) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(OnesLike) = {{0, OUTPUT_DESC(y)}};
-
-// NMSWithMask
-INPUT_MAP(NMSWithMask) = {{1, INPUT_DESC(box_scores)}};
-ATTR_MAP(NMSWithMask) = {{"iou_threshold", ATTR_DESC(iou_threshold, AnyTraits<float>())}};
-OUTPUT_MAP(NMSWithMask) = {
-  {0, OUTPUT_DESC(selected_boxes)}, {1, OUTPUT_DESC(selected_idx)}, {2, OUTPUT_DESC(selected_mask)}};
-
-// Unpack
-INPUT_MAP(Unpack) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(Unpack) = {{"axis", ATTR_DESC(axis, AnyTraits<int64_t>())}, {"num", ATTR_DESC(num, AnyTraits<int64_t>())}};
-DYN_OUTPUT_MAP(Unpack) = {{0, DYN_OUTPUT_DESC(y)}};
-
-// TensorScatterUpdate
-INPUT_MAP(TensorScatterUpdate) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(indices)}, {3, INPUT_DESC(updates)}};
-ATTR_MAP(TensorScatterUpdate) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(TensorScatterUpdate) = {{0, OUTPUT_DESC(y)}};
-
-// ScatterUpdate
-INPUT_MAP(ScatterUpdate) = {{1, INPUT_DESC(var)}, {2, INPUT_DESC(indices)}, {3, INPUT_DESC(updates)}};
-ATTR_MAP(ScatterUpdate) = {{"use_locking", ATTR_DESC(use_locking, AnyTraits<bool>())}};
-OUTPUT_MAP(ScatterUpdate) = {{0, OUTPUT_DESC(var)}};
-
-// ScatterNdUpdate
-INPUT_MAP(ScatterNdUpdate) = {{1, INPUT_DESC(var)}, {2, INPUT_DESC(indices)}, {3, INPUT_DESC(updates)}};
-ATTR_MAP(ScatterNdUpdate) = {{"use_locking", ATTR_DESC(use_locking, AnyTraits<bool>())}};
-OUTPUT_MAP(ScatterNdUpdate) = {{0, OUTPUT_DESC(var)}};
-
-// ScatterMax
-INPUT_MAP(ScatterMax) = {{1, INPUT_DESC(var)}, {2, INPUT_DESC(indices)}, {3, INPUT_DESC(updates)}};
-ATTR_MAP(ScatterMax) = {{"use_locking", ATTR_DESC(use_locking, AnyTraits<bool>())}};
-OUTPUT_MAP(ScatterMax) = {{0, OUTPUT_DESC(var)}};
-
-// CheckValid
-INPUT_MAP(CheckValid) = {{1, INPUT_DESC(bbox_tensor)}, {2, INPUT_DESC(img_metas)}};
-ATTR_MAP(CheckValid) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(CheckValid) = {{0, OUTPUT_DESC(valid_tensor)}};
-
-// SmoothL1Loss
-INPUT_MAP(SmoothL1Loss) = {{1, INPUT_DESC(predict)}, {2, INPUT_DESC(label)}};
-ATTR_MAP(SmoothL1Loss) = {{"sigma", ATTR_DESC(sigma, AnyTraits<float>())}};
-OUTPUT_MAP(SmoothL1Loss) = {{0, OUTPUT_DESC(loss)}};
-
-// SmoothL1LossGrad
-INPUT_MAP(SmoothL1LossGrad) = {{1, INPUT_DESC(predict)}, {2, INPUT_DESC(label)}, {3, INPUT_DESC(dout)}};
-ATTR_MAP(SmoothL1LossGrad) = {{"sigma", ATTR_DESC(sigma, AnyTraits<float>())}};
-OUTPUT_MAP(SmoothL1LossGrad) = {{0, OUTPUT_DESC(gradient)}};
-
-// SigmoidCrossEntropyWithLogits
-INPUT_MAP(SigmoidCrossEntropyWithLogits) = {{1, INPUT_DESC(predict)}, {2, INPUT_DESC(target)}};
-ATTR_MAP(SigmoidCrossEntropyWithLogits) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(SigmoidCrossEntropyWithLogits) = {{0, OUTPUT_DESC(loss)}};
-
-// SigmoidCrossEntropyWithLogitsGrad
-INPUT_MAP(SigmoidCrossEntropyWithLogitsGrad) = {
-  {1, INPUT_DESC(predict)}, {2, INPUT_DESC(target)}, {3, INPUT_DESC(dout)}};
-ATTR_MAP(SigmoidCrossEntropyWithLogitsGrad) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(SigmoidCrossEntropyWithLogitsGrad) = {{0, OUTPUT_DESC(gradient)}};
-
-// ScatterNdD
-INPUT_MAP(ScatterNdD) = {{1, INPUT_DESC(indices)}, {2, INPUT_DESC(x)}};
-INPUT_ATTR_MAP(ScatterNdD) = {
-  {3, ATTR_DESC(shape, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())}};
-ATTR_MAP(ScatterNdD) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(ScatterNdD) = {{0, OUTPUT_DESC(y)}};
-
-// PadD
-INPUT_MAP(PadD) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(PadD) = {{"paddings", ATTR_DESC(paddings, AnyTraits<std::vector<std::vector<int64_t>>>())}};
-OUTPUT_MAP(PadD) = {{0, OUTPUT_DESC(y)}};
-
-// MirrorPad
-INPUT_MAP(MirrorPad) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(paddings)}};
-ATTR_MAP(MirrorPad) = {{"mode", ATTR_DESC(mode, AnyTraits<std::string>())}};
-OUTPUT_MAP(MirrorPad) = {{0, OUTPUT_DESC(y)}};
-
-// MirrorPadGrad
-INPUT_MAP(MirrorPadGrad) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(paddings)}};
-ATTR_MAP(MirrorPadGrad) = {{"mode", ATTR_DESC(mode, AnyTraits<std::string>())}};
-OUTPUT_MAP(MirrorPadGrad) = {{0, OUTPUT_DESC(y)}};
-
-// GatherNd
-INPUT_MAP(GatherNd) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(indices)}};
-ATTR_MAP(GatherNd) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(GatherNd) = {{0, OUTPUT_DESC(y)}};
-
-// ROIAlign
-INPUT_MAP(ROIAlign) = {{1, INPUT_DESC(features)}, {2, INPUT_DESC(rois)}};
-OUTPUT_MAP(ROIAlign) = {{0, OUTPUT_DESC(y)}};
-ATTR_MAP(ROIAlign) = {{"pooled_height", ATTR_DESC(pooled_height, AnyTraits<int64_t>())},
-                      {"pooled_width", ATTR_DESC(pooled_width, AnyTraits<int64_t>())},
-                      {"spatial_scale", ATTR_DESC(spatial_scale, AnyTraits<float>())},
-                      {"sample_num", ATTR_DESC(sample_num, AnyTraits<int64_t>())}};
-
-// ROIAlignGrad
-INPUT_MAP(ROIAlignGrad) = {{1, INPUT_DESC(ydiff)}, {2, INPUT_DESC(rois)}};
-OUTPUT_MAP(ROIAlignGrad) = {{0, OUTPUT_DESC(xdiff)}};
-ATTR_MAP(ROIAlignGrad) = {
-  {"xdiff_shape", ATTR_DESC(xdiff_shape, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
-  {"pooled_height", ATTR_DESC(pooled_height, AnyTraits<int64_t>())},
-  {"pooled_width", ATTR_DESC(pooled_width, AnyTraits<int64_t>())},
-  {"spatial_scale", ATTR_DESC(spatial_scale, AnyTraits<float>())},
-  {"sample_num", ATTR_DESC(sample_num, AnyTraits<int64_t>())}};
-
-// ArgMaxD
-INPUT_MAP(ArgMaxD) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(ArgMaxD) = {{"axis", ATTR_DESC(dimension, AnyTraits<int64_t>())},
-                     {"output_type", ATTR_DESC(dtype, AnyTraits<GEType>())}};
-OUTPUT_MAP(ArgMaxD) = {{0, OUTPUT_DESC(y)}};
-
-// ArgMinD
-INPUT_MAP(ArgMinD) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(ArgMinD) = {{"axis", ATTR_DESC(dimension, AnyTraits<int64_t>())},
-                     {"output_type", ATTR_DESC(dtype, AnyTraits<GEType>())}};
-OUTPUT_MAP(ArgMinD) = {{0, OUTPUT_DESC(y)}};
-
-// ArgMaxWithValue
-INPUT_MAP(ArgMaxWithValue) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(ArgMaxWithValue) = {{"axis", ATTR_DESC(dimension, AnyTraits<int64_t>())},
-                             {"keep_dims", ATTR_DESC(keep_dims, AnyTraits<bool>())}};
-OUTPUT_MAP(ArgMaxWithValue) = {{0, OUTPUT_DESC(indice)}, {1, OUTPUT_DESC(values)}};
-
-// ArgMinWithValue
-INPUT_MAP(ArgMinWithValue) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(ArgMinWithValue) = {{"axis", ATTR_DESC(dimension, AnyTraits<int64_t>())},
-                             {"keep_dims", ATTR_DESC(keep_dims, AnyTraits<bool>())}};
-OUTPUT_MAP(ArgMinWithValue) = {{0, OUTPUT_DESC(indice)}, {1, OUTPUT_DESC(values)}};
-
-// ReduceAllD
-INPUT_MAP(ReduceAllD) = {{1, INPUT_DESC(x)}};
-INPUT_ATTR_MAP(ReduceAllD) = {
-  {2, ATTR_DESC(axes, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())}};
-ATTR_MAP(ReduceAllD) = {{"keep_dims", ATTR_DESC(keep_dims, AnyTraits<bool>())}};
-OUTPUT_MAP(ReduceAllD) = {{0, OUTPUT_DESC(y)}};
-
-// ReduceMeanD
-INPUT_MAP(ReduceMeanD) = {{1, INPUT_DESC(x)}};
-INPUT_ATTR_MAP(ReduceMeanD) = {
-  {2, ATTR_DESC(axes, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())}};
-ATTR_MAP(ReduceMeanD) = {{"keep_dims", ATTR_DESC(keep_dims, AnyTraits<bool>())}};
-OUTPUT_MAP(ReduceMeanD) = {{0, OUTPUT_DESC(y)}};
-
-// HCOMAllreduce
-INPUT_MAP(HcomAllReduce) = {{1, INPUT_DESC(x)}};
-OUTPUT_MAP(HcomAllReduce) = {{0, OUTPUT_DESC(y)}};
-ATTR_MAP(HcomAllReduce) = {{"op", ATTR_DESC(reduction, AnyTraits<std::string>())},
-                           {"group", ATTR_DESC(group, AnyTraits<std::string>())},
-                           {"fusion", ATTR_DESC(fusion, AnyTraits<int64_t>())}};
-
-// HCOMBraodcast
-INPUT_MAP(HcomBroadcast) = EMPTY_INPUT_MAP;
-DYN_INPUT_MAP(HcomBroadcast) = {{1, DYN_INPUT_DESC(x)}};
-DYN_OUTPUT_MAP(HcomBroadcast) = {{0, DYN_OUTPUT_DESC(y)}};
-ATTR_MAP(HcomBroadcast) = {{"root_rank", ATTR_DESC(root_rank, AnyTraits<int64_t>())},
-                           {"group", ATTR_DESC(group, AnyTraits<std::string>())}};
-
-// HCOMAllreduce
-INPUT_MAP(HcomAllGather) = {{1, INPUT_DESC(x)}};
-OUTPUT_MAP(HcomAllGather) = {{0, OUTPUT_DESC(y)}};
-ATTR_MAP(HcomAllGather) = {{"group", ATTR_DESC(group, AnyTraits<std::string>())},
-                           {"rank_size", ATTR_DESC(rank_size, AnyTraits<int64_t>())}};
-
-// HCOMReduceScatter
-INPUT_MAP(HcomReduceScatter) = {{1, INPUT_DESC(x)}};
-OUTPUT_MAP(HcomReduceScatter) = {{0, OUTPUT_DESC(y)}};
-ATTR_MAP(HcomReduceScatter) = {{"group", ATTR_DESC(group, AnyTraits<std::string>())},
-                               {"op", ATTR_DESC(reduction, AnyTraits<std::string>())},
-                               {"rank_size", ATTR_DESC(rank_size, AnyTraits<int64_t>())}};
-
-// Variable
-INPUT_MAP(Variable) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(Variable) = EMPTY_ATTR_MAP;
-
-// ReluGrad
-INPUT_MAP(ReluGrad) = {{1, INPUT_DESC(gradients)}, {2, INPUT_DESC(features)}};
-ATTR_MAP(ReluGrad) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(ReluGrad) = {{0, OUTPUT_DESC(backprops)}};
-
-// BiasAddGrad
-INPUT_MAP(BiasAddGrad) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(BiasAddGrad) = {{"data_format", ATTR_DESC(data_format, AnyTraits<std::string>())}};
-OUTPUT_MAP(BiasAddGrad) = {{0, OUTPUT_DESC(y)}};
-
-// MaxPoolGrad
-INPUT_MAP(MaxPoolGrad) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}, {3, INPUT_DESC(grad)}};
-ATTR_MAP(MaxPoolGrad) = {{"ksize", ATTR_DESC(ksize, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
-                         {"strides", ATTR_DESC(strides, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
-                         {"padding", ATTR_DESC(padding, AnyTraits<std::string>())},
-                         {"data_format", ATTR_DESC(data_format, AnyTraits<std::string>())}};
-OUTPUT_MAP(MaxPoolGrad) = {{0, OUTPUT_DESC(y)}};
-
-// avgpoolgrad
-INPUT_MAP(AvgPoolGrad) = {{1, INPUT_DESC(orig_input_shape)}, {2, INPUT_DESC(input_grad)}};
-ATTR_MAP(AvgPoolGrad) = {{"ksize", ATTR_DESC(ksize, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
-                         {"strides", ATTR_DESC(strides, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
-                         {"padding", ATTR_DESC(padding, AnyTraits<std::string>())},
-                         {"data_format", ATTR_DESC(data_format, AnyTraits<std::string>())}};
-OUTPUT_MAP(AvgPoolGrad) = {{0, OUTPUT_DESC(out_grad)}};
-
-// MaxPoolWithArgmax
-INPUT_MAP(MaxPoolWithArgmax) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(MaxPoolWithArgmax) = {{"ksize", ATTR_DESC(ksize, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
-                               {"strides", ATTR_DESC(strides, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
-                               {"padding", ATTR_DESC(padding, AnyTraits<std::string>())}};
-OUTPUT_MAP(MaxPoolWithArgmax) = {{0, OUTPUT_DESC(y)}, {1, OUTPUT_DESC(argmax)}};
-
-// MaxPoolGradWithArgmax
-INPUT_MAP(MaxPoolGradWithArgmax) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(grad)}, {3, INPUT_DESC(argmax)}};
-ATTR_MAP(MaxPoolGradWithArgmax) = {
-  {"ksize", ATTR_DESC(ksize, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
-  {"strides", ATTR_DESC(strides, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
-  {"padding", ATTR_DESC(padding, AnyTraits<std::string>())}};
-OUTPUT_MAP(MaxPoolGradWithArgmax) = {{0, OUTPUT_DESC(y)}};
-
-// ExtractImagePatches
-INPUT_MAP(ExtractImagePatches) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(ExtractImagePatches) = {
-  {"ksizes", ATTR_DESC(ksizes, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
-  {"strides", ATTR_DESC(strides, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
-  {"rates", ATTR_DESC(rates, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())},
-  {"padding", ATTR_DESC(padding, AnyTraits<std::string>())}};
-OUTPUT_MAP(ExtractImagePatches) = {{0, OUTPUT_DESC(y)}};
-
-// Conv2D
-INPUT_MAP(Conv2D) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(filter)}};
-ATTR_MAP(Conv2D) = {
-  {"stride", ATTR_DESC(strides, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
-  {"pad_list", ATTR_DESC(pads, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
-  {"dilation", ATTR_DESC(dilations, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
-  {"data_format", ATTR_DESC(data_format, AnyTraits<std::string>())},
-  {"group", ATTR_DESC(groups, AnyTraits<int64_t>())},
-};
-OUTPUT_MAP(Conv2D) = {{0, OUTPUT_DESC(y)}};
-
-// Conv2DBackpropInputD
-INPUT_MAP(Conv2DBackpropInputD) = {{1, INPUT_DESC(out_backprop)}, {2, INPUT_DESC(filter)}};
-INPUT_ATTR_MAP(Conv2DBackpropInputD) = {
-  {3, ATTR_DESC(input_size, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())}};
-ATTR_MAP(Conv2DBackpropInputD) = {
-  {"pad_list", ATTR_DESC(pads, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
-  {"stride", ATTR_DESC(strides, "pad", AnyTraits<std::vector<int64_t>>())},
-  {"dilation", ATTR_DESC(dilations, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
-  {"data_format", ATTR_DESC(data_format, AnyTraits<std::string>())},
-  {"group", ATTR_DESC(groups, AnyTraits<int64_t>())},
-};
-OUTPUT_MAP(Conv2DBackpropInputD) = {{0, OUTPUT_DESC(y)}};
-
-// Conv2DBackpropFilterD
-INPUT_MAP(Conv2DBackpropFilterD) = {{1, INPUT_DESC(out_backprop)}, {2, INPUT_DESC(x)}};
-INPUT_ATTR_MAP(Conv2DBackpropFilterD) = {
-  {3, ATTR_DESC(filter_size, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())}};
-ATTR_MAP(Conv2DBackpropFilterD) = {
-  {"pad_list", ATTR_DESC(pads, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
-  {"stride", ATTR_DESC(strides, "pad", AnyTraits<std::vector<int64_t>>())},
-  {"dilation", ATTR_DESC(dilations, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
-  {"data_format", ATTR_DESC(data_format, AnyTraits<std::string>())},
-  {"group", ATTR_DESC(groups, AnyTraits<int64_t>())},
-};
-OUTPUT_MAP(Conv2DBackpropFilterD) = {{0, OUTPUT_DESC(y)}};
-
-// DepthwiseConv2D
-INPUT_MAP(DepthwiseConv2D) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(filter)}};
-ATTR_MAP(DepthwiseConv2D) = {
-  {"stride", ATTR_DESC(strides, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
-  {"pads", ATTR_DESC(pads, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
-  {"dilation", ATTR_DESC(dilations, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
-  {"data_format", ATTR_DESC(data_format, AnyTraits<std::string>())},
-};
-OUTPUT_MAP(DepthwiseConv2D) = {{0, OUTPUT_DESC(y)}};
-
-// DepthwiseConv2DBackpropInputD
-INPUT_MAP(DepthwiseConv2DBackpropInputD) = {{2, INPUT_DESC(filter)}, {3, INPUT_DESC(out_backprop)}};
-INPUT_ATTR_MAP(DepthwiseConv2DBackpropInputD) = {
-  {1, ATTR_DESC(input_size, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())}};
-ATTR_MAP(DepthwiseConv2DBackpropInputD) = {
-  {"stride", ATTR_DESC(strides, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
-  {"pads", ATTR_DESC(pads, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
-  {"dilation", ATTR_DESC(dilations, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
-};
-OUTPUT_MAP(DepthwiseConv2DBackpropInputD) = {{0, OUTPUT_DESC(input_grad)}};
-
-// DepthwiseConv2DBackpropFilterD
-INPUT_MAP(DepthwiseConv2DBackpropFilterD) = {{1, INPUT_DESC(input)}, {3, INPUT_DESC(out_backprop)}};
-INPUT_ATTR_MAP(DepthwiseConv2DBackpropFilterD) = {
-  {2, ATTR_DESC(filter_size, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())}};
-ATTR_MAP(DepthwiseConv2DBackpropFilterD) = {
-  {"stride", ATTR_DESC(strides, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
-  {"pads", ATTR_DESC(pads, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
-  {"dilation", ATTR_DESC(dilations, AnyTraits<std::vector<int64_t>>(), AnyTraits<std::vector<int64_t>>())},
-};
-OUTPUT_MAP(DepthwiseConv2DBackpropFilterD) = {{0, OUTPUT_DESC(filter_grad)}};
-
-// MatMul
-INPUT_MAP(MatMul) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}};
-ATTR_MAP(MatMul) = {{"transpose_a", ATTR_DESC(transpose_x1, AnyTraits<bool>())},
-                    {"transpose_b", ATTR_DESC(transpose_x2, AnyTraits<bool>())}};
-OUTPUT_MAP(MatMul) = {{0, OUTPUT_DESC(y)}};
-
-// Merge
-INPUT_MAP(Merge) = EMPTY_INPUT_MAP;
-DYN_INPUT_MAP(Merge) = {{1, DYN_INPUT_DESC(x)}};
-ATTR_MAP(Merge) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(Merge) = {{0, OUTPUT_DESC(y)}, {1, OUTPUT_DESC(value_index)}};
-
-// Switch
-INPUT_MAP(Switch) = {{1, INPUT_DESC(data)}, {2, INPUT_DESC(pred)}};
-OUTPUT_MAP(Switch) = {{0, OUTPUT_DESC(output_false)}, {1, OUTPUT_DESC(output_true)}};
-ATTR_MAP(Switch) = EMPTY_ATTR_MAP;
-
-// AddN
-INPUT_MAP(AddN) = EMPTY_INPUT_MAP;
-DYN_INPUT_MAP(AddN) = {{1, DYN_INPUT_DESC(x)}};
-ATTR_MAP(AddN) = {{"n", ATTR_DESC(N, AnyTraits<int64_t>())}};
-OUTPUT_MAP(AddN) = {{0, OUTPUT_DESC(y)}};
-
-// Mul
-INPUT_MAP(Mul) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}};
-ATTR_MAP(Mul) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(Mul) = {{0, OUTPUT_DESC(y)}};
-
-// RealDiv
-INPUT_MAP(RealDiv) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}};
-ATTR_MAP(RealDiv) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(RealDiv) = {{0, OUTPUT_DESC(y)}};
-
-// Cast
-INPUT_MAP(Cast) = {{1, INPUT_DESC(x)}};
-INPUT_ATTR_MAP(Cast) = {{2, ATTR_DESC(dst_type, AnyTraits<GEType>())}};
-ATTR_MAP(Cast) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(Cast) = {{0, OUTPUT_DESC(y)}};
-
-// Reciprocal
-INPUT_MAP(Reciprocal) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(Reciprocal) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(Reciprocal) = {{0, OUTPUT_DESC(y)}};
-
-// Sub
-INPUT_MAP(Sub) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}};
-ATTR_MAP(Sub) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(Sub) = {{0, OUTPUT_DESC(y)}};
-
-// SplitD
-INPUT_MAP(SplitD) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(SplitD) = {{"axis", ATTR_DESC(split_dim, AnyTraits<int64_t>())},
-                    {"output_num", ATTR_DESC(num_split, AnyTraits<int64_t>())}};
-DYN_OUTPUT_MAP(SplitD) = {{0, DYN_OUTPUT_DESC(y)}};
-
-// Range
-INPUT_MAP(RangeD) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(RangeD) = {{"start", ATTR_DESC(start, AnyTraits<float>())},
-                    {"limit", ATTR_DESC(limit, AnyTraits<float>())},
-                    {"delta", ATTR_DESC(delta, AnyTraits<float>())}};
-OUTPUT_MAP(RangeD) = {{0, OUTPUT_DESC(y)}};
-
-// Neg
-INPUT_MAP(Neg) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(Neg) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(Neg) = {{0, OUTPUT_DESC(y)}};
-
-// Transpose
-INPUT_MAP(TransposeD) = {{1, INPUT_DESC(x)}};
-INPUT_ATTR_MAP(TransposeD) = {{2, ATTR_DESC(perm, AnyTraits<int64_t>(), AnyTraits<std::vector<int64_t>>())}};
-ATTR_MAP(TransposeD) = EMPTY_ATTR_MAP;
-// Do not set Transpose operator output descriptor
-
-// DropOutGenMask
-INPUT_MAP(DropOutGenMask) = {{1, INPUT_DESC(shape)}, {2, INPUT_DESC(prob)}};
-ATTR_MAP(DropOutGenMask) = {{"Seed0", ATTR_DESC(seed, AnyTraits<int64_t>())},
-                            {"Seed1", ATTR_DESC(seed2, AnyTraits<int64_t>())}};
-OUTPUT_MAP(DropOutGenMask) = {{0, OUTPUT_DESC(y)}};
-
-// Pack
-INPUT_MAP(Pack) = EMPTY_INPUT_MAP;
-DYN_INPUT_MAP(Pack) = {{1, DYN_INPUT_DESC(x)}};
-ATTR_MAP(Pack) = {{"num", ATTR_DESC(N, AnyTraits<int64_t>())}, {"axis", ATTR_DESC(axis, AnyTraits<int64_t>())}};
AnyTraits())}, {"axis", ATTR_DESC(axis, AnyTraits())}}; -OUTPUT_MAP(Pack) = {{0, OUTPUT_DESC(y)}}; - -// ConcatD -INPUT_MAP(ConcatD) = EMPTY_INPUT_MAP; -DYN_INPUT_MAP(ConcatD) = {{1, DYN_INPUT_DESC(x)}}; -ATTR_MAP(ConcatD) = { - {"axis", ATTR_DESC(concat_dim, AnyTraits())}, - {"inputNums", ATTR_DESC(N, AnyTraits())}, -}; -OUTPUT_MAP(ConcatD) = {{0, OUTPUT_DESC(y)}}; - -// Less -INPUT_MAP(Less) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; -ATTR_MAP(Less) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Less) = {{0, OUTPUT_DESC(y)}}; - -// Rsqrt -INPUT_MAP(Rsqrt) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(Rsqrt) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Rsqrt) = {{0, OUTPUT_DESC(y)}}; - -// Sqrt -INPUT_MAP(Sqrt) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(Sqrt) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Sqrt) = {{0, OUTPUT_DESC(y)}}; - -// Square -INPUT_MAP(Square) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(Square) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Square) = {{0, OUTPUT_DESC(y)}}; - -// SquareSumAll -INPUT_MAP(SquareSumAll) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; -ATTR_MAP(SquareSumAll) = EMPTY_ATTR_MAP; -OUTPUT_MAP(SquareSumAll) = {{0, OUTPUT_DESC(y1)}, {1, OUTPUT_DESC(y2)}}; - -// Tanh -INPUT_MAP(Tanh) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(Tanh) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Tanh) = {{0, OUTPUT_DESC(y)}}; - -// TanhGrad -INPUT_MAP(TanhGrad) = {{1, INPUT_DESC(y)}, {2, INPUT_DESC(dy)}}; -ATTR_MAP(TanhGrad) = EMPTY_ATTR_MAP; -OUTPUT_MAP(TanhGrad) = {{0, OUTPUT_DESC(z)}}; - -// ReduceMinD -INPUT_MAP(ReduceMinD) = {{1, INPUT_DESC(x)}}; -INPUT_ATTR_MAP(ReduceMinD) = { - {2, ATTR_DESC(axes, AnyTraits>(), AnyTraits>())}}; -ATTR_MAP(ReduceMinD) = {{"keep_dims", ATTR_DESC(keep_dims, AnyTraits())}}; -OUTPUT_MAP(ReduceMinD) = {{0, OUTPUT_DESC(y)}}; - -// ReduceMaxD -INPUT_MAP(ReduceMaxD) = {{1, INPUT_DESC(x)}}; -INPUT_ATTR_MAP(ReduceMaxD) = { - {2, ATTR_DESC(axes, AnyTraits>(), AnyTraits>())}}; -ATTR_MAP(ReduceMaxD) = {{"keep_dims", ATTR_DESC(keep_dims, AnyTraits())}}; -OUTPUT_MAP(ReduceMaxD) = {{0, OUTPUT_DESC(y)}}; - -// Maximum -INPUT_MAP(Maximum) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; -ATTR_MAP(Maximum) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Maximum) = {{0, OUTPUT_DESC(y)}}; - -// Minimum -INPUT_MAP(Minimum) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; -ATTR_MAP(Minimum) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Minimum) = {{0, OUTPUT_DESC(y)}}; - -// MaximumGrad -INPUT_MAP(MaximumGrad) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}, {3, INPUT_DESC(grads)}}; -ATTR_MAP(MaximumGrad) = {{"grad_x", ATTR_DESC(grad_x, AnyTraits())}, - {"grad_y", ATTR_DESC(grad_y, AnyTraits())}}; -OUTPUT_MAP(MaximumGrad) = {{0, OUTPUT_DESC(y1)}, {1, OUTPUT_DESC(y2)}}; - -// MinimumGrad -INPUT_MAP(MinimumGrad) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}, {3, INPUT_DESC(grads)}}; -ATTR_MAP(MinimumGrad) = {{"grad_x", ATTR_DESC(grad_x, AnyTraits())}, - {"grad_y", ATTR_DESC(grad_y, AnyTraits())}}; -OUTPUT_MAP(MinimumGrad) = {{0, OUTPUT_DESC(y1)}, {1, OUTPUT_DESC(y2)}}; - -// Pow -INPUT_MAP(Pow) = { - {1, INPUT_DESC(x1)}, - {2, INPUT_DESC(x2)}, -}; -ATTR_MAP(Pow) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Pow) = {{0, OUTPUT_DESC(y)}}; - -// Equal -INPUT_MAP(Equal) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; -ATTR_MAP(Equal) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Equal) = {{0, OUTPUT_DESC(y)}}; - -// NotEqual -INPUT_MAP(NotEqual) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; -ATTR_MAP(NotEqual) = EMPTY_ATTR_MAP; -OUTPUT_MAP(NotEqual) = {{0, OUTPUT_DESC(y)}}; - -// Log -INPUT_MAP(Log) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(Log) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Log) = {{0, OUTPUT_DESC(y)}}; - -// LogicalAnd -INPUT_MAP(LogicalAnd) = {{1, 
INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; -ATTR_MAP(LogicalAnd) = EMPTY_ATTR_MAP; -OUTPUT_MAP(LogicalAnd) = {{0, OUTPUT_DESC(y)}}; - -// LogicalOr -INPUT_MAP(LogicalOr) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; -ATTR_MAP(LogicalOr) = EMPTY_ATTR_MAP; -OUTPUT_MAP(LogicalOr) = {{0, OUTPUT_DESC(y)}}; - -// LogicalNot -INPUT_MAP(LogicalNot) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(LogicalNot) = EMPTY_ATTR_MAP; -OUTPUT_MAP(LogicalNot) = {{0, OUTPUT_DESC(y)}}; - -// Greater -INPUT_MAP(Greater) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; -ATTR_MAP(Greater) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Greater) = {{0, OUTPUT_DESC(y)}}; - -// LogSoftmaxGrad -INPUT_MAP(LogSoftmaxGrad) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(grad)}}; -ATTR_MAP(LogSoftmaxGrad) = { - {"axis", ATTR_DESC(axis, AnyTraits>(), AnyTraits>())}}; -OUTPUT_MAP(LogSoftmaxGrad) = {{0, OUTPUT_DESC(y)}}; - -// Select -INPUT_MAP(Select) = {{1, INPUT_DESC(condition)}, {2, INPUT_DESC(x1)}, {3, INPUT_DESC(x2)}}; -ATTR_MAP(Select) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Select) = {{0, OUTPUT_DESC(y)}}; - -// LessEqual -INPUT_MAP(LessEqual) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; -ATTR_MAP(LessEqual) = EMPTY_ATTR_MAP; -OUTPUT_MAP(LessEqual) = {{0, OUTPUT_DESC(y)}}; - -// LogSoftmaxV2 -INPUT_MAP(LogSoftmaxV2) = {{1, INPUT_DESC(logits)}}; -ATTR_MAP(LogSoftmaxV2) = { - {"axis", ATTR_DESC(axes, AnyTraits>(), AnyTraits>())}}; -OUTPUT_MAP(LogSoftmaxV2) = {{0, OUTPUT_DESC(logsoftmax)}}; - -// RandomChoiceWithMask -INPUT_MAP(RandomChoiceWithMask) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(RandomChoiceWithMask) = {{"count", ATTR_DESC(count, AnyTraits())}, - {"seed", ATTR_DESC(seed, AnyTraits())}, - {"seed2", ATTR_DESC(seed2, AnyTraits())}}; -OUTPUT_MAP(RandomChoiceWithMask) = {{0, OUTPUT_DESC(y)}, {1, OUTPUT_DESC(mask)}}; - -// TruncatedNormal -INPUT_MAP(TruncatedNormal) = {{1, INPUT_DESC(shape)}}; -ATTR_MAP(TruncatedNormal) = {{"seed", ATTR_DESC(seed, AnyTraits())}, - {"seed2", ATTR_DESC(seed2, AnyTraits())}}; -OUTPUT_MAP(TruncatedNormal) = {{0, OUTPUT_DESC(y)}}; - -// StridedSliceGrad -INPUT_MAP(StridedSliceGrad) = { - {1, INPUT_DESC(dy)}, {2, INPUT_DESC(shape)}, {3, INPUT_DESC(begin)}, {4, INPUT_DESC(end)}, {5, INPUT_DESC(strides)}}; -ATTR_MAP(StridedSliceGrad) = {{"begin_mask", ATTR_DESC(begin_mask, AnyTraits())}, - {"end_mask", ATTR_DESC(end_mask, AnyTraits())}, - {"ellipsis_mask", ATTR_DESC(ellipsis_mask, AnyTraits())}, - {"new_axis_mask", ATTR_DESC(new_axis_mask, AnyTraits())}, - {"shrink_axis_mask", ATTR_DESC(shrink_axis_mask, AnyTraits())}}; -OUTPUT_MAP(StridedSliceGrad) = {{0, OUTPUT_DESC(output)}}; - -// Gelu -INPUT_MAP(Gelu) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(Gelu) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Gelu) = {{0, OUTPUT_DESC(y)}}; - -// GeluGrad -INPUT_MAP(GeluGrad) = {{1, INPUT_DESC(dy)}, {2, INPUT_DESC(x)}, {3, INPUT_DESC(y)}}; -ATTR_MAP(GeluGrad) = EMPTY_ATTR_MAP; -OUTPUT_MAP(GeluGrad) = {{0, OUTPUT_DESC(z)}}; - -// StridedSlice -INPUT_MAP(StridedSlice) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(begin)}, {3, INPUT_DESC(end)}, {4, INPUT_DESC(strides)}}; -ATTR_MAP(StridedSlice) = {{"begin_mask", ATTR_DESC(begin_mask, AnyTraits())}, - {"end_mask", ATTR_DESC(end_mask, AnyTraits())}, - {"ellipsis_mask", ATTR_DESC(ellipsis_mask, AnyTraits())}, - {"new_axis_mask", ATTR_DESC(new_axis_mask, AnyTraits())}, - {"shrink_axis_mask", ATTR_DESC(shrink_axis_mask, AnyTraits())}}; -OUTPUT_MAP(StridedSlice) = {{0, OUTPUT_DESC(y)}}; - -// UnsortedSegmentSum -INPUT_MAP(UnsortedSegmentSumD) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(segment_ids)}}; -INPUT_ATTR_MAP(UnsortedSegmentSumD) = {{3, 
ATTR_DESC(num_segments, AnyTraits())}}; -ATTR_MAP(UnsortedSegmentSumD) = EMPTY_ATTR_MAP; -OUTPUT_MAP(UnsortedSegmentSumD) = {{0, OUTPUT_DESC(y)}}; - -// UnsortedSegmentMin -INPUT_MAP(UnsortedSegmentMin) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(segment_ids)}, {3, INPUT_DESC(num_segments)}}; -ATTR_MAP(UnsortedSegmentMin) = EMPTY_ATTR_MAP; -OUTPUT_MAP(UnsortedSegmentMin) = {{0, OUTPUT_DESC(y)}}; - -// ExpandDims -INPUT_MAP(ExpandDims) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(axis)}}; -ATTR_MAP(ExpandDims) = EMPTY_ATTR_MAP; -OUTPUT_MAP(ExpandDims) = {{0, OUTPUT_DESC(y)}}; - -// Squeeze -INPUT_MAP(Squeeze) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(Squeeze) = {{"axis", ATTR_DESC(axis, AnyTraits(), AnyTraits>())}}; -OUTPUT_MAP(Squeeze) = {{0, OUTPUT_DESC(y)}}; - -// SGD -INPUT_MAP(SGD) = {{1, INPUT_DESC(parameters)}, {2, INPUT_DESC(gradient)}, {3, INPUT_DESC(learning_rate)}, - {4, INPUT_DESC(accum)}, {5, INPUT_DESC(momentum)}, {6, INPUT_DESC(stat)}}; -ATTR_MAP(SGD) = {{"dampening", ATTR_DESC(dampening, AnyTraits())}, - {"weight_decay", ATTR_DESC(weight_decay, AnyTraits())}, - {"nesterov", ATTR_DESC(nesterov, AnyTraits())}}; -OUTPUT_MAP(SGD) = {{0, OUTPUT_DESC(parameters)}}; - -// LayerNorm -INPUT_MAP(LayerNorm) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(gamma)}, {3, INPUT_DESC(beta)}}; -ATTR_MAP(LayerNorm) = {{"begin_norm_axis", ATTR_DESC(begin_norm_axis, AnyTraits())}, - {"begin_params_axis", ATTR_DESC(begin_params_axis, AnyTraits())}, - {"epsilon", ATTR_DESC(epsilon, AnyTraits())}}; -OUTPUT_MAP(LayerNorm) = {{0, OUTPUT_DESC(y)}, {1, OUTPUT_DESC(mean)}, {2, OUTPUT_DESC(variance)}}; - -// LayerNormGrad -INPUT_MAP(LayerNormGrad) = { - {1, INPUT_DESC(x)}, {2, INPUT_DESC(dy)}, {3, INPUT_DESC(variance)}, {4, INPUT_DESC(mean)}, {5, INPUT_DESC(gamma)}}; -ATTR_MAP(LayerNormGrad) = EMPTY_ATTR_MAP; -OUTPUT_MAP(LayerNormGrad) = {{0, OUTPUT_DESC(pd_x)}, {1, OUTPUT_DESC(pd_gamma)}, {2, OUTPUT_DESC(pd_beta)}}; - -// BatchMatMul -INPUT_MAP(BatchMatMul) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; -ATTR_MAP(BatchMatMul) = {{"transpose_x1", ATTR_DESC(adj_x1, AnyTraits())}, - {"transpose_x2", ATTR_DESC(adj_x2, AnyTraits())}}; -OUTPUT_MAP(BatchMatMul) = {{0, OUTPUT_DESC(y)}}; - -// DropoutDoMask -INPUT_MAP(DropOutDoMask) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(mask)}, {3, INPUT_DESC(keep_prob)}}; -ATTR_MAP(DropOutDoMask) = EMPTY_ATTR_MAP; -OUTPUT_MAP(DropOutDoMask) = {{0, OUTPUT_DESC(y)}}; - -// NPUGetFloatStatus -INPUT_MAP(NPUGetFloatStatus) = {{1, INPUT_DESC(addr)}}; -OUTPUT_MAP(NPUGetFloatStatus) = {{0, OUTPUT_DESC(data)}}; -ATTR_MAP(NPUGetFloatStatus) = EMPTY_ATTR_MAP; - -// NPUAllocFloatStatus -INPUT_MAP(NPUAllocFloatStatus) = EMPTY_INPUT_MAP; -ATTR_MAP(NPUAllocFloatStatus) = EMPTY_ATTR_MAP; -OUTPUT_MAP(NPUAllocFloatStatus) = {{0, OUTPUT_DESC(data)}}; - -// NPUClearFloatStatus -INPUT_MAP(NPUClearFloatStatus) = {{1, INPUT_DESC(addr)}}; -OUTPUT_MAP(NPUClearFloatStatus) = {{0, OUTPUT_DESC(data)}}; -ATTR_MAP(NPUClearFloatStatus) = EMPTY_ATTR_MAP; - -// Abs -INPUT_MAP(Abs) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(Abs) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Abs) = {{0, OUTPUT_DESC(y)}}; - -// AbsGrad -INPUT_MAP(AbsGrad) = {{1, INPUT_DESC(y)}, {2, INPUT_DESC(dy)}}; -ATTR_MAP(AbsGrad) = EMPTY_ATTR_MAP; -OUTPUT_MAP(AbsGrad) = {{0, OUTPUT_DESC(z)}}; - -// BinaryCrossEntropy -INPUT_MAP(BinaryCrossEntropy) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(y)}, {3, INPUT_DESC(weight)}}; -ATTR_MAP(BinaryCrossEntropy) = {{"reduction", ATTR_DESC(reduction, AnyTraits())}}; -OUTPUT_MAP(BinaryCrossEntropy) = {{0, OUTPUT_DESC(output)}}; - -// BinaryCrossEntropyGrad 
-INPUT_MAP(BinaryCrossEntropyGrad) = {
-  {1, INPUT_DESC(x)}, {2, INPUT_DESC(y)}, {3, INPUT_DESC(grad_output)}, {4, INPUT_DESC(weight)}};
-ATTR_MAP(BinaryCrossEntropyGrad) = {{"reduction", ATTR_DESC(reduction, AnyTraits<std::string>())}};
-OUTPUT_MAP(BinaryCrossEntropyGrad) = {{0, OUTPUT_DESC(output)}};
-
-// SparseApplyAdagradD
-INPUT_MAP(SparseApplyAdagradD) = {
-  {1, INPUT_DESC(var)}, {2, INPUT_DESC(accum)}, {3, INPUT_DESC(grad)}, {4, INPUT_DESC(indices)}};
-ATTR_MAP(SparseApplyAdagradD) = {{"lr", ATTR_DESC(lr, AnyTraits<float>())},
-                                 {"use_locking", ATTR_DESC(use_locking, AnyTraits<bool>())}};
-OUTPUT_MAP(SparseApplyAdagradD) = {{0, OUTPUT_DESC(var)}, {1, OUTPUT_DESC(accum)}};
-
-// ApplyProximalAdagradD
-INPUT_MAP(ApplyProximalAdagradD) = {{1, INPUT_DESC(var)}, {2, INPUT_DESC(accum)}, {3, INPUT_DESC(lr)},
-                                    {4, INPUT_DESC(l1)}, {5, INPUT_DESC(l2)}, {6, INPUT_DESC(grad)}};
-ATTR_MAP(ApplyProximalAdagradD) = {{"use_locking", ATTR_DESC(use_locking, AnyTraits<bool>())}};
-OUTPUT_MAP(ApplyProximalAdagradD) = {{0, OUTPUT_DESC(var)}, {1, OUTPUT_DESC(accum)}};
-
-// SparseApplyFtrlD
-INPUT_MAP(SparseApplyFtrlD) = {{1, INPUT_DESC(var)},
-                               {2, INPUT_DESC(accum)},
-                               {3, INPUT_DESC(linear)},
-                               {4, INPUT_DESC(grad)},
-                               {5, INPUT_DESC(indices)}};
-ATTR_MAP(SparseApplyFtrlD) = {{"use_locking", ATTR_DESC(use_locking, AnyTraits<bool>())},
-                              {"lr", ATTR_DESC(lr, AnyTraits<float>())},
-                              {"l1", ATTR_DESC(l1, AnyTraits<float>())},
-                              {"l2", ATTR_DESC(l2, AnyTraits<float>())},
-                              {"lr_power", ATTR_DESC(lr_power, AnyTraits<float>())}};
-OUTPUT_MAP(SparseApplyFtrlD) = {{0, OUTPUT_DESC(var)}};
-
-// SpaceToDepth
-INPUT_MAP(SpaceToDepth) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(SpaceToDepth) = {{"block_size", ATTR_DESC(block_size, AnyTraits<int64_t>())}};
-OUTPUT_MAP(SpaceToDepth) = {{0, OUTPUT_DESC(y)}};
-
-// DepthToSpace
-INPUT_MAP(DepthToSpace) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(DepthToSpace) = {{"block_size", ATTR_DESC(block_size, AnyTraits<int64_t>())}};
-OUTPUT_MAP(DepthToSpace) = {{0, OUTPUT_DESC(y)}};
-
-// Sign
-INPUT_MAP(Sign) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(Sign) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(Sign) = {{0, OUTPUT_DESC(y)}};
-
-// Round
-INPUT_MAP(Round) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(Round) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(Round) = {{0, OUTPUT_DESC(y)}};
-
-// ApplyFtrlD
-INPUT_MAP(ApplyFtrlD) = {{1, INPUT_DESC(var)}, {2, INPUT_DESC(accum)}, {3, INPUT_DESC(linear)},
-                         {4, INPUT_DESC(grad)}, {5, INPUT_DESC(lr)}, {6, INPUT_DESC(l1)},
-                         {7, INPUT_DESC(l2)}, {8, INPUT_DESC(lr_power)}};
-ATTR_MAP(ApplyFtrlD) = {{"use_locking", ATTR_DESC(use_locking, AnyTraits<bool>())}};
-OUTPUT_MAP(ApplyFtrlD) = {{0, OUTPUT_DESC(var)}, {1, OUTPUT_DESC(accum)}, {2, OUTPUT_DESC(linear)}};
-
-// Diag
-INPUT_MAP(Diag) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(Diag) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(Diag) = {{0, OUTPUT_DESC(y)}};
-
-// DiagPart
-INPUT_MAP(DiagPart) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(DiagPart) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(DiagPart) = {{0, OUTPUT_DESC(y)}};
-
-// SpaceToBatchD
-INPUT_MAP(SpaceToBatchD) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(SpaceToBatchD) = {
-  {"block_size", ATTR_DESC(block_size, AnyTraits<int64_t>())},
-  {"paddings", ATTR_DESC(paddings, AnyTraits<std::vector<std::vector<int64_t>>>(), AnyTraits<std::vector<int64_t>>())}};
-OUTPUT_MAP(SpaceToBatchD) = {{0, OUTPUT_DESC(y)}};
-
-// BatchToSpaceD
-INPUT_MAP(BatchToSpaceD) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(BatchToSpaceD) = {
-  {"block_size", ATTR_DESC(block_size, AnyTraits<int64_t>())},
-  {"crops", ATTR_DESC(crops, AnyTraits<std::vector<std::vector<int64_t>>>(), AnyTraits<std::vector<int64_t>>())}};
-OUTPUT_MAP(BatchToSpaceD) = {{0, OUTPUT_DESC(y)}};
-
-// Atan2
-INPUT_MAP(Atan2) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}};
-ATTR_MAP(Atan2) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(Atan2) = {{0, OUTPUT_DESC(y)}};
-
-// ApplyRMSPropD
-INPUT_MAP(ApplyRMSPropD) = {
-  {1, INPUT_DESC(var)}, {2, INPUT_DESC(ms)}, {3, INPUT_DESC(mom)}, {4, INPUT_DESC(lr)}, {5, INPUT_DESC(grad)}};
-INPUT_ATTR_MAP(ApplyRMSPropD) = {{6, ATTR_DESC(rho, AnyTraits<float>())},
-                                 {7, ATTR_DESC(momentum, AnyTraits<float>())},
-                                 {8, ATTR_DESC(epsilon, AnyTraits<float>())}};
-ATTR_MAP(ApplyRMSPropD) = {{"use_locking", ATTR_DESC(use_locking, AnyTraits<bool>())}};
-OUTPUT_MAP(ApplyRMSPropD) = {{0, OUTPUT_DESC(var)}};
-
-// ApplyCenteredRMSProp
-INPUT_MAP(ApplyCenteredRMSProp) = {{1, INPUT_DESC(var)}, {2, INPUT_DESC(mg)}, {3, INPUT_DESC(ms)},
-                                   {4, INPUT_DESC(mom)}, {5, INPUT_DESC(grad)}, {6, INPUT_DESC(lr)},
-                                   {7, INPUT_DESC(rho)}, {8, INPUT_DESC(momentum)}, {9, INPUT_DESC(epsilon)}};
-ATTR_MAP(ApplyCenteredRMSProp) = {{"use_locking", ATTR_DESC(use_locking, AnyTraits<bool>())}};
-OUTPUT_MAP(ApplyCenteredRMSProp) = {{0, OUTPUT_DESC(var)}};
-
-// L2Loss
-INPUT_MAP(L2Loss) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(L2Loss) = EMPTY_ATTR_MAP;
-OUTPUT_MAP(L2Loss) = {{0, OUTPUT_DESC(y)}};
-
-// CTCLoss
-INPUT_MAP(CTCLoss) = {{1, INPUT_DESC(inputs)},
-                      {2, INPUT_DESC(labels_indices)},
-                      {3, INPUT_DESC(labels_values)},
-                      {4, INPUT_DESC(sequence_length)}};
-ATTR_MAP(CTCLoss) = {
-  {"preprocess_collapse_repeated", ATTR_DESC(preprocess_collapse_repeated, AnyTraits<bool>())},
-  {"ctc_merge_repeated", ATTR_DESC(ctc_merge_repeated, AnyTraits<bool>())},
-  {"ignore_longer_outputs_than_inputs", ATTR_DESC(ignore_longer_outputs_than_inputs, AnyTraits<bool>())}};
-OUTPUT_MAP(CTCLoss) = {{0, OUTPUT_DESC(loss)}, {1, OUTPUT_DESC(gradient)}};
-
-// AscendQuant
-INPUT_MAP(AscendQuant) = {{1, INPUT_DESC(x)}};
-ATTR_MAP(AscendQuant) = {{"scale", ATTR_DESC(scale, AnyTraits<float>())},
-                         {"offset", ATTR_DESC(offset, AnyTraits<float>())},
-                         {"sqrt_mode", ATTR_DESC(sqrt_mode, AnyTraits<bool>())},
-                         {"round_mode", ATTR_DESC(round_mode, AnyTraits<std::string>())}};
-OUTPUT_MAP(AscendQuant) = {{0, OUTPUT_DESC(y)}};
-
-// AscendDequant
-INPUT_MAP(AscendDequant) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(deq_scale)}};
-ATTR_MAP(AscendDequant) = {{"sqrt_mode", ATTR_DESC(sqrt_mode, AnyTraits<bool>())},
-                           {"relu_flag", ATTR_DESC(relu_flag, AnyTraits<bool>())}};
-OUTPUT_MAP(AscendDequant) = {{0, OUTPUT_DESC(y)}};
-#ifdef ENABLE_GE
-// Print
-INPUT_MAP(Print) = EMPTY_INPUT_MAP;
-DYN_INPUT_MAP(Print) = {{1, DYN_INPUT_DESC(x)}};
-ATTR_MAP(Print) = EMPTY_ATTR_MAP;
-#endif
-}  // namespace transform
-}  // namespace mindspore
diff --git a/mindspore/ccsrc/utils/callbacks.cc b/mindspore/ccsrc/utils/callbacks.cc
index ceb95d5c8c0..d9e21483dcb 100644
--- a/mindspore/ccsrc/utils/callbacks.cc
+++ b/mindspore/ccsrc/utils/callbacks.cc
@@ -18,9 +18,7 @@
 #include
 #include
 #include
-#include
 #include "pybind11/pybind11.h"
-#include "pipeline/jit/parse/data_converter.h"
 #include "pipeline/jit/parse/python_adapter.h"
 #include "utils/visible.h"
diff --git a/mindspore/ccsrc/utils/context/context_extends.cc b/mindspore/ccsrc/utils/context/context_extends.cc
index aabd286ee9d..ff482c11c57 100644
--- a/mindspore/ccsrc/utils/context/context_extends.cc
+++ b/mindspore/ccsrc/utils/context/context_extends.cc
@@ -20,7 +20,6 @@
 #include
 #include
 #include
-#include
 namespace mindspore {
 namespace context {
diff --git a/mindspore/ccsrc/utils/load_onnx/anf_converter.cc b/mindspore/ccsrc/utils/load_onnx/anf_converter.cc
index f811bfe1f1b..d5626d14c14 100644
--- a/mindspore/ccsrc/utils/load_onnx/anf_converter.cc
+++ b/mindspore/ccsrc/utils/load_onnx/anf_converter.cc
@@ -15,12 +15,9 @@
  */
 #include
-#include
-#include
 #include
 #include
 #include
-#include
 #include
 #include "utils/base_ref_extends.h"
#include "utils/load_onnx/anf_model_parser.h" diff --git a/mindspore/ccsrc/utils/load_onnx/anf_model_parser.cc b/mindspore/ccsrc/utils/load_onnx/anf_model_parser.cc index 83525cbe270..4125675bd5c 100644 --- a/mindspore/ccsrc/utils/load_onnx/anf_model_parser.cc +++ b/mindspore/ccsrc/utils/load_onnx/anf_model_parser.cc @@ -20,7 +20,6 @@ #include #include #include -#include "google/protobuf/io/zero_copy_stream_impl.h" #include "ir/tensor.h" #include "ir/param_info.h" #include "frontend/operator/ops.h" diff --git a/mindspore/ccsrc/utils/summary/event_writer.cc b/mindspore/ccsrc/utils/summary/event_writer.cc index b540ecd58b6..ae4c0221d80 100644 --- a/mindspore/ccsrc/utils/summary/event_writer.cc +++ b/mindspore/ccsrc/utils/summary/event_writer.cc @@ -17,7 +17,6 @@ #include "utils/summary/event_writer.h" #include #include -#include #include "utils/log_adapter.h" #include "utils/convert_utils.h" diff --git a/mindspore/ccsrc/utils/system/file_system.cc b/mindspore/ccsrc/utils/system/file_system.cc index ce27108a39b..39933d56474 100644 --- a/mindspore/ccsrc/utils/system/file_system.cc +++ b/mindspore/ccsrc/utils/system/file_system.cc @@ -17,8 +17,6 @@ #include "utils/system/file_system.h" #include #include -#include -#include namespace mindspore { namespace system { diff --git a/mindspore/ccsrc/utils/tensorprint_utils.cc b/mindspore/ccsrc/utils/tensorprint_utils.cc index a9a2c83d0db..f427a382c1a 100644 --- a/mindspore/ccsrc/utils/tensorprint_utils.cc +++ b/mindspore/ccsrc/utils/tensorprint_utils.cc @@ -15,7 +15,6 @@ */ #include "utils/tensorprint_utils.h" #include -#include #include #include #include @@ -23,8 +22,6 @@ #include "ir/tensor.h" #include "pybind11/pybind11.h" #include "utils/ms_utils.h" -#include "runtime/device/convert_tensor_utils.h" -#include "./securec.h" #ifndef NO_DLIB #include "tdt/tsd_client.h" #include "tdt/tdt_host_interface.h" diff --git a/mindspore/ccsrc/vm/vmimpl.cc b/mindspore/ccsrc/vm/vmimpl.cc index 6751f8c38ba..7731a50794a 100644 --- a/mindspore/ccsrc/vm/vmimpl.cc +++ b/mindspore/ccsrc/vm/vmimpl.cc @@ -20,20 +20,14 @@ #include #include -#include -#include #include #include -#include -#include "ir/tensor.h" #include "frontend/operator/ops.h" #include "ir/manager.h" #include "ir/func_graph_cloner.h" -#include "pybind_api/ir/primitive_py.h" #include "utils/convert_utils.h" #include "utils/primitive_utils.h" -#include "debug/draw.h" namespace mindspore { namespace compile { diff --git a/mindspore/core/utils/trace_base.cc b/mindspore/core/utils/trace_base.cc index aa9fde6f5bd..b051c5e00ce 100644 --- a/mindspore/core/utils/trace_base.cc +++ b/mindspore/core/utils/trace_base.cc @@ -16,15 +16,9 @@ #include "utils/trace_base.h" -#include -#include -#include -#include #include #include -#include #include -#include #include #include "ir/graph_utils.h" diff --git a/mindspore/core/utils/trace_info.cc b/mindspore/core/utils/trace_info.cc index 26bcb8e7ce4..6e20a155578 100644 --- a/mindspore/core/utils/trace_info.cc +++ b/mindspore/core/utils/trace_info.cc @@ -15,8 +15,6 @@ */ #include "utils/trace_info.h" -#include -#include #include #include "ir/anf.h" diff --git a/tests/ut/cpp/CMakeLists.txt b/tests/ut/cpp/CMakeLists.txt index 71c0f39d366..c5d6e11c21b 100644 --- a/tests/ut/cpp/CMakeLists.txt +++ b/tests/ut/cpp/CMakeLists.txt @@ -78,6 +78,7 @@ file(GLOB_RECURSE MINDSPORE_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "../../../mindspore/ccsrc/debug/*.cc" "../../../mindspore/ccsrc/frontend/operator/*.cc" "../../../mindspore/ccsrc/transform/graph_ir/*.cc" + 
"../../../mindspore/ccsrc/transform/graph_ir/op_declare/*.cc" "../../../mindspore/ccsrc/backend/session/anf_runtime_algorithm.cc" "../../../mindspore/ccsrc/backend/session/ascend_session.cc" "../../../mindspore/ccsrc/backend/session/ascend_control_parser.cc" diff --git a/tests/ut/cpp/transform/op_adapter_test.cc b/tests/ut/cpp/transform/op_adapter_test.cc index 2aa6ba37e33..5976bcc52f8 100644 --- a/tests/ut/cpp/transform/op_adapter_test.cc +++ b/tests/ut/cpp/transform/op_adapter_test.cc @@ -19,8 +19,8 @@ #include "common/common_test.h" -#include "transform/graph_ir/op_declare.h" - +#include "transform/graph_ir/op_adapter.h" +#include "transform/graph_ir/op_declare/array_ops_declare.h" #include "frontend/operator/ops.h" #include "./common.h"