From 41dcac9c49ac20f6bb693657c6b375d8ca438a7c Mon Sep 17 00:00:00 2001
From: He Wei
Date: Wed, 24 Nov 2021 08:51:27 +0800
Subject: [PATCH] Replace std::unordered_map/set with robin-hood-hashing

Robin-hood-hashing (https://github.com/martinus/robin-hood-hashing)
is considered faster than std::unordered_map/set, so we use it to
improve MindSpore performance.

1. Add the robin_hood header file in `third_party/robin_hood/include`;
2. In `utils/hash_map.h` and `utils/hash_set.h`, we define:
   - mindspore::HashMap as an alias of robin_hood::unordered_map;
   - mindspore::HashSet as an alias of robin_hood::unordered_set;
3. Replace:
   - `#include <unordered_map>` --> `#include "utils/hash_map.h"`;
   - `#include <unordered_set>` --> `#include "utils/hash_set.h"`;
   - `std::unordered_map` --> `mindspore::HashMap`;
   - `std::unordered_set` --> `mindspore::HashSet`;
   - `map.insert(std::pair(key, value))` --> `map.emplace(key, value)`;
   - `[] (const std::pair &p) {..}` --> `[] (const auto &p) {..}`;
4. Fix issues found by switching to robin_hood:
   - Fix AnfNodeConfig hash and equality;
   - Fix a bug in `Slice::operator==()`;
   - Fix a bug in `CNode::HasPrimalAttr()`;
   - Fix map.erase() usage bugs: `map.erase(iter++)` -->
     `iter = map.erase(iter)` (see the usage sketch after the diff);
   - Fix some iterator invalidation problems;
5. Some std::unordered_map/set uses cannot be replaced by robin_hood:
   - As parameters of functions exposed to Python by pybind11;
   - Uses of a poorly distributed hash that causes robin_hood map
     overflow, such as AbstractBasePtrListHasher;
6. Update C++ unit tests;
7. Add build option '-F' to enable robin_hood, default on.
---
 CMakeLists.txt | 5 + cmake/options.cmake | 1 + .../batchmatmul_fusedmuladd_fusion_pass.cc | 2 +- .../batchmatmul_fusedmuladd_fusion_pass.h | 4 +- .../bnupdate_eltwise_eltwise_fusion_pass.cc | 6 +- .../bnupdate_eltwise_eltwise_fusion_pass.h | 6 +- .../bnupdate_eltwise_fusion_pass.cc | 4 +- .../bnupdate_eltwise_fusion_pass.h | 6 +- ...v2dbackprop_eltwise_eltwise_fusion_pass.cc | 4 +- ...nv2dbackprop_eltwise_eltwise_fusion_pass.h | 6 +- .../conv2dbackprop_eltwise_fusion_pass.cc | 4 +- .../conv2dbackprop_eltwise_fusion_pass.h | 6 +- .../conv_bnreduce_fusion_pass.cc | 4 +- .../buffer_fusion/conv_bnreduce_fusion_pass.h | 6 +- .../conv_double_in_fusion_pass.cc | 4 +- .../conv_double_in_fusion_pass.h | 6 +- .../conv_single_in_fusion_pass.cc | 6 +- .../conv_single_in_fusion_pass.h | 6 +- .../depthwiseconv_eltwise_fusion_pass.cc | 6 +- .../depthwiseconv_eltwise_fusion_pass.h | 6 +- .../buffer_fusion/eltwise_fusion_pass.cc | 4 +- .../buffer_fusion/eltwise_fusion_pass.h | 6 +- .../ascend/buffer_fusion/fusion_base_pass.cc | 4 +- .../ascend/buffer_fusion/fusion_base_pass.h | 10 +- .../matmul_confusiontranspose_fusion_pass.cc | 4 +- .../matmul_confusiontranspose_fusion_pass.h | 6 +- .../matmul_eltwise_fusion_pass.cc | 2 +- .../matmul_eltwise_fusion_pass.h | 6 +- .../buffer_fusion/multi_output_fusion_pass.cc | 4 +- .../buffer_fusion/multi_output_fusion_pass.h | 6 +- .../reduce_eltwise_fusion_pass.cc | 2 +- .../reduce_eltwise_fusion_pass.h | 6 +- .../segment_eltwise_fusion_pass.cc | 4 +- .../segment_eltwise_fusion_pass.h | 6 +- ...ridedread_conv_stridedwrite_fusion_pass.cc | 4 +- ...tridedread_conv_stridedwrite_fusion_pass.h | 6 +- .../ascend/buffer_fusion/ub_pattern_fusion.cc | 36 +- .../ascend/buffer_fusion/ub_pattern_fusion.h | 12 +- .../ascend/format_type/convert_cast_format.cc | 10 +- .../ascend/format_type/convert_cast_format.h | 6 +- .../optimizer/ascend/ir_fission/topk_split.cc | 4 +- .../ir_fusion/input_to_output_registry.cc | 6 +- .../ir_fusion/input_to_output_registry.h | 6 +-
.../ascend/ir_fusion/lamb_next_mv_rule.h | 4 +- .../ascend/ir_fusion/lamb_update_with_lr_v2.h | 4 +- .../common/const_input_to_attr_registry.cc | 8 +- .../common/const_input_to_attr_registry.h | 16 +- .../ccsrc/backend/optimizer/common/helper.cc | 10 +- .../ccsrc/backend/optimizer/common/helper.h | 6 +- .../backend/optimizer/common/node_pass.cc | 12 +- .../backend/optimizer/common/optimizer.h | 4 +- .../backend/optimizer/common/pattern_engine.h | 8 +- .../ccsrc/backend/optimizer/common/visit.h | 4 +- .../cpu/insert_format_transform_op.cc | 4 +- ...l_optimizer_recompute_all_gather_fusion.cc | 4 +- .../backend/optimizer/gpu/insert_cast_gpu.cc | 4 +- .../graph_kernel/arithmetic_simplify.cc | 32 +- .../graph_kernel/arithmetic_simplify.h | 4 +- .../graph_kernel/expanders/expander_factory.h | 4 +- .../graph_kernel/graph_kernel_cluster.cc | 5 +- .../graph_kernel/graph_kernel_cluster.h | 5 +- .../graph_kernel/graph_kernel_cse.cc | 23 +- .../graph_kernel/graph_kernel_helper.cc | 2 +- .../graph_kernel/graph_kernel_helper.h | 2 +- .../graph_kernel/graph_kernel_splitter.cc | 57 +- .../graph_kernel/model/lite_graph.cc | 4 +- .../optimizer/graph_kernel/model/lite_graph.h | 4 +- .../optimizer/graph_kernel/model/node.cc | 10 +- .../optimizer/graph_kernel/model/node.h | 8 +- .../optimizer/graph_kernel/model/op_node.cc | 6 +- .../optimizer/graph_kernel/model/op_node.h | 2 +- .../graph_kernel/model/op_register.h | 4 +- .../optimizer/graph_kernel/reorder_ops.cc | 8 +- .../backend/optimizer/mem_reuse/mem_reuse.cc | 4 +- .../backend/optimizer/mem_reuse/mem_reuse.h | 8 +- .../optimizer/mem_reuse/mem_reuse_checker.cc | 10 +- .../optimizer/mem_reuse/mem_swap_manager.h | 12 +- .../optimizer/pass/add_training_attr.cc | 8 +- ...parallel_optimizer_recompute_all_gather.cc | 2 +- .../optimizer/pass/communication_op_fusion.cc | 4 +- .../pass/convert_const_input_to_attr.h | 8 +- .../pass/convert_tuple_output_to_maketuple.cc | 4 +- .../pass/custom_op_const_input_to_attr.cc | 4 +- .../optimizer/pass/eliminate_redundant_op.cc | 2 +- .../optimizer/pass/eliminate_redundant_op.h | 4 +- .../ccsrc/backend/optimizer/somas/somas.cc | 4 +- .../ccsrc/backend/optimizer/somas/somas.h | 6 +- .../backend/optimizer/somas/somas_node.h | 14 +- .../optimizer/somas/somas_solver_alg.cc | 24 +- .../optimizer/somas/somas_solver_alg.h | 11 +- .../optimizer/somas/somas_solver_core.cc | 18 +- .../optimizer/somas/somas_solver_core.h | 4 +- .../optimizer/somas/somas_solver_pre.cc | 4 +- .../optimizer/somas/somas_solver_pre.h | 8 +- .../backend/optimizer/somas/somas_tensor.h | 8 +- .../optimizer/trt_pass/graph_converter.cc | 6 +- .../optimizer/trt_pass/graph_partitioner.cc | 6 +- .../optimizer/trt_pass/graph_partitioner.h | 6 +- .../trt_pass/trt_converter_context.cc | 8 +- .../trt_pass/trt_converter_context.h | 4 +- .../optimizer/trt_pass/trt_op_factory.h | 6 +- .../backend/session/anf_runtime_algorithm.h | 1 - .../session/ascend_inference_session.h | 4 +- .../ccsrc/backend/session/ascend_session.cc | 6 +- .../ccsrc/backend/session/ascend_session.h | 8 +- mindspore/ccsrc/backend/session/executor.cc | 2 +- .../backend/session/gpu_inference_session.h | 2 +- .../ccsrc/backend/session/kernel_graph.cc | 128 +- .../ccsrc/backend/session/kernel_graph.h | 48 +- .../ccsrc/backend/session/session_basic.cc | 10 +- .../ccsrc/backend/session/session_basic.h | 18 +- mindspore/ccsrc/debug/anf_ir_dump.cc | 6 +- mindspore/ccsrc/debug/anf_ir_utils.cc | 2 +- mindspore/ccsrc/debug/anf_ir_utils.h | 4 +- mindspore/ccsrc/debug/data_dump/npy_header.cc | 4 +- 
.../ccsrc/debug/debugger/proto_exporter.cc | 4 +- .../ccsrc/debug/debugger/tensor_summary.h | 10 +- mindspore/ccsrc/debug/rdr/recorder_manager.h | 4 +- mindspore/ccsrc/debug/trace.cc | 14 +- .../ccsrc/fl/server/consistent_hash_ring.cc | 2 +- .../fl/server/distributed_count_service.h | 7 +- .../fl/server/distributed_metadata_store.h | 3 +- .../ccsrc/fl/server/kernel/kernel_factory.h | 4 +- .../fl/server/kernel/round/round_kernel.cc | 2 +- .../fl/server/kernel/round/round_kernel.h | 4 +- .../kernel/round/round_kernel_factory.h | 4 +- mindspore/ccsrc/fl/server/local_meta_store.h | 4 +- .../frontend/operator/composite/composite.h | 6 +- .../operator/composite/do_signature.h | 4 +- .../operator/composite/multitype_funcgraph.h | 4 +- .../frontend/operator/composite/unpack_call.h | 4 +- .../operator/composite/zip_operation.h | 4 +- .../frontend/operator/prim_to_function.h | 6 +- .../ccsrc/frontend/optimizer/ad/dfunctor.cc | 18 +- .../ccsrc/frontend/optimizer/ad/dfunctor.h | 20 +- mindspore/ccsrc/frontend/optimizer/ad/grad.cc | 2 +- .../ccsrc/frontend/optimizer/ad/kprim.cc | 18 +- .../ccsrc/frontend/optimizer/ad/kpynative.cc | 6 +- .../optimizer/ad/prim_bprop_optimizer.h | 5 +- .../optimizer/auto_monad_eliminate.cc | 7 +- mindspore/ccsrc/frontend/optimizer/cse.cc | 8 +- mindspore/ccsrc/frontend/optimizer/cse.h | 4 +- mindspore/ccsrc/frontend/optimizer/cse_pass.h | 4 +- .../frontend/optimizer/graph_transform.h | 8 +- .../optimizer/irpass/branch_culling.cc | 16 +- .../irpass/call_graph_tuple_transform.h | 6 +- .../optimizer/irpass/env_item_eliminate.h | 12 +- .../optimizer/irpass/grad_var_prepare.cc | 4 +- .../optimizer/irpass/grad_var_prepare.h | 4 +- .../optimizer/irpass/incorporate_call.h | 4 +- .../optimizer/irpass/incorporate_getitem.h | 10 +- .../ccsrc/frontend/optimizer/irpass/inline.h | 4 +- .../irpass/less_batch_normalization.cc | 4 +- .../irpass/less_batch_normalization.h | 4 +- .../optimizer/irpass/parameter_eliminate.h | 8 +- .../optimizer/irpass/partial_eliminate.h | 2 +- .../optimizer/irpass/recompute_prepare.h | 6 +- .../optimizer/irpass/specialize_transform.h | 6 +- mindspore/ccsrc/frontend/optimizer/opt.cc | 6 +- mindspore/ccsrc/frontend/optimizer/opt.h | 4 +- mindspore/ccsrc/frontend/optimizer/pattern.h | 4 +- mindspore/ccsrc/frontend/optimizer/py_pass.cc | 4 +- mindspore/ccsrc/frontend/optimizer/py_pass.h | 6 +- .../frontend/optimizer/py_pass_manager.cc | 4 +- .../frontend/optimizer/py_pass_manager.h | 6 +- .../ccsrc/frontend/optimizer/recompute.cc | 52 +- .../allreduce_fusion/allreduce_fusion.cc | 14 +- .../allreduce_fusion/allreduce_fusion.h | 6 +- .../allreduce_fusion/allreduce_graph.cc | 4 +- .../allreduce_fusion/allreduce_graph.h | 18 +- .../allreduce_fusion/allreduce_node.h | 12 +- .../cache_embedding/cache_embedding.cc | 14 +- .../ccsrc/frontend/parallel/device_manager.cc | 6 +- .../parallel/graph_util/generate_graph.cc | 4 +- .../parallel/graph_util/generate_graph.h | 8 +- .../frontend/parallel/graph_util/node_info.cc | 6 +- .../frontend/parallel/graph_util/node_info.h | 10 +- .../parallel/ops_info/activation_info.h | 4 +- .../parallel/ops_info/arithmetic_info.h | 4 +- .../parallel/ops_info/batch_parallel_info.h | 4 +- .../parallel/ops_info/batchnorm_info.h | 2 +- .../parallel/ops_info/bias_add_info.h | 4 +- .../parallel/ops_info/broadcast_to_info.h | 4 +- .../ops_info/comparison_function_info.h | 4 +- .../frontend/parallel/ops_info/concat_info.h | 4 +- .../frontend/parallel/ops_info/conv2d_info.h | 2 +- .../parallel/ops_info/dropout_do_mask_info.h | 4 +- 
.../parallel/ops_info/dsd_matmul_info.h | 2 +- .../ops_info/elementary_function_info.h | 4 +- .../frontend/parallel/ops_info/gather_info.h | 4 +- .../frontend/parallel/ops_info/gatherd_info.h | 2 +- .../parallel/ops_info/gathernd_info.h | 2 +- .../parallel/ops_info/get_next_info.h | 4 +- .../parallel/ops_info/l2_normalize_info.h | 4 +- .../parallel/ops_info/layer_norm_info.h | 4 +- .../frontend/parallel/ops_info/loss_info.h | 4 +- .../parallel/ops_info/matmul_dds_info.h | 2 +- .../frontend/parallel/ops_info/matmul_info.h | 4 +- .../frontend/parallel/ops_info/maxpool_info.h | 2 +- .../frontend/parallel/ops_info/onehot_info.h | 4 +- .../parallel/ops_info/operator_info.h | 10 +- .../frontend/parallel/ops_info/pack_info.h | 4 +- .../frontend/parallel/ops_info/prelu_info.h | 4 +- .../frontend/parallel/ops_info/range_info.h | 4 +- .../parallel/ops_info/reduce_method_info.h | 4 +- .../frontend/parallel/ops_info/reluv2_info.h | 4 +- .../frontend/parallel/ops_info/reshape_info.h | 4 +- .../parallel/ops_info/resizebilinear_info.h | 2 +- .../parallel/ops_info/scatter_update_info.h | 2 +- .../frontend/parallel/ops_info/select_info.h | 2 +- .../frontend/parallel/ops_info/slice_info.h | 4 +- .../parallel/ops_info/strided_slice_info.h | 4 +- .../parallel/ops_info/tensordot_info.h | 4 +- .../frontend/parallel/ops_info/tile_info.h | 4 +- .../frontend/parallel/ops_info/topk_info.h | 2 +- .../parallel/ops_info/transpose_info.h | 4 +- .../parallel/ops_info/uniform_real_info.h | 2 +- .../frontend/parallel/ops_info/unique_info.h | 4 +- .../ops_info/unsorted_segment_op_info.h | 4 +- .../parallel/ops_info/virtual_dataset_info.h | 4 +- .../parallel/ops_info/virtual_output_info.h | 2 +- .../frontend/parallel/parameter_manager.cc | 4 +- .../pipeline_transformer.cc | 10 +- .../frontend/parallel/step_auto_parallel.cc | 10 +- .../ccsrc/frontend/parallel/step_parallel.cc | 10 +- .../ccsrc/frontend/parallel/step_parallel.h | 8 +- .../frontend/parallel/step_parallel_utils.cc | 2 +- .../parallel_strategy_checkpoint.h | 10 +- .../redistribution_operator_infer.cc | 16 +- .../redistribution_operator_infer.h | 6 +- .../pipeline/jit/parse/data_converter.cc | 18 +- .../ccsrc/pipeline/jit/parse/data_converter.h | 6 +- .../ccsrc/pipeline/jit/parse/function_block.h | 10 +- mindspore/ccsrc/pipeline/jit/parse/parse.cc | 8 +- .../ccsrc/pipeline/jit/parse/parse_dynamic.cc | 4 +- mindspore/ccsrc/pipeline/jit/pass.cc | 4 +- mindspore/ccsrc/pipeline/jit/pipeline.cc | 3 +- mindspore/ccsrc/pipeline/jit/pipeline.h | 2 +- mindspore/ccsrc/pipeline/jit/pipeline_ge.cc | 4 +- mindspore/ccsrc/pipeline/jit/pipeline_ge.h | 4 +- .../pipeline/jit/remove_value_node_dup.h | 6 +- mindspore/ccsrc/pipeline/jit/resource.cc | 2 +- mindspore/ccsrc/pipeline/jit/resource.h | 6 +- mindspore/ccsrc/pipeline/jit/resource_base.h | 8 +- .../jit/static_analysis/auto_monad.cc | 34 +- .../pipeline/jit/static_analysis/evaluator.cc | 2 +- .../pipeline/jit/static_analysis/evaluator.h | 3 +- .../jit/static_analysis/order_enforce.cc | 22 +- .../pipeline/jit/static_analysis/prim.cc | 49 +- .../ccsrc/pipeline/jit/static_analysis/prim.h | 4 +- .../jit/static_analysis/program_specialize.cc | 1 + .../jit/static_analysis/program_specialize.h | 13 +- .../jit/static_analysis/static_analysis.cc | 22 +- .../jit/static_analysis/static_analysis.h | 43 +- mindspore/ccsrc/pipeline/jit/validator.h | 4 +- mindspore/ccsrc/pipeline/pynative/base.h | 6 +- .../ccsrc/pipeline/pynative/pynative_cache.h | 34 +- .../pipeline/pynative/pynative_execute.cc | 37 +- 
.../pipeline/pynative/pynative_execute.h | 26 +- .../pipeline/pynative/pynative_execute_ge.cc | 8 +- .../pipeline/pynative/pynative_execute_ge.h | 6 +- .../pipeline/pynative/pynative_profiling.h | 1 - mindspore/ccsrc/profiler/device/profiling.h | 1 + mindspore/ccsrc/ps/core/abstract_node.cc | 4 +- mindspore/ccsrc/ps/core/abstract_node.h | 18 +- .../ps/core/communicator/communicator_base.h | 4 +- .../ps/core/communicator/http_communicator.h | 4 +- .../core/communicator/http_request_handler.cc | 2 +- .../core/communicator/http_request_handler.h | 4 +- .../ccsrc/ps/core/communicator/http_server.h | 9 +- .../ps/core/communicator/tcp_communicator.h | 4 +- mindspore/ccsrc/ps/core/configuration.h | 2 +- mindspore/ccsrc/ps/core/file_configuration.h | 2 +- mindspore/ccsrc/ps/core/node.cc | 10 +- mindspore/ccsrc/ps/core/node.h | 12 +- mindspore/ccsrc/ps/core/node_manager.cc | 6 +- mindspore/ccsrc/ps/core/node_manager.h | 24 +- mindspore/ccsrc/ps/core/scheduler_node.cc | 16 +- mindspore/ccsrc/ps/core/scheduler_node.h | 10 +- mindspore/ccsrc/ps/core/server_node.h | 4 +- mindspore/ccsrc/ps/parameter_server.cc | 2 +- mindspore/ccsrc/ps/parameter_server.h | 38 +- .../ccsrc/ps/ps_cache/embedding_hash_map.h | 8 +- mindspore/ccsrc/ps/util.cc | 10 +- mindspore/ccsrc/ps/util.h | 10 +- mindspore/ccsrc/ps/worker.cc | 8 +- mindspore/ccsrc/ps/worker.h | 12 +- mindspore/ccsrc/pybind_api/ir/primitive_py.h | 6 +- .../runtime/framework/actor/actor_common.h | 2 +- .../actor/control_flow/entrance_actor.h | 4 +- .../framework/actor/control_flow/exit_actor.h | 16 +- .../actor/control_flow/gather_actor.h | 7 +- .../actor/control_flow/stack_actor.h | 4 +- .../actor/control_flow/switch_actor.h | 1 - .../runtime/framework/actor/copy_actor.h | 2 +- .../framework/actor/data_prepare_actor.h | 2 +- .../framework/actor/data_source_actor.h | 4 +- .../runtime/framework/actor/kernel_actor.h | 2 +- .../framework/actor/loop_count_actor.h | 2 +- .../framework/actor/memory_manager_actor.h | 2 +- .../runtime/framework/actor/output_actor.h | 2 +- .../runtime/framework/control_node_parser.cc | 10 +- .../runtime/framework/control_node_parser.h | 32 +- .../runtime/framework/device_tensor_store.h | 4 +- .../ccsrc/runtime/framework/graph_compiler.h | 6 +- .../runtime/framework/graph_scheduler.cc | 6 +- .../ccsrc/runtime/framework/graph_scheduler.h | 6 +- .../transform/express_ir/mindir_exporter.cc | 10 +- .../transform/express_ir/onnx_exporter.cc | 18 +- mindspore/ccsrc/transform/graph_ir/convert.cc | 14 +- mindspore/ccsrc/transform/graph_ir/convert.h | 22 +- .../ccsrc/transform/graph_ir/io_format_map.cc | 4 +- .../ccsrc/transform/graph_ir/io_format_map.h | 6 +- .../ccsrc/transform/graph_ir/op_adapter.cc | 16 +- .../ccsrc/transform/graph_ir/op_adapter.h | 108 +- .../transform/graph_ir/op_adapter_base.h | 18 +- .../transform/graph_ir/op_adapter_map.cc | 10 +- .../ccsrc/transform/graph_ir/op_adapter_map.h | 6 +- .../graph_ir/op_declare/array_ops_declare.h | 4 +- .../op_declare/control_flow_ops_declare.h | 4 +- .../graph_ir/op_declare/ctc_ops_declare.h | 4 +- .../elewise_calculation_ops_declare.h | 2 +- .../op_declare/functional_ops_declare.h | 4 +- .../graph_ir/op_declare/hcom_ops_declare.h | 4 +- .../graph_ir/op_declare/image_ops_declare.h | 4 +- .../graph_ir/op_declare/logging_ops_declare.h | 4 +- .../graph_ir/op_declare/math_ops_declare.h | 2 +- .../matrix_calculation_ops_declare.h | 2 +- .../op_declare/nn_batch_norm_ops_declare.h | 4 +- .../op_declare/nn_calculation_ops_declare.h | 4 +- .../op_declare/nn_detect_ops_declare.h | 2 +- 
.../graph_ir/op_declare/nn_norm_ops_declare.h | 2 +- .../op_declare/nn_pooling_ops_declare.h | 2 +- .../op_declare/nn_training_ops_declare.h | 2 +- .../op_declare/nonlinear_fuc_ops_declare.h | 2 +- .../op_declare/npu_loss_scale_ops_declare.h | 4 +- .../graph_ir/op_declare/op_declare_macro.h | 38 +- .../graph_ir/op_declare/pad_ops_declare.h | 2 +- .../op_declare/quantize_ops_declare.h | 4 +- .../graph_ir/op_declare/random_ops_declare.h | 2 +- .../graph_ir/op_declare/reduce_ops_declare.h | 2 +- .../graph_ir/op_declare/rnn_declare.h | 2 +- .../graph_ir/op_declare/rpn_ops_declare.h | 4 +- .../op_declare/selection_ops_declare.h | 2 +- .../split_combination_ops_declare.h | 2 +- .../graph_ir/op_declare/state_ops_declare.h | 4 +- .../op_declare/transformation_ops_declare.h | 2 +- mindspore/ccsrc/transform/graph_ir/types.h | 6 +- mindspore/ccsrc/utils/convert_utils.cc | 4 +- mindspore/ccsrc/utils/convert_utils.h | 14 +- mindspore/ccsrc/vm/backend.h | 8 +- mindspore/ccsrc/vm/segment_runner.cc | 13 +- mindspore/ccsrc/vm/segment_runner.h | 4 +- mindspore/ccsrc/vm/transform.h | 8 +- mindspore/ccsrc/vm/vm.h | 4 +- mindspore/ccsrc/vm/vmimpl.h | 10 +- mindspore/core/abstract/abstract_value.cc | 5 +- mindspore/core/abstract/abstract_value.h | 8 +- mindspore/core/abstract/analysis_context.h | 9 +- mindspore/core/abstract/dshape.h | 1 - .../core/abstract/primitive_infer_map.cc | 414 +-- mindspore/core/abstract/primitive_infer_map.h | 7 +- mindspore/core/api/ir/func_graph_manager.h | 4 +- mindspore/core/base/base.h | 1 - mindspore/core/base/core_ops.h | 4 +- mindspore/core/base/user_data.h | 4 +- mindspore/core/ir/anf.cc | 4 +- mindspore/core/ir/anf.h | 27 +- mindspore/core/ir/anf_extends.cc | 4 +- mindspore/core/ir/cell.cc | 4 +- mindspore/core/ir/cell.h | 8 +- mindspore/core/ir/dtype.cc | 4 +- mindspore/core/ir/dtype.h | 1 - mindspore/core/ir/dtype/container.cc | 4 +- mindspore/core/ir/dtype/container.h | 15 +- mindspore/core/ir/dtype/empty.h | 3 +- mindspore/core/ir/dtype/number.h | 3 +- mindspore/core/ir/dtype/tensor_type.h | 3 +- mindspore/core/ir/dtype/type.cc | 4 +- mindspore/core/ir/dtype/type.h | 6 +- mindspore/core/ir/dtype_extends.cc | 4 +- mindspore/core/ir/func_graph.cc | 2 +- mindspore/core/ir/func_graph.h | 33 +- mindspore/core/ir/func_graph_cloner.cc | 6 +- mindspore/core/ir/func_graph_cloner.h | 18 +- mindspore/core/ir/func_graph_extends.cc | 12 +- mindspore/core/ir/graph_utils.cc | 6 +- mindspore/core/ir/graph_utils.h | 6 +- mindspore/core/ir/graph_utils_extends.cc | 6 +- mindspore/core/ir/manager.cc | 46 +- mindspore/core/ir/manager.h | 6 +- mindspore/core/ir/meta_func_graph.h | 4 +- mindspore/core/ir/primal_attr.h | 10 +- mindspore/core/ir/primal_debug_info.h | 2 +- mindspore/core/ir/primitive.cc | 6 +- mindspore/core/ir/primitive.h | 18 +- .../core/load_mindir/anf_model_parser.cc | 32 +- mindspore/core/load_mindir/anf_model_parser.h | 10 +- .../core/mindrt/include/actor/op_actor.h | 6 +- .../core/mindrt/include/actor/switch_actor.h | 2 +- mindspore/core/ops/custom.h | 1 - mindspore/core/utils/counter.h | 4 +- mindspore/core/utils/hash_map.h | 38 + mindspore/core/utils/hash_set.h | 38 + .../core/utils/interpret_node_recorder.h | 5 +- mindspore/core/utils/label.h | 4 +- mindspore/core/utils/ms_utils.h | 13 +- mindspore/core/utils/ordered_map.h | 8 +- mindspore/core/utils/ordered_set.h | 6 +- mindspore/core/utils/overload.h | 10 +- mindspore/core/utils/symbolic.cc | 9 +- mindspore/core/utils/symbolic.h | 6 +- mindspore/lite/cmake/file_list.cmake | 4 +- scripts/build/build_mindspore.sh | 5 + 
scripts/build/default_options.sh | 1 + scripts/build/option_proc_mindspore.sh | 8 +- scripts/build/process_options.sh | 6 +- scripts/build/usage.sh | 3 +- tests/ut/cpp/abstract/abstract_test.cc | 4 +- tests/ut/cpp/operator/ops_test.cc | 2 +- tests/ut/cpp/optimizer/clean_test.cc | 8 +- .../parallel/auto_parallel/dp_algo_test.cc | 80 +- .../auto_parallel/edge_costmodel_test.cc | 12 +- .../auto_parallel/graph_costmodel_test.cc | 14 +- .../parallel/ops_info/activation_info_test.cc | 4 +- .../cpp/parallel/ops_info/activation_test.cc | 6 +- .../cpp/parallel/ops_info/gelu_info_test.cc | 4 +- .../ops_info/l2_normalize_info_test.cc | 4 +- .../ops_info/log_softmax_info_test.cc | 4 +- .../cpp/parallel/ops_info/matmul_info_test.cc | 10 +- .../cpp/parallel/ops_info/onehot_info_test.cc | 4 +- .../ops_info/onehot_info_test_axis_0.cc | 4 +- .../ut/cpp/parallel/ops_info/pow_info_test.cc | 4 +- tests/ut/cpp/parallel/ops_info/prelu_test.cc | 6 +- .../parallel/ops_info/reduce_method_test.cc | 4 +- .../ut/cpp/parallel/ops_info/reshape_test.cc | 4 +- .../softmax_entropy_loss_info_test.cc | 4 +- .../parallel/ops_info/softmax_info_test.cc | 6 +- .../cpp/parallel/ops_info/tanh_info_test.cc | 4 +- .../parallel/ops_info/tensor_add_info_test.cc | 4 +- .../cpp/parallel/ops_info/tmpidentity_test.cc | 4 +- .../cpp/parallel/ops_info/transpose_test.cc | 4 +- tests/ut/cpp/parallel/step_parallel_test.cc | 6 +- .../tensor_layout/construct_operator_test.cc | 4 +- tests/ut/cpp/parallel/virtual_dataset_test.cc | 4 +- tests/ut/cpp/pipeline/resource_test.cc | 4 +- .../cpp/pipeline/static_analysis/data_test.cc | 4 +- .../ascend/ir_fission/topk_split_test.cc | 4 +- tests/ut/cpp/transform/op_adapter_test.cc | 6 +- third_party/robin_hood/LICENSE | 21 + third_party/robin_hood/README.md | 72 + .../include/robin_hood/robin_hood.h | 2529 +++++++++++++++++ 452 files changed, 4687 insertions(+), 1965 deletions(-) create mode 100644 mindspore/core/utils/hash_map.h create mode 100644 mindspore/core/utils/hash_set.h create mode 100644 third_party/robin_hood/LICENSE create mode 100644 third_party/robin_hood/README.md create mode 100644 third_party/robin_hood/include/robin_hood/robin_hood.h diff --git a/CMakeLists.txt b/CMakeLists.txt index 90180706888..0824f804003 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -57,6 +57,11 @@ include_directories(${CMAKE_CURRENT_SOURCE_DIR}/third_party/securec/include) include_directories(${CMAKE_CURRENT_SOURCE_DIR}/third_party/flatbuffers/include) include_directories(${CMAKE_CURRENT_SOURCE_DIR}/third_party/flatbuffers/include/flatbuffers) +if(ENABLE_FAST_HASH_TABLE) +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DENABLE_FAST_HASH_TABLE=1") +include_directories(${CMAKE_CURRENT_SOURCE_DIR}/third_party/robin_hood/include) +endif() + include(${CMAKE_SOURCE_DIR}/cmake/dependency_utils.cmake) find_package(Python3 COMPONENTS Interpreter Development) if(Python3_FOUND) diff --git a/cmake/options.cmake b/cmake/options.cmake index d94fc1d64c3..06dcffdd468 100644 --- a/cmake/options.cmake +++ b/cmake/options.cmake @@ -27,6 +27,7 @@ option(MODE_ASCEND_ALL "supports all ascend platform" OFF) option(MODE_ASCEND_ACL "supports ascend acl mode only" OFF) option(ENABLE_SYM_FILE "enable sym file" OFF) option(BUILD_DEV_MODE "MindSpore build nightly dev mode" OFF) +option(ENABLE_FAST_HASH_TABLE "Enable use fast hash table instead of std ones" ON) if(CMAKE_SYSTEM_NAME MATCHES "Darwin") diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/batchmatmul_fusedmuladd_fusion_pass.cc 
b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/batchmatmul_fusedmuladd_fusion_pass.cc index 24c42221fab..1cf319b462f 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/batchmatmul_fusedmuladd_fusion_pass.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/batchmatmul_fusedmuladd_fusion_pass.cc @@ -30,7 +30,7 @@ void BatchMatmulFusedMulAddFusionPass::MatchBatchMatmulFusedMulAdd(const CNodePt auto batch_matmul = cnode->input(kIndex2); MS_EXCEPTION_IF_NULL(batch_matmul); if (batch_matmul->isa() && AnfAlgo::CheckPrimitiveType(batch_matmul, prim::kPrimBatchMatMul)) { - std::unordered_set record{cnode, batch_matmul}; + mindspore::HashSet record{cnode, batch_matmul}; candidate_fusion->push_back(record); SetRecordFusionId(record); } diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/batchmatmul_fusedmuladd_fusion_pass.h b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/batchmatmul_fusedmuladd_fusion_pass.h index 32ae07ea3b0..96144f2ae57 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/batchmatmul_fusedmuladd_fusion_pass.h +++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/batchmatmul_fusedmuladd_fusion_pass.h @@ -16,9 +16,9 @@ #ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_BATCHMATMUL_FUSEDMULADD_PASS_H_ #define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_BATCHMATMUL_FUSEDMULADD_PASS_H_ -#include #include +#include "utils/hash_set.h" #include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h" #include "ir/anf.h" #include "backend/optimizer/common/pass.h" @@ -29,7 +29,7 @@ namespace mindspore { namespace opt { -using FusedNodeRecord = std::vector>; +using FusedNodeRecord = std::vector>; class BatchMatmulFusedMulAddFusionPass : public FusionBasePass { public: diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/bnupdate_eltwise_eltwise_fusion_pass.cc b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/bnupdate_eltwise_eltwise_fusion_pass.cc index f928865c80d..2a26fd89a50 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/bnupdate_eltwise_eltwise_fusion_pass.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/bnupdate_eltwise_eltwise_fusion_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -15,9 +15,9 @@ */ #include "backend/optimizer/ascend/buffer_fusion/bnupdate_eltwise_eltwise_fusion_pass.h" #include -#include #include #include +#include "utils/hash_set.h" #include "backend/kernel_compiler/kernel_fusion.h" #include "debug/anf_ir_dump.h" #include "backend/session/anf_runtime_algorithm.h" @@ -45,7 +45,7 @@ void BnupdateEltwiseEltwiseFusionPass::MatchBnupdateAddRelu(const CNodePtr &cnod MS_EXCEPTION_IF_NULL(bnupdate); if (bnupdate->isa() && AnfAlgo::GetCNodeName(bnupdate) == kBNTrainingUpdateOpName && GetNodeOutputTotalUsedNum(kernel_graph, bnupdate) == kBNTrainingUpdateOutputUsedTotalNum) { - std::unordered_set record{cnode, relu_input, bnupdate}; + mindspore::HashSet record{cnode, relu_input, bnupdate}; candidate_fusion->push_back(record); SetRecordFusionId(record); } diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/bnupdate_eltwise_eltwise_fusion_pass.h b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/bnupdate_eltwise_eltwise_fusion_pass.h index 6a586241ae9..d3e3d8a158d 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/bnupdate_eltwise_eltwise_fusion_pass.h +++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/bnupdate_eltwise_eltwise_fusion_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,9 +16,9 @@ #ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_BNUPDATE_ELTWISE_ELTWISE_FUSION_PASS_H_ #define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_BNUPDATE_ELTWISE_ELTWISE_FUSION_PASS_H_ -#include #include +#include "utils/hash_set.h" #include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h" #include "ir/anf.h" #include "backend/optimizer/common/pass.h" @@ -29,7 +29,7 @@ namespace mindspore { namespace opt { -using FusedNodeRecord = std::vector>; +using FusedNodeRecord = std::vector>; class BnupdateEltwiseEltwiseFusionPass : public FusionBasePass { public: diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/bnupdate_eltwise_fusion_pass.cc b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/bnupdate_eltwise_fusion_pass.cc index 784725539b9..715ef46617d 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/bnupdate_eltwise_fusion_pass.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/bnupdate_eltwise_fusion_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -35,7 +35,7 @@ void BnupdateEltwiseFusionPass::MatchBnupdateDoubleOutputEltwise(const CNodePtr MS_EXCEPTION_IF_NULL(bnupdate); if (bnupdate->isa() && AnfAlgo::GetCNodeName(bnupdate) == kBNTrainingUpdateOpName && GetNodeOutputTotalUsedNum(kernel_graph, bnupdate) == kBNTrainingUpdateOutputUsedTotalNum) { - std::unordered_set record{cnode, bnupdate}; + mindspore::HashSet record{cnode, bnupdate}; candidate_fusion->push_back(record); SetRecordFusionId(record); } diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/bnupdate_eltwise_fusion_pass.h b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/bnupdate_eltwise_fusion_pass.h index 0314b4c1005..8a8c49c5db8 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/bnupdate_eltwise_fusion_pass.h +++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/bnupdate_eltwise_fusion_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,9 +16,9 @@ #ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_BNUPDATE_ELTWISE_FUSION_PASS_H_ #define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_BNUPDATE_ELTWISE_FUSION_PASS_H_ -#include #include +#include "utils/hash_set.h" #include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h" #include "ir/anf.h" #include "backend/optimizer/common/pass.h" @@ -29,7 +29,7 @@ namespace mindspore { namespace opt { -using FusedNodeRecord = std::vector>; +using FusedNodeRecord = std::vector>; class BnupdateEltwiseFusionPass : public FusionBasePass { public: diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv2dbackprop_eltwise_eltwise_fusion_pass.cc b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv2dbackprop_eltwise_eltwise_fusion_pass.cc index c3cc2466784..4a156f8081e 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv2dbackprop_eltwise_eltwise_fusion_pass.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv2dbackprop_eltwise_eltwise_fusion_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -26,7 +26,7 @@ void Conv2DBackpropEltwiseEltwiseFusionPass::MatchConv2DBackpropInputEltwiseEltw const CNodePtr &cnode, const session::KernelGraph &kernel_graph, FusedNodeRecord *candidate_fusion) { MS_EXCEPTION_IF_NULL(cnode); MS_EXCEPTION_IF_NULL(candidate_fusion); - std::unordered_set record{cnode}; + mindspore::HashSet record{cnode}; auto eltwise_input = cnode->input(kIndex1); MS_EXCEPTION_IF_NULL(eltwise_input); if (CheckDoubleInEltWiseNode(kernel_graph, eltwise_input)) { diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv2dbackprop_eltwise_eltwise_fusion_pass.h b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv2dbackprop_eltwise_eltwise_fusion_pass.h index 8722327c9be..440a2d96104 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv2dbackprop_eltwise_eltwise_fusion_pass.h +++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv2dbackprop_eltwise_eltwise_fusion_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,9 +16,9 @@ #ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_CONV2DBACKPROP_ELTWISE_ELTWISE_FUSION_PASS_H_ #define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_CONV2DBACKPROP_ELTWISE_ELTWISE_FUSION_PASS_H_ -#include #include +#include "utils/hash_set.h" #include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h" #include "ir/anf.h" #include "backend/optimizer/common/pass.h" @@ -29,7 +29,7 @@ namespace mindspore { namespace opt { -using FusedNodeRecord = std::vector>; +using FusedNodeRecord = std::vector>; class Conv2DBackpropEltwiseEltwiseFusionPass : public FusionBasePass { public: diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv2dbackprop_eltwise_fusion_pass.cc b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv2dbackprop_eltwise_fusion_pass.cc index 7b3cd90b92a..9aef8c35a94 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv2dbackprop_eltwise_fusion_pass.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv2dbackprop_eltwise_fusion_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -27,7 +27,7 @@ void Conv2DBackpropEltwiseFusionPass::MatchConv2DBackpropInputEltwise(const CNod FusedNodeRecord *candidate_fusion) { MS_EXCEPTION_IF_NULL(cnode); MS_EXCEPTION_IF_NULL(candidate_fusion); - std::unordered_set record{cnode}; + mindspore::HashSet record{cnode}; auto eltwise_input = cnode->input(kIndex1); MS_EXCEPTION_IF_NULL(eltwise_input); if (!eltwise_input->isa() || !AnfUtils::IsRealCNodeKernel(eltwise_input) || diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv2dbackprop_eltwise_fusion_pass.h b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv2dbackprop_eltwise_fusion_pass.h index 3c5c98f067f..58de9e4f675 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv2dbackprop_eltwise_fusion_pass.h +++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv2dbackprop_eltwise_fusion_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,9 +16,9 @@ #ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_CONV2DBACKPROP_ELTWISE_FUSION_PASS_H_ #define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_CONV2DBACKPROP_ELTWISE_FUSION_PASS_H_ -#include #include +#include "utils/hash_set.h" #include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h" #include "ir/anf.h" #include "backend/optimizer/common/pass.h" @@ -29,7 +29,7 @@ namespace mindspore { namespace opt { -using FusedNodeRecord = std::vector>; +using FusedNodeRecord = std::vector>; class Conv2DBackpropEltwiseFusionPass : public FusionBasePass { public: diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv_bnreduce_fusion_pass.cc b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv_bnreduce_fusion_pass.cc index f61033dc310..81203f3144b 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv_bnreduce_fusion_pass.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv_bnreduce_fusion_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -32,7 +32,7 @@ void ConvBnReduceFusionPass::MatchConvBnreduce(const CNodePtr &cnode, const sess MS_EXCEPTION_IF_NULL(conv); if (conv->isa() && AnfAlgo::GetCNodeName(conv) == prim::kPrimConv2D->name() && GetNodeOutputTotalUsedNum(kernel_graph, conv) == kConvOutputUsedTotalNum) { - std::unordered_set record{cnode, conv}; + mindspore::HashSet record{cnode, conv}; candidate_fusion->push_back(record); SetRecordFusionId(record); } diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv_bnreduce_fusion_pass.h b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv_bnreduce_fusion_pass.h index 5dbfcc88986..aaaf13c0fe1 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv_bnreduce_fusion_pass.h +++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv_bnreduce_fusion_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,9 +16,9 @@ #ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_CONV_BNREDUCE_FUSION_PASS_H_ #define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_CONV_BNREDUCE_FUSION_PASS_H_ -#include #include +#include "utils/hash_set.h" #include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h" #include "ir/anf.h" #include "backend/optimizer/common/pass.h" @@ -29,7 +29,7 @@ namespace mindspore { namespace opt { -using FusedNodeRecord = std::vector>; +using FusedNodeRecord = std::vector>; class ConvBnReduceFusionPass : public FusionBasePass { public: diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv_double_in_fusion_pass.cc b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv_double_in_fusion_pass.cc index fd1607c8453..ba0f1c63d5e 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv_double_in_fusion_pass.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv_double_in_fusion_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -26,7 +26,7 @@ void ConvDoubleInFusionPass::MatchConvDoubleInEltwise(const CNodePtr &cnode, con FusedNodeRecord *candidate_fusion) { MS_EXCEPTION_IF_NULL(cnode); MS_EXCEPTION_IF_NULL(candidate_fusion); - std::unordered_set record{cnode}; + mindspore::HashSet record{cnode}; auto eltwise_input = cnode->input(kIndex1); MS_EXCEPTION_IF_NULL(eltwise_input); if (CheckDoubleInEltWiseNode(kernel_graph, eltwise_input)) { diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv_double_in_fusion_pass.h b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv_double_in_fusion_pass.h index dbeae261701..32d79d367dc 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv_double_in_fusion_pass.h +++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv_double_in_fusion_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,9 +16,9 @@ #ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_CONV_DOUBLE_IN_FUSION_PASS_H_ #define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_CONV_DOUBLE_IN_FUSION_PASS_H_ -#include #include +#include "utils/hash_set.h" #include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h" #include "ir/anf.h" #include "backend/optimizer/common/pass.h" @@ -29,7 +29,7 @@ namespace mindspore { namespace opt { -using FusedNodeRecord = std::vector>; +using FusedNodeRecord = std::vector>; class ConvDoubleInFusionPass : public FusionBasePass { public: diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv_single_in_fusion_pass.cc b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv_single_in_fusion_pass.cc index c073b288362..fd82de784d4 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv_single_in_fusion_pass.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv_single_in_fusion_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,8 +15,8 @@ */ #include "backend/optimizer/ascend/buffer_fusion/conv_single_in_fusion_pass.h" #include -#include #include +#include "utils/hash_set.h" #include "backend/kernel_compiler/kernel_fusion.h" #include "debug/anf_ir_dump.h" #include "backend/session/anf_runtime_algorithm.h" @@ -30,7 +30,7 @@ void ConvSingleInFusionPass::MatchConvSingleInEltwise(const CNodePtr &cnode, con FusedNodeRecord *candidate_fusion) { MS_EXCEPTION_IF_NULL(cnode); MS_EXCEPTION_IF_NULL(candidate_fusion); - std::unordered_set record{cnode}; + mindspore::HashSet record{cnode}; auto eltwise_input = cnode->input(kIndex1); while (CheckEltWiseNode(kernel_graph, eltwise_input)) { (void)record.insert(eltwise_input); diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv_single_in_fusion_pass.h b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv_single_in_fusion_pass.h index 202ddb0a7a6..b3b7e85026a 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv_single_in_fusion_pass.h +++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/conv_single_in_fusion_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,9 +16,9 @@ #ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_CONV_SINGLE_IN_FUSION_PASS_H_ #define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_CONV_SINGLE_IN_FUSION_PASS_H_ -#include #include +#include "utils/hash_set.h" #include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h" #include "ir/anf.h" #include "backend/optimizer/common/pass.h" @@ -29,7 +29,7 @@ namespace mindspore { namespace opt { -using FusedNodeRecord = std::vector>; +using FusedNodeRecord = std::vector>; class ConvSingleInFusionPass : public FusionBasePass { public: diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/depthwiseconv_eltwise_fusion_pass.cc b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/depthwiseconv_eltwise_fusion_pass.cc index 47d8457346a..6409a129572 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/depthwiseconv_eltwise_fusion_pass.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/depthwiseconv_eltwise_fusion_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -33,7 +33,7 @@ void DepthwiseConvEltwiseFusionPass::MatchDepthwiseConvRelu(const CNodePtr &cnod auto depthwise_conv = cnode->input(kIndex1); MS_EXCEPTION_IF_NULL(depthwise_conv); if (cnode->isa() && IsPrimitiveCNode(depthwise_conv, prim::kPrimDepthwiseConv2dNative)) { - std::unordered_set record{cnode, depthwise_conv}; + mindspore::HashSet record{cnode, depthwise_conv}; candidate_fusion->push_back(record); SetRecordFusionId(record); } @@ -42,7 +42,7 @@ void DepthwiseConvEltwiseFusionPass::MatchDepthwiseConvRelu(const CNodePtr &cnod auto relu = cnode->input(kIndex1); MS_EXCEPTION_IF_NULL(relu); if (cnode->isa() && (IsPrimitiveCNode(relu, prim::kPrimRelu) || IsPrimitiveCNode(relu, prim::kPrimReluV2))) { - std::unordered_set record{cnode, relu}; + mindspore::HashSet record{cnode, relu}; candidate_fusion->push_back(record); SetRecordFusionId(record); } diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/depthwiseconv_eltwise_fusion_pass.h b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/depthwiseconv_eltwise_fusion_pass.h index 43652f6057b..e5117b40bed 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/depthwiseconv_eltwise_fusion_pass.h +++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/depthwiseconv_eltwise_fusion_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,9 +16,9 @@ #ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_DEPTHWISECONV_ELTWISE_FUSION_PASS_H_ #define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_DEPTHWISECONV_ELTWISE_FUSION_PASS_H_ -#include #include +#include "utils/hash_set.h" #include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h" #include "ir/anf.h" #include "backend/optimizer/common/pass.h" @@ -29,7 +29,7 @@ namespace mindspore { namespace opt { -using FusedNodeRecord = std::vector>; +using FusedNodeRecord = std::vector>; class DepthwiseConvEltwiseFusionPass : public FusionBasePass { public: diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/eltwise_fusion_pass.cc b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/eltwise_fusion_pass.cc index 1d44224e8e8..66091145403 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/eltwise_fusion_pass.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/eltwise_fusion_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -26,7 +26,7 @@ void EltwiseFusionPass::MatchEltwise(const CNodePtr &cnode, const session::Kerne FusedNodeRecord *candidate_fusion) { MS_EXCEPTION_IF_NULL(cnode); MS_EXCEPTION_IF_NULL(candidate_fusion); - std::unordered_set record{cnode}; + mindspore::HashSet record{cnode}; auto eltwise_input = cnode->input(kIndex1); MS_EXCEPTION_IF_NULL(eltwise_input); while (CheckEltWiseNode(kernel_graph, eltwise_input)) { diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/eltwise_fusion_pass.h b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/eltwise_fusion_pass.h index e0e8b0c5364..6851d1f940b 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/eltwise_fusion_pass.h +++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/eltwise_fusion_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,9 +16,9 @@ #ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_ELTWISE_FUSION_PASS_H_ #define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_ELTWISE_FUSION_PASS_H_ -#include #include +#include "utils/hash_set.h" #include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h" #include "ir/anf.h" #include "backend/optimizer/common/pass.h" @@ -29,7 +29,7 @@ namespace mindspore { namespace opt { -using FusedNodeRecord = std::vector>; +using FusedNodeRecord = std::vector>; class EltwiseFusionPass : public FusionBasePass { public: diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/fusion_base_pass.cc b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/fusion_base_pass.cc index 68ca856d0f6..6180ff1860a 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/fusion_base_pass.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/fusion_base_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -81,7 +81,7 @@ size_t FusionBasePass::GetNotUpdateStateUserNums(const session::KernelGraph &ker return not_updatestate_users; } -void FusionBasePass::SetRecordFusionId(const std::unordered_set &record) { +void FusionBasePass::SetRecordFusionId(const mindspore::HashSet &record) { auto id = fusion_id_allocator->AllocateFusionId(); for (auto node : record) { fusion_id_allocator->SetFusionId(node, id); diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h index 2f7e0809cd9..12ae5cc4ebd 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h +++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,11 +15,11 @@ */ #ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_FUSION_BASE_PASS_H_ #define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_FUSION_BASE_PASS_H_ -#include -#include #include #include #include +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "ir/anf.h" #include "backend/optimizer/common/pass.h" #include "backend/optimizer/common/fusion_id_allocator.h" @@ -45,7 +45,7 @@ const int8_t MULTI_ELTWISE_SIZE = 4; constexpr int64_t kBNTrainingUpdateOutputUsedTotalNum = 5; constexpr int64_t kConvOutputUsedTotalNum = 4; -using FusedNodeRecord = std::vector>; +using FusedNodeRecord = std::vector>; struct BufferFusionInfo_t { std::string full_name; @@ -66,7 +66,7 @@ class FusionBasePass : public PassWithSwitch { bool RunPass(const FuncGraphPtr &graph) override; virtual void MatchSingleFusionPattern(const session::KernelGraph &kernel_graph, FusedNodeRecord *candidate_fusion) = 0; - void SetRecordFusionId(const std::unordered_set &record); + void SetRecordFusionId(const mindspore::HashSet &record); bool CheckEltWiseNode(const session::KernelGraph &kernel_graph, const AnfNodePtr &node); bool CheckDoubleInEltWiseNode(const session::KernelGraph &kernel_graph, const AnfNodePtr &node); bool CheckMultiOutputEltWiseNode(const session::KernelGraph &kernel_graph, const AnfNodePtr &node); diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/matmul_confusiontranspose_fusion_pass.cc b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/matmul_confusiontranspose_fusion_pass.cc index 0b3735f41bb..1e32beecbba 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/matmul_confusiontranspose_fusion_pass.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/matmul_confusiontranspose_fusion_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -31,7 +31,7 @@ void MatmulConfusionTranposeFusionPass::MatchMatmulConfusionTranpose(const CNode MS_EXCEPTION_IF_NULL(matmul); if (matmul->isa() && (AnfAlgo::CheckPrimitiveType(matmul, prim::kPrimMatMul) || AnfAlgo::CheckPrimitiveType(matmul, prim::kPrimBatchMatMul))) { - std::unordered_set record{cnode, matmul}; + mindspore::HashSet record{cnode, matmul}; candidate_fusion->push_back(record); SetRecordFusionId(record); } diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/matmul_confusiontranspose_fusion_pass.h b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/matmul_confusiontranspose_fusion_pass.h index b1174d98084..4544ec58346 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/matmul_confusiontranspose_fusion_pass.h +++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/matmul_confusiontranspose_fusion_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,9 +16,9 @@ #ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_MATMUL_CONFUSIONTRANSPOSE_FUSION_PASS_H_ #define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_MATMUL_CONFUSIONTRANSPOSE_FUSION_PASS_H_ -#include #include +#include "utils/hash_set.h" #include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h" #include "ir/anf.h" #include "backend/optimizer/common/pass.h" @@ -29,7 +29,7 @@ namespace mindspore { namespace opt { -using FusedNodeRecord = std::vector>; +using FusedNodeRecord = std::vector>; class MatmulConfusionTranposeFusionPass : public FusionBasePass { public: diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/matmul_eltwise_fusion_pass.cc b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/matmul_eltwise_fusion_pass.cc index 82670236fa1..08baaf8b0b3 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/matmul_eltwise_fusion_pass.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/matmul_eltwise_fusion_pass.cc @@ -30,7 +30,7 @@ void MatmulEltwiseFusionPass::MatchMatmulEltwise(const CNodePtr &cnode, const An if (fusion_id_allocator->HasFusionIdAttr(relu_input)) { return; } - std::unordered_set record{cnode, relu_input}; + mindspore::HashSet record{cnode, relu_input}; candidate_fusion->push_back(record); SetRecordFusionId(record); } diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/matmul_eltwise_fusion_pass.h b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/matmul_eltwise_fusion_pass.h index 9c709f2ccd2..2581f90b6ba 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/matmul_eltwise_fusion_pass.h +++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/matmul_eltwise_fusion_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,9 +16,9 @@ #ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_MATMUL_ELTWISE_FUSION_PASS_H_ #define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_MATMUL_ELTWISE_FUSION_PASS_H_ -#include #include +#include "utils/hash_set.h" #include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h" #include "ir/anf.h" #include "backend/optimizer/common/pass.h" @@ -29,7 +29,7 @@ namespace mindspore { namespace opt { -using FusedNodeRecord = std::vector>; +using FusedNodeRecord = std::vector>; class MatmulEltwiseFusionPass : public FusionBasePass { public: diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/multi_output_fusion_pass.cc b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/multi_output_fusion_pass.cc index 734ecda578c..ab9004aa0cf 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/multi_output_fusion_pass.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/multi_output_fusion_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -26,7 +26,7 @@ void MultiOutputFusionPass::MatchMultiOutputEltwise(const CNodePtr &cnode, const FusedNodeRecord *candidate_fusion) { MS_EXCEPTION_IF_NULL(cnode); MS_EXCEPTION_IF_NULL(candidate_fusion); - std::unordered_set record{cnode}; + mindspore::HashSet record{cnode}; auto eltwise_input = cnode->input(kIndex1); MS_EXCEPTION_IF_NULL(eltwise_input); if (CheckMultiOutputEltWiseNode(kernel_graph, eltwise_input)) { diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/multi_output_fusion_pass.h b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/multi_output_fusion_pass.h index 071be0f2ebc..285e4819260 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/multi_output_fusion_pass.h +++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/multi_output_fusion_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,9 +16,9 @@
 #ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_MULTI_OUTPUT_FUSION_PASS_H_
 #define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_MULTI_OUTPUT_FUSION_PASS_H_
-#include <unordered_set>
 #include <vector>
+#include "utils/hash_set.h"
 #include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h"
 #include "ir/anf.h"
 #include "backend/optimizer/common/pass.h"
@@ -29,7 +29,7 @@
 namespace mindspore {
 namespace opt {
-using FusedNodeRecord = std::vector<std::unordered_set<AnfNodePtr>>;
+using FusedNodeRecord = std::vector<mindspore::HashSet<AnfNodePtr>>;
 
 class MultiOutputFusionPass : public FusionBasePass {
  public:
diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/reduce_eltwise_fusion_pass.cc b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/reduce_eltwise_fusion_pass.cc
index 9a57b344ea2..4b431926be7 100644
--- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/reduce_eltwise_fusion_pass.cc
+++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/reduce_eltwise_fusion_pass.cc
@@ -28,7 +28,7 @@ void ReduceEltwiseFusionPass::MatchReduceEltwise(const CNodePtr &cnode, const se
                                                  FusedNodeRecord *candidate_fusion) {
   MS_EXCEPTION_IF_NULL(cnode);
   MS_EXCEPTION_IF_NULL(candidate_fusion);
-  std::unordered_set<AnfNodePtr> record{cnode};
+  mindspore::HashSet<AnfNodePtr> record{cnode};
   auto eltwise_input = cnode->input(kIndex1);
   while (CheckEltWiseNode(kernel_graph, eltwise_input)) {
     (void)record.insert(eltwise_input);
diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/reduce_eltwise_fusion_pass.h b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/reduce_eltwise_fusion_pass.h
index f238f425f56..3f86a437ee0 100644
--- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/reduce_eltwise_fusion_pass.h
+++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/reduce_eltwise_fusion_pass.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -16,9 +16,9 @@
 #ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_REDUCE_ELTWISE_FUSION_PASS_H_
 #define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_REDUCE_ELTWISE_FUSION_PASS_H_
-#include <unordered_set>
 #include <vector>
+#include "utils/hash_set.h"
 #include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h"
 #include "ir/anf.h"
 #include "backend/optimizer/common/pass.h"
@@ -29,7 +29,7 @@
 namespace mindspore {
 namespace opt {
-using FusedNodeRecord = std::vector<std::unordered_set<AnfNodePtr>>;
+using FusedNodeRecord = std::vector<mindspore::HashSet<AnfNodePtr>>;
 
 class ReduceEltwiseFusionPass : public FusionBasePass {
  public:
diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/segment_eltwise_fusion_pass.cc b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/segment_eltwise_fusion_pass.cc
index 8263439f3eb..3e35ba4c7b5 100644
--- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/segment_eltwise_fusion_pass.cc
+++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/segment_eltwise_fusion_pass.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -26,7 +26,7 @@ void SegmentEltwiseFusionPass::MatchSegmentEltwise(const CNodePtr &cnode, const
                                                    FusedNodeRecord *candidate_fusion) {
   MS_EXCEPTION_IF_NULL(cnode);
   MS_EXCEPTION_IF_NULL(candidate_fusion);
-  std::unordered_set<AnfNodePtr> record{cnode};
+  mindspore::HashSet<AnfNodePtr> record{cnode};
   auto eltwise_input = cnode->input(kIndex1);
   while (CheckEltWiseNode(kernel_graph, eltwise_input)) {
     (void)record.insert(eltwise_input);
diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/segment_eltwise_fusion_pass.h b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/segment_eltwise_fusion_pass.h
index 011f1349966..20290e00397 100644
--- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/segment_eltwise_fusion_pass.h
+++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/segment_eltwise_fusion_pass.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -16,9 +16,9 @@
 #ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_SEGMENT_ELTWISE_FUSION_PASS_H_
 #define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_SEGMENT_ELTWISE_FUSION_PASS_H_
-#include <unordered_set>
 #include <vector>
+#include "utils/hash_set.h"
 #include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h"
 #include "ir/anf.h"
 #include "backend/optimizer/common/pass.h"
@@ -29,7 +29,7 @@
 namespace mindspore {
 namespace opt {
-using FusedNodeRecord = std::vector<std::unordered_set<AnfNodePtr>>;
+using FusedNodeRecord = std::vector<mindspore::HashSet<AnfNodePtr>>;
 
 class SegmentEltwiseFusionPass : public FusionBasePass {
  public:
diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/stridedread_conv_stridedwrite_fusion_pass.cc b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/stridedread_conv_stridedwrite_fusion_pass.cc
index d9adbcba764..97ef1354cdb 100644
--- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/stridedread_conv_stridedwrite_fusion_pass.cc
+++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/stridedread_conv_stridedwrite_fusion_pass.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -27,7 +27,7 @@ void StridedReadConvStridedWriteFusionPass::MatchStridedReadConvStridedWrite(con
                                                                               FusedNodeRecord *candidate_fusion) {
   MS_EXCEPTION_IF_NULL(cnode);
   MS_EXCEPTION_IF_NULL(candidate_fusion);
-  std::unordered_set<AnfNodePtr> record{cnode};
+  mindspore::HashSet<AnfNodePtr> record{cnode};
   auto write_input = cnode->input(kIndex1);
   if (CheckEltWiseNode(kernel_graph, write_input)) {
     (void)record.insert(write_input);
diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/stridedread_conv_stridedwrite_fusion_pass.h b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/stridedread_conv_stridedwrite_fusion_pass.h
index 419e7012742..11168c63c82 100644
--- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/stridedread_conv_stridedwrite_fusion_pass.h
+++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/stridedread_conv_stridedwrite_fusion_pass.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -16,9 +16,9 @@
 #ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_STRIDEDREAD_CONV_STRIDEDWRITE_FUSION_PASS_H_
 #define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_STRIDEDREAD_CONV_STRIDEDWRITE_FUSION_PASS_H_
-#include <unordered_set>
 #include <vector>
+#include "utils/hash_set.h"
 #include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h"
 #include "ir/anf.h"
 #include "backend/optimizer/common/pass.h"
@@ -29,7 +29,7 @@
 namespace mindspore {
 namespace opt {
-using FusedNodeRecord = std::vector<std::unordered_set<AnfNodePtr>>;
+using FusedNodeRecord = std::vector<mindspore::HashSet<AnfNodePtr>>;
 
 class StridedReadConvStridedWriteFusionPass : public FusionBasePass {
  public:
diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/ub_pattern_fusion.cc b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/ub_pattern_fusion.cc
index 20bdfebbbe1..fe02298a965 100644
--- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/ub_pattern_fusion.cc
+++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/ub_pattern_fusion.cc
@@ -16,10 +16,10 @@
 #include "backend/optimizer/ascend/buffer_fusion/ub_pattern_fusion.h"
 #include
 #include
-#include <unordered_map>
 #include
 #include
 #include
+#include "utils/hash_map.h"
 #include "backend/kernel_compiler/tbe/tbe_kernel_compile.h"
 #include "backend/kernel_compiler/tbe/tbe_utils.h"
 #include "debug/anf_ir_dump.h"
@@ -163,7 +163,7 @@ AnfNodePtr CreateTupleGetItem(const AnfNodePtr &buffer_fusion_kernel, session::K
   return tuple_item;
 }
 
-void ReplaceInputNodeInOtherFusionScope(std::unordered_map<int64_t, BufferFusionInfo_t> *buffer_fusion_infos,
+void ReplaceInputNodeInOtherFusionScope(mindspore::HashMap<int64_t, BufferFusionInfo_t> *buffer_fusion_infos,
                                         int64_t fusion_id, const AnfNodePtr &output_item,
                                         const AnfNodePtr &replace_item) {
   for (int64_t id = fusion_id + 1; id <= SizeToLong(buffer_fusion_infos->size()); ++id) {
@@ -176,7 +176,7 @@ void ReplaceInputNodeInOtherFusionScope(std::unordered_map<int64_t, BufferFusion
-void ReplaceOldNode(std::unordered_map<int64_t, BufferFusionInfo_t> *buffer_fusion_infos, int64_t fusion_id,
+void ReplaceOldNode(mindspore::HashMap<int64_t, BufferFusionInfo_t> *buffer_fusion_infos, int64_t fusion_id,
                     const AnfNodePtr &buffer_fusion_kernel, session::KernelGraph *kernel_graph) {
   MS_EXCEPTION_IF_NULL(kernel_graph);
   MS_EXCEPTION_IF_NULL(buffer_fusion_infos);
@@ -204,7 +204,7 @@ void ReplaceOldNode(std::unordered_map<int64_t, BufferFusionInfo_t> *buffer_fusi
 }
 
 void GetFusionScopeComputeNodeList(session::KernelGraph *kernel_graph,
-                                   std::unordered_map<int64_t, BufferFusionInfo_t> *buffer_fusion_infos) {
+                                   mindspore::HashMap<int64_t, BufferFusionInfo_t> *buffer_fusion_infos) {
   MS_EXCEPTION_IF_NULL(buffer_fusion_infos);
   MS_EXCEPTION_IF_NULL(kernel_graph);
   auto nodes = TopoSort(kernel_graph->get_return());
@@ -222,7 +222,7 @@ void GetFusionScopeComputeNodeList(session::KernelGraph *kernel_graph,
 }
 
 void GetFusionScopeInputNodeList(const session::KernelGraph &kernel_graph,
-                                 std::unordered_map<int64_t, BufferFusionInfo_t> *buffer_fusion_infos) {
+                                 mindspore::HashMap<int64_t, BufferFusionInfo_t> *buffer_fusion_infos) {
   MS_EXCEPTION_IF_NULL(buffer_fusion_infos);
   auto manager = kernel_graph.manager();
   MS_EXCEPTION_IF_NULL(manager);
@@ -283,7 +283,7 @@ AnfNodePtr RemoveNodeFromUpdateState(session::KernelGraph *kernel_graph, const A
 }
 
 void GetFusionScopeOutputNodeList(session::KernelGraph *kernel_graph,
-                                  std::unordered_map<int64_t, BufferFusionInfo_t> *buffer_fusion_infos) {
+                                  mindspore::HashMap<int64_t, BufferFusionInfo_t> *buffer_fusion_infos) {
   MS_EXCEPTION_IF_NULL(kernel_graph);
   MS_EXCEPTION_IF_NULL(buffer_fusion_infos);
   auto manager = kernel_graph->manager();
@@ -349,7 +349,7 @@ void GetFusionScopeOutputNodeList(session::KernelGraph *kernel_graph,
 }
 
 void SetOutputUsedNumAttr(const session::KernelGraph &kernel_graph,
-                          const std::unordered_map<int64_t, BufferFusionInfo_t> &buffer_fusion_infos) {
+                          const mindspore::HashMap<int64_t, BufferFusionInfo_t> &buffer_fusion_infos) {
   for (auto &fusion_info : buffer_fusion_infos) {
     auto &fusion_nodes = fusion_info.second.anf_nodes;
     for (auto iter = fusion_nodes.begin(); iter != fusion_nodes.end() - 1; ++iter) {
@@ -408,7 +408,7 @@ bool CheckCircle(const session::KernelGraph &kernel_graph, const BufferFusionInf
 }
 
 void RemoveCircle(const session::KernelGraph &kernel_graph,
-                  std::unordered_map<int64_t, BufferFusionInfo_t> *buffer_fusion_infos) {
+                  mindspore::HashMap<int64_t, BufferFusionInfo_t> *buffer_fusion_infos) {
   MS_EXCEPTION_IF_NULL(buffer_fusion_infos);
   std::vector<int64_t> fusion_ids;
   for (auto &[fusion_id, fusion_info] : *buffer_fusion_infos) {
@@ -425,7 +425,7 @@ void RemoveCircle(const session::KernelGraph &kernel_graph,
 }  // namespace
 
 void UbPatternFusion::GetBufferFusionInfo(session::KernelGraph *kernel_graph,
-                                          std::unordered_map<int64_t, BufferFusionInfo_t> *buffer_fusion_infos) const {
+                                          mindspore::HashMap<int64_t, BufferFusionInfo_t> *buffer_fusion_infos) const {
   MS_EXCEPTION_IF_NULL(buffer_fusion_infos);
   MS_EXCEPTION_IF_NULL(kernel_graph);
   GetFusionScopeComputeNodeList(kernel_graph, buffer_fusion_infos);
@@ -449,17 +449,17 @@ void UbPatternFusion::GetBufferFusionInfo(session::KernelGraph *kernel_graph,
 bool UbPatternFusion::FuseBufferFusionPattern(session::KernelGraph *kernel_graph) const {
   MS_EXCEPTION_IF_NULL(kernel_graph);
   bool change = false;
-  std::unordered_map<int64_t, BufferFusionInfo_t> buffer_fusion_infos;
+  mindspore::HashMap<int64_t, BufferFusionInfo_t> buffer_fusion_infos;
   GetBufferFusionInfo(kernel_graph, &buffer_fusion_infos);
 
   std::vector<mindspore::kernel::FusionScopeInfo> fusion_scope_infos;
-  std::transform(
-    buffer_fusion_infos.begin(), buffer_fusion_infos.end(), std::back_inserter(fusion_scope_infos),
-    [](const std::pair<int64_t, BufferFusionInfo_t> &buffer_fusion_info) -> mindspore::kernel::FusionScopeInfo {
-      return mindspore::kernel::FusionScopeInfo(
-        buffer_fusion_info.first, buffer_fusion_info.second.full_name, buffer_fusion_info.second.inputs_list,
-        buffer_fusion_info.second.anf_nodes, buffer_fusion_info.second.outputs_list);
-    });
+  std::transform(buffer_fusion_infos.begin(), buffer_fusion_infos.end(), std::back_inserter(fusion_scope_infos),
+                 [](const auto &buffer_fusion_info) -> mindspore::kernel::FusionScopeInfo {
+                   return mindspore::kernel::FusionScopeInfo(
+                     buffer_fusion_info.first, buffer_fusion_info.second.full_name,
+                     buffer_fusion_info.second.inputs_list, buffer_fusion_info.second.anf_nodes,
+                     buffer_fusion_info.second.outputs_list);
+                 });
   auto &build_manager = kernel::ascend::TbeKernelCompileManager::GetInstance();
   auto id_names = build_manager.TbeFusionOpCompile(fusion_scope_infos);
   std::set<int64_t> fusion_ids;
@@ -486,7 +486,7 @@ bool UbPatternFusion::FuseBufferFusionPattern(session::KernelGraph *kernel_graph
   return change;
 }
 
-bool UbPatternFusion::ReplaceFusionOp(std::unordered_map<int64_t, BufferFusionInfo_t> *buffer_fusion_infos,
+bool UbPatternFusion::ReplaceFusionOp(mindspore::HashMap<int64_t, BufferFusionInfo_t> *buffer_fusion_infos,
                                       int64_t fusion_id, session::KernelGraph *kernel_graph) const {
   MS_EXCEPTION_IF_NULL(buffer_fusion_infos);
   auto buffer_fusion_info = (*buffer_fusion_infos)[fusion_id];
diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/ub_pattern_fusion.h b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/ub_pattern_fusion.h
index e7c218ddefb..c9fba56c834 100644
--- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/ub_pattern_fusion.h
+++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/ub_pattern_fusion.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
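Note: the std::transform hunk above also switches the lambda parameter from an explicit const std::pair<...> & to const auto &. That is not purely cosmetic: robin_hood maps iterate over robin_hood::pair rather than std::pair, so a spelled-out std::pair parameter would at best copy every element and at worst not compile, while a generic lambda binds to either. A small sketch, assuming the aliases described earlier:

    #include <algorithm>
    #include <cstdint>
    #include <iterator>
    #include <string>
    #include <vector>
    #include "utils/hash_map.h"

    std::vector<int64_t> CollectKeys(const mindspore::HashMap<int64_t, std::string> &m) {
      std::vector<int64_t> keys;
      // const auto & binds to robin_hood::pair and std::pair alike.
      std::transform(m.begin(), m.end(), std::back_inserter(keys),
                     [](const auto &kv) { return kv.first; });
      return keys;
    }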
@@ -15,11 +15,11 @@ */ #ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_UB_PATTERN_FUSION_H_ #define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_UB_PATTERN_FUSION_H_ -#include -#include #include #include #include +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h" #include "ir/anf.h" #include "backend/optimizer/common/pass.h" @@ -30,7 +30,7 @@ namespace mindspore { namespace opt { -using FusedNodeRecord = std::vector>; +using FusedNodeRecord = std::vector>; class UbPatternFusion : public PassWithSwitch { public: @@ -41,8 +41,8 @@ class UbPatternFusion : public PassWithSwitch { bool RunPass(const FuncGraphPtr &graph) override; void GetBufferFusionInfo(session::KernelGraph *kernel_graph, - std::unordered_map *buffer_fusion_infos) const; - bool ReplaceFusionOp(std::unordered_map *buffer_fusion_infos, int64_t fusion_id, + mindspore::HashMap *buffer_fusion_infos) const; + bool ReplaceFusionOp(mindspore::HashMap *buffer_fusion_infos, int64_t fusion_id, session::KernelGraph *kernel_graph) const; bool FuseBufferFusionPattern(session::KernelGraph *kernel_graph) const; }; diff --git a/mindspore/ccsrc/backend/optimizer/ascend/format_type/convert_cast_format.cc b/mindspore/ccsrc/backend/optimizer/ascend/format_type/convert_cast_format.cc index 2bf66eccd34..a2514ca65b9 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/format_type/convert_cast_format.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/format_type/convert_cast_format.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -17,10 +17,10 @@ #include #include -#include #include #include +#include "utils/hash_map.h" #include "backend/session/anf_runtime_algorithm.h" #include "backend/optimizer/common/helper.h" namespace mindspore { @@ -79,7 +79,7 @@ void ConvertCastFormat::ChangeCastFormat(const CNodePtr &cast_node, const FuncGr AnfAlgo::SetNodeAttr(kAttrVisited, MakeValue(true), cast_node); auto used_cast_node_list = GetRealNodeUsedList(func_graph, cast_node); MS_EXCEPTION_IF_NULL(used_cast_node_list); - std::unordered_map format_counter = CalculateFormat(used_cast_node_list, cast_node); + mindspore::HashMap format_counter = CalculateFormat(used_cast_node_list, cast_node); auto cast_input_format = AnfAlgo::GetPrevNodeOutputFormat(cast_node, 0); string convert_format = kOpFormat_DEFAULT; if (cast_input_format == kOpFormat_DEFAULT) { @@ -109,12 +109,12 @@ void ConvertCastFormat::ChangeCastFormat(const CNodePtr &cast_node, const FuncGr } } -std::unordered_map ConvertCastFormat::CalculateFormat( +mindspore::HashMap ConvertCastFormat::CalculateFormat( const std::shared_ptr>> &used_cast_node_list, const CNodePtr &cast_node) const { MS_EXCEPTION_IF_NULL(used_cast_node_list); MS_EXCEPTION_IF_NULL(cast_node); - std::unordered_map format_counter; + mindspore::HashMap format_counter; for (const auto &node_info : *used_cast_node_list) { MS_EXCEPTION_IF_NULL(node_info.first); auto cast_out_node = node_info.first->cast(); diff --git a/mindspore/ccsrc/backend/optimizer/ascend/format_type/convert_cast_format.h b/mindspore/ccsrc/backend/optimizer/ascend/format_type/convert_cast_format.h index 31b046037dc..3169fea27ae 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/format_type/convert_cast_format.h +++ b/mindspore/ccsrc/backend/optimizer/ascend/format_type/convert_cast_format.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -17,11 +17,11 @@ #define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_FORMAT_TYPE_CONVERT_CAST_FORMAT_H_ #include -#include #include #include #include +#include "utils/hash_map.h" #include "backend/optimizer/common/optimizer.h" namespace mindspore { @@ -34,7 +34,7 @@ class ConvertCastFormat : public PatternProcessPass { const AnfNodePtr Process(const FuncGraphPtr &func_graph, const AnfNodePtr &, const EquivPtr &) const override; private: - std::unordered_map CalculateFormat( + mindspore::HashMap CalculateFormat( const std::shared_ptr>> &used_cast_node_list, const CNodePtr &cast_node) const; void ChangeCastFormat(const CNodePtr &cast_node, const FuncGraphPtr &func_graph) const; diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/topk_split.cc b/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/topk_split.cc index 0aa4c3b5f1d..6797dbaea25 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/topk_split.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/topk_split.cc @@ -17,7 +17,7 @@ #include #include #include -#include +#include "utils/hash_set.h" #include "backend/optimizer/common/helper.h" #include "backend/kernel_compiler/kernel_build_info.h" #include "utils/utils.h" @@ -164,7 +164,7 @@ const AnfNodePtr TopKSplit::Process(const FuncGraphPtr &func_graph, const AnfNod auto new_value_node = std::make_shared(MakeValue(*data)); new_cnode->set_input(kTopkIndexK + 1, new_value_node); - std::unordered_set attr_index{kTopkIndexK}; + mindspore::HashSet attr_index{kTopkIndexK}; ConstInputToAttr(new_cnode, attr_index); auto indices_const = CreateValueNode(); new_cnode->add_input(indices_const); diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/input_to_output_registry.cc b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/input_to_output_registry.cc index 739fedfb299..c3393bc83fe 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/input_to_output_registry.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/input_to_output_registry.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -95,7 +95,7 @@ InputToOutputRegistry &InputToOutputRegistry::Instance() { void InputToOutputRegistry::Register(const InputToOutputRegister ®) { auto op_name = reg.op_name(); if (op_input_to_output_map_.find(op_name) == op_input_to_output_map_.end()) { - (void)op_input_to_output_map_.insert(make_pair(op_name, reg)); + (void)op_input_to_output_map_.emplace(op_name, reg); MS_LOG(DEBUG) << op_name << " input2output register successfully!"; } } @@ -105,7 +105,7 @@ void InputToOutputRegistry::Register(const std::string &op_name, const std::vect if (op_input_to_output_map_.find(op_name) == op_input_to_output_map_.end()) { InputToOutputRegister reg(op_name, pre_check_func); reg.set_input_indices(input_indices); - (void)op_input_to_output_map_.insert(make_pair(op_name, reg)); + (void)op_input_to_output_map_.emplace(op_name, reg); MS_LOG(DEBUG) << op_name << " input2output register successfully!"; } } diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/input_to_output_registry.h b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/input_to_output_registry.h index b38f89c761d..c516ea07518 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/input_to_output_registry.h +++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/input_to_output_registry.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,9 +16,9 @@ #ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_IR_FUSION_INPUT_TO_OUTPUT_REGISTRY_H_ #define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_IR_FUSION_INPUT_TO_OUTPUT_REGISTRY_H_ #include -#include #include #include +#include "utils/hash_map.h" #include "ir/anf.h" #include "utils/ms_utils.h" @@ -56,7 +56,7 @@ class InputToOutputRegistry { InputToOutputRegistry(); ~InputToOutputRegistry() = default; DISABLE_COPY_AND_ASSIGN(InputToOutputRegistry) - std::unordered_map op_input_to_output_map_; + mindspore::HashMap op_input_to_output_map_; }; } // namespace opt } // namespace mindspore diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_next_mv_rule.h b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_next_mv_rule.h index a160d6989e3..5a382a74236 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_next_mv_rule.h +++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_next_mv_rule.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
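Note: the two Register hunks above replace insert(make_pair(op_name, reg)) with emplace(op_name, reg). emplace forwards its arguments and constructs the stored element in place, so no temporary pair is built, and it is spelled identically on the std and robin_hood containers. A sketch of the guarded-insert pattern, with a hypothetical payload type:

    #include <string>
    #include "utils/hash_map.h"

    struct Reg { int index = 0; };  // hypothetical stand-in for the registry entry

    void RegisterOnce(mindspore::HashMap<std::string, Reg> *map,
                      const std::string &op_name, const Reg &reg) {
      if (map->find(op_name) == map->end()) {
        (void)map->emplace(op_name, reg);  // constructs the element in place
      }
    }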
@@ -20,8 +20,8 @@ #include #include #include -#include #include +#include "utils/hash_map.h" #include "ir/anf.h" #include "backend/optimizer/common/pattern_engine.h" #include "backend/optimizer/common/helper.h" diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_update_with_lr_v2.h b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_update_with_lr_v2.h index ece158974e3..fe1e5d9e131 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_update_with_lr_v2.h +++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_update_with_lr_v2.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,8 +20,8 @@ #include #include #include -#include #include +#include "utils/hash_map.h" #include "ir/anf.h" #include "backend/optimizer/common/pattern_engine.h" #include "backend/optimizer/common/helper.h" diff --git a/mindspore/ccsrc/backend/optimizer/common/const_input_to_attr_registry.cc b/mindspore/ccsrc/backend/optimizer/common/const_input_to_attr_registry.cc index 4046cc0d5c5..df80598bb97 100644 --- a/mindspore/ccsrc/backend/optimizer/common/const_input_to_attr_registry.cc +++ b/mindspore/ccsrc/backend/optimizer/common/const_input_to_attr_registry.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -94,17 +94,17 @@ ConstInputToAttrInfoRegistry &ConstInputToAttrInfoRegistry::Instance() { void ConstInputToAttrInfoRegistry::Register(const ConstInputToAttrInfoRegister ®) { auto op_name = reg.GetOpName(); if (op_input_to_attr_map_.find(op_name) == op_input_to_attr_map_.end()) { - (void)op_input_to_attr_map_.insert(make_pair(op_name, reg)); + (void)op_input_to_attr_map_.emplace(op_name, reg); MS_LOG(DEBUG) << op_name << " const2attr register successfully!"; } } void ConstInputToAttrInfoRegistry::Register(const std::string &op_name, - const std::unordered_set &input_attr_set) { + const mindspore::HashSet &input_attr_set) { if (op_input_to_attr_map_.find(op_name) == op_input_to_attr_map_.end()) { ConstInputToAttrInfoRegister reg(op_name); (void)reg.SetConstInputToAttr(input_attr_set); - (void)op_input_to_attr_map_.insert(make_pair(op_name, reg)); + (void)op_input_to_attr_map_.emplace(op_name, reg); MS_LOG(DEBUG) << op_name << " const2attr register successfully!"; } } diff --git a/mindspore/ccsrc/backend/optimizer/common/const_input_to_attr_registry.h b/mindspore/ccsrc/backend/optimizer/common/const_input_to_attr_registry.h index 5bcc23dfd22..13aa9a68c92 100644 --- a/mindspore/ccsrc/backend/optimizer/common/const_input_to_attr_registry.h +++ b/mindspore/ccsrc/backend/optimizer/common/const_input_to_attr_registry.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,9 +16,9 @@ #ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_COMMON_CONST_INPUT_TO_ATTR_REGISTRY_H_ #define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_COMMON_CONST_INPUT_TO_ATTR_REGISTRY_H_ #include -#include -#include +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "utils/ms_utils.h" namespace mindspore { @@ -33,31 +33,31 @@ class ConstInputToAttrInfoRegister { return *this; } - ConstInputToAttrInfoRegister &SetConstInputToAttr(const std::unordered_set &input_attr_set) { + ConstInputToAttrInfoRegister &SetConstInputToAttr(const mindspore::HashSet &input_attr_set) { (void)input_attr_set_.insert(input_attr_set.begin(), input_attr_set.end()); return *this; } - const std::unordered_set &GetConstInputAttrInfo() const { return input_attr_set_; } + const mindspore::HashSet &GetConstInputAttrInfo() const { return input_attr_set_; } const std::string &GetOpName() const { return op_name_; } private: std::string op_name_; - std::unordered_set input_attr_set_; + mindspore::HashSet input_attr_set_; }; class ConstInputToAttrInfoRegistry { public: static ConstInputToAttrInfoRegistry &Instance(); void Register(const ConstInputToAttrInfoRegister ®); - void Register(const std::string &op_name, const std::unordered_set &input_attr_set); + void Register(const std::string &op_name, const mindspore::HashSet &input_attr_set); bool GetRegisterByOpName(const std::string &op_name, ConstInputToAttrInfoRegister *reg) const; private: ConstInputToAttrInfoRegistry(); ~ConstInputToAttrInfoRegistry() = default; DISABLE_COPY_AND_ASSIGN(ConstInputToAttrInfoRegistry) - std::unordered_map op_input_to_attr_map_; + mindspore::HashMap op_input_to_attr_map_; }; struct ConstInputToAttrInfoReceiver { diff --git a/mindspore/ccsrc/backend/optimizer/common/helper.cc b/mindspore/ccsrc/backend/optimizer/common/helper.cc index 0ecfbfa6f1e..155529a99ec 100644 --- a/mindspore/ccsrc/backend/optimizer/common/helper.cc +++ b/mindspore/ccsrc/backend/optimizer/common/helper.cc @@ -17,11 +17,11 @@ #include "backend/optimizer/common/helper.h" #include #include -#include #include #include #include #include +#include "utils/hash_set.h" #include "utils/utils.h" #include "base/base_ref.h" #include "backend/session/anf_runtime_algorithm.h" @@ -74,7 +74,7 @@ bool IsDepend(const FuncGraph &graph, const AnfNodePtr &node, const std::vector< FuncGraphManagerPtr manager = graph.manager(); MS_EXCEPTION_IF_NULL(manager); - std::unordered_set seen_node; + mindspore::HashSet seen_node; std::deque todo{node}; while (!todo.empty()) { AnfNodePtr nd = todo.front(); @@ -299,7 +299,7 @@ bool IsNopNode(const AnfNodePtr &node) { return false; } - static std::unordered_set nop_nodes = {prim::kPrimReshape->name(), kExpandDimsOpName, + static mindspore::HashSet nop_nodes = {prim::kPrimReshape->name(), kExpandDimsOpName, prim::kPrimSqueeze->name(), prim::kPrimFlatten->name(), kFlattenGradOpName, prim::kPrimReformat->name()}; if (node == nullptr || !node->isa()) { @@ -580,7 +580,7 @@ ValueNodePtr CreateShapeValueNode(const FuncGraphPtr &func_graph, const std::vec return shape_value_node; } -void ConstInputToAttr(const CNodePtr &cnode, const std::unordered_set &input_attrs) { +void ConstInputToAttr(const CNodePtr &cnode, const mindspore::HashSet &input_attrs) { MS_EXCEPTION_IF_NULL(cnode); std::vector new_inputs; auto primitive = AnfAlgo::GetCNodePrimitive(cnode); @@ -1074,7 +1074,7 @@ int64_t GetNodeOutputTotalUsedNum(const session::KernelGraph &kernel_graph, cons return std::accumulate(output_used_num.begin(), output_used_num.end(), int64_t(0)); } -void 
GetCustomOpAttrIndex(const PrimitivePtr &primitive, std::unordered_set *indexes) { +void GetCustomOpAttrIndex(const PrimitivePtr &primitive, mindspore::HashSet *indexes) { if (primitive == nullptr || primitive->name() != prim::kPrimCustom->name()) { return; } diff --git a/mindspore/ccsrc/backend/optimizer/common/helper.h b/mindspore/ccsrc/backend/optimizer/common/helper.h index 9c30ba6758a..4d6020bf783 100644 --- a/mindspore/ccsrc/backend/optimizer/common/helper.h +++ b/mindspore/ccsrc/backend/optimizer/common/helper.h @@ -21,7 +21,7 @@ #include #include #include -#include +#include "utils/hash_set.h" #include "ir/func_graph.h" #include "backend/session/kernel_graph.h" #include "utils/ms_utils.h" @@ -194,7 +194,7 @@ std::shared_ptr>> GetRealNodeUsedListByOu size_t output_index); bool IsNotRealUsedByOthers(const FuncGraphPtr &graph, const AnfNodePtr &node); -void ConstInputToAttr(const CNodePtr &cnode, const std::unordered_set &input_attrs); +void ConstInputToAttr(const CNodePtr &cnode, const mindspore::HashSet &input_attrs); bool AnfEqual(const BaseRef &a, const BaseRef &b); @@ -236,7 +236,7 @@ std::vector GetNodeOutputUsedNum(const session::KernelGraph &kernel_gra int64_t GetNodeOutputTotalUsedNum(const session::KernelGraph &kernel_graph, const AnfNodePtr &node); // Get custom operator attr input indexes -void GetCustomOpAttrIndex(const PrimitivePtr &primitive, std::unordered_set *indexes); +void GetCustomOpAttrIndex(const PrimitivePtr &primitive, mindspore::HashSet *indexes); } // namespace opt } // namespace mindspore #endif // MINDSPORE_CCSRC_BACKEND_OPTIMIZER_COMMON_HELPER_H_ diff --git a/mindspore/ccsrc/backend/optimizer/common/node_pass.cc b/mindspore/ccsrc/backend/optimizer/common/node_pass.cc index 455701023a5..bb69f8db3a6 100644 --- a/mindspore/ccsrc/backend/optimizer/common/node_pass.cc +++ b/mindspore/ccsrc/backend/optimizer/common/node_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
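Note: the helper.cc/helper.h hunks above change only the set type; callers keep using count() and insert(). A sketch of the attr-index filtering these helpers support (illustrative names, not the real helper; the real ConstInputToAttr walks CNode inputs):

    #include <cstddef>
    #include <vector>
    #include "utils/hash_set.h"

    // Keep only the inputs whose position is not registered as an attribute.
    std::vector<int> FilterAttrInputs(const std::vector<int> &inputs,
                                      const mindspore::HashSet<size_t> &attr_index) {
      std::vector<int> kept;
      for (size_t i = 0; i < inputs.size(); ++i) {
        if (attr_index.count(i) == 0) {
          kept.push_back(inputs[i]);
        }
      }
      return kept;
    }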
@@ -15,12 +15,12 @@ */ #include "backend/optimizer/common/node_pass.h" -#include -#include #include #include "ir/anf.h" #include "ir/func_graph.h" #include "ir/manager.h" +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "backend/session/anf_runtime_algorithm.h" namespace mindspore { @@ -29,7 +29,7 @@ const size_t kSwitchBranchIndex = 2; const size_t kCallArgsIndex = 1; const size_t kPartialArgsIndex = 1; -void AddOutputAndCallerToMap(const CNodePtr &cnode, std::unordered_map *out_caller_map) { +void AddOutputAndCallerToMap(const CNodePtr &cnode, mindspore::HashMap *out_caller_map) { MS_EXCEPTION_IF_NULL(cnode); MS_EXCEPTION_IF_NULL(out_caller_map); auto inputs = cnode->inputs(); @@ -56,8 +56,8 @@ bool NodePass::Run(const FuncGraphPtr &func_graph) { MS_EXCEPTION_IF_NULL(manager); manager->AddFuncGraph(func_graph); - std::unordered_map subgraph_out_caller_map = {}; - std::unordered_set seen_node; + mindspore::HashMap subgraph_out_caller_map = {}; + mindspore::HashSet seen_node; std::deque> todo{{func_graph->output(), func_graph}}; bool changes = false; while (!todo.empty()) { diff --git a/mindspore/ccsrc/backend/optimizer/common/optimizer.h b/mindspore/ccsrc/backend/optimizer/common/optimizer.h index 6ef9aae4361..468a9b56d20 100644 --- a/mindspore/ccsrc/backend/optimizer/common/optimizer.h +++ b/mindspore/ccsrc/backend/optimizer/common/optimizer.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,8 +19,8 @@ #include #include #include -#include +#include "utils/hash_map.h" #include "ir/anf.h" #include "ir/func_graph.h" #include "ir/primitive.h" diff --git a/mindspore/ccsrc/backend/optimizer/common/pattern_engine.h b/mindspore/ccsrc/backend/optimizer/common/pattern_engine.h index d8d772cfff8..3be6edbc351 100644 --- a/mindspore/ccsrc/backend/optimizer/common/pattern_engine.h +++ b/mindspore/ccsrc/backend/optimizer/common/pattern_engine.h @@ -1,7 +1,7 @@ /** * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/). * - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -23,8 +23,6 @@ #include #include #include -#include -#include #include #include #include @@ -33,6 +31,8 @@ #include #include +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "backend/optimizer/common/visit.h" #include "base/base.h" #include "utils/log_adapter.h" @@ -158,7 +158,7 @@ std::ostream &operator<<(std::ostream &os, const VarPtr &var); using Equiv = std::map; using EquivPtr = std::shared_ptr; -using PrimitiveVarMap = std::unordered_map; +using PrimitiveVarMap = mindspore::HashMap; using PrimitiveVarMapPtr = std::shared_ptr; inline bool DefaultTypeEq(const BaseRef &x, const BaseRef &y) { return x.type() == y.type(); } diff --git a/mindspore/ccsrc/backend/optimizer/common/visit.h b/mindspore/ccsrc/backend/optimizer/common/visit.h index a0494635687..855c63cc982 100644 --- a/mindspore/ccsrc/backend/optimizer/common/visit.h +++ b/mindspore/ccsrc/backend/optimizer/common/visit.h @@ -1,7 +1,7 @@ /** * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/). 
* - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,13 +19,13 @@ #ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_COMMON_VISIT_H_ #define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_COMMON_VISIT_H_ -#include #include #include #include #include #include +#include "utils/hash_map.h" #include "base/base.h" #include "base/base_ref.h" diff --git a/mindspore/ccsrc/backend/optimizer/cpu/insert_format_transform_op.cc b/mindspore/ccsrc/backend/optimizer/cpu/insert_format_transform_op.cc index 6a8a5deeb60..2524fb055a0 100644 --- a/mindspore/ccsrc/backend/optimizer/cpu/insert_format_transform_op.cc +++ b/mindspore/ccsrc/backend/optimizer/cpu/insert_format_transform_op.cc @@ -16,12 +16,12 @@ #include "backend/optimizer/cpu/insert_format_transform_op.h" -#include #include #include #include #include #include +#include "utils/hash_set.h" #include "backend/kernel_compiler/kernel_build_info.h" #include "backend/session/anf_runtime_algorithm.h" #include "backend/session/kernel_graph.h" @@ -172,7 +172,7 @@ void InsertTransformOpForOutput(const FuncGraphPtr &graph, const AnfNodePtr &nod } } // namespace -const std::unordered_set kChannelLastKernel = {prim::kPrimBiasAdd->name()}; +const mindspore::HashSet kChannelLastKernel = {prim::kPrimBiasAdd->name()}; bool InsertFormatTransformOpCPU::Run(const FuncGraphPtr &graph) { MS_EXCEPTION_IF_NULL(graph); diff --git a/mindspore/ccsrc/backend/optimizer/gpu/adjust_depend_for_parallel_optimizer_recompute_all_gather_fusion.cc b/mindspore/ccsrc/backend/optimizer/gpu/adjust_depend_for_parallel_optimizer_recompute_all_gather_fusion.cc index 6b7f8ba7859..4119e71ed5d 100644 --- a/mindspore/ccsrc/backend/optimizer/gpu/adjust_depend_for_parallel_optimizer_recompute_all_gather_fusion.cc +++ b/mindspore/ccsrc/backend/optimizer/gpu/adjust_depend_for_parallel_optimizer_recompute_all_gather_fusion.cc @@ -16,9 +16,9 @@ #include "backend/optimizer/gpu/adjust_depend_for_parallel_optimizer_recompute_all_gather_fusion.h" -#include #include +#include "utils/hash_map.h" #include "backend/session/anf_runtime_algorithm.h" #include "utils/utils.h" @@ -26,7 +26,7 @@ namespace mindspore { namespace opt { bool AdjustDependForParallelOptimizerRecomputeAllGatherFusion::Run(const FuncGraphPtr &graph) { MS_EXCEPTION_IF_NULL(graph); - std::unordered_map forward_allgather_recompute_value_in_fusion_group; + mindspore::HashMap forward_allgather_recompute_value_in_fusion_group; std::vector node_list = TopoSort(graph->get_return()); std::vector parallel_optimizer_recompute_allgather_fusion_ids; std::vector parallel_optimizer_recompute_allgathers; diff --git a/mindspore/ccsrc/backend/optimizer/gpu/insert_cast_gpu.cc b/mindspore/ccsrc/backend/optimizer/gpu/insert_cast_gpu.cc index 5d01bed2bb0..50c94d61222 100644 --- a/mindspore/ccsrc/backend/optimizer/gpu/insert_cast_gpu.cc +++ b/mindspore/ccsrc/backend/optimizer/gpu/insert_cast_gpu.cc @@ -18,8 +18,8 @@ #include #include #include -#include +#include "utils/hash_set.h" #include "backend/optimizer/common/helper.h" #include "backend/session/anf_runtime_algorithm.h" #include "ir/primitive.h" @@ -27,7 +27,7 @@ namespace mindspore { namespace opt { -const std::unordered_set kConv3DKernel = {prim::kPrimConv3DBackpropInput->name(), +const mindspore::HashSet kConv3DKernel = {prim::kPrimConv3DBackpropInput->name(), prim::kPrimConv3DBackpropFilter->name(), prim::kPrimConv3D->name(), 
prim::kPrimConv3DTranspose->name()}; diff --git a/mindspore/ccsrc/backend/optimizer/graph_kernel/arithmetic_simplify.cc b/mindspore/ccsrc/backend/optimizer/graph_kernel/arithmetic_simplify.cc index 6ad5d4ea711..107ea452bad 100644 --- a/mindspore/ccsrc/backend/optimizer/graph_kernel/arithmetic_simplify.cc +++ b/mindspore/ccsrc/backend/optimizer/graph_kernel/arithmetic_simplify.cc @@ -18,12 +18,12 @@ #include #include #include -#include #include #include #include -#include +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "backend/optimizer/graph_kernel/graph_kernel_helper.h" #include "backend/optimizer/graph_kernel/core/graph_builder.h" #include "backend/session/anf_runtime_algorithm.h" @@ -32,7 +32,7 @@ namespace mindspore::graphkernel { // operator which follows commutative rules -static std::unordered_set commutative_ops{"Add", "Mul"}; +static mindspore::HashSet commutative_ops{"Add", "Mul"}; class PatternNode; using PatternNodePtr = std::shared_ptr; @@ -51,8 +51,8 @@ class PatternNode { std::vector inputs_; }; -using ParaMap = std::unordered_map; -using ConstMap = std::unordered_map; +using ParaMap = mindspore::HashMap; +using ConstMap = mindspore::HashMap; /* This class works to store a kind of pattern tree; it needs a string expression to construct; Ex."Pow(Exp(A),B)=Exp(Mul(A,B))" @@ -86,10 +86,10 @@ class PatternTree { protected: // set attributes for certain pattern node if needed; - virtual std::unordered_map SetAttributes(const inner::NodePtr &) { + virtual mindspore::HashMap SetAttributes(const inner::NodePtr &) { auto right_pattern = std::make_shared(); DfsTraverse(right_pattern, rhs_root_); - std::unordered_map attrs_map; + mindspore::HashMap attrs_map; for (auto &i : (*right_pattern)) { attrs_map[i] = {}; } @@ -319,7 +319,7 @@ inner::NodePtr PatternTree::AlterGraph(const std::shared_ptr ¶_to_r DfsTraverse(res, rhs_root_); auto all_attrs = SetAttributes(origin_root); inner::LiteGraph::GraphBuilder gb(""); - std::unordered_map pattern_to_ref; + mindspore::HashMap pattern_to_ref; for (auto &n : (*res)) { if (PatternNodeType(n->op()) != inner::NType::Primitive) continue; inner::NodePtrList inputs; @@ -367,7 +367,7 @@ class ExtraReduce1PatternTree : public PatternTree { return (GetValue((origin_root->inputs()[0])->attrs().find("keep_dims")->second) == GetValue(origin_root->attrs().find("keep_dims")->second)); } - std::unordered_map SetAttributes(const inner::NodePtr &origin_root) override { + mindspore::HashMap SetAttributes(const inner::NodePtr &origin_root) override { auto attrs_map = PatternTree::SetAttributes(origin_root); std::vector axis; std::set axis_set; @@ -384,7 +384,7 @@ class ExtraReduce1PatternTree : public PatternTree { auto first_axis = GetValue>(first_reduce->attrs().find("axis")->second); auto second_axis = GetValue>(origin_root->attrs().find("axis")->second); std::set st(first_axis.begin(), first_axis.end()); - std::unordered_map mp; + mindspore::HashMap mp; int64_t shift = 0; for (int64_t n = 0; n < SizeToLong(first_reduce->inputs()[0]->shape.size()); n++) { if (st.find(n) != st.end()) { @@ -409,7 +409,7 @@ class ExtraReduce2PatternTree : public PatternTree { ~ExtraReduce2PatternTree() = default; protected: - std::unordered_map SetAttributes(const inner::NodePtr &origin_root) override { + mindspore::HashMap SetAttributes(const inner::NodePtr &origin_root) override { auto attrs_map = PatternTree::SetAttributes(origin_root); bool keep_dims = GetValue(origin_root->attrs().find("keep_dims")->second); auto axis = 
GetValue>(origin_root->attrs().find("axis")->second); @@ -427,7 +427,7 @@ class ExtraReduce2PatternTree : public PatternTree { this case. */ bool OutsideRely(const inner::NodePtrList &nodes, const inner::NodePtr &root) { - std::unordered_set nodes_can_simplify; + mindspore::HashSet nodes_can_simplify; std::for_each(nodes.begin(), nodes.end(), [&nodes_can_simplify](auto n) { nodes_can_simplify.insert(n.get()); }); for (auto &n : nodes) { if (n == root) { @@ -526,12 +526,12 @@ static std::vector expressions = { {62, "CImag(Complex(A,B))=B", EXPR_PATTERN(PatternTree)}, }; -std::unordered_map> GetExpressions() { +mindspore::HashMap> GetExpressions() { const auto &flags = GraphKernelFlags::GetInstance(); - std::unordered_map> expression_map; - std::unordered_set enable_ids{flags.enable_simplify_exprs_only.begin(), + mindspore::HashMap> expression_map; + mindspore::HashSet enable_ids{flags.enable_simplify_exprs_only.begin(), flags.enable_simplify_exprs_only.end()}; - std::unordered_set disable_ids{flags.disable_simplify_exprs.begin(), flags.disable_simplify_exprs.end()}; + mindspore::HashSet disable_ids{flags.disable_simplify_exprs.begin(), flags.disable_simplify_exprs.end()}; for (auto &e : expressions) { if (!enable_ids.empty()) { if (enable_ids.count(std::to_string(e.id)) == 0) continue; diff --git a/mindspore/ccsrc/backend/optimizer/graph_kernel/arithmetic_simplify.h b/mindspore/ccsrc/backend/optimizer/graph_kernel/arithmetic_simplify.h index 76614532c6d..32da7fe9b99 100644 --- a/mindspore/ccsrc/backend/optimizer/graph_kernel/arithmetic_simplify.h +++ b/mindspore/ccsrc/backend/optimizer/graph_kernel/arithmetic_simplify.h @@ -18,9 +18,9 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "backend/optimizer/common/optimizer.h" #include "ir/func_graph.h" #include "backend/optimizer/graph_kernel/model/lite_graph.h" @@ -37,7 +37,7 @@ class ArithmeticSimplify : public opt::Pass { private: bool DoArithmeticTrans(const inner::LiteGraphPtr &litegraph); bool DoConstantFold(const inner::LiteGraphPtr &litegraph); - std::unordered_map> expressions_map_; + mindspore::HashMap> expressions_map_; }; using ArithmeticSimplifyPtr = std::shared_ptr; } // namespace mindspore::graphkernel diff --git a/mindspore/ccsrc/backend/optimizer/graph_kernel/expanders/expander_factory.h b/mindspore/ccsrc/backend/optimizer/graph_kernel/expanders/expander_factory.h index b127eb38ec5..cef5a6e9170 100644 --- a/mindspore/ccsrc/backend/optimizer/graph_kernel/expanders/expander_factory.h +++ b/mindspore/ccsrc/backend/optimizer/graph_kernel/expanders/expander_factory.h @@ -16,11 +16,11 @@ #ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_EXPANDERS_EXPANDER_FACTORY_H_ #define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_EXPANDERS_EXPANDER_FACTORY_H_ -#include #include #include #include +#include "utils/hash_map.h" #include "backend/optimizer/graph_kernel/expanders/utils.h" namespace mindspore::graphkernel::expanders { @@ -44,7 +44,7 @@ class OpExpanderFactory { void Register(const std::string &op, const RegFunc &func) { creators[op] = func; } private: - std::unordered_map creators; + mindspore::HashMap creators; }; class OpExpanderRegister { diff --git a/mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_cluster.cc b/mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_cluster.cc index fe12779a1ed..7d62de1ca5f 100644 --- a/mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_cluster.cc +++ b/mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_cluster.cc @@ -21,6 +21,7 @@ 
#include #include +#include "utils/hash_map.h" #include "base/core_ops.h" #include "ir/graph_utils.h" #include "utils/anf_utils.h" @@ -137,7 +138,7 @@ class Graph { size_t seed_{0}; // visited flag of dfs. size_t max_node_id_; // largest node id of a cluster - Cluster(size_t node_id, const AnfNodePtr &node, const std::unordered_map &node_idx_map) + Cluster(size_t node_id, const AnfNodePtr &node, const mindspore::HashMap &node_idx_map) : cluster_id_(node_id), max_node_id_(node_id) { auto cnode = node->cast(); MS_EXCEPTION_IF_NULL(cnode); @@ -170,7 +171,7 @@ class Graph { public: // Init and build graph - Graph(const AnfNodePtrList &nodes, const std::unordered_map &node_idx_map) { + Graph(const AnfNodePtrList &nodes, const mindspore::HashMap &node_idx_map) { clusters_.reserve(nodes.size()); for (size_t i = 0; i < nodes.size(); i++) { (void)clusters_.emplace_back(i, nodes[i], node_idx_map); diff --git a/mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_cluster.h b/mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_cluster.h index ac65dcdc15d..6599ab200aa 100644 --- a/mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_cluster.h +++ b/mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_cluster.h @@ -17,9 +17,10 @@ #define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_GRAPH_KERNEL_CLUSTER_H_ #include -#include #include +#include +#include "utils/hash_map.h" #include "ir/anf.h" #include "backend/optimizer/common/optimizer.h" @@ -51,7 +52,7 @@ class GraphKernelCluster : public opt::Pass { GraphPtr graph_{nullptr}; std::vector nodes_; - std::unordered_map node_idx_map_; + mindspore::HashMap node_idx_map_; std::stringstream dump_buf_; std::vector op_list_; }; diff --git a/mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_cse.cc b/mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_cse.cc index 84914a8e1a8..89589e10a37 100644 --- a/mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_cse.cc +++ b/mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_cse.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
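Note: graph_kernel_cluster builds a node-to-index map once and hands it to the Cluster and Graph constructors by const reference; from the node_idx_map_ member in graph_kernel_cluster.h it is assumed here to map AnfNodePtr to size_t. A sketch of that setup:

    #include <vector>
    #include "ir/anf.h"
    #include "utils/hash_map.h"

    mindspore::HashMap<AnfNodePtr, size_t> BuildNodeIndexMap(const std::vector<AnfNodePtr> &nodes) {
      mindspore::HashMap<AnfNodePtr, size_t> node_idx_map;
      node_idx_map.reserve(nodes.size());  // one rehash up front instead of many
      for (size_t i = 0; i < nodes.size(); ++i) {
        (void)node_idx_map.emplace(nodes[i], i);
      }
      return node_idx_map;
    }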
@@ -51,17 +51,16 @@ bool IsCNodePrimitveEqual(const CNodePtr &main, const CNodePtr &node, const std: return false; } - auto all = std::all_of(main_attrs.begin(), main_attrs.end(), - [&node_attrs](const std::pair &item) -> bool { - if (item.second == nullptr) { - return false; - } - auto iter = node_attrs.find(item.first); - if (iter == node_attrs.end()) { - return false; - } - return *item.second == *iter->second; - }); + auto all = std::all_of(main_attrs.begin(), main_attrs.end(), [&node_attrs](const auto &item) -> bool { + if (item.second == nullptr) { + return false; + } + auto iter = node_attrs.find(item.first); + if (iter == node_attrs.end()) { + return false; + } + return *item.second == *iter->second; + }); return all; } diff --git a/mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_helper.cc b/mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_helper.cc index dc688c48515..ab2707fa56d 100644 --- a/mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_helper.cc +++ b/mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_helper.cc @@ -19,9 +19,9 @@ #include #include #include -#include #include +#include "utils/hash_set.h" #include "backend/kernel_compiler/common_utils.h" #include "backend/kernel_compiler/akg/akg_kernel_json_generator.h" #include "backend/kernel_compiler/akg/akg_kernel_json_decoder.h" diff --git a/mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_helper.h b/mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_helper.h index 3a64a19b351..0ae433197b0 100644 --- a/mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_helper.h +++ b/mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_helper.h @@ -21,9 +21,9 @@ #include #include #include -#include #include #include +#include "utils/hash_set.h" #include "ir/anf.h" #include "ir/func_graph.h" #include "ir/primitive.h" diff --git a/mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_splitter.cc b/mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_splitter.cc index 38d2b86cc11..d13f13f9956 100644 --- a/mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_splitter.cc +++ b/mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_splitter.cc @@ -17,11 +17,11 @@ #include #include #include -#include #include #include #include -#include +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "frontend/optimizer/irpass.h" #include "pipeline/jit/parse/python_adapter.h" #include "backend/session/anf_runtime_algorithm.h" @@ -178,7 +178,7 @@ bool SplitNodesDecoder::DecodeSplitNodes(const nlohmann::json &kernel_json, namespace { void TraverseFuncGraphFromCNode(const CNodePtr &cnode, const std::function &callback) { - std::unordered_set visited; + mindspore::HashSet visited; std::queue que; que.push(cnode); visited.insert(cnode); @@ -219,8 +219,8 @@ class Area { ~Area() = default; // Set the external inputs of spy as a Parameter. 
- void CreateParameters(const FuncGraphPtr &func_graph, std::unordered_map *param_node_map) { - std::unordered_map node_param_map; + void CreateParameters(const FuncGraphPtr &func_graph, mindspore::HashMap *param_node_map) { + mindspore::HashMap node_param_map; for (auto node : this->spy_cnodes_) { auto cnode = node->cast(); MS_EXCEPTION_IF_NULL(cnode); @@ -232,7 +232,7 @@ class Area { auto new_param = std::make_shared(func_graph); new_param->set_abstract(in_node->abstract()); func_graph->add_parameter(new_param); - node_param_map.insert(std::make_pair(in_node, new_param)); + (void)node_param_map.emplace(in_node, new_param); cnode->set_input(i, new_param); } else { cnode->set_input(i, it->second); @@ -241,13 +241,13 @@ class Area { } this->spy_cnodes_.clear(); // spy list is not useful anymore for (auto &&elem : node_param_map) { - param_node_map->insert(std::make_pair(elem.second, elem.first)); + (void)param_node_map->emplace(elem.second, elem.first); } return; } // Make a return node for traitor nodes. - void CreateReturnNode(const FuncGraphPtr &func_graph, std::unordered_map *tuple_node_index) { + void CreateReturnNode(const FuncGraphPtr &func_graph, mindspore::HashMap *tuple_node_index) { // If there's no traitor in the area, it means that this area is the last part // of the original FuncGraph, it already contains the original Return node. if (traitor_nodes_.empty()) { @@ -268,7 +268,7 @@ class Area { AbstractBasePtrList abstracts; size_t i = 0; for (auto &traitor : traitor_nodes_) { - tuple_node_index->insert(std::make_pair(traitor, i++)); + (void)tuple_node_index->emplace(traitor, i++); maketuple_inputs.emplace_back(traitor); abstracts.emplace_back(traitor->abstract()); } @@ -293,7 +293,7 @@ class Area { } } - const std::unordered_set &nodes() const { return nodes_; } + const mindspore::HashSet &nodes() const { return nodes_; } const std::vector &spy_cnodes() const { return spy_cnodes_; } private: @@ -301,7 +301,7 @@ class Area { bool IsExternalCNode(const AnfNodePtr &node) const { return node->isa() && this->nodes_.count(node) == 0; } // nodes in this area - std::unordered_set nodes_; + mindspore::HashSet nodes_; // if a node's output is used by other Area, it's a traitor std::vector traitor_nodes_; // if a node use other Area's output, it's a spy @@ -339,7 +339,7 @@ class AreaGraph { for (auto index : topo_order_) { auto ¤t_area = areas_[index]; auto sub_func_graph = std::make_shared(); - std::unordered_map param_node_map; + mindspore::HashMap param_node_map; current_area.CreateParameters(sub_func_graph, ¶m_node_map); current_area.CreateReturnNode(sub_func_graph, &node_index_in_returned_tuple_); @@ -407,7 +407,7 @@ class AreaGraph { // Make a CNode in main graph to hold the sub_func_graph. 
CNodePtr CreateMainCNode(const FuncGraphPtr &main_func_graph, const FuncGraphPtr &sub_func_graph, const std::vector &main_cnodes, - const std::unordered_map ¶m_node_map) { + const mindspore::HashMap ¶m_node_map) { TraceGuard guard(std::make_shared(sub_func_graph->debug_info())); AnfNodePtrList main_cnode_inputs = {NewValueNode(sub_func_graph)}; for (const auto ¶m : sub_func_graph->parameters()) { @@ -452,9 +452,9 @@ class AreaGraph { // Topological order of areas std::vector topo_order_; // Map AnfNode to Area id - std::unordered_map node_area_map_; + mindspore::HashMap node_area_map_; // Map the nodes to their index if there are multiple value in an area - std::unordered_map node_index_in_returned_tuple_; + mindspore::HashMap node_index_in_returned_tuple_; }; class SplitSchemer { @@ -545,7 +545,8 @@ class Splitter { if (param == nullptr) return; auto it = this->param_to_main_graph_node_map_.find(param); if (it != this->param_to_main_graph_node_map_.end()) { - cnode->add_input(it->second); + auto input = it->second; + cnode->add_input(input); sub_func_graph->add_parameter(param); // Avoid repeating parameters. this->param_to_main_graph_node_map_.erase(it); @@ -561,7 +562,7 @@ class Splitter { auto output = func_graph->output()->cast(); MS_EXCEPTION_IF_NULL(output); const auto ¶meters = func_graph->parameters(); - std::unordered_map param_input; + mindspore::HashMap param_input; for (size_t i = 0; i < parameters.size(); ++i) { param_input[parameters[i]] = inputs[i + 1]; } @@ -590,7 +591,7 @@ class Splitter { // For multiple output kernel, to avoid returning Parameter, the last MakeTuple was distribute to // a new FuncGraph, just inline the last MakeTuple node. std::vector tmp_subgraph_cnodes; - std::unordered_map replace_map; + mindspore::HashMap replace_map; for (size_t i = 0; i < new_subgraph_cnodes_.size(); ++i) { if (split_schemer_->NeedInline(cnodes_group_id[i])) { @@ -646,7 +647,7 @@ class Splitter { // Copy all Parameter and ValueNode that the area used. 
void AreaExpand(const Area &area) { - std::unordered_map old_valuenode_and_param_map; + mindspore::HashMap old_valuenode_and_param_map; for (auto sub_node : area.nodes()) { auto sub_cnode = sub_node->cast(); if (sub_cnode == nullptr) continue; @@ -682,7 +683,8 @@ class Splitter { ParameterPtr param_c = std::make_shared(func); param_c->set_name(param->name()); param_c->set_abstract(param->abstract()); - param_to_main_graph_node_map_[param_c] = param_to_main_graph_node_map_[param]; + auto node = param_to_main_graph_node_map_[param]; + param_to_main_graph_node_map_[param_c] = node; return param_c; } @@ -691,7 +693,7 @@ class Splitter { std::vector new_subgraph_cnodes_; // The cnode list that hold the new sub_func_graph std::vector inlined_nodes_; SplitSchemerPtr split_schemer_; - std::unordered_map param_to_main_graph_node_map_; + mindspore::HashMap param_to_main_graph_node_map_; }; class CostModelSplitSchemer : public SplitSchemer { @@ -837,7 +839,8 @@ class CostModelSplitSchemer : public SplitSchemer { MS_EXCEPTION_IF_NULL(output); if (IsValidKernelNode(output)) { - auto group_id = node_group_[ret_node] = node_group_[output]; + auto group_id = node_group_[output]; + node_group_[ret_node] = group_id; split_plan_[group_id].emplace_back(ret_node); return; } @@ -846,7 +849,8 @@ class CostModelSplitSchemer : public SplitSchemer { auto group_id = split_plan_.size(); split_plan_.emplace_back(AnfNodePtrList{output, ret_node}); need_inline_.emplace_back(1); - node_group_[ret_node] = node_group_[output] = group_id; + node_group_[output] = group_id; + node_group_[ret_node] = group_id; return; } } @@ -861,8 +865,9 @@ class CostModelSplitSchemer : public SplitSchemer { for (const auto &input : cnode->inputs()) { auto iter = node_group_.find(input); if (iter != node_group_.end()) { - node_group_[node] = iter->second; - split_plan_[iter->second].emplace_back(node); + auto group_id = iter->second; + node_group_[node] = group_id; + split_plan_[group_id].emplace_back(node); found = true; break; } @@ -885,7 +890,7 @@ class CostModelSplitSchemer : public SplitSchemer { std::shared_ptr func_graph_; AnfNodePtrList topo_all_nodes_; AnfNodePtrList topo_valid_nodes_; - std::unordered_map node_group_; + mindspore::HashMap node_group_; std::vector need_inline_; }; diff --git a/mindspore/ccsrc/backend/optimizer/graph_kernel/model/lite_graph.cc b/mindspore/ccsrc/backend/optimizer/graph_kernel/model/lite_graph.cc index d8dc9859f85..84e22cfba7b 100644 --- a/mindspore/ccsrc/backend/optimizer/graph_kernel/model/lite_graph.cc +++ b/mindspore/ccsrc/backend/optimizer/graph_kernel/model/lite_graph.cc @@ -19,12 +19,12 @@ #include #include #include -#include #include #include #include #include +#include "utils/hash_map.h" #include "backend/optimizer/graph_kernel/model/node.h" #include "backend/optimizer/graph_kernel/model/op_node.h" #include "backend/optimizer/graph_kernel/model/op_register.h" @@ -52,7 +52,7 @@ std::string LiteGraph::Dump() const { } const NodePtrList &LiteGraph::GetOrderedNodes() { - std::unordered_map outdegrees; + mindspore::HashMap outdegrees; std::function dfs; std::set visited; dfs = [&dfs, &outdegrees, &visited](const NodePtr &node) { diff --git a/mindspore/ccsrc/backend/optimizer/graph_kernel/model/lite_graph.h b/mindspore/ccsrc/backend/optimizer/graph_kernel/model/lite_graph.h index 57c10b6e58c..5cd7e787f15 100644 --- a/mindspore/ccsrc/backend/optimizer/graph_kernel/model/lite_graph.h +++ b/mindspore/ccsrc/backend/optimizer/graph_kernel/model/lite_graph.h @@ -19,10 +19,10 @@ #include #include #include 
-#include -#include #include #include +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "backend/optimizer/graph_kernel/model/node.h" #include "backend/optimizer/graph_kernel/model/op_node.h" diff --git a/mindspore/ccsrc/backend/optimizer/graph_kernel/model/node.cc b/mindspore/ccsrc/backend/optimizer/graph_kernel/model/node.cc index b8d54e4bd14..a5ca6881109 100644 --- a/mindspore/ccsrc/backend/optimizer/graph_kernel/model/node.cc +++ b/mindspore/ccsrc/backend/optimizer/graph_kernel/model/node.cc @@ -20,14 +20,14 @@ #include #include #include -#include #include #include -#include "mindspore/core/ir/dtype/type_id.h" -#include "mindspore/core/ir/value.h" -#include "mindspore/core/ir/tensor.h" -#include "mindspore/core/utils/shape_utils.h" +#include "utils/hash_map.h" +#include "ir/dtype/type_id.h" +#include "ir/value.h" +#include "ir/tensor.h" +#include "utils/shape_utils.h" #include "utils/utils.h" namespace mindspore::graphkernel::inner { diff --git a/mindspore/ccsrc/backend/optimizer/graph_kernel/model/node.h b/mindspore/ccsrc/backend/optimizer/graph_kernel/model/node.h index ce69adad5e3..ad9efacf6d4 100644 --- a/mindspore/ccsrc/backend/optimizer/graph_kernel/model/node.h +++ b/mindspore/ccsrc/backend/optimizer/graph_kernel/model/node.h @@ -21,13 +21,13 @@ #include #include #include -#include #include #include #include #include #include +#include "utils/hash_map.h" #include "mindspore/core/ir/dtype/type_id.h" #include "mindspore/core/ir/value.h" #include "mindspore/core/ir/tensor.h" @@ -45,7 +45,7 @@ enum class NType { using DFormat = std::string; using DShape = ShapeVector; -using DAttrs = std::unordered_map; +using DAttrs = mindspore::HashMap; struct NodeBase { DShape shape; @@ -95,13 +95,13 @@ class Node : public NodeBase, public std::enable_shared_from_this { const DAttrs &attrs() const { return attrs_; } const NodePtr &input(size_t i) const { return inputs_[i]; } const NodePtrList &inputs() const { return inputs_; } - const std::unordered_map> &users() const { return users_; } + const mindspore::HashMap> &users() const { return users_; } protected: std::string name_; DAttrs attrs_; NodePtrList inputs_; - std::unordered_map> users_; + mindspore::HashMap> users_; private: // the nodes' users are only maintained by AddInput/SetInput. 
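The graph_kernel_splitter.cc hunks above split one-liners such as 'cnode->add_input(it->second)' and 'node_group_[ret_node] = node_group_[output] = group_id' into a copy to a local followed by the mutating call. Open-addressing tables like robin_hood relocate elements on rehash, so a reference obtained from the map can dangle as soon as the same map is inserted into. A minimal sketch of the hazard, using std::unordered_map as a stand-in for the hash map alias and made-up names:

#include <unordered_map>

void CopyEntry(std::unordered_map<int, int> *m, int dst_key, int src_key) {
  // Hazardous form: (*m)[dst_key] = (*m)[src_key];
  // Since C++17 the right-hand operand is evaluated first and yields a
  // reference into the table; the left-hand operator[] may then insert and
  // rehash. std::unordered_map keeps references stable across rehash, but an
  // open-addressing table moves elements, so that reference would dangle.
  int value = (*m)[src_key];  // read the source into a local first
  (*m)[dst_key] = value;      // now the insertion cannot invalidate anything
}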
diff --git a/mindspore/ccsrc/backend/optimizer/graph_kernel/model/op_node.cc b/mindspore/ccsrc/backend/optimizer/graph_kernel/model/op_node.cc index c6762bd707f..66814c9b239 100644 --- a/mindspore/ccsrc/backend/optimizer/graph_kernel/model/op_node.cc +++ b/mindspore/ccsrc/backend/optimizer/graph_kernel/model/op_node.cc @@ -22,10 +22,10 @@ #include #include #include -#include -#include #include +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "backend/optimizer/graph_kernel/model/node.h" namespace mindspore::graphkernel::inner { @@ -122,7 +122,7 @@ tensor::TensorPtr CalcByOperator(const NodePtrList &inputs, const std::string &o return *static_cast(std::static_pointer_cast(i)->data()->data_c()); }); - std::unordered_map &)>> func_map = { + mindspore::HashMap &)>> func_map = { {"Add", [](const std::vector &n) { return n[0] + n[1]; }}, {"Sub", [](const std::vector &n) { return n[0] - n[1]; }}, {"Mul", [](const std::vector &n) { return n[0] * n[1]; }}, diff --git a/mindspore/ccsrc/backend/optimizer/graph_kernel/model/op_node.h b/mindspore/ccsrc/backend/optimizer/graph_kernel/model/op_node.h index 146f4935330..5cf0fd2bf10 100644 --- a/mindspore/ccsrc/backend/optimizer/graph_kernel/model/op_node.h +++ b/mindspore/ccsrc/backend/optimizer/graph_kernel/model/op_node.h @@ -20,9 +20,9 @@ #include #include #include -#include #include +#include "utils/hash_map.h" #include "backend/optimizer/graph_kernel/model/node.h" #include "ir/dtype/type.h" diff --git a/mindspore/ccsrc/backend/optimizer/graph_kernel/model/op_register.h b/mindspore/ccsrc/backend/optimizer/graph_kernel/model/op_register.h index 9d0b901ae8d..7d3a14ba56f 100644 --- a/mindspore/ccsrc/backend/optimizer/graph_kernel/model/op_register.h +++ b/mindspore/ccsrc/backend/optimizer/graph_kernel/model/op_register.h @@ -16,11 +16,11 @@ #ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_MODEL_OP_REGISTER_H_ #define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_MODEL_OP_REGISTER_H_ -#include #include #include #include +#include "utils/hash_map.h" #include "backend/optimizer/graph_kernel/model/node.h" namespace mindspore::graphkernel::inner { @@ -80,7 +80,7 @@ class OpRegistry { Register("StandardNormal", OP_CREATOR(StandardNormalOp)); } ~OpRegistry() = default; - std::unordered_map> creators; + mindspore::HashMap> creators; }; } // namespace mindspore::graphkernel::inner #endif diff --git a/mindspore/ccsrc/backend/optimizer/graph_kernel/reorder_ops.cc b/mindspore/ccsrc/backend/optimizer/graph_kernel/reorder_ops.cc index a7bd39b503f..c2e21af870d 100644 --- a/mindspore/ccsrc/backend/optimizer/graph_kernel/reorder_ops.cc +++ b/mindspore/ccsrc/backend/optimizer/graph_kernel/reorder_ops.cc @@ -18,7 +18,7 @@ #include #include #include -#include +#include "utils/hash_set.h" #include "base/core_ops.h" #include "utils/utils.h" #include "utils/log_adapter.h" @@ -30,7 +30,7 @@ namespace mindspore::graphkernel { namespace { bool IsTypeInsensitive(const CNodePtr &node) { // Nodes that will change the input data type will not seen as type insensitive nodes. 
- static std::unordered_set type_insensitive_op_list{ + static mindspore::HashSet type_insensitive_op_list{ prim::kPrimTransData, prim::kPrimTranspose, prim::kPrimExpandDims, prim::kPrimReshape, prim::kPrimSqueeze, prim::kPrimTile, prim::kPrimNeg, prim::kPrimRelu, prim::kPrimMaximum, prim::kPrimMinimum, prim::kPrimSelect}; @@ -150,7 +150,7 @@ void ReorderOps::SetTypeInsensitiveNodeInputs(const CNodePtr &node, const std::v new_inputs->resize(0); } new_inputs->push_back(node->input(0)); - std::unordered_set indexes_set(indexes.begin(), indexes.end()); + mindspore::HashSet indexes_set(indexes.begin(), indexes.end()); size_t idx = 0; for (size_t i = 1; i < node_inputs_num; ++i) { size_t data_idx = i - 1; @@ -180,7 +180,7 @@ void ReorderOps::SetTypeInsensitiveNodeInputsInfo(const CNodePtr &node, const st // node's inputs info at indexes change to input_at_indexes's input or output info new_inputs_info->inputs_format.resize(0); new_inputs_info->inputs_type.resize(0); - std::unordered_set indexes_set(indexes.begin(), indexes.end()); + mindspore::HashSet indexes_set(indexes.begin(), indexes.end()); size_t idx = 0; for (size_t data_idx = 0; data_idx < node_inputs_num - 1; ++data_idx) { if (indexes_set.find(data_idx) == indexes_set.end()) { diff --git a/mindspore/ccsrc/backend/optimizer/mem_reuse/mem_reuse.cc b/mindspore/ccsrc/backend/optimizer/mem_reuse/mem_reuse.cc index 95d748bfbfe..7b7cfb7b7fa 100644 --- a/mindspore/ccsrc/backend/optimizer/mem_reuse/mem_reuse.cc +++ b/mindspore/ccsrc/backend/optimizer/mem_reuse/mem_reuse.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -508,7 +508,7 @@ session::KernelWithIndex MemReuseUtil::VisitKernelWithReturnType(const AnfNodePt auto &cache = skip_nop_node ? visit_kernel_with_return_type_in0pos_cache_ : visit_kernel_with_return_type_in0pos_skip_nop_cache_; - std::unordered_map::iterator tag_iter; + mindspore::HashMap::iterator tag_iter; if (auto iter = cache.find(node); iter == cache.end()) { auto tmp_item = std::pair{node, AnfAlgo::VisitKernelWithReturnType(node, i, skip_nop_node)}; diff --git a/mindspore/ccsrc/backend/optimizer/mem_reuse/mem_reuse.h b/mindspore/ccsrc/backend/optimizer/mem_reuse/mem_reuse.h index feb1ad91b6a..31773cad7f1 100644 --- a/mindspore/ccsrc/backend/optimizer/mem_reuse/mem_reuse.h +++ b/mindspore/ccsrc/backend/optimizer/mem_reuse/mem_reuse.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
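The insert(std::make_pair(...)) to emplace(...) rewrites above are behavior-preserving: both leave the container untouched when the key is already present, while emplace forwards its arguments and constructs the element in place instead of building a temporary pair first. A self-contained illustration (standard library only):

#include <string>
#include <unordered_map>
#include <utility>

int main() {
  std::unordered_map<std::string, std::pair<int, int>> m;
  // Old style: build a temporary pair, then copy/move it into the table.
  (void)m.insert(std::make_pair(std::string("a"), std::make_pair(1, 2)));
  // Patched style: forward the arguments, construct the value in place.
  // Like insert, emplace is a no-op when the key already exists.
  (void)m.emplace("b", std::make_pair(3, 4));
  return m.size() == 2 ? 0 : 1;
}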
@@ -18,8 +18,8 @@ #define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_MEM_REUSE_MEM_REUSE_H_ #include #include -#include #include +#include "utils/hash_map.h" #include "backend/optimizer/mem_reuse/kernel_refcount.h" #include "backend/session/anf_runtime_algorithm.h" #include "backend/session/kernel_graph.h" @@ -109,8 +109,8 @@ class MemReuseUtil { bool enable_visit_kernel_cache_{false}; - std::unordered_map visit_kernel_with_return_type_in0pos_cache_; - std::unordered_map visit_kernel_with_return_type_in0pos_skip_nop_cache_; + mindspore::HashMap visit_kernel_with_return_type_in0pos_cache_; + mindspore::HashMap visit_kernel_with_return_type_in0pos_skip_nop_cache_; }; using MemReuseUtilPtr = std::shared_ptr; diff --git a/mindspore/ccsrc/backend/optimizer/mem_reuse/mem_reuse_checker.cc b/mindspore/ccsrc/backend/optimizer/mem_reuse/mem_reuse_checker.cc index 96754dd2f4e..3bc5525de67 100644 --- a/mindspore/ccsrc/backend/optimizer/mem_reuse/mem_reuse_checker.cc +++ b/mindspore/ccsrc/backend/optimizer/mem_reuse/mem_reuse_checker.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -370,7 +370,7 @@ void MemReuseChecker::CheckNormalIR(const session::KernelGraph *graph) { nor_tensor_sizes_.push_back(it->GetSize()); curr_ous.push_back(it->GetPtr()); } - (void)node_ous_.insert(std::make_pair(node.get(), curr_ous)); + (void)node_ous_.emplace(node.get(), curr_ous); std::vector curr_ins; size_t input_num = AnfAlgo::GetInputTensorNum(node); for (size_t i = 0; i < input_num; ++i) { @@ -390,12 +390,12 @@ void MemReuseChecker::CheckNormalIR(const session::KernelGraph *graph) { nor_input_tensors_.push_back(device_address->GetPtr()); curr_ins.push_back(device_address->GetPtr()); } - (void)node_ins_.insert(std::make_pair(node.get(), curr_ins)); + (void)node_ins_.emplace(node.get(), curr_ins); } size_t ou_idx = 0; for (const auto &ou : nor_output_tensors_) { - (void)ptr_idx_.insert(std::make_pair(ou, ou_idx)); - (void)ptr_refs_.insert(std::make_pair(ou, 0)); + (void)ptr_idx_.emplace(ou, ou_idx); + (void)ptr_refs_.emplace(ou, 0); ou_idx++; } for (const auto &in : nor_input_tensors_) { diff --git a/mindspore/ccsrc/backend/optimizer/mem_reuse/mem_swap_manager.h b/mindspore/ccsrc/backend/optimizer/mem_reuse/mem_swap_manager.h index e645edfb7e4..9e2f9a78de1 100644 --- a/mindspore/ccsrc/backend/optimizer/mem_reuse/mem_swap_manager.h +++ b/mindspore/ccsrc/backend/optimizer/mem_reuse/mem_swap_manager.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
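These two members back the cache rewritten earlier in mem_reuse.cc, which probes with a C++17 'if' initializer and fills on a miss. The shape of that memoization pattern, with made-up names and a stand-in computation:

#include <string>
#include <unordered_map>

int CachedCompute(std::unordered_map<std::string, int> *cache, const std::string &key) {
  if (auto iter = cache->find(key); iter != cache->end()) {
    return iter->second;  // hit: no second lookup
  }
  int value = static_cast<int>(key.size());         // stand-in computation
  return cache->emplace(key, value).first->second;  // miss: fill, then return
}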
@@ -17,12 +17,12 @@ #ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_MEM_REUSE_MEM_SWAP_MANAGER_H_ #define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_MEM_REUSE_MEM_SWAP_MANAGER_H_ -#include -#include #include #include #include #include +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "backend/optimizer/mem_reuse/mem_copy_manager.h" using PerformPair = std::pair; @@ -141,10 +141,10 @@ class MemSwapManager { std::vector execution_order_; std::vector ordered_tensors_; - std::unordered_map kernel_execution_info_; - std::unordered_map> kernel_swap_perform_; + mindspore::HashMap kernel_execution_info_; + mindspore::HashMap> kernel_swap_perform_; // Key: trigger swap kernel, value: MemSwapInfoSet of kernel need to be swapped - std::unordered_map mem_swap_info_map_; + mindspore::HashMap mem_swap_info_map_; std::vector host_addrs_list_; // Key: cache kernel address, value: lists of first time move pos or not diff --git a/mindspore/ccsrc/backend/optimizer/pass/add_training_attr.cc b/mindspore/ccsrc/backend/optimizer/pass/add_training_attr.cc index 7ba425c7990..4a1a739f7b5 100644 --- a/mindspore/ccsrc/backend/optimizer/pass/add_training_attr.cc +++ b/mindspore/ccsrc/backend/optimizer/pass/add_training_attr.cc @@ -19,9 +19,9 @@ #include #include #include -#include -#include +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "ir/graph_utils.h" #include "backend/optimizer/common/helper.h" #include "backend/session/anf_runtime_algorithm.h" @@ -31,10 +31,10 @@ namespace mindspore { namespace opt { namespace { -std::unordered_map> MarkOp{ +mindspore::HashMap> MarkOp{ {"LSTM", {"LSTMGradWeight", "LSTMGrad", "LSTMGradData"}}}; -bool CheckOP(const FuncGraphManagerPtr &manager, const AnfNodePtr &cnode, const std::unordered_set &set) { +bool CheckOP(const FuncGraphManagerPtr &manager, const AnfNodePtr &cnode, const mindspore::HashSet &set) { for (const auto &node_index : manager->node_users()[cnode]) { auto output = node_index.first; MS_EXCEPTION_IF_NULL(output); diff --git a/mindspore/ccsrc/backend/optimizer/pass/adjust_depend_for_parallel_optimizer_recompute_all_gather.cc b/mindspore/ccsrc/backend/optimizer/pass/adjust_depend_for_parallel_optimizer_recompute_all_gather.cc index a8e9e9a0c57..f814bb2cc95 100644 --- a/mindspore/ccsrc/backend/optimizer/pass/adjust_depend_for_parallel_optimizer_recompute_all_gather.cc +++ b/mindspore/ccsrc/backend/optimizer/pass/adjust_depend_for_parallel_optimizer_recompute_all_gather.cc @@ -22,7 +22,7 @@ namespace mindspore { namespace opt { bool AdjustDependForParallelOptimizerRecomputeAllGather::Run(const FuncGraphPtr &graph) { MS_EXCEPTION_IF_NULL(graph); - std::unordered_map forward_allgather_recompute_value_in_fusion_group; + mindspore::HashMap forward_allgather_recompute_value_in_fusion_group; std::vector node_list = TopoSort(graph->get_return()); std::vector parallel_optimizer_recompute_allgather_fusion_ids; std::vector parallel_optimizer_recompute_allgathers; diff --git a/mindspore/ccsrc/backend/optimizer/pass/communication_op_fusion.cc b/mindspore/ccsrc/backend/optimizer/pass/communication_op_fusion.cc index 84274d8e96d..961bea027a4 100644 --- a/mindspore/ccsrc/backend/optimizer/pass/communication_op_fusion.cc +++ b/mindspore/ccsrc/backend/optimizer/pass/communication_op_fusion.cc @@ -18,8 +18,8 @@ #include #include #include -#include +#include "utils/hash_map.h" #include "ir/graph_utils.h" #include "base/core_ops.h" #include "runtime/device/kernel_info.h" @@ -454,7 +454,7 @@ bool CommunicationOpFusion::Run(const FuncGraphPtr &func_graph) { const float 
input_grad_size_num = 0.0; const float input_grad_time_num = 0.0; // divide candidate fusion groups with same (group,op,fusion,dtype) attrs, fusion==0 means not fusion - std::unordered_map candidate_groups; + mindspore::HashMap candidate_groups; std::vector node_list = TopoSort(func_graph->get_return()); for (auto &node : node_list) { if (node != nullptr && node->isa() && AnfAlgo::GetCNodeName(node) == op_name_) { diff --git a/mindspore/ccsrc/backend/optimizer/pass/convert_const_input_to_attr.h b/mindspore/ccsrc/backend/optimizer/pass/convert_const_input_to_attr.h index 529bfade57a..303ffdcc9e1 100644 --- a/mindspore/ccsrc/backend/optimizer/pass/convert_const_input_to_attr.h +++ b/mindspore/ccsrc/backend/optimizer/pass/convert_const_input_to_attr.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,9 +16,9 @@ #ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_PASS_CONVERT_CONST_INPUT_TO_ATTR_H_ #define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_PASS_CONVERT_CONST_INPUT_TO_ATTR_H_ #include -#include -#include +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "ir/anf.h" #include "backend/optimizer/common/optimizer.h" @@ -32,7 +32,7 @@ class ConvertConstInputToAttr : public PatternProcessPass { const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override; private: - std::unordered_map> op_input_attr_map_; + mindspore::HashMap> op_input_attr_map_; }; } // namespace opt } // namespace mindspore diff --git a/mindspore/ccsrc/backend/optimizer/pass/convert_tuple_output_to_maketuple.cc b/mindspore/ccsrc/backend/optimizer/pass/convert_tuple_output_to_maketuple.cc index 7acb18bb28e..5b5275393d9 100644 --- a/mindspore/ccsrc/backend/optimizer/pass/convert_tuple_output_to_maketuple.cc +++ b/mindspore/ccsrc/backend/optimizer/pass/convert_tuple_output_to_maketuple.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
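'candidate_groups' above buckets communication ops under a composite (group, op, fusion, dtype) key flattened into one string; operator[] on the hash map default-constructs a bucket the first time a key is seen. A simplified sketch of the bucketing idiom, where OpInfo is a made-up stand-in for CommunicationOpInfo:

#include <string>
#include <unordered_map>
#include <vector>

struct OpInfo {
  std::vector<int> node_ids;  // simplified: ids of the nodes in this group
};

void AddCandidate(std::unordered_map<std::string, OpInfo> *groups,
                  const std::string &key, int node_id) {
  // operator[] creates an empty OpInfo on the first touch of 'key'.
  (*groups)[key].node_ids.push_back(node_id);
}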
@@ -17,8 +17,8 @@ #include #include -#include +#include "utils/hash_map.h" #include "backend/session/anf_runtime_algorithm.h" #include "backend/optimizer/common/helper.h" #include "backend/session/kernel_graph.h" diff --git a/mindspore/ccsrc/backend/optimizer/pass/custom_op_const_input_to_attr.cc b/mindspore/ccsrc/backend/optimizer/pass/custom_op_const_input_to_attr.cc index 97f5d15d08e..f8ea07e8a90 100644 --- a/mindspore/ccsrc/backend/optimizer/pass/custom_op_const_input_to_attr.cc +++ b/mindspore/ccsrc/backend/optimizer/pass/custom_op_const_input_to_attr.cc @@ -16,8 +16,8 @@ #include "backend/optimizer/pass/custom_op_const_input_to_attr.h" #include -#include +#include "utils/hash_set.h" #include "backend/optimizer/common/helper.h" #include "backend/session/anf_runtime_algorithm.h" @@ -38,7 +38,7 @@ const AnfNodePtr CustomOpConstInputToAttr::Process(const FuncGraphPtr &, const A } auto primitive = AnfAlgo::GetCNodePrimitive(cnode); MS_EXCEPTION_IF_NULL(primitive); - std::unordered_set attr_indices; + mindspore::HashSet attr_indices; GetCustomOpAttrIndex(primitive, &attr_indices); if (attr_indices.empty()) { return nullptr; diff --git a/mindspore/ccsrc/backend/optimizer/pass/eliminate_redundant_op.cc b/mindspore/ccsrc/backend/optimizer/pass/eliminate_redundant_op.cc index 378e929715f..6ece5200f68 100644 --- a/mindspore/ccsrc/backend/optimizer/pass/eliminate_redundant_op.cc +++ b/mindspore/ccsrc/backend/optimizer/pass/eliminate_redundant_op.cc @@ -17,7 +17,7 @@ #include "backend/optimizer/pass/eliminate_redundant_op.h" #include #include -#include +#include "utils/hash_map.h" #include "backend/session/anf_runtime_algorithm.h" #include "utils/utils.h" #include "backend/optimizer/common/helper.h" diff --git a/mindspore/ccsrc/backend/optimizer/pass/eliminate_redundant_op.h b/mindspore/ccsrc/backend/optimizer/pass/eliminate_redundant_op.h index b2b3d4d2b8b..6f1968e8554 100644 --- a/mindspore/ccsrc/backend/optimizer/pass/eliminate_redundant_op.h +++ b/mindspore/ccsrc/backend/optimizer/pass/eliminate_redundant_op.h @@ -20,7 +20,7 @@ #include #include #include -#include +#include "utils/hash_map.h" #include "ir/anf.h" #include "backend/optimizer/common/pattern_engine.h" #include "backend/optimizer/common/optimizer.h" @@ -44,7 +44,7 @@ class EliminateRedundantOp : public PatternProcessPass { const AnfNodePtr DoEliminate(const FuncGraphPtr &func_graph, const CNodePtr &cnode) const; const AnfNodePtr ProcessMatchedNodes(const FuncGraphPtr &func_graph, const CNodePtr &cnode, const CNodePtr &prev_cnode, std::vector *pass_vector) const; - std::unordered_map redundant_process_map_; + mindspore::HashMap redundant_process_map_; }; } // namespace opt } // namespace mindspore diff --git a/mindspore/ccsrc/backend/optimizer/somas/somas.cc b/mindspore/ccsrc/backend/optimizer/somas/somas.cc index 2fe0c791e78..868c1e3f770 100644 --- a/mindspore/ccsrc/backend/optimizer/somas/somas.cc +++ b/mindspore/ccsrc/backend/optimizer/somas/somas.cc @@ -1212,7 +1212,7 @@ bool Somas::Assign(const session::KernelGraph *graph) { MS_EXCEPTION_IF_NULL(tensor); if (tensor->GetSolverTensorDesc() != nullptr) { SomasSolverTensorDescPtr pSolverTensor = tensor->GetSolverTensorDesc(); - solver_tensor_desc_map_.insert(std::pair(pSolverTensor->index_, pSolverTensor)); + (void)solver_tensor_desc_map_.emplace(pSolverTensor->index_, pSolverTensor); } } MS_LOG(INFO) << "End Loop to create solver info"; @@ -1566,7 +1566,7 @@ std::string Somas::Offline() const { } else { std::map dest_infos; for (SomasNodePtr dest_node : tensor->destinations_) { - 
dest_infos.insert(std::make_pair(dest_node->GetId(), dest_node->GetStream()->GetId())); + (void)dest_infos.emplace(dest_node->GetId(), dest_node->GetStream()->GetId()); } for (auto dest_info : dest_infos) { diff --git a/mindspore/ccsrc/backend/optimizer/somas/somas.h b/mindspore/ccsrc/backend/optimizer/somas/somas.h index 663cb051365..c19eb64c6fd 100644 --- a/mindspore/ccsrc/backend/optimizer/somas/somas.h +++ b/mindspore/ccsrc/backend/optimizer/somas/somas.h @@ -20,11 +20,11 @@ #include #include #include -#include -#include #include #include +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "backend/kernel_compiler/tbe/tbe_utils.h" #include "backend/optimizer/somas/somas_node.h" #include "backend/optimizer/somas/somas_solver_pre.h" @@ -64,7 +64,7 @@ class Somas { // hash id std::string hash_id_; // Maps - std::unordered_map tensors_map_; + mindspore::HashMap tensors_map_; std::map> nodes_map_; std::map> parameters_map_; diff --git a/mindspore/ccsrc/backend/optimizer/somas/somas_node.h b/mindspore/ccsrc/backend/optimizer/somas/somas_node.h index fc127a1162f..56003ee62ed 100644 --- a/mindspore/ccsrc/backend/optimizer/somas/somas_node.h +++ b/mindspore/ccsrc/backend/optimizer/somas/somas_node.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -17,17 +17,17 @@ #ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_SOMAS_SOMAS_NODE_H_ #define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_SOMAS_SOMAS_NODE_H_ -#include "backend/optimizer/somas/somas_stream.h" -#include "backend/optimizer/somas/somas_tensor.h" -#include "backend/optimizer/somas/somas_parameter.h" - #include #include #include #include -#include #include +#include "utils/hash_map.h" +#include "backend/optimizer/somas/somas_stream.h" +#include "backend/optimizer/somas/somas_tensor.h" +#include "backend/optimizer/somas/somas_parameter.h" + namespace mindspore { namespace somas { class SomasStream; @@ -53,7 +53,7 @@ class SomasNode { std::vector workspace_tensors_; std::map input_parameters_map_; - std::unordered_map anc_stream_max_order_; + mindspore::HashMap anc_stream_max_order_; // Constructors/Destructors SomasNode(size_t id, NodeType type, SomasStreamPtr stream) : id_(id), stream_(stream), type_(type) {} diff --git a/mindspore/ccsrc/backend/optimizer/somas/somas_solver_alg.cc b/mindspore/ccsrc/backend/optimizer/somas/somas_solver_alg.cc index 05596983d1e..a39e1a64be0 100644 --- a/mindspore/ccsrc/backend/optimizer/somas/somas_solver_alg.cc +++ b/mindspore/ccsrc/backend/optimizer/somas/somas_solver_alg.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
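Note that 'dest_infos' in the Offline() hunk above stays a std::map even though the patch converts most containers, plausibly because the dump that follows iterates it: std::map visits keys in sorted order, while hash tables (std::unordered_map and robin_hood alike) iterate in unspecified order. A quick contrast:

#include <cstdio>
#include <map>
#include <unordered_map>

int main() {
  // Ordered container: iteration order is the key order, reproducible dumps.
  std::map<int, int> ordered{{3, 0}, {1, 0}, {2, 0}};
  for (const auto &p : ordered) std::printf("%d ", p.first);  // prints: 1 2 3
  std::printf("\n");
  // Hash table: iteration order is unspecified and may change on rehash.
  std::unordered_map<int, int> hashed{{3, 0}, {1, 0}, {2, 0}};
  for (const auto &p : hashed) std::printf("%d ", p.first);   // any order
  std::printf("\n");
  return 0;
}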
@@ -201,13 +201,12 @@ void FootPrint::addElem(BlockTensor *block, const size_t &offset) { size_t offset1 = offset; SomasSolverTensorDescPtr tensor = block->m_start_tensor_; MS_LOG(DEBUG) << "Allocating block: " << tensor->index_ << " in offset: " << offset; - pair sol_offset; - sol_offset.first = block->m_current_sol_; - sol_offset.second = offset; - if (block->offsets_.count(sol_offset.first)) - MS_LOG(WARNING) << "Warning addElem: Offset overwritten at solution " << block->m_current_sol_ << " for block " + auto sol_id = block->m_current_sol_; + if (block->offsets_.find(sol_id) != block->offsets_.end()) { + MS_LOG(WARNING) << "Warning addElem: Offset overwritten at solution " << sol_id << " for block " << block->m_start_tensor_->index_; - block->offsets_.insert(sol_offset); + } + (void)block->offsets_.emplace(sol_id, offset); while (tensor) { tensor->offset_ = offset1; offset1 += tensor->size_; @@ -234,14 +233,13 @@ bool FastHeuristic::Eval(vector *block_tensors_v, const std::shared for (auto &block : *block_tensors_v) { if (!block.m_bre_allocate_) { offset = block.m_start_tensor_->offset_; - pair aux; - aux.first = foot_print->m_solId_; - aux.second = block.m_start_tensor_->offset_; - if (block.offsets_.count(aux.first)) { - MS_LOG(WARNING) << "Warning: Offset overwritten at solution " << aux.first << " for block " + auto aux_id = foot_print->m_solId_; + auto aux_offset = block.m_start_tensor_->offset_; + if (block.offsets_.find(aux_id) != block.offsets_.end()) { + MS_LOG(WARNING) << "Warning: Offset overwritten at solution " << aux_id << " for block " << block.m_start_tensor_->index_; } - block.offsets_.insert(aux); + (void)block.offsets_.emplace(aux_id, aux_offset); continue; } bpushed = false; diff --git a/mindspore/ccsrc/backend/optimizer/somas/somas_solver_alg.h b/mindspore/ccsrc/backend/optimizer/somas/somas_solver_alg.h index 92a1c451c68..9615e03257e 100644 --- a/mindspore/ccsrc/backend/optimizer/somas/somas_solver_alg.h +++ b/mindspore/ccsrc/backend/optimizer/somas/somas_solver_alg.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
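Both addElem and Eval above now look the key up before calling emplace so they can warn about an attempted overwrite, because emplace (like insert) silently keeps the existing value. Where overwriting is actually wanted, C++17 insert_or_assign is the explicit counterpart; a small demonstration:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <unordered_map>

int main() {
  std::unordered_map<std::uint32_t, std::size_t> offsets;
  (void)offsets.emplace(7u, 100u);
  (void)offsets.emplace(7u, 200u);  // dropped: key 7 is already present
  assert(offsets[7u] == 100u);
  (void)offsets.insert_or_assign(7u, 200u);  // explicit overwrite
  assert(offsets[7u] == 200u);
  return 0;
}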
@@ -27,17 +27,16 @@ #include #include #include -#include #include #include +#include "utils/hash_map.h" #include "backend/optimizer/somas/somas_solver_pre.h" #include "utils/ms_context.h" using std::pair; using std::set; using std::stack; -using std::unordered_map; using std::vector; namespace mindspore { @@ -85,12 +84,12 @@ class Interval { class BlockTensor { public: SomasSolverTensorDescPtr m_start_tensor_; - unordered_map, bool (*)(const pair &, const pair &)>> + mindspore::HashMap< + uint32_t, std::set, bool (*)(const pair &, const pair &)>> offsets_candidates_; uint32_t m_current_sol_; bool m_bre_allocate_; - unordered_map offsets_; + mindspore::HashMap offsets_; size_t m_size_; BlockTensor() : m_start_tensor_(nullptr), diff --git a/mindspore/ccsrc/backend/optimizer/somas/somas_solver_core.cc b/mindspore/ccsrc/backend/optimizer/somas/somas_solver_core.cc index 7b08bc35b44..7bbcb5b06a1 100644 --- a/mindspore/ccsrc/backend/optimizer/somas/somas_solver_core.cc +++ b/mindspore/ccsrc/backend/optimizer/somas/somas_solver_core.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,15 +20,15 @@ #include #include #include -#include #include #include +#include "utils/hash_map.h" #include "backend/optimizer/somas/somas_solver_alg.h" #include "backend/optimizer/somas/somas_solver_core.h" #include "backend/optimizer/somas/somas_solver_pre.h" +using mindspore::HashMap; using std::sort; -using std::unordered_map; using std::vector; namespace mindspore { @@ -266,7 +266,7 @@ static bool GreaterSizeGreaterConstraintsGreaterIndex(const BlockTensor &t1, con void SomasSolverCore::SortTensors() { // need to sort the tensors for Fast Heuristic MS_LOG(DEBUG) << "Sorting Blocks of tensor, strategy: " << sortingNames[sort_strategy_]; typedef bool (*SortingFunction)(const BlockTensor &, const BlockTensor &); - std::unordered_map sort_map; + mindspore::HashMap sort_map; sort_map[kGreaterSizeSmallerIndex] = &GreaterSizeSmallerIndex; #ifdef SOMAS_DEBUG sort_map[kGreaterSizeGreaterIndex] = &GreaterSizeGreaterIndex; @@ -327,13 +327,13 @@ void SomasSolverCore::AppendLifelongTensors() { MS_LOG(DEBUG) << "Appending lifelong tensors to solution"; size_t offset = upperbound_; std::map lifelongTensors; - for (auto t_ : tensors_) { - if (t_.second->lifelong_) { - lifelongTensors.insert(t_); + for (auto &t : tensors_) { + if (t.second->lifelong_) { + (void)lifelongTensors.emplace(t.first, t.second); } } - for (auto t_ : lifelongTensors) { - SomasSolverTensorDescPtr pTensor = t_.second; + for (auto &t : lifelongTensors) { + auto &pTensor = t.second; pTensor->offset_ = offset; offset += pTensor->size_; } diff --git a/mindspore/ccsrc/backend/optimizer/somas/somas_solver_core.h b/mindspore/ccsrc/backend/optimizer/somas/somas_solver_core.h index 4e48460c756..8b44ec99f97 100644 --- a/mindspore/ccsrc/backend/optimizer/somas/somas_solver_core.h +++ b/mindspore/ccsrc/backend/optimizer/somas/somas_solver_core.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
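The AppendLifelongTensors loop above changes 'for (auto t_ : tensors_)' to 'for (auto &t : tensors_)': iterating a map by value copies each std::pair<const Key, T>, which for shared_ptr values also bumps and releases a reference count on every step, while a reference costs nothing. Roughly:

#include <cstddef>
#include <map>
#include <memory>

void TouchAll(const std::map<std::size_t, std::shared_ptr<int>> &tensors) {
  // 'auto t : tensors' would copy the pair (and the shared_ptr) once per
  // iteration; 'const auto &t' binds to the stored element directly.
  for (const auto &t : tensors) {
    if (t.second != nullptr) {
      // use *t.second
    }
  }
}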
@@ -21,9 +21,9 @@ #include #include #include -#include #include +#include "utils/hash_map.h" #include "backend/optimizer/somas/somas_solver_alg.h" #include "backend/optimizer/somas/somas_solver_pre.h" diff --git a/mindspore/ccsrc/backend/optimizer/somas/somas_solver_pre.cc b/mindspore/ccsrc/backend/optimizer/somas/somas_solver_pre.cc index e8e4e64975b..f99e205da1b 100644 --- a/mindspore/ccsrc/backend/optimizer/somas/somas_solver_pre.cc +++ b/mindspore/ccsrc/backend/optimizer/somas/somas_solver_pre.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -90,7 +90,7 @@ vector SomasSolverPre::CreateTensorsMaps(const TensorsDescMap &t for (size_t sol = 1; sol < total_sol; sol++) { SomasSolverTensorDesc newDesc = *(pairT.second.get()); SomasSolverTensorDescPtr newDescPtr = std::make_shared(newDesc); - vecTensorsMap[sol].insert(std::make_pair(pairT.first, newDescPtr)); + (void)vecTensorsMap[sol].emplace(pairT.first, newDescPtr); } } return vecTensorsMap; diff --git a/mindspore/ccsrc/backend/optimizer/somas/somas_solver_pre.h b/mindspore/ccsrc/backend/optimizer/somas/somas_solver_pre.h index 10920b59072..ac72e60da77 100644 --- a/mindspore/ccsrc/backend/optimizer/somas/somas_solver_pre.h +++ b/mindspore/ccsrc/backend/optimizer/somas/somas_solver_pre.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -25,11 +25,11 @@ #include #include #include -#include #include +#include "utils/hash_map.h" #include "backend/session/kernel_graph.h" -using std::unordered_map; +using mindspore::HashMap; using std::vector; namespace mindspore { @@ -175,7 +175,7 @@ struct SomasSolverTensorDesc { } }; using SomasSolverTensorDescPtr = std::shared_ptr; -typedef std::unordered_map TensorsDescMap; +typedef mindspore::HashMap TensorsDescMap; class SomasSolverPre { public: SomasSolverPre() = default; diff --git a/mindspore/ccsrc/backend/optimizer/somas/somas_tensor.h b/mindspore/ccsrc/backend/optimizer/somas/somas_tensor.h index be283348be0..d5a3bf28975 100644 --- a/mindspore/ccsrc/backend/optimizer/somas/somas_tensor.h +++ b/mindspore/ccsrc/backend/optimizer/somas/somas_tensor.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
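CreateTensorsMaps above clones each descriptor by copying the pointee ('*(pairT.second.get())') and rewrapping it in a fresh make_shared, so every candidate solution owns independent descriptors instead of aliasing one object. A sketch of that per-solution deep copy under made-up types:

#include <cstddef>
#include <memory>
#include <unordered_map>
#include <vector>

struct Desc {
  std::size_t offset_ = 0;  // illustrative descriptor field
};
using DescMap = std::unordered_map<std::size_t, std::shared_ptr<Desc>>;

std::vector<DescMap> CloneTensorsMaps(const DescMap &base, std::size_t total_sol) {
  std::vector<DescMap> maps(total_sol);
  if (total_sol == 0) return maps;
  maps[0] = base;  // solution 0 keeps the original descriptors
  for (std::size_t sol = 1; sol < total_sol; ++sol) {
    for (const auto &p : base) {
      // Dereference-copy, then rewrap: each solution mutates its own copy.
      (void)maps[sol].emplace(p.first, std::make_shared<Desc>(*p.second));
    }
  }
  return maps;
}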
@@ -19,9 +19,9 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "backend/optimizer/somas/somas_node.h" #include "backend/optimizer/somas/somas_solver_pre.h" #include "backend/optimizer/somas/somas_stream.h" @@ -79,7 +79,7 @@ class SomasTensor { std::set destinations_; std::set destinationStreams_; - unordered_map max_destinations_; + mindspore::HashMap max_destinations_; // Constructors/Destructors explicit SomasTensor(size_t id, SomasNodePtr source_node, SomasStreamPtr source_stream, size_t real_size, @@ -116,7 +116,7 @@ class SomasTensor { private: bool ref_overlap_; size_t num_constraints_{0}; - unordered_map max_destination_id_; + mindspore::HashMap max_destination_id_; const size_t id_{0}; const SomasNodePtr source_node_; SomasStreamPtr const source_stream_; diff --git a/mindspore/ccsrc/backend/optimizer/trt_pass/graph_converter.cc b/mindspore/ccsrc/backend/optimizer/trt_pass/graph_converter.cc index 390abd67593..6831fd3c241 100644 --- a/mindspore/ccsrc/backend/optimizer/trt_pass/graph_converter.cc +++ b/mindspore/ccsrc/backend/optimizer/trt_pass/graph_converter.cc @@ -20,11 +20,11 @@ #include #include #include -#include #include #include #include #include +#include "utils/hash_map.h" #include "utils/ms_context.h" #include "backend/optimizer/trt_pass/trt_converter_context.h" #include "utils/singleton.h" @@ -105,9 +105,9 @@ CNodePtr BuildMakeTupleNode(const FuncGraphPtr root, const std::map args_map; + mindspore::HashMap args_map; for (size_t i = 0; i < parameters.size(); i++) { - args_map.insert(std::make_pair(parameters[i], arguments[i])); + (void)args_map.emplace(parameters[i], arguments[i]); } AnfNodePtrList useful_arguments; diff --git a/mindspore/ccsrc/backend/optimizer/trt_pass/graph_partitioner.cc b/mindspore/ccsrc/backend/optimizer/trt_pass/graph_partitioner.cc index c3af70c5a39..a73c23d3bcf 100644 --- a/mindspore/ccsrc/backend/optimizer/trt_pass/graph_partitioner.cc +++ b/mindspore/ccsrc/backend/optimizer/trt_pass/graph_partitioner.cc @@ -20,10 +20,10 @@ #include #include #include -#include #include #include #include +#include "utils/hash_map.h" #include "utils/ms_context.h" #include "backend/session/anf_runtime_algorithm.h" #include "backend/optimizer/trt_pass/trt_op_factory.h" @@ -61,8 +61,8 @@ bool WeightCheck(const AnfNodePtr &node) { return true; } -std::unordered_map CollectNodeInfo(const FuncGraphPtr &func_graph) { - std::unordered_map res; +mindspore::HashMap CollectNodeInfo(const FuncGraphPtr &func_graph) { + mindspore::HashMap res; const std::vector &node_list = TopoSort(func_graph->get_return()); for (size_t i = 0; i < node_list.size(); i++) { diff --git a/mindspore/ccsrc/backend/optimizer/trt_pass/graph_partitioner.h b/mindspore/ccsrc/backend/optimizer/trt_pass/graph_partitioner.h index 0eb38f4abc0..c664287b9d7 100644 --- a/mindspore/ccsrc/backend/optimizer/trt_pass/graph_partitioner.h +++ b/mindspore/ccsrc/backend/optimizer/trt_pass/graph_partitioner.h @@ -19,9 +19,9 @@ #include #include #include -#include #include #include +#include "utils/hash_map.h" #include "backend/optimizer/common/optimizer.h" namespace mindspore { @@ -78,7 +78,7 @@ class GraphDependency { std::string ToString() const; private: - std::unordered_map> dependencies_; + mindspore::HashMap> dependencies_; }; using Subgraph = std::tuple; @@ -114,7 +114,7 @@ class GraphPartitioner { bool NodeGrouping(const FuncGraphPtr &func_graph); std::map CollectSegments(); - std::unordered_map node_info_; + mindspore::HashMap node_info_; GraphDependency dependency_; }; } // 
namespace opt diff --git a/mindspore/ccsrc/backend/optimizer/trt_pass/trt_converter_context.cc b/mindspore/ccsrc/backend/optimizer/trt_pass/trt_converter_context.cc index 0ca519bb09d..a563e62fea6 100644 --- a/mindspore/ccsrc/backend/optimizer/trt_pass/trt_converter_context.cc +++ b/mindspore/ccsrc/backend/optimizer/trt_pass/trt_converter_context.cc @@ -16,6 +16,8 @@ #include "backend/optimizer/trt_pass/trt_converter_context.h" +#include +#include #include "runtime/device/gpu/trt_loader.h" #include "backend/optimizer/trt_pass/trt_op_factory.h" #include "backend/kernel_compiler/gpu/trt/trt_utils.h" @@ -219,7 +221,7 @@ bool TrtConverterContext::LoadLayerInput(const AnfNodePtr &node, std::vector TrtConverterContext::GetGraphInputs() const { // Get Anf-graph inputs without weights. All weights were binded to Trt-graph. - std::unordered_map graph_inputs; + mindspore::HashMap graph_inputs; for (const auto &input_node : func_graph_->parameters()) { if (!input_node->isa()) { continue; @@ -227,7 +229,7 @@ std::vector TrtConverterContext::GetGraphInputs() const { auto input = input_node->cast(); if (!AnfAlgo::IsParameterWeight(input)) { - graph_inputs.insert(std::make_pair(input->name(), input_node)); + (void)graph_inputs.emplace(input->name(), input_node); } } @@ -260,7 +262,7 @@ std::tuple, std::vector> TrtC size_t pos = name.find_first_not_of("return_output_"); size_t anf_index = atoi(name.substr(pos).c_str()); - anf_trt_index_map.insert(std::make_pair(anf_index, trt_index)); + (void)anf_trt_index_map.emplace(anf_index, trt_index); trt_output_list[trt_index] = anf_output_list[anf_index]; trt_index++; } diff --git a/mindspore/ccsrc/backend/optimizer/trt_pass/trt_converter_context.h b/mindspore/ccsrc/backend/optimizer/trt_pass/trt_converter_context.h index 7f4f94e73f2..f3aed3480e3 100644 --- a/mindspore/ccsrc/backend/optimizer/trt_pass/trt_converter_context.h +++ b/mindspore/ccsrc/backend/optimizer/trt_pass/trt_converter_context.h @@ -17,13 +17,13 @@ #ifndef MINDSPORE_CCSRC_BACKEND_OPTITIMIZER_TRT_CONVERTER_CONTEXT_H_ #define MINDSPORE_CCSRC_BACKEND_OPTITIMIZER_TRT_CONVERTER_CONTEXT_H_ -#include #include #include #include #include #include #include +#include "utils/hash_map.h" #include "base/base.h" #include "ir/anf.h" #include "backend/session/anf_runtime_algorithm.h" @@ -90,7 +90,7 @@ class TrtConverterContext : public std::enable_shared_from_this engine_; // Cache (AnfNode + output_index : ILayer output). 
- std::unordered_map> output_map_; + mindspore::HashMap> output_map_; std::vector> temp_weights_; }; } // namespace opt diff --git a/mindspore/ccsrc/backend/optimizer/trt_pass/trt_op_factory.h b/mindspore/ccsrc/backend/optimizer/trt_pass/trt_op_factory.h index 5cc8d8da88a..c976f0bfb40 100644 --- a/mindspore/ccsrc/backend/optimizer/trt_pass/trt_op_factory.h +++ b/mindspore/ccsrc/backend/optimizer/trt_pass/trt_op_factory.h @@ -18,12 +18,12 @@ #define MINDSPORE_CCSRC_BACKEND_OPTITIMIZER_TRT_PASS_OP_FACTORY_H_ #include -#include #include #include #include #include #include +#include "utils/hash_map.h" #include "base/base.h" #include "ir/anf.h" @@ -45,7 +45,7 @@ class TrtOpFactory { if (op_convert_map_.count(op_name)) { MS_LOG(EXCEPTION) << "Operator: " << op_name << " re-registered."; } - op_convert_map_.insert(std::make_pair(op_name, func)); + (void)op_convert_map_.emplace(op_name, func); } ConvertFunc GetConvertFunc(const std::string &op_name) const { @@ -62,7 +62,7 @@ class TrtOpFactory { ~TrtOpFactory() = default; DISABLE_COPY_AND_ASSIGN(TrtOpFactory) - std::unordered_map op_convert_map_; + mindspore::HashMap op_convert_map_; }; class TrtOpRegister { diff --git a/mindspore/ccsrc/backend/session/anf_runtime_algorithm.h b/mindspore/ccsrc/backend/session/anf_runtime_algorithm.h index a8d4018158a..fe4fa1b4f8e 100644 --- a/mindspore/ccsrc/backend/session/anf_runtime_algorithm.h +++ b/mindspore/ccsrc/backend/session/anf_runtime_algorithm.h @@ -23,7 +23,6 @@ #include #include #include -#include #include #include #include "ir/anf.h" diff --git a/mindspore/ccsrc/backend/session/ascend_inference_session.h b/mindspore/ccsrc/backend/session/ascend_inference_session.h index 10cbbc74c68..bc9462c82c0 100644 --- a/mindspore/ccsrc/backend/session/ascend_inference_session.h +++ b/mindspore/ccsrc/backend/session/ascend_inference_session.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
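TrtOpFactory::Register above still checks count(op_name) before the emplace, which costs two hash lookups; emplace already reports duplicates through its returned bool, so the check and the insertion could share one lookup. A hedged alternative sketch (simplified ConvertFunc signature, not the factory's actual code):

#include <functional>
#include <stdexcept>
#include <string>
#include <unordered_map>
#include <utility>

using ConvertFunc = std::function<void()>;  // stand-in for the real signature

class Registry {
 public:
  void Register(const std::string &op_name, ConvertFunc func) {
    // emplace returns {iterator, inserted}; 'inserted == false' means the
    // operator was already registered, detected with a single table probe.
    if (!op_convert_map_.emplace(op_name, std::move(func)).second) {
      throw std::runtime_error("Operator: " + op_name + " re-registered.");
    }
  }

 private:
  std::unordered_map<std::string, ConvertFunc> op_convert_map_;
};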
@@ -15,7 +15,6 @@ */ #ifndef MINDSPORE_CCSRC_BACKEND_SESSION_ASCEND_INFERENCE_SESSION_H #define MINDSPORE_CCSRC_BACKEND_SESSION_ASCEND_INFERENCE_SESSION_H -#include #include #include #include @@ -24,6 +23,7 @@ #include #include #include +#include "utils/hash_map.h" #include "backend/session/ascend_session.h" #include "backend/session/kernel_graph.h" #include "backend/kernel_compiler/kernel.h" diff --git a/mindspore/ccsrc/backend/session/ascend_session.cc b/mindspore/ccsrc/backend/session/ascend_session.cc index d4e3605ccac..102abe10f2e 100644 --- a/mindspore/ccsrc/backend/session/ascend_session.cc +++ b/mindspore/ccsrc/backend/session/ascend_session.cc @@ -18,10 +18,10 @@ #include #include #include -#include #include #include +#include "utils/hash_set.h" #include "base/core_ops.h" #include "base/base_ref_utils.h" #include "ir/tensor.h" @@ -984,7 +984,7 @@ void AscendSession::BuildOpsInGraph(const GraphId &graph_id, const std::map op_output_info; std::vector kernels; - std::unordered_map single_op_graphs; + mindspore::HashMap single_op_graphs; // Collect kernels need to be built in single op graphs for (const auto &kernel : graph->execution_order()) { // Generate fake input tensors, tensor masks and input kernel with index @@ -1252,7 +1252,7 @@ void InitMemReuseExecOrder(KernelGraph *kernel_graph) { if (!kernel_graph->subgraph_multi_call()) { return; } - std::unordered_map label_id_index_map; + mindspore::HashMap label_id_index_map; auto kernel_cnodes = kernel_graph->execution_order(); std::vector mem_reuse_order; for (uint32_t i = 0; i < kernel_cnodes.size(); i++) { diff --git a/mindspore/ccsrc/backend/session/ascend_session.h b/mindspore/ccsrc/backend/session/ascend_session.h index 1ed6f9018ee..040acf413db 100644 --- a/mindspore/ccsrc/backend/session/ascend_session.h +++ b/mindspore/ccsrc/backend/session/ascend_session.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -17,7 +17,6 @@ #ifndef MINDSPORE_CCSRC_BACKEND_SESSION_ASCEND_SESSION_H #define MINDSPORE_CCSRC_BACKEND_SESSION_ASCEND_SESSION_H -#include #include #include #include @@ -26,6 +25,7 @@ #include #include #include +#include "utils/hash_map.h" #include "backend/session/session_basic.h" #include "backend/session/kernel_graph.h" #include "backend/kernel_compiler/kernel.h" @@ -164,9 +164,9 @@ class AscendSession : public SessionBasic { void SetOperatorInfo(const std::vector &nodes) const; void RecurseSelectKernelInfo(const KernelGraphPtr &graph, std::set *memo) const; // key is final_graph_id,value is child graph execute order of final graph - std::unordered_map> graph_execute_orders_; + mindspore::HashMap> graph_execute_orders_; // key is final_graph_id,value is the graph types of child graphs - std::unordered_map> graph_order_types_; + mindspore::HashMap> graph_order_types_; // initial tensors, these tensor will sync data to device before run graph std::map, tensor::TensorPtr> initial_tenosrs_; // final_graph_id is used in every root graph has it's own session situation diff --git a/mindspore/ccsrc/backend/session/executor.cc b/mindspore/ccsrc/backend/session/executor.cc index c85ba3be610..9e020d9039b 100644 --- a/mindspore/ccsrc/backend/session/executor.cc +++ b/mindspore/ccsrc/backend/session/executor.cc @@ -248,7 +248,7 @@ std::vector> Executor::GetReadyTasksFromPendingLis auto task = *iter; if (IsTaskReady(task)) { (void)ready_tasks.emplace_back(task); - pending_tasks_.erase(iter++); + iter = pending_tasks_.erase(iter); } else { ++iter; } diff --git a/mindspore/ccsrc/backend/session/gpu_inference_session.h b/mindspore/ccsrc/backend/session/gpu_inference_session.h index 0ee2459799f..bfe66a5289b 100644 --- a/mindspore/ccsrc/backend/session/gpu_inference_session.h +++ b/mindspore/ccsrc/backend/session/gpu_inference_session.h @@ -15,7 +15,6 @@ */ #ifndef MINDSPORE_CCSRC_BACKEND_SESSION_GPU_INFERENCE_SESSION_H #define MINDSPORE_CCSRC_BACKEND_SESSION_GPU_INFERENCE_SESSION_H -#include #include #include #include @@ -24,6 +23,7 @@ #include #include #include +#include "utils/hash_map.h" #include "backend/session/gpu_session.h" #include "backend/session/kernel_graph.h" #include "backend/kernel_compiler/kernel.h" diff --git a/mindspore/ccsrc/backend/session/kernel_graph.cc b/mindspore/ccsrc/backend/session/kernel_graph.cc index 5b46aa95641..2c64baf29cd 100644 --- a/mindspore/ccsrc/backend/session/kernel_graph.cc +++ b/mindspore/ccsrc/backend/session/kernel_graph.cc @@ -16,9 +16,9 @@ #include "backend/session/kernel_graph.h" #include #include -#include #include #include +#include "utils/hash_set.h" #include "base/core_ops.h" #include "ir/param_info.h" #include "utils/utils.h" @@ -39,7 +39,7 @@ const std::set kOpAssignKernelNameList = {prim::kPrimAssign->name() prim::kPrimAssignSub->name()}; void PushNoVisitedNode(const AnfNodePtr &node, std::queue *que, - std::unordered_set *visited_nodes) { + mindspore::HashSet *visited_nodes) { MS_EXCEPTION_IF_NULL(node); MS_EXCEPTION_IF_NULL(que); MS_EXCEPTION_IF_NULL(visited_nodes); @@ -163,7 +163,7 @@ std::vector KernelGraph::outputs() const { } void KernelGraph::EnqueueActiveNodes(const AnfNodePtr &node, std::queue *visit_queue, - std::unordered_set *visited_nodes, bool comm_first) { + mindspore::HashSet *visited_nodes, bool comm_first) { MS_EXCEPTION_IF_NULL(visit_queue); MS_EXCEPTION_IF_NULL(visited_nodes); auto it = node_output_edges_.find(node); @@ -213,7 +213,7 @@ void KernelGraph::SetExecOrderByDefault() { std::queue seed_nodes; 
UpdateNodeEdgeList(&seed_nodes); execution_order_.clear(); - std::unordered_set visited_nodes; + mindspore::HashSet visited_nodes; std::queue zero_input_nodes; std::queue delay_comm_stack; std::queue communication_descendants; @@ -764,35 +764,40 @@ void KernelGraph::FrontBackendlMapUpdate(const AnfNodePtr &old_backend_anf, cons MS_LOG(DEBUG) << "Old same with new:" << old_backend_anf->DebugString(); return; } - if (backend_front_anf_map_.find(old_backend_anf) == backend_front_anf_map_.end()) { + auto bf_iter = backend_front_anf_map_.find(old_backend_anf); + if (bf_iter == backend_front_anf_map_.end()) { MS_LOG(DEBUG) << "Old_backend_anf " << old_backend_anf->DebugString() << " is not exist in the map"; return; } - if (front_backend_anf_map_.find(backend_front_anf_map_[old_backend_anf]) == front_backend_anf_map_.end()) { + auto front_anf = bf_iter->second; + auto fb_iter = front_backend_anf_map_.find(front_anf); + if (fb_iter == front_backend_anf_map_.end()) { MS_LOG(EXCEPTION) << "Anf is not exist in the map ,old " << old_backend_anf->DebugString(); } + fb_iter->second = new_backend_anf; + // Delete the old kernel; this should be done before adding the new item to the map. + (void)backend_front_anf_map_.erase(bf_iter); + backend_front_anf_map_[new_backend_anf] = front_anf; if (IsInternalOutput(old_backend_anf)) { ReplaceInternalOutput(old_backend_anf, new_backend_anf); } - front_backend_anf_map_[backend_front_anf_map_[old_backend_anf]] = new_backend_anf; - backend_front_anf_map_[new_backend_anf] = backend_front_anf_map_[old_backend_anf]; - // delete old kernel - (void)backend_front_anf_map_.erase(old_backend_anf); } // get kernel by anf AnfNodePtr KernelGraph::GetBackendAnfByFrontAnf(const AnfNodePtr &front_anf) { - if (front_backend_anf_map_.find(front_anf) == front_backend_anf_map_.end()) { + auto iter = front_backend_anf_map_.find(front_anf); + if (iter == front_backend_anf_map_.end()) { return nullptr; } - return front_backend_anf_map_[front_anf]; + return iter->second; } AnfNodePtr KernelGraph::GetFrontAnfByBackendAnf(const AnfNodePtr &backend_anf) { - if (backend_front_anf_map_.find(backend_anf) == backend_front_anf_map_.end()) { + auto iter = backend_front_anf_map_.find(backend_anf); + if (iter == backend_front_anf_map_.end()) { return nullptr; } - return backend_front_anf_map_[backend_anf]; + return iter->second; } bool KernelGraph::BackendNodeExistInFrontBackendMap(const AnfNodePtr &backend_anf) { @@ -800,10 +805,11 @@ bool KernelGraph::BackendNodeExistInFrontBackendMap(const AnfNodePtr &backend_an } ValueNodePtr KernelGraph::GetValueNodeByTensor(const mindspore::tensor::TensorPtr &tensor) { - if (tensor_to_value_node_map_.find(tensor) == tensor_to_value_node_map_.end()) { + auto iter = tensor_to_value_node_map_.find(tensor); + if (iter == tensor_to_value_node_map_.end()) { return nullptr; } - return tensor_to_value_node_map_[tensor]; + return iter->second; } void KernelGraph::TensorValueNodeMapAdd(const tensor::TensorPtr &tensor, const ValueNodePtr &value_node) { @@ -816,29 +822,12 @@ void KernelGraph::AddDependEdge(const AnfNodePtr &node, const AnfNodePtr &input, MS_EXCEPTION_IF_NULL(node); MS_EXCEPTION_IF_NULL(input); MS_LOG(DEBUG) << "Input:" << input->DebugString() << ", node:" << node->DebugString() << ",num:" << depend_edge_num; - auto output_depend_edge = std::pair(node, depend_edge_num); // add output depend edge of input - auto output_it = node_output_edges_.find(input); - if (output_it == node_output_edges_.end()) { - node_output_edges_[input] = std::vector>{output_depend_edge}; - } else
{ - output_it->second.push_back(output_depend_edge); - } + node_output_edges_[input].emplace_back(node, depend_edge_num); // add input depend edge of output - auto input_depend_edge = std::pair(input, depend_edge_num); - auto input_it = node_input_edges_.find(node); - if (input_it == node_input_edges_.end()) { - node_input_edges_[node] = std::vector>{input_depend_edge}; - } else { - input_it->second.push_back(input_depend_edge); - } + node_input_edges_[node].emplace_back(input, depend_edge_num); // add node input depend num - auto depend_it = node_input_num_.find(node); - if (depend_it == node_input_num_.end()) { - node_input_num_[node] = depend_edge_num; - } else { - depend_it->second += depend_edge_num; - } + node_input_num_[node] += depend_edge_num; } std::vector KernelGraph::GetOutputNodes(const AnfNodePtr &node) { @@ -848,8 +837,9 @@ std::vector KernelGraph::GetOutputNodes(const AnfNodePtr &node) { MS_LOG(EXCEPTION) << "Can't find node[" << node->DebugString() << "]"; } std::vector output_nodes; - auto trans = [](const std::pair &pair) -> AnfNodePtr { return pair.first; }; - (void)std::transform(it->second.begin(), it->second.end(), std::back_inserter(output_nodes), trans); + output_nodes.reserve(it->second.size()); + (void)std::transform(it->second.begin(), it->second.end(), std::back_inserter(output_nodes), + [](const auto &p) { return p.first; }); return output_nodes; } @@ -858,7 +848,7 @@ void KernelGraph::UpdateNodeEdgeList(std::queue *seed_nodes) { node_output_edges_.clear(); node_input_num_.clear(); node_input_edges_.clear(); - std::unordered_set visited_nodes; + mindspore::HashSet visited_nodes; std::queue que; que.push(get_return()); while (!que.empty()) { @@ -900,15 +890,11 @@ void KernelGraph::AddRefCorrespondPairs(const AnfWithOutIndex &final_pair, const MS_LOG(EXCEPTION) << "Out_pair is already in RefOutputMap, node is " << final_pair.first->DebugString() << ", index is " << final_pair.second; } - (void)ref_out_in_map_.insert(std::make_pair(final_pair, origin_pair)); + (void)ref_out_in_map_.emplace(final_pair, origin_pair); } bool KernelGraph::RemoveValueNodeFromGraph(const ValueNodePtr &value_node) { - if (graph_value_nodes_.find(value_node) != graph_value_nodes_.end()) { - (void)graph_value_nodes_.erase(value_node); - return true; - } - return false; + return graph_value_nodes_.erase(value_node) != 0; } void KernelGraph::ReplaceGraphInput(const AnfNodePtr &old_parameter, const AnfNodePtr &new_parameter) { @@ -1094,13 +1080,15 @@ void KernelGraph::ReplaceInternalOutput(const AnfNodePtr &node, const AnfNodePtr return; } MS_LOG(INFO) << "Replace internal node " << node->DebugString() << " To " << new_node->DebugString(); - auto &front_nodes = iter->second; - // Move all front nodes to new node mapping - internal_outputs_to_front_map_[new_node] = front_nodes; + auto front_nodes = std::move(iter->second); + // We should do 'erase(iter)' before modifying 'internal_outputs_to_front_map_', + // since 'iter' may be invalidated after a new item is added. + internal_outputs_to_front_map_.erase(iter); + // Move all front nodes to new node mapping.
for (const auto &front_node_iter : front_nodes) { front_to_internal_outputs_map_[front_node_iter.second.first] = new_node; } - internal_outputs_to_front_map_.erase(iter); + internal_outputs_to_front_map_[new_node] = std::move(front_nodes); } void KernelGraph::ReplaceInternalOutput(const AnfNodePtr &node, const AnfNodePtr &new_node, size_t src_output_idx, @@ -1126,13 +1114,14 @@ void KernelGraph::ReplaceInternalOutput(const AnfNodePtr &node, const AnfNodePtr MS_LOG(INFO) << "The output " << src_output_idx << " of node " << node->DebugString() << " is not an internal node"; return; } - auto front_node_pair = front_node_iter->second; - internal_outputs_to_front_map_[new_node][dst_output_idx] = front_node_pair; - front_to_internal_outputs_map_[front_node_pair.first] = new_node; - front_nodes.erase(src_output_idx); + auto front_node_pair = std::move(front_node_iter->second); + (void)front_nodes.erase(front_node_iter); if (front_nodes.empty()) { - internal_outputs_to_front_map_.erase(iter); + (void)internal_outputs_to_front_map_.erase(iter); } + // We should do 'erase' before 'insert', since 'iter' may be invalidated after a new item is added. + front_to_internal_outputs_map_[front_node_pair.first] = new_node; + internal_outputs_to_front_map_[new_node][dst_output_idx] = std::move(front_node_pair); } void KernelGraph::CacheInternalParameterToFrontNode(const AnfNodePtr &parameter, @@ -1161,7 +1150,7 @@ void KernelGraph::CacheInternalParameterToFrontNode(const AnfNodePtr &parameter, } AnfWithOutIndex KernelGraph::GetFrontNodeByInternalParameter(const AnfNodePtr &parameter) const { - const auto &iter = internal_parameter_to_front_node_map_.find(parameter); + auto iter = internal_parameter_to_front_node_map_.find(parameter); if (iter != internal_parameter_to_front_node_map_.end()) { return iter->second; } @@ -1169,10 +1158,6 @@ AnfWithOutIndex KernelGraph::GetFrontNodeByInternalParameter(const AnfNodePtr &p } FuncGraphPtr KernelGraph::GetFuncGraph() { - if (front_backend_anf_map_.empty()) { - return nullptr; - } - for (const auto &front_backend_anf : front_backend_anf_map_) { const auto &front_node = front_backend_anf.first; const auto &func_graph = front_node->func_graph(); @@ -1218,7 +1203,7 @@ void KernelGraph::CacheGraphOutputToFrontNodeWithIndex(const std::vectorsecond; } @@ -1234,11 +1219,7 @@ AnfNodePtr KernelGraph::GetInternalOutputByFrontNode(const AnfNodePtr &front_nod } bool KernelGraph::IsInternalOutput(const AnfNodePtr &node) const { - auto front_nodes_iter = internal_outputs_to_front_map_.find(node); - if (front_nodes_iter == internal_outputs_to_front_map_.end()) { - return false; - } - return true; + return internal_outputs_to_front_map_.find(node) != internal_outputs_to_front_map_.end(); } bool KernelGraph::IsInternalOutput(const AnfNodePtr &node, size_t output_idx) const { @@ -1247,10 +1228,7 @@ bool KernelGraph::IsInternalOutput(const AnfNodePtr &node, size_t output_idx) co return false; } auto &front_nodes = front_nodes_iter->second; - if (front_nodes.find(output_idx) == front_nodes.end()) { - return false; - } - return true; + return front_nodes.find(output_idx) != front_nodes.end(); } bool KernelGraph::IsUniqueTargetInternalOutput(const AnfNodePtr &node, size_t output_idx) const { @@ -1296,15 +1274,13 @@ void KernelGraph::UpdateChildGraphOrder() { void KernelGraph::RemoveNodeFromGraph(const AnfNodePtr &node) { MS_EXCEPTION_IF_NULL(node); - if (backend_front_anf_map_.find(node) != backend_front_anf_map_.end()) { - auto front_node = backend_front_anf_map_[node]; -
(void)backend_front_anf_map_.erase(node); - (void)front_backend_anf_map_.erase(front_node); + auto iter = backend_front_anf_map_.find(node); + if (iter != backend_front_anf_map_.end()) { + (void)front_backend_anf_map_.erase(iter->second); + (void)backend_front_anf_map_.erase(iter); } if (node->isa()) { - if (graph_value_nodes_.find(node->cast()) != graph_value_nodes_.end()) { - (void)graph_value_nodes_.erase(node->cast()); - } + (void)graph_value_nodes_.erase(node->cast()); } } diff --git a/mindspore/ccsrc/backend/session/kernel_graph.h b/mindspore/ccsrc/backend/session/kernel_graph.h index b91a9b1039c..806fcb3e80c 100644 --- a/mindspore/ccsrc/backend/session/kernel_graph.h +++ b/mindspore/ccsrc/backend/session/kernel_graph.h @@ -22,11 +22,11 @@ #include #include #include -#include #include -#include #include #include +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "ir/func_graph.h" #include "ir/anf.h" #include "ir/graph_utils.h" @@ -163,7 +163,7 @@ class KernelGraph : public FuncGraph { // add value node tensor relation map void TensorValueNodeMapAdd(const tensor::TensorPtr &tensor, const ValueNodePtr &value_node); // get all value nodes of graph - const std::unordered_set graph_value_nodes() const { return graph_value_nodes_; } + const mindspore::HashSet graph_value_nodes() const { return graph_value_nodes_; } // add value node to graph void AddValueNodeToGraph(const ValueNodePtr &value_node); // ref output is in map @@ -358,10 +358,10 @@ class KernelGraph : public FuncGraph { void InsertToSendRecvPair(const CNodePtr &allreduce, const std::pair &send_recv_pair) { allreduce_to_send_recv_pairs_[allreduce] = send_recv_pair; } - const std::unordered_map> &allreduce_from_send_recv_pairs() const { + const mindspore::HashMap> &allreduce_from_send_recv_pairs() const { return allreduce_from_send_recv_pairs_; } - const std::unordered_map> &allreduce_to_send_recv_pairs() const { + const mindspore::HashMap> &allreduce_to_send_recv_pairs() const { return allreduce_to_send_recv_pairs_; } @@ -405,7 +405,7 @@ class KernelGraph : public FuncGraph { void SetKernelInfoForNode(const AnfNodePtr &node) const; AnfNodePtr MakeValueNode(const AnfNodePtr &node) const; void EnqueueActiveNodes(const AnfNodePtr &node, std::queue *visit_queue, - std::unordered_set *visited_nodes, bool comm_first = true); + mindspore::HashSet *visited_nodes, bool comm_first = true); // update node edge list void UpdateNodeEdgeList(std::queue *seed_nodes); // add node depend edge by data edge @@ -432,21 +432,21 @@ class KernelGraph : public FuncGraph { uint32_t root_graph_id_{0}; // record map bettween front anf and backend anf,use two map implement bidirectional map - std::unordered_map front_backend_anf_map_; - std::unordered_map backend_front_anf_map_; - std::unordered_map tuple_backend_front_anf_index_map_; + mindspore::HashMap front_backend_anf_map_; + mindspore::HashMap backend_front_anf_map_; + mindspore::HashMap tuple_backend_front_anf_index_map_; // there may be a tensor from ME backend ,a value ndoe will be create according the tensor,map record - std::unordered_map tensor_to_value_node_map_; + mindspore::HashMap tensor_to_value_node_map_; // include all value nodes - std::unordered_set graph_value_nodes_; - std::unordered_map node_input_num_; - std::unordered_map>> node_input_edges_; + mindspore::HashSet graph_value_nodes_; + mindspore::HashMap node_input_num_; + mindspore::HashMap>> node_input_edges_; // record map between ref final output anf with index and ref origin input with index std::map 
ref_out_in_map_; - std::unordered_map>> node_output_edges_; + mindspore::HashMap>> node_output_edges_; std::map> summary_nodes_; // parameters that will be updated when graph is executed - std::unordered_set updated_parameters_; + mindspore::HashSet updated_parameters_; // graph needn't execute bool executable_{false}; // exist summary node in graph @@ -471,29 +471,29 @@ class KernelGraph : public FuncGraph { // Internal parameter is not the origin parameter of func graph, it is the output of previous kernel graph which is // related to the input of this kernel graph. The first of unordered map is the input of this kernel graph, the second // of unordered map is front node corresponding to the output of previous kernel graph. - std::unordered_map internal_parameter_to_front_node_map_; + mindspore::HashMap internal_parameter_to_front_node_map_; // The first of map is the backend graph output of this kernel graph, the second of map is front node corresponding to // the backend node with index. std::map graph_output_to_front_node_map_; - std::unordered_map front_to_internal_outputs_map_; - std::unordered_map>> + mindspore::HashMap front_to_internal_outputs_map_; + mindspore::HashMap>> internal_outputs_to_front_map_; - std::unordered_map> internal_outputs_tensor_map_; + mindspore::HashMap> internal_outputs_tensor_map_; uint32_t current_epoch_; - std::unordered_map tuple_parameter_to_make_tuple_map_; + mindspore::HashMap tuple_parameter_to_make_tuple_map_; std::set visited_nodes_; std::map edge_to_; std::stack loop_nodes_; std::vector input_nodes_; std::vector input_tensors_; KernelMapTensor output_node_to_tensor_; - std::unordered_map> pre_graphs_; - std::unordered_map> post_graphs_; + mindspore::HashMap> pre_graphs_; + mindspore::HashMap> post_graphs_; // The send/recv pairs inserted for allreduce, the key is allreduce kernel, the first of pair is send node, the second // of pair is recv node. 
- std::unordered_map> allreduce_from_send_recv_pairs_; - std::unordered_map> allreduce_to_send_recv_pairs_; + mindspore::HashMap> allreduce_from_send_recv_pairs_; + mindspore::HashMap> allreduce_to_send_recv_pairs_; std::atomic pre_graph_finished_count_{0}; std::atomic post_graph_finished_count_{0}; bool first_step_{true}; diff --git a/mindspore/ccsrc/backend/session/session_basic.cc b/mindspore/ccsrc/backend/session/session_basic.cc index 9ae6f3d769a..e6906d425e5 100644 --- a/mindspore/ccsrc/backend/session/session_basic.cc +++ b/mindspore/ccsrc/backend/session/session_basic.cc @@ -18,10 +18,10 @@ #include #include #include -#include #include #include +#include "utils/hash_map.h" #include "ops/primitive_c.h" #include "ir/manager.h" #include "abstract/utils.h" @@ -503,7 +503,7 @@ void SessionBasic::ClearGraph() { auto graph_iter = graphs_.begin(); while (graph_iter != graphs_.end()) { graph_iter->second.reset(); - graphs_.erase(graph_iter++); + graph_iter = graphs_.erase(graph_iter); } graph_sum_ = 0; } @@ -656,7 +656,7 @@ void SessionBasic::GetCNodeInfo(const CNodePtr &cnode, std::vector * } void SessionBasic::GetNewCNodeInputs(const CNodePtr &cnode, KernelGraph *graph, std::vector *cnode_inputs, - std::unordered_map *other_graph_cnode) { + mindspore::HashMap *other_graph_cnode) { MS_EXCEPTION_IF_NULL(cnode); MS_EXCEPTION_IF_NULL(graph); MS_EXCEPTION_IF_NULL(other_graph_cnode); @@ -707,7 +707,7 @@ void SessionBasic::GetNewCNodeInputs(const CNodePtr &cnode, KernelGraph *graph, } CNodePtr SessionBasic::CreateNewCNode(const CNodePtr &cnode, KernelGraph *graph, - std::unordered_map *other_graph_cnode) { + mindspore::HashMap *other_graph_cnode) { MS_EXCEPTION_IF_NULL(cnode); MS_EXCEPTION_IF_NULL(graph); MS_EXCEPTION_IF_NULL(other_graph_cnode); @@ -1101,7 +1101,7 @@ ParameterPtr SessionBasic::CreateNewParameter(const AnfNodePtr &anf, KernelGraph KernelGraphPtr SessionBasic::ConstructKernelGraph(const AnfNodePtrList &lst, const AnfNodePtrList &outputs, bool common_opt) { - std::unordered_map other_graph_cnode; + mindspore::HashMap other_graph_cnode; auto graph = NewKernelGraph(); MS_EXCEPTION_IF_NULL(graph); MS_LOG(INFO) << "Create graph: " << graph->graph_id(); diff --git a/mindspore/ccsrc/backend/session/session_basic.h b/mindspore/ccsrc/backend/session/session_basic.h index d8211d2ca89..049fc28a421 100644 --- a/mindspore/ccsrc/backend/session/session_basic.h +++ b/mindspore/ccsrc/backend/session/session_basic.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
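A note on the two erase idioms rewritten above (ClearGraph here, RemoveNodeFromGraph and ReplaceInternalOutput earlier). Robin-hood tables use backward-shift deletion, so erasing one entry can relocate neighboring entries, and inserting can shift or rehash the table; unlike std::unordered_map, no iterator other than the one returned by erase() is safe to keep. A minimal sketch of the safe sweep, with a hypothetical map that is not part of this patch:

    #include <string>
    #include "utils/hash_map.h"

    // Drop every entry whose value is zero. With a robin-hood table,
    // 'it = m->erase(it)' is the only safe way to continue the sweep;
    // 'm->erase(it++)' advances an iterator that the shift may have
    // invalidated, which is undefined behavior.
    inline void DropZeroEntries(mindspore::HashMap<std::string, int> *m) {
      for (auto it = m->begin(); it != m->end();) {
        if (it->second == 0) {
          it = m->erase(it);
        } else {
          ++it;
        }
      }
    }

The same reasoning explains the erase-before-insert reordering in ReplaceInternalOutput: inserting into the map first could invalidate the iterator that the subsequent erase still needs.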
@@ -18,11 +18,11 @@ #include #include -#include #include #include #include #include +#include "utils/hash_map.h" #include "backend/session/session_context.h" #include "backend/session/kernel_graph.h" #include "backend/session/anf_runtime_algorithm.h" @@ -128,7 +128,7 @@ class SessionBasic : public std::enable_shared_from_this { void SetInputNodeUsage(const KernelGraphPtr &graph, const FuncGraphManagerPtr &manager); CNodePtr CreateNewCNode(const CNodePtr &cnode, KernelGraph *graph, - std::unordered_map *other_graph_cnode); + mindspore::HashMap *other_graph_cnode); CNodePtr CreateNewCNode(const CNodePtr &cnode, KernelGraph *graph); // get graph id in child graphs by ME front anf node pointer @@ -180,7 +180,7 @@ class SessionBasic : public std::enable_shared_from_this { std::vector CreateCallSwitchInputs(const CNodePtr &cnode, KernelGraph *graph); void GetCNodeInfo(const CNodePtr &cnode, std::vector *cnode_inputs) const; void GetNewCNodeInputs(const CNodePtr &cnode, KernelGraph *graph, std::vector *cnode_inputs, - std::unordered_map *other_graph_cnode); + mindspore::HashMap *other_graph_cnode); std::vector CreateCallSwitchLayerInputs(const CNodePtr &cnode, KernelGraph *graph); void ProcessNodeRetFunc(const CNodePtr &cnode, KernelGraph *graph, const std::vector &real_inputs); void HandleInternalOutput(const AnfNodePtr &input_front_node, const AnfNodePtr &backend_node, @@ -319,11 +319,11 @@ class SessionBasic : public std::enable_shared_from_this { std::map>> bucket_map_; std::map free_bucket_id_map_; - std::unordered_map> graphs_; - std::unordered_map> run_op_graphs_; - std::unordered_map front_backend_graph_map_; - std::unordered_map partial_parameters_map_; - std::unordered_map partial_target_map_; + mindspore::HashMap> graphs_; + mindspore::HashMap> run_op_graphs_; + mindspore::HashMap front_backend_graph_map_; + mindspore::HashMap partial_parameters_map_; + mindspore::HashMap partial_target_map_; std::shared_ptr context_; CallBackFunc summary_callback_; static GraphId graph_sum_; diff --git a/mindspore/ccsrc/debug/anf_ir_dump.cc b/mindspore/ccsrc/debug/anf_ir_dump.cc index 4e737886c08..175668bbc2b 100644 --- a/mindspore/ccsrc/debug/anf_ir_dump.cc +++ b/mindspore/ccsrc/debug/anf_ir_dump.cc @@ -20,7 +20,7 @@ #include #include #include -#include +#include "utils/hash_map.h" #include "ir/primitive.h" #include "ir/func_graph.h" #include "runtime/device/kernel_info.h" @@ -319,7 +319,7 @@ void DumpParallelInfo(const CNodePtr &node, const std::shared_ptrbuffer << " }"; } -void DumpAttrs(const std::unordered_map &attrs, const std::shared_ptr &gsub, +void DumpAttrs(const mindspore::HashMap &attrs, const std::shared_ptr &gsub, bool check_strategy = false) { int i = 0; for (const auto &attr : attrs) { @@ -572,7 +572,7 @@ void DumpSubgraph(const OrderedMap } void GetEnvDumpIrLineLevel(LocDumpMode *dump_location) { - static std::unordered_map dump_level_map = { + static mindspore::HashMap dump_level_map = { {std::to_string(kOff), kOff}, {std::to_string(kTopStack), kTopStack}, {std::to_string(kWholeStack), kWholeStack}}; static const auto dump_level_in_env = common::GetEnv("ENV_DUMP_IR_LINE_LEVEL"); auto it = dump_level_map.find(dump_level_in_env); diff --git a/mindspore/ccsrc/debug/anf_ir_utils.cc b/mindspore/ccsrc/debug/anf_ir_utils.cc index 8021e570b42..77c39f6d701 100644 --- a/mindspore/ccsrc/debug/anf_ir_utils.cc +++ b/mindspore/ccsrc/debug/anf_ir_utils.cc @@ -19,9 +19,9 @@ #include #include #include -#include #include #include +#include "utils/hash_map.h" #include "ir/graph_utils.h" #include 
"utils/symbolic.h" #include "ir/meta_func_graph.h" diff --git a/mindspore/ccsrc/debug/anf_ir_utils.h b/mindspore/ccsrc/debug/anf_ir_utils.h index 4aa7bc16329..1cbadd58e6d 100644 --- a/mindspore/ccsrc/debug/anf_ir_utils.h +++ b/mindspore/ccsrc/debug/anf_ir_utils.h @@ -21,10 +21,10 @@ #include #include #include -#include -#include #include +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "ir/anf.h" #include "ir/func_graph.h" #include "ir/meta_func_graph.h" diff --git a/mindspore/ccsrc/debug/data_dump/npy_header.cc b/mindspore/ccsrc/debug/data_dump/npy_header.cc index 6f4e42e9e14..eafde1413b9 100644 --- a/mindspore/ccsrc/debug/data_dump/npy_header.cc +++ b/mindspore/ccsrc/debug/data_dump/npy_header.cc @@ -16,10 +16,10 @@ #include "debug/data_dump/npy_header.h" -#include #include #include +#include "utils/hash_map.h" #include "mindspore/core/ir/dtype.h" #include "mindspore/core/utils/log_adapter.h" #include "mindspore/core/utils/convert_utils_base.h" @@ -92,7 +92,7 @@ std::string NpyHeader::shape_to_str() const { } // dtype description corresponding to tensor type -const std::unordered_map type_desc_map = { +const mindspore::HashMap type_desc_map = { {kNumberTypeBool, DtypeDescr{'|', 'b', 1}}, {kNumberTypeInt8, DtypeDescr{'|', 'i', 1}}, {kNumberTypeInt16, DtypeDescr{'<', 'i', 2}}, {kNumberTypeInt32, DtypeDescr{'<', 'i', 4}}, {kNumberTypeInt64, DtypeDescr{'<', 'i', 8}}, {kNumberTypeUInt8, DtypeDescr{'|', 'u', 1}}, diff --git a/mindspore/ccsrc/debug/debugger/proto_exporter.cc b/mindspore/ccsrc/debug/debugger/proto_exporter.cc index ba82ef9e7fc..a936b593b11 100644 --- a/mindspore/ccsrc/debug/debugger/proto_exporter.cc +++ b/mindspore/ccsrc/debug/debugger/proto_exporter.cc @@ -18,11 +18,11 @@ #include #include #include -#include -#include #include #include +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "debug/anf_ir_utils.h" #include "debug/common.h" #include "debug/debugger/debugger.h" diff --git a/mindspore/ccsrc/debug/debugger/tensor_summary.h b/mindspore/ccsrc/debug/debugger/tensor_summary.h index 1d864f779cf..080c88b27f5 100644 --- a/mindspore/ccsrc/debug/debugger/tensor_summary.h +++ b/mindspore/ccsrc/debug/debugger/tensor_summary.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -17,11 +17,11 @@ #define MINDSPORE_TENSOR_SUMMARY_H #include -#include #include #include #include +#include "utils/hash_map.h" #include "debug/debug_services.h" #ifdef ONLINE_DBG_MODE @@ -155,9 +155,9 @@ class TensorSummary : public ITensorSummary { double epsilon_; bool mean_sd_cal_enabled_; VarianceAndMeanCalculator current_mean_variance_; - std::unordered_map> means_; - std::unordered_map> all_close_; - std::unordered_map> range_counts_; + mindspore::HashMap> means_; + mindspore::HashMap> all_close_; + mindspore::HashMap> range_counts_; double_t StatLookup(const DebugServices::watchpoint_t &); double_t StatLookup(const std::string &, const DebugServices::watchpoint_t &); double_t GetZeroValPercent(); diff --git a/mindspore/ccsrc/debug/rdr/recorder_manager.h b/mindspore/ccsrc/debug/rdr/recorder_manager.h index 1929cb1c55d..676550c559a 100644 --- a/mindspore/ccsrc/debug/rdr/recorder_manager.h +++ b/mindspore/ccsrc/debug/rdr/recorder_manager.h @@ -19,10 +19,10 @@ #include #include #include -#include #include #include #include +#include "utils/hash_map.h" #include "debug/env_config_parser.h" namespace mindspore { @@ -83,7 +83,7 @@ class RecorderManager { mutable std::mutex mtx_; // , BaserRecorderPtr - std::unordered_map, BaseRecorderPtr, pair_hash> recorder_container_; + mindspore::HashMap, BaseRecorderPtr, pair_hash> recorder_container_; }; } // namespace mindspore #endif // MINDSPORE_CCSRC_DEBUG_RDR_RECORDER_MANAGER_H_ diff --git a/mindspore/ccsrc/debug/trace.cc b/mindspore/ccsrc/debug/trace.cc index 53ce2e58ef5..984a326f4ad 100644 --- a/mindspore/ccsrc/debug/trace.cc +++ b/mindspore/ccsrc/debug/trace.cc @@ -19,8 +19,6 @@ #include #include #include -#include -#include #include #include #include @@ -28,6 +26,8 @@ #include #include +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "ir/meta_func_graph.h" #include "ir/graph_utils.h" #include "frontend/operator/composite/composite.h" @@ -145,7 +145,7 @@ class AnalyzeFailExporter : public AnfExporter { AbstractBasePtr GetNodeAbstract(const AnfNodePtr &nd); AnfNodeConfigPtr GetForwardConfig(const AnfNodeConfigPtr &cfg); void ProcessFuncGraphCall(const CNodePtr &node, std::string *const op_comment); - std::unordered_map CreateTaggedNodeMap( + mindspore::HashMap CreateTaggedNodeMap( const std::vector &node_config_stack); private: @@ -153,10 +153,10 @@ class AnalyzeFailExporter : public AnfExporter { AnalysisEnginePtr engine_ = nullptr; }; -std::unordered_map AnalyzeFailExporter::CreateTaggedNodeMap( +mindspore::HashMap AnalyzeFailExporter::CreateTaggedNodeMap( const std::vector &node_config_stack) { - std::unordered_set forwarded_configs; // Check if config. is forwarded. - std::unordered_map tagged_func_graphs; + mindspore::HashSet forwarded_configs; // Check if config. is forwarded. + mindspore::HashMap tagged_func_graphs; size_t index = 0; for (auto &node_config : node_config_stack) { MS_EXCEPTION_IF_NULL(node_config); @@ -327,7 +327,7 @@ bool AnalyzeFailExporter::ExportFuncGraph(const std::string &filename, const Tra } auto tagged_func_graphs = CreateTaggedNodeMap(node_config_stack); - std::unordered_set printed_func_graphs; // Check if func graph has been printed. + mindspore::HashSet printed_func_graphs; // Check if func graph has been printed. 
// Output graph on the analysis stack for (const auto &node_config : node_config_stack) { MS_EXCEPTION_IF_NULL(node_config); diff --git a/mindspore/ccsrc/fl/server/consistent_hash_ring.cc b/mindspore/ccsrc/fl/server/consistent_hash_ring.cc index 64c01e2191a..52aee7ab496 100644 --- a/mindspore/ccsrc/fl/server/consistent_hash_ring.cc +++ b/mindspore/ccsrc/fl/server/consistent_hash_ring.cc @@ -37,7 +37,7 @@ bool ConsistentHashRing::Insert(uint32_t rank) { bool ConsistentHashRing::Erase(uint32_t rank) { for (auto iterator = ring_.begin(); iterator != ring_.end();) { if (iterator->second == rank) { - (void)ring_.erase(iterator++); + iterator = ring_.erase(iterator); } else { ++iterator; } diff --git a/mindspore/ccsrc/fl/server/distributed_count_service.h b/mindspore/ccsrc/fl/server/distributed_count_service.h index d98f2e9f195..60575d5aca0 100644 --- a/mindspore/ccsrc/fl/server/distributed_count_service.h +++ b/mindspore/ccsrc/fl/server/distributed_count_service.h @@ -21,6 +21,7 @@ #include #include #include +#include "utils/hash_map.h" #include "proto/ps.pb.h" #include "fl/server/common.h" #include "ps/core/server_node.h" @@ -117,14 +118,14 @@ class DistributedCountService { // Key: name, e.g., startFLJob, updateModel, push. // Value: a set of ids without repetition, because each worker may report multiple times. - std::unordered_map> global_current_count_; + mindspore::HashMap> global_current_count_; // Key: name, e.g., StartFLJobCount. // Value: global threshold count in the server cluster dimension for this name. - std::unordered_map global_threshold_count_; + mindspore::HashMap global_threshold_count_; // First/last count event callbacks of the name. - std::unordered_map counter_handlers_; + mindspore::HashMap counter_handlers_; // Because the count is increased/queried concurrently, we must ensure the operations are thread-safe. std::unordered_map mutex_; diff --git a/mindspore/ccsrc/fl/server/distributed_metadata_store.h b/mindspore/ccsrc/fl/server/distributed_metadata_store.h index 743ecf33913..d14db23d736 100644 --- a/mindspore/ccsrc/fl/server/distributed_metadata_store.h +++ b/mindspore/ccsrc/fl/server/distributed_metadata_store.h @@ -20,6 +20,7 @@ #include #include #include +#include "utils/hash_map.h" #include "proto/ps.pb.h" #include "fl/server/common.h" #include "ps/core/server_node.h" @@ -106,7 +107,7 @@ class DistributedMetadataStore { // We store metadata serialized by ProtoBuffer so that the data storage and data transmission APIs are easy to use. // Key: data name. // Value: ProtoBuffer Struct. - std::unordered_map metadata_; + mindspore::HashMap metadata_; // Because the metadata is read/written concurrently, we must ensure the operations are thread-safe. std::unordered_map mutex_; diff --git a/mindspore/ccsrc/fl/server/kernel/kernel_factory.h b/mindspore/ccsrc/fl/server/kernel/kernel_factory.h index 7a3e6afbfa9..3ae6d26cc08 100644 --- a/mindspore/ccsrc/fl/server/kernel/kernel_factory.h +++ b/mindspore/ccsrc/fl/server/kernel/kernel_factory.h @@ -21,7 +21,7 @@ #include #include #include -#include +#include "utils/hash_map.h" #include "fl/server/common.h" #include "fl/server/kernel/params_info.h" @@ -83,7 +83,7 @@ class KernelFactory { // Generally, a server kernel can correspond to several ParamsInfo which are registered by the method 'Register' in // server kernel's *.cc files.
- std::unordered_map>> name_to_creator_map_; + mindspore::HashMap>> name_to_creator_map_; }; } // namespace kernel } // namespace server diff --git a/mindspore/ccsrc/fl/server/kernel/round/round_kernel.cc b/mindspore/ccsrc/fl/server/kernel/round/round_kernel.cc index 042323ab281..e8002883aca 100644 --- a/mindspore/ccsrc/fl/server/kernel/round/round_kernel.cc +++ b/mindspore/ccsrc/fl/server/kernel/round/round_kernel.cc @@ -124,7 +124,7 @@ void RoundKernel::GenerateOutput(const std::vector &outputs, const v outputs[0]->size = len; std::unique_lock lock(heap_data_mtx_); - (void)heap_data_.insert(std::make_pair(outputs[0], std::move(output_data))); + (void)heap_data_.emplace(outputs[0], std::move(output_data)); return; } } // namespace kernel diff --git a/mindspore/ccsrc/fl/server/kernel/round/round_kernel.h b/mindspore/ccsrc/fl/server/kernel/round/round_kernel.h index c7184a89eda..645713ed3f9 100644 --- a/mindspore/ccsrc/fl/server/kernel/round/round_kernel.h +++ b/mindspore/ccsrc/fl/server/kernel/round/round_kernel.h @@ -26,7 +26,7 @@ #include #include #include -#include +#include "utils/hash_map.h" #include "backend/kernel_compiler/common_utils.h" #include "backend/kernel_compiler/cpu/cpu_kernel.h" #include "fl/server/common.h" @@ -120,7 +120,7 @@ class RoundKernel : virtual public CPUKernel { std::mutex release_mtx_; std::queue heap_data_to_release_; std::mutex heap_data_mtx_; - std::unordered_map> heap_data_; + mindspore::HashMap> heap_data_; }; } // namespace kernel } // namespace server diff --git a/mindspore/ccsrc/fl/server/kernel/round/round_kernel_factory.h b/mindspore/ccsrc/fl/server/kernel/round/round_kernel_factory.h index 0f799ea7944..ac02c8794f3 100644 --- a/mindspore/ccsrc/fl/server/kernel/round/round_kernel_factory.h +++ b/mindspore/ccsrc/fl/server/kernel/round/round_kernel_factory.h @@ -20,7 +20,7 @@ #include #include #include -#include +#include "utils/hash_map.h" #include "fl/server/common.h" #include "fl/server/kernel/round/round_kernel.h" @@ -42,7 +42,7 @@ class RoundKernelFactory { RoundKernelFactory(const RoundKernelFactory &) = delete; RoundKernelFactory &operator=(const RoundKernelFactory &) = delete; - std::unordered_map name_to_creator_map_; + mindspore::HashMap name_to_creator_map_; }; class RoundKernelRegister { diff --git a/mindspore/ccsrc/fl/server/local_meta_store.h b/mindspore/ccsrc/fl/server/local_meta_store.h index a9ced6e8af3..98b6b642a31 100644 --- a/mindspore/ccsrc/fl/server/local_meta_store.h +++ b/mindspore/ccsrc/fl/server/local_meta_store.h @@ -20,7 +20,7 @@ #include #include #include -#include +#include "utils/hash_map.h" #include "fl/server/common.h" namespace mindspore { @@ -77,7 +77,7 @@ class LocalMetaStore { LocalMetaStore &operator=(const LocalMetaStore &) = delete; // key_to_meta_ stores metadata with key-value format. - std::unordered_map key_to_meta_; + mindspore::HashMap key_to_meta_; // This mutex makes sure that the operations on key_to_meta_ is threadsafe. std::mutex mtx_; size_t curr_iter_num_; diff --git a/mindspore/ccsrc/frontend/operator/composite/composite.h b/mindspore/ccsrc/frontend/operator/composite/composite.h index 9a0fbeae7a6..5444663915d 100644 --- a/mindspore/ccsrc/frontend/operator/composite/composite.h +++ b/mindspore/ccsrc/frontend/operator/composite/composite.h @@ -1,7 +1,7 @@ /** * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/). 
* - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -21,11 +21,11 @@ #include #include -#include #include #include #include #include +#include "utils/hash_map.h" #include "frontend/operator/composite/zip_operation.h" #include "frontend/operator/composite/list_append_operation.h" #include "frontend/operator/composite/do_signature.h" @@ -43,7 +43,7 @@ namespace prim { using AbstractSlicePtr = abstract::AbstractSlicePtr; using AbstractScalarPtr = abstract::AbstractScalarPtr; using AbstractTensorPtr = abstract::AbstractTensorPtr; -using ElemwiseMap = std::unordered_map; +using ElemwiseMap = mindspore::HashMap; using ArgsPairList = std::vector>; class HyperMap : public MetaFuncGraph { diff --git a/mindspore/ccsrc/frontend/operator/composite/do_signature.h b/mindspore/ccsrc/frontend/operator/composite/do_signature.h index c67d8a5346f..10f39c74c5e 100644 --- a/mindspore/ccsrc/frontend/operator/composite/do_signature.h +++ b/mindspore/ccsrc/frontend/operator/composite/do_signature.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,12 +19,12 @@ #include #include -#include #include #include #include #include +#include "utils/hash_map.h" #include "pipeline/jit/static_analysis/static_analysis.h" #include "utils/misc.h" #include "utils/any.h" diff --git a/mindspore/ccsrc/frontend/operator/composite/multitype_funcgraph.h b/mindspore/ccsrc/frontend/operator/composite/multitype_funcgraph.h index 279722e2fa0..9dcc9413f0d 100644 --- a/mindspore/ccsrc/frontend/operator/composite/multitype_funcgraph.h +++ b/mindspore/ccsrc/frontend/operator/composite/multitype_funcgraph.h @@ -1,7 +1,7 @@ /** * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/). * - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -21,11 +21,11 @@ #include #include -#include #include #include #include #include +#include #include "pipeline/jit/static_analysis/static_analysis.h" #include "utils/misc.h" #include "ir/dtype.h" diff --git a/mindspore/ccsrc/frontend/operator/composite/unpack_call.h b/mindspore/ccsrc/frontend/operator/composite/unpack_call.h index 5a5016f93bf..866df943e2b 100644 --- a/mindspore/ccsrc/frontend/operator/composite/unpack_call.h +++ b/mindspore/ccsrc/frontend/operator/composite/unpack_call.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
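Alongside the container swaps, the patch rewrites insert(std::make_pair(key, value)) as emplace(key, value), as in heap_data_ above and the transforms() caches further down. A self-contained sketch of why that matters once the mapped type is move-only; the cache and names here are hypothetical:

    #include <memory>
    #include <string>
    #include <utility>
    #include "utils/hash_map.h"

    // emplace() constructs the stored pair in place from its arguments, so a
    // move-only payload is moved straight into the table. insert(
    // std::make_pair(key, value)) would build a temporary pair first and then
    // move that pair into the node, an extra step emplace() skips.
    inline void CacheResult(mindspore::HashMap<std::string, std::unique_ptr<int>> *cache,
                            const std::string &key, std::unique_ptr<int> value) {
      (void)cache->emplace(key, std::move(value));
    }

The (void) cast mirrors the codebase convention visible in this patch for deliberately ignored return values.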
@@ -19,12 +19,12 @@ #include #include -#include #include #include #include #include +#include "utils/hash_map.h" #include "pipeline/jit/static_analysis/static_analysis.h" #include "utils/misc.h" #include "utils/any.h" diff --git a/mindspore/ccsrc/frontend/operator/composite/zip_operation.h b/mindspore/ccsrc/frontend/operator/composite/zip_operation.h index 6c142e930fe..c551ff5fc6a 100644 --- a/mindspore/ccsrc/frontend/operator/composite/zip_operation.h +++ b/mindspore/ccsrc/frontend/operator/composite/zip_operation.h @@ -1,7 +1,7 @@ /** * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/). * - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -21,12 +21,12 @@ #include #include -#include #include #include #include #include +#include "utils/hash_map.h" #include "pipeline/jit/static_analysis/static_analysis.h" #include "utils/misc.h" #include "utils/any.h" diff --git a/mindspore/ccsrc/frontend/operator/prim_to_function.h b/mindspore/ccsrc/frontend/operator/prim_to_function.h index 3e29e9d3d65..aea99ac1346 100644 --- a/mindspore/ccsrc/frontend/operator/prim_to_function.h +++ b/mindspore/ccsrc/frontend/operator/prim_to_function.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -22,9 +22,9 @@ #include #include #include -#include #include +#include "utils/hash_map.h" #include "ir/anf.h" #include "ir/primitive.h" #include "ir/dtype.h" @@ -56,7 +56,7 @@ class PrimToFunction { PrimToFunction(); // Get the number of primitive arguments int64_t GetPrimType(const PrimitivePtr &prim) const; - const std::unordered_map prim_func_type_map_; + const mindspore::HashMap prim_func_type_map_; }; } // namespace prim } // namespace mindspore diff --git a/mindspore/ccsrc/frontend/optimizer/ad/dfunctor.cc b/mindspore/ccsrc/frontend/optimizer/ad/dfunctor.cc index 35a069a8f95..bd68567eead 100644 --- a/mindspore/ccsrc/frontend/optimizer/ad/dfunctor.cc +++ b/mindspore/ccsrc/frontend/optimizer/ad/dfunctor.cc @@ -36,8 +36,8 @@ namespace mindspore { namespace ad { -std::unordered_map DFunctor::func_graph_to_functor_; -std::unordered_map DFunctor::anfnode_to_adjoin_definition_; +mindspore::HashMap DFunctor::func_graph_to_functor_; +mindspore::HashMap DFunctor::anfnode_to_adjoin_definition_; bool lift_fv_before_grad = true; @@ -142,7 +142,7 @@ void DFunctor::BackPropagateSwitchLayer(const CNodePtr &cnode_morph, const CNode if (!IsPrimitiveCNode(input, prim::kPrimMakeTuple)) { MS_LOG(EXCEPTION) << "The second input of switch_layer expects a tuple of graphs, but got " << input->ToString() << "."; } - std::unordered_map node_to_fg; + mindspore::HashMap node_to_fg; auto tuple_graphs = input->cast(); for (size_t i = 1; i < tuple_graphs->size(); ++i) { auto graph = tuple_graphs->input(i); @@ -510,8 +510,8 @@ void DFunctor::MapMorphism() { auto output = k_graph_->NewCNode({NewValueNode(prim::kPrimMakeTuple), forward_app, NewValueNode(tape_)}); output_adjoint->second->RegisterKUser(output, 1); k_graph_->set_output(output); - (void)primal_graph_->transforms().insert(std::make_pair("grad", FuncGraphTransform(k_graph_))); - (void)k_graph_->transforms().insert(std::make_pair("primal",
FuncGraphTransform(primal_graph_))); + (void)primal_graph_->transforms().emplace("grad", FuncGraphTransform(k_graph_)); + (void)k_graph_->transforms().emplace("primal", FuncGraphTransform(primal_graph_)); } FuncGraphPtr DFunctor::KUserDefined(const FuncGraphPtr &primal) { @@ -536,8 +536,8 @@ FuncGraphPtr DFunctor::KUserDefined(const FuncGraphPtr &primal) { } // Cache the grad func - (void)primal->transforms().insert(std::make_pair("grad", FuncGraphTransform(fg))); - (void)fg->transforms().insert(std::make_pair("primal", FuncGraphTransform(primal))); + (void)primal->transforms().emplace("grad", FuncGraphTransform(fg)); + (void)fg->transforms().emplace("primal", FuncGraphTransform(primal)); // Reset defer_inline to enable successive inlining primal->set_flag(FUNC_GRAPH_FLAG_DEFER_INLINE, false); @@ -868,7 +868,7 @@ CNodePtr GetPrimalUser(const CNodePtr &j_user, const std::map> FindPrimalJPair(const FuncGraphManagerPtr &manager, +static mindspore::HashMap> FindPrimalJPair(const FuncGraphManagerPtr &manager, const FuncGraphPtr &primal_graph) { std::vector j_users; std::map> primal_map; @@ -893,7 +893,7 @@ static std::unordered_map> FindPrimalJPair(const } } - std::unordered_map> primal_user_to_j_users; + mindspore::HashMap> primal_user_to_j_users; for (const auto &j_user : j_users) { MS_EXCEPTION_IF_NULL(j_user); auto primal = GetPrimalUser(j_user, primal_map); diff --git a/mindspore/ccsrc/frontend/optimizer/ad/dfunctor.h b/mindspore/ccsrc/frontend/optimizer/ad/dfunctor.h index 7709d3e7d97..0355d46f487 100644 --- a/mindspore/ccsrc/frontend/optimizer/ad/dfunctor.h +++ b/mindspore/ccsrc/frontend/optimizer/ad/dfunctor.h @@ -21,11 +21,11 @@ #include #include -#include #include #include #include +#include "utils/hash_map.h" #include "ir/anf.h" #include "ir/meta_func_graph.h" #include "ir/func_graph_cloner.h" @@ -38,7 +38,7 @@ namespace mindspore { namespace ad { -using Registry = std::unordered_map; +using Registry = mindspore::HashMap; class KPrim; extern KPrim g_k_prims; class DFunctor; @@ -111,12 +111,12 @@ class DFunctor : public std::enable_shared_from_this { const CNodePtr &cnode_morph); void ReplaceEquivdout(const CNodePtr &k_app, const CNodePtr &cnode_morph); - std::unordered_map anfnode_to_adjoin_; + mindspore::HashMap anfnode_to_adjoin_; // Cache for indirect fv backpropagation, K o K can only do backprop layer by layer. - std::unordered_map anfnode_to_adjoin_indirect_fv_; + mindspore::HashMap anfnode_to_adjoin_indirect_fv_; // Cache for fv node -> pair, zeros_like>, so EnvGetItemTransform in optimizer // can hit its cache if fv_node is same. - std::unordered_map> anfnode_to_envitem_; + mindspore::HashMap> anfnode_to_envitem_; FuncGraphPtr primal_graph_; // K object for primal_graph_; FuncGraphPtr k_graph_; @@ -128,8 +128,8 @@ class DFunctor : public std::enable_shared_from_this { // Cut off stopped objects in category D. bool need_cut_; bool is_top_; - static std::unordered_map> func_graph_to_functor_; - static std::unordered_map anfnode_to_adjoin_definition_; + static mindspore::HashMap> func_graph_to_functor_; + static mindspore::HashMap anfnode_to_adjoin_definition_; }; // D Functor's rules to map primitive object. @@ -165,7 +165,7 @@ class KPrim { // Refer the comment in KUserDefinedCellBprop. 
template FuncGraphPtr BpropToK(const T &primal, const FuncGraphPtr &bprop_g, const FuncGraphPtr &current_primal_fg, - const CNodePtr &cnode, const std::unordered_map &primal_attrs, + const CNodePtr &cnode, const mindspore::HashMap &primal_attrs, const std::vector &primal_debug_infos); AnfNodePtr BuildOutput(const FuncGraphPtr &bprop_fg, const FuncGraphPtr &current_primal_fg); void TransformArgsForPrimitive(const FuncGraphManagerPtr &mng, const FuncGraphPtr &bprop_fg, @@ -178,12 +178,12 @@ class KPrim { void CheckBprop(const FuncGraphPtr &bprop_fg, const string &prim_to_check); Registry bprop_registry_; - std::unordered_map bprop_registry_meta_; + mindspore::HashMap bprop_registry_meta_; }; template FuncGraphPtr KPrim::BpropToK(const T &primal, const FuncGraphPtr &bprop_fg, const FuncGraphPtr &current_primal_fg, - const CNodePtr &cnode, const std::unordered_map &primal_attrs, + const CNodePtr &cnode, const mindspore::HashMap &primal_attrs, const std::vector &primal_debug_infos) { MS_EXCEPTION_IF_NULL(primal); MS_EXCEPTION_IF_NULL(bprop_fg); diff --git a/mindspore/ccsrc/frontend/optimizer/ad/grad.cc b/mindspore/ccsrc/frontend/optimizer/ad/grad.cc index 2fda88c57db..5e3c1dd6b61 100644 --- a/mindspore/ccsrc/frontend/optimizer/ad/grad.cc +++ b/mindspore/ccsrc/frontend/optimizer/ad/grad.cc @@ -114,7 +114,7 @@ FuncGraphPtr Grad(const FuncGraphPtr &func_graph, const pipeline::ResourceBasePt multi_graph_sink(res); if (func_graph != grad_fg) { - (void)func_graph->transforms().insert(std::make_pair("grad", FuncGraphTransform(res))); + (void)func_graph->transforms().emplace("grad", FuncGraphTransform(res)); } return res; } diff --git a/mindspore/ccsrc/frontend/optimizer/ad/kprim.cc b/mindspore/ccsrc/frontend/optimizer/ad/kprim.cc index d9478d0295c..7ca6745e356 100644 --- a/mindspore/ccsrc/frontend/optimizer/ad/kprim.cc +++ b/mindspore/ccsrc/frontend/optimizer/ad/kprim.cc @@ -80,8 +80,8 @@ bool BpropMindirDirExists() { } // Get the serializable bprop list from the module mindspore.ops.bprop_mindir in python.
-std::unordered_set GetSerializableBpropList() { - std::unordered_set serializable_bprop_list; +mindspore::HashSet GetSerializableBpropList() { + mindspore::HashSet serializable_bprop_list; if (!BpropMindirDirExists()) { return serializable_bprop_list; } @@ -105,14 +105,14 @@ std::unordered_set GetSerializableBpropList() { } bool IsSerializableBprop(const std::string &prim_name) { - static std::unordered_set serializable_bprop_list = GetSerializableBpropList(); + static mindspore::HashSet serializable_bprop_list = GetSerializableBpropList(); return std::any_of(serializable_bprop_list.begin(), serializable_bprop_list.end(), [&prim_name](const std::string &serializable_bprop_prim_name) { return prim_name == serializable_bprop_prim_name; }); } -void GetFilesHash(const std::string &dir, std::unordered_map *bprop_hash_to_file) { +void GetFilesHash(const std::string &dir, mindspore::HashMap *bprop_hash_to_file) { if (dir.empty()) { MS_LOG(ERROR) << "The directory path is empty."; return; @@ -139,13 +139,13 @@ void GetFilesHash(const std::string &dir, std::unordered_mapd_name; - bprop_hash_to_file->insert(std::make_pair(system::sha256::GetHashFromFile(real_path), real_path)); + (void)bprop_hash_to_file->emplace(system::sha256::GetHashFromFile(real_path), real_path); } closedir(open_dir); } -std::unordered_map GetAllBpropFileHash() { - std::unordered_map bprop_hash_to_file; +mindspore::HashMap GetAllBpropFileHash() { + mindspore::HashMap bprop_hash_to_file; auto bprop_dir = GetBpropDir(); auto realpath = FileUtils::GetRealPath(common::SafeCStr(bprop_dir)); if (!realpath.has_value()) { @@ -228,7 +228,7 @@ AnfNodePtr GetPythonOps(const FuncGraphPtr &fg, const AnfNodePtr &origin_node, c MS_EXCEPTION_IF_NULL(origin_node); MS_EXCEPTION_IF_NULL(prim); // DoSignaturePrimitive to the pair of primitive name and module name. - static std::unordered_map> python_ops{ + static mindspore::HashMap> python_ops{ {"S-Prim-zeros_like_leaf", {"zeros_like", ""}}, {"S-Prim-getitem", {"getitem", "mindspore.ops.composite.multitype_ops.getitem_impl"}}}; auto iter = python_ops.find(prim->name()); @@ -527,7 +527,7 @@ FuncGraphPtr KPrim::KPrimitive(const CNodePtr &cnode, const ValueNodePtr &value_ } AdjustForAutoMonad(prim, bprop_fg); - std::unordered_map primal_attrs; + mindspore::HashMap primal_attrs; std::vector primal_debug_infos = GeneratePrimalDebugInfo(value_node, resources); if (cnode != nullptr) { primal_attrs = cnode->primal_attrs(); diff --git a/mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc b/mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc index a07a3ba80bd..65d622eee44 100644 --- a/mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc +++ b/mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc @@ -19,11 +19,11 @@ #include #include #include -#include -#include #include #include #include +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "ir/anf.h" #include "frontend/optimizer/ad/prim_bprop_optimizer.h" #include "frontend/optimizer/ad/adjoint.h" @@ -283,7 +283,7 @@ class KPynativeCellImpl : public KPynativeCell { FuncGraphPtr tape_; AnfNodePtrList cell_inputs_; // These weights need to calculate gradient. 
- std::unordered_set need_grad_weights_; + mindspore::HashSet need_grad_weights_; OrderedMap anfnode_to_adjoin_; // For CNode like TupleGetItem, ListGetItem, MakeTuple, MakeList, it's bypassed by caller so diff --git a/mindspore/ccsrc/frontend/optimizer/ad/prim_bprop_optimizer.h b/mindspore/ccsrc/frontend/optimizer/ad/prim_bprop_optimizer.h index fd1e3fd1fab..b1144625147 100644 --- a/mindspore/ccsrc/frontend/optimizer/ad/prim_bprop_optimizer.h +++ b/mindspore/ccsrc/frontend/optimizer/ad/prim_bprop_optimizer.h @@ -19,9 +19,10 @@ #include #include -#include #include +#include +#include "utils/hash_map.h" #include "frontend/optimizer/irpass.h" #include "ir/func_graph.h" #include "pipeline/jit/resource.h" @@ -42,7 +43,7 @@ using PrimBpropOptGraphInfoPtr = std::shared_ptr; using PrimBpropOptGraphLevel2InfoPtr = std::shared_ptr; -using PrimBpropCache = std::unordered_map; +using PrimBpropCache = mindspore::HashMap; using TupleListKey = std::pair; diff --git a/mindspore/ccsrc/frontend/optimizer/auto_monad_eliminate.cc b/mindspore/ccsrc/frontend/optimizer/auto_monad_eliminate.cc index f0d0c2a208a..fadf0f7c067 100644 --- a/mindspore/ccsrc/frontend/optimizer/auto_monad_eliminate.cc +++ b/mindspore/ccsrc/frontend/optimizer/auto_monad_eliminate.cc @@ -17,21 +17,20 @@ #include "frontend/optimizer/auto_monad_eliminate.h" #include -#include -#include #include #include #include #include +#include "utils/hash_map.h" +#include "utils/ordered_map.h" #include "base/core_ops.h" #include "abstract/abstract_value.h" -#include "utils/ordered_map.h" namespace mindspore { namespace opt { namespace { -using ParamUserMap = std::unordered_map>; +using ParamUserMap = mindspore::HashMap>; using LoadGraphMap = OrderedMap>; std::optional GetRefKey(const AnfNodePtr &node) { diff --git a/mindspore/ccsrc/frontend/optimizer/cse.cc b/mindspore/ccsrc/frontend/optimizer/cse.cc index 6a0f430532c..3ad8b373eac 100644 --- a/mindspore/ccsrc/frontend/optimizer/cse.cc +++ b/mindspore/ccsrc/frontend/optimizer/cse.cc @@ -20,8 +20,8 @@ #include #include -#include +#include "utils/hash_map.h" #include "abstract/abstract_function.h" #include "utils/flags.h" #include "utils/utils.h" @@ -71,8 +71,8 @@ BasePtr AbsOf(const AnfNodePtr &node, bool ignore_fg_abs_tracking_id) { bool CSE::BuildOrderGroupAndDoReplaceForOneGraph(const FuncGraphPtr &fg, const FuncGraphManagerPtr &manager) const { MS_EXCEPTION_IF_NULL(fg); std::vector order_group; - std::unordered_map> groups; - std::unordered_map hashes; + mindspore::HashMap> groups; + mindspore::HashMap hashes; std::vector toposet = TopoSort(fg->get_return()); for (auto node : toposet) { @@ -212,7 +212,7 @@ bool CSE::CheckReplace(const AnfNodePtr &main, const AnfNodePtr &node, bool chec } bool CSE::DoReplace(const FuncGraphManagerPtr manager, const std::vector &order_group, - std::unordered_map> *groups) const { + mindspore::HashMap> *groups) const { bool changes = false; std::set clear_set; for (auto &h : order_group) { diff --git a/mindspore/ccsrc/frontend/optimizer/cse.h b/mindspore/ccsrc/frontend/optimizer/cse.h index 36357bd40fd..425724d3947 100644 --- a/mindspore/ccsrc/frontend/optimizer/cse.h +++ b/mindspore/ccsrc/frontend/optimizer/cse.h @@ -20,8 +20,8 @@ #define MINDSPORE_CCSRC_FRONTEND_OPTIMIZER_CSE_H_ #include -#include #include +#include "utils/hash_map.h" #include "ir/anf.h" #include "ir/manager.h" @@ -46,7 +46,7 @@ class CSE { private: bool BuildOrderGroupAndDoReplace(const FuncGraphManagerPtr manager) const; bool DoReplace(const FuncGraphManagerPtr manager, const std::vector 
&order_group, - std::unordered_map> *groups) const; + mindspore::HashMap> *groups) const; }; BasePtr AbsOf(const AnfNodePtr &node, bool ignore_fg_abs_tracking_id = false); diff --git a/mindspore/ccsrc/frontend/optimizer/cse_pass.h b/mindspore/ccsrc/frontend/optimizer/cse_pass.h index 2e59e8356f4..81c3ff0d503 100644 --- a/mindspore/ccsrc/frontend/optimizer/cse_pass.h +++ b/mindspore/ccsrc/frontend/optimizer/cse_pass.h @@ -1,7 +1,7 @@ /** * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/). * - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,9 +20,9 @@ #define MINDSPORE_CCSRC_FRONTEND_OPTIMIZER_CSE_PASS_H_ #include -#include #include +#include "utils/hash_map.h" #include "frontend/optimizer/cse.h" #include "frontend/optimizer/optimizer.h" diff --git a/mindspore/ccsrc/frontend/optimizer/graph_transform.h b/mindspore/ccsrc/frontend/optimizer/graph_transform.h index 6b4e00d250d..be85f64a5a2 100644 --- a/mindspore/ccsrc/frontend/optimizer/graph_transform.h +++ b/mindspore/ccsrc/frontend/optimizer/graph_transform.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -17,12 +17,12 @@ #ifndef MINDSPORE_CCSRC_FRONTEND_OPTIMIZER_GRAPH_TRANSFORM_H #define MINDSPORE_CCSRC_FRONTEND_OPTIMIZER_GRAPH_TRANSFORM_H -#include #include #include #include #include +#include "utils/hash_map.h" #include "frontend/optimizer/optimizer.h" namespace mindspore { @@ -73,7 +73,7 @@ class GraphTupleParamTransform { auto new_fg = cloner[fg]; auto &params = new_fg->parameters(); std::vector new_params; - std::unordered_map repl; + mindspore::HashMap repl; for (auto &param : params) { auto abs = param->abstract(); if (abs != nullptr && abs->isa()) { @@ -101,7 +101,7 @@ class GraphTupleParamTransform { } private: - std::unordered_map cache_; + mindspore::HashMap cache_; }; } // namespace opt } // namespace mindspore diff --git a/mindspore/ccsrc/frontend/optimizer/irpass/branch_culling.cc b/mindspore/ccsrc/frontend/optimizer/irpass/branch_culling.cc index a76a4a2edcf..4f8f8d28701 100644 --- a/mindspore/ccsrc/frontend/optimizer/irpass/branch_culling.cc +++ b/mindspore/ccsrc/frontend/optimizer/irpass/branch_culling.cc @@ -18,8 +18,8 @@ #include #include -#include +#include "utils/hash_map.h" #include "ir/func_graph.h" #include "frontend/operator/ops.h" @@ -111,10 +111,10 @@ bool InConvertWhiteList(const AnfNodePtr &node, size_t index) { return false; } -using NodeInputReplMap = std::unordered_map, AnfNodePtr, PairHasher>; +using NodeInputReplMap = mindspore::HashMap, AnfNodePtr, PairHasher>; // replace the nodes which should be changed void RunSwitchNodeReplace(const FuncGraphManagerPtr &manager, std::vector> nodes_changed, - std::unordered_map repl_node, NodeInputReplMap repl_node_inputs, + mindspore::HashMap repl_node, NodeInputReplMap repl_node_inputs, const FuncGraphPtr &func_graph) { for (auto &node_pair : nodes_changed) { CNodePtr old_node = node_pair.first; @@ -153,7 +153,7 @@ FuncGraphPtr TransformGraphCondBranchNodes( // record the node that has been changed std::vector> nodes_changed; // record the node to be replaced - std::unordered_map repl_node; + mindspore::HashMap repl_node; //
record the node input to be replaced NodeInputReplMap repl_node_inputs; const AnfNodeSet &nodes = graph->nodes(); @@ -322,7 +322,7 @@ bool IsNetOutputNode(const FuncGraphManagerPtr &manager, const AnfNodePtr &node) // generate node for Depended MakeTuple void GenerateReplNodeForDependMakeTuple( const AnfNodePtr &depended_node, const FuncGraphPtr &graph, const AnfNodePtr &cond, - const std::shared_ptr> &repl_node, + const std::shared_ptr> &repl_node, const std::function &generate_func) { MS_EXCEPTION_IF_NULL(graph->manager()); @@ -356,7 +356,7 @@ void GenerateReplNodeForDependMakeTuple( // generate a replace depend node for a single network output node void GenerateRepDepend( const CNodePtr &node, const FuncGraphPtr &graph, const AnfNodePtr &cond, - const std::shared_ptr> &repl_node, + const std::shared_ptr> &repl_node, const std::function &generate_func) { MS_EXCEPTION_IF_NULL(graph->manager()); @@ -393,8 +393,8 @@ FuncGraphPtr TransformGraphDependNode( MS_EXCEPTION_IF_NULL(manager); ResetSharedOp(); - std::shared_ptr> repl_node = - std::make_shared>(); // record the node to be replaced + std::shared_ptr> repl_node = + std::make_shared>(); // record the node to be replaced const AnfNodeSet &nodes = graph->nodes(); for (auto &node : nodes) { MS_EXCEPTION_IF_NULL(node); diff --git a/mindspore/ccsrc/frontend/optimizer/irpass/call_graph_tuple_transform.h b/mindspore/ccsrc/frontend/optimizer/irpass/call_graph_tuple_transform.h index 44f85f87de4..ca0bb498209 100644 --- a/mindspore/ccsrc/frontend/optimizer/irpass/call_graph_tuple_transform.h +++ b/mindspore/ccsrc/frontend/optimizer/irpass/call_graph_tuple_transform.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,10 +19,10 @@ #include #include -#include -#include #include +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "ir/func_graph.h" #include "ir/func_graph_cloner.h" #include "frontend/optimizer/optimizer_caller.h" diff --git a/mindspore/ccsrc/frontend/optimizer/irpass/env_item_eliminate.h b/mindspore/ccsrc/frontend/optimizer/irpass/env_item_eliminate.h index 6992005b6c6..8c009014f3e 100644 --- a/mindspore/ccsrc/frontend/optimizer/irpass/env_item_eliminate.h +++ b/mindspore/ccsrc/frontend/optimizer/irpass/env_item_eliminate.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
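Several of the replaced maps are keyed by std::pair and therefore name an explicit hasher, like NodeInputReplMap's PairHasher above: neither std:: nor robin_hood hashes std::pair out of the box. A stand-in illustration with simplified key types; PairHash below is not the project's PairHasher:

    #include <cstddef>
    #include <functional>
    #include <string>
    #include <utility>
    #include "utils/hash_map.h"

    // Combine the two member hashes (boost-style mix). The mixing step is
    // not optional with robin-hood tables: they probe a bounded distance,
    // so a hash that piles keys up surfaces as an overflow error rather
    // than merely a slow lookup.
    struct PairHash {
      std::size_t operator()(const std::pair<std::string, size_t> &p) const {
        const std::size_t h1 = std::hash<std::string>()(p.first);
        const std::size_t h2 = std::hash<size_t>()(p.second);
        return h1 ^ (h2 + 0x9e3779b9U + (h1 << 6) + (h1 >> 2));
      }
    };

    // Real key: (AnfNodePtr, input index); value type simplified to int here.
    using DemoReplMap = mindspore::HashMap<std::pair<std::string, size_t>, int, PairHash>;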
@@ -19,10 +19,10 @@ #include #include -#include #include #include +#include "utils/hash_map.h" #include "ir/func_graph.h" #include "ir/func_graph_cloner.h" #include "frontend/optimizer/optimizer_caller.h" @@ -87,8 +87,8 @@ class EnvGetitemTransform { } private: - std::unordered_map, FuncGraphPtr, PairHasher>> + mindspore::HashMap, FuncGraphPtr, PairHasher>> cache_; }; @@ -150,8 +150,8 @@ class EnvGetitemTransformACrossGraph { } private: - std::unordered_map, FuncGraphPtr, PairHasher>> + mindspore::HashMap, FuncGraphPtr, PairHasher>> cache_; }; } // namespace internal diff --git a/mindspore/ccsrc/frontend/optimizer/irpass/grad_var_prepare.cc b/mindspore/ccsrc/frontend/optimizer/irpass/grad_var_prepare.cc index ff6c179cbae..f234ba75da4 100644 --- a/mindspore/ccsrc/frontend/optimizer/irpass/grad_var_prepare.cc +++ b/mindspore/ccsrc/frontend/optimizer/irpass/grad_var_prepare.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -17,9 +17,9 @@ #include "frontend/optimizer/irpass/grad_var_prepare.h" #include #include -#include #include +#include "utils/hash_map.h" #include "frontend/operator/composite/composite.h" #include "frontend/operator/ops.h" #include "frontend/optimizer/irpass.h" diff --git a/mindspore/ccsrc/frontend/optimizer/irpass/grad_var_prepare.h b/mindspore/ccsrc/frontend/optimizer/irpass/grad_var_prepare.h index d9040044d9f..2547e22dfdf 100644 --- a/mindspore/ccsrc/frontend/optimizer/irpass/grad_var_prepare.h +++ b/mindspore/ccsrc/frontend/optimizer/irpass/grad_var_prepare.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
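The cache_ members above follow a two-level memoization shape: an outer map per func graph, an inner map per transform key, filled lazily. Reduced to strings and ints, the pattern looks like the sketch below; Compute() is a stand-in for the real specialization step:

    #include <string>
    #include "utils/hash_map.h"

    class TransformCache {
     public:
      int GetOrCompute(const std::string &graph, int key) {
        // operator[] default-constructs the inner map on first use. Holding
        // 'inner' by reference is safe only because nothing inserts into the
        // outer map afterwards; robin-hood insertions can move entries and
        // invalidate references into the table.
        auto &inner = cache_[graph];
        auto it = inner.find(key);
        if (it != inner.end()) {
          return it->second;
        }
        const int result = Compute(graph, key);
        (void)inner.emplace(key, result);
        return result;
      }

     private:
      static int Compute(const std::string &graph, int key) {
        return static_cast<int>(graph.size()) + key;
      }
      mindspore::HashMap<std::string, mindspore::HashMap<int, int>> cache_;
    };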
@@ -19,9 +19,9 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "frontend/operator/composite/composite.h" #include "frontend/operator/ops.h" #include "frontend/optimizer/irpass.h" diff --git a/mindspore/ccsrc/frontend/optimizer/irpass/incorporate_call.h b/mindspore/ccsrc/frontend/optimizer/irpass/incorporate_call.h index e8b291a541f..6bb3ce3c640 100644 --- a/mindspore/ccsrc/frontend/optimizer/irpass/incorporate_call.h +++ b/mindspore/ccsrc/frontend/optimizer/irpass/incorporate_call.h @@ -19,10 +19,10 @@ #include #include -#include #include #include +#include "utils/hash_map.h" #include "frontend/optimizer/irpass.h" #include "frontend/optimizer/optimizer.h" #include "frontend/optimizer/anf_visitor.h" @@ -68,7 +68,7 @@ class CallOutputTransform { } private: - std::unordered_map, FuncGraphPtr, PairHasher>> cache_; + mindspore::HashMap, FuncGraphPtr, PairHasher>> cache_; }; } // namespace internal diff --git a/mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h b/mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h index 543537387eb..e1feae82857 100644 --- a/mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h +++ b/mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h @@ -20,10 +20,10 @@ #include #include #include -#include #include #include +#include "utils/hash_map.h" #include "ir/func_graph.h" #include "ir/func_graph_cloner.h" #include "frontend/optimizer/optimizer_caller.h" @@ -75,7 +75,7 @@ class GetitemTransform { } private: - std::unordered_map> cache_; + mindspore::HashMap> cache_; }; class GetItemTransformACrossGraph { @@ -127,7 +127,7 @@ class GetItemTransformACrossGraph { } private: - std::unordered_map> cache_; + mindspore::HashMap> cache_; }; bool HasMoreJ(const OptimizerPtr &optimizer) { @@ -516,7 +516,7 @@ class IncorporateGetitem : public AnfVisitor { CNodePtr fg_call_cnode_{nullptr}; std::vector args_{}; std::set processed_nodes_; - std::unordered_map>, FuncGraphPtr, + mindspore::HashMap>, FuncGraphPtr, internal::FuncGraphIntVectorPairHasher> processed_fgs_; internal::GetitemTransform getitem_transform_; @@ -865,7 +865,7 @@ class IncorporateGetitemSwitch : public AnfVisitor { bool is_in_get_{false}, is_in_switch_{false}; std::vector args_{}; std::set processed_nodes_; - std::unordered_map>, FuncGraphPtr, + mindspore::HashMap>, FuncGraphPtr, internal::FuncGraphIntVectorPairHasher> processed_fgs_; internal::GetitemTransform getitem_transform_; diff --git a/mindspore/ccsrc/frontend/optimizer/irpass/inline.h b/mindspore/ccsrc/frontend/optimizer/irpass/inline.h index 9d73c4555be..6f8c6c08c7b 100644 --- a/mindspore/ccsrc/frontend/optimizer/irpass/inline.h +++ b/mindspore/ccsrc/frontend/optimizer/irpass/inline.h @@ -20,8 +20,8 @@ #include #include #include -#include +#include "utils/hash_map.h" #include "frontend/optimizer/irpass.h" #include "frontend/parallel/context.h" #include "frontend/optimizer/optimizer.h" @@ -358,7 +358,7 @@ class InlinerBase : public AnfVisitor { bool is_checked_{false}, is_recursive_{false}; bool use_move_; std::vector> criterions_; - std::unordered_map graph_branch_cache_; + mindspore::HashMap graph_branch_cache_; }; bool IsUniqueUse(InlinerBase *, const FuncGraphPtr &fg, const AnfNodePtr &) { diff --git a/mindspore/ccsrc/frontend/optimizer/irpass/less_batch_normalization.cc b/mindspore/ccsrc/frontend/optimizer/irpass/less_batch_normalization.cc index 8fd894b408a..94ce61e0f3d 100644 --- a/mindspore/ccsrc/frontend/optimizer/irpass/less_batch_normalization.cc +++ 
b/mindspore/ccsrc/frontend/optimizer/irpass/less_batch_normalization.cc @@ -17,7 +17,7 @@ #include "frontend/optimizer/irpass/less_batch_normalization.h" #include -#include +#include "utils/hash_map.h" namespace mindspore { namespace opt { @@ -250,7 +250,7 @@ static const std::vector> kNeedMatchPattern = {Resi const std::set kNeedRemoveNodeSet{ prim::kPrimLoad, prim::kPrimRefToEmbed, prim::kPrimApplyMomentum, prim::kPrimMomentum, prim::kPrimApplyFtrl, prim::kPrimSGD, prim::kPrimApplyRMSProp, prim::kPrimAdam}; -static std::unordered_map> kRemoveIndex{ +static mindspore::HashMap> kRemoveIndex{ {RemoveNodeType::kOtherNode, {2}}, {RemoveNodeType::kOptimizerNode, {3, 5, 6}}}; bool NeedRemove(const ParameterPtr &a, const std::vector &parameter_list) { diff --git a/mindspore/ccsrc/frontend/optimizer/irpass/less_batch_normalization.h b/mindspore/ccsrc/frontend/optimizer/irpass/less_batch_normalization.h index c556aaec2be..c0c250d16b9 100644 --- a/mindspore/ccsrc/frontend/optimizer/irpass/less_batch_normalization.h +++ b/mindspore/ccsrc/frontend/optimizer/irpass/less_batch_normalization.h @@ -21,8 +21,8 @@ #include #include #include -#include +#include "utils/hash_set.h" #include "frontend/optimizer/irpass.h" #include "frontend/optimizer/optimizer.h" #include "frontend/optimizer/anf_visitor.h" @@ -42,7 +42,7 @@ class LessBatchNormalization : public AnfVisitor { bool MatchGraphStructure(const CNodePtr &cnode, const std::vector &match_pattern); private: - std::unordered_set remove_node_list_{}; + mindspore::HashSet remove_node_list_{}; std::vector total_match_node_{0}; size_t match_node_{0}; size_t match_branch_{0}; diff --git a/mindspore/ccsrc/frontend/optimizer/irpass/parameter_eliminate.h b/mindspore/ccsrc/frontend/optimizer/irpass/parameter_eliminate.h index a4717c0ee74..6e5793c992d 100644 --- a/mindspore/ccsrc/frontend/optimizer/irpass/parameter_eliminate.h +++ b/mindspore/ccsrc/frontend/optimizer/irpass/parameter_eliminate.h @@ -18,9 +18,9 @@ #define MINDSPORE_CCSRC_FRONTEND_OPTIMIZER_IRPASS_PARAMETER_ELIMINATE_H #include #include -#include #include +#include "utils/hash_set.h" #include "frontend/optimizer/irpass.h" #include "frontend/optimizer/optimizer.h" #include "frontend/optimizer/anf_visitor.h" @@ -99,11 +99,11 @@ class ParameterEliminator { return {nullptr, {}}; } - static std::unordered_set EraseUnusedParameters(const FuncGraphPtr &fg, const FuncGraphManagerPtr &manager) { + static mindspore::HashSet EraseUnusedParameters(const FuncGraphPtr &fg, const FuncGraphManagerPtr &manager) { MS_EXCEPTION_IF_NULL(fg->manager()); const auto &manager_node_users = fg->manager()->node_users(); const auto &parameters = fg->parameters(); - std::unordered_set unused_parameter_indexes; + mindspore::HashSet unused_parameter_indexes; // Traverse to find all unused parameters. size_t index = 0; for (const auto &parameter : parameters) { @@ -126,7 +126,7 @@ class ParameterEliminator { return unused_parameter_indexes; } - static void EraseArgs(const CNodePtr &caller, const std::unordered_set &unused_parameter_indexes, + static void EraseArgs(const CNodePtr &caller, const mindspore::HashSet &unused_parameter_indexes, const FuncGraphManagerPtr &manager) { std::vector new_args = {caller->inputs()[0]}; for (size_t i = 0; i < caller->inputs().size() - 1; i++) { diff --git a/mindspore/ccsrc/frontend/optimizer/irpass/partial_eliminate.h b/mindspore/ccsrc/frontend/optimizer/irpass/partial_eliminate.h index 7259cfb4b1a..1573da0bea8 100644 --- a/mindspore/ccsrc/frontend/optimizer/irpass/partial_eliminate.h +++ b/mindspore/ccsrc/frontend/optimizer/irpass/partial_eliminate.h @@ -19,10 +19,10 @@ #include #include -#include #include #include +#include "utils/hash_map.h" #include "frontend/optimizer/irpass.h" #include "frontend/optimizer/optimizer.h" #include "frontend/optimizer/anf_visitor.h" diff --git a/mindspore/ccsrc/frontend/optimizer/irpass/recompute_prepare.h b/mindspore/ccsrc/frontend/optimizer/irpass/recompute_prepare.h index 0027f688124..ed1dd069e8d 100644 --- a/mindspore/ccsrc/frontend/optimizer/irpass/recompute_prepare.h +++ b/mindspore/ccsrc/frontend/optimizer/irpass/recompute_prepare.h @@ -17,7 +17,7 @@ #ifndef MINDSPORE_CCSRC_FRONTEND_OPTIMIZER_IRPASS_RECOMPUTE_PREPARE_H_ #define MINDSPORE_CCSRC_FRONTEND_OPTIMIZER_IRPASS_RECOMPUTE_PREPARE_H_ -#include +#include "utils/hash_set.h" #include "frontend/optimizer/irpass.h" #include "frontend/optimizer/optimizer.h" #include "frontend/optimizer/anf_visitor.h" @@ -42,7 +42,7 @@ class SetCellOutputNoRecompute : public AnfVisitor { return nullptr; } if (output->isa()) { - std::unordered_set real_outputs; + mindspore::HashSet real_outputs; GetRealOutputNodes(output, &real_outputs); for (const auto &real_output : real_outputs) { // Set the attr of cnode in case of shared primitives. @@ -53,7 +53,7 @@ class SetCellOutputNoRecompute : public AnfVisitor { return nullptr; } - void GetRealOutputNodes(const AnfNodePtr &output, std::unordered_set *real_outputs) { + void GetRealOutputNodes(const AnfNodePtr &output, mindspore::HashSet *real_outputs) { MS_EXCEPTION_IF_NULL(output); MS_EXCEPTION_IF_NULL(real_outputs); if (!output->isa()) { diff --git a/mindspore/ccsrc/frontend/optimizer/irpass/specialize_transform.h b/mindspore/ccsrc/frontend/optimizer/irpass/specialize_transform.h index 3977eba8b9f..ebe967a3321 100644 --- a/mindspore/ccsrc/frontend/optimizer/irpass/specialize_transform.h +++ b/mindspore/ccsrc/frontend/optimizer/irpass/specialize_transform.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License.
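EraseUnusedParameters and EraseArgs above pair a HashSet of positions with a rebuild-and-filter loop. The same shape in miniature, with ints standing in for the AnfNodePtr inputs:

    #include <cstddef>
    #include <vector>
    #include "utils/hash_set.h"

    // Keep only the arguments whose index is not in 'drop'. find()/end()
    // membership tests work on mindspore::HashSet exactly as they do on
    // std::unordered_set, which is what makes the alias a drop-in swap.
    inline std::vector<int> EraseByIndex(const std::vector<int> &args,
                                         const mindspore::HashSet<size_t> &drop) {
      std::vector<int> kept;
      kept.reserve(args.size());
      for (size_t i = 0; i < args.size(); ++i) {
        if (drop.find(i) == drop.end()) {
          kept.push_back(args[i]);
        }
      }
      return kept;
    }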
@@ -21,9 +21,9 @@ #include #include #include -#include #include +#include "utils/hash_map.h" #include "frontend/optimizer/irpass.h" #include "frontend/optimizer/optimizer.h" #include "frontend/optimizer/anf_visitor.h" @@ -70,7 +70,7 @@ class SpecializeTransform { } private: - std::unordered_map, FuncGraphPtr>> cache_; + mindspore::HashMap, FuncGraphPtr>> cache_; static ValueNodePtr NewReplaceValueNode(const ValuePtr &value) { MS_EXCEPTION_IF_NULL(value); if (value->isa() || value->isa() || value->isa()) { diff --git a/mindspore/ccsrc/frontend/optimizer/opt.cc b/mindspore/ccsrc/frontend/optimizer/opt.cc index ba045d00621..5ce2ba69a70 100644 --- a/mindspore/ccsrc/frontend/optimizer/opt.cc +++ b/mindspore/ccsrc/frontend/optimizer/opt.cc @@ -18,9 +18,9 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "ir/anf.h" #include "ir/manager.h" #include "frontend/optimizer/optimizer.h" @@ -262,7 +262,7 @@ bool SubstitutionList::ApplySubstitutionToIR(const OptimizerPtr &optimizer, cons return changes; } -void SubstitutionList::DisplayStatusOfSubstitution(const std::unordered_map> &status, +void SubstitutionList::DisplayStatusOfSubstitution(const mindspore::HashMap> &status, const OptimizerPtr &optimizer, size_t space) const { constexpr int pad_width = 4; std::stringstream ss; @@ -283,7 +283,7 @@ void SubstitutionList::DisplayStatusOfSubstitution(const std::unordered_map> status; + mindspore::HashMap> status; if (optimizer->is_on_debug_) { for (size_t i = 0; i < list_.size(); i++) { status[list_[i]->name_ + std::to_string(i)] = {}; diff --git a/mindspore/ccsrc/frontend/optimizer/opt.h b/mindspore/ccsrc/frontend/optimizer/opt.h index a2341d1b74d..5c7dee14af4 100644 --- a/mindspore/ccsrc/frontend/optimizer/opt.h +++ b/mindspore/ccsrc/frontend/optimizer/opt.h @@ -21,8 +21,8 @@ #include #include #include -#include +#include "utils/hash_map.h" #include "ir/anf.h" #include "ir/func_graph.h" #include "frontend/optimizer/optimizer_caller.h" @@ -85,7 +85,7 @@ class SubstitutionList { bool ApplySubstitutionToIR(const OptimizerPtr &optimizer, const FuncGraphPtr &func_graph, const SubstitutionPtr &sub) const; bool ApplySubstitutionsToIR(const OptimizerPtr &optimizer, const FuncGraphPtr &func_graph) const; - void DisplayStatusOfSubstitution(const std::unordered_map> &status, + void DisplayStatusOfSubstitution(const mindspore::HashMap> &status, const OptimizerPtr &optimizer, size_t space) const; std::vector list_; diff --git a/mindspore/ccsrc/frontend/optimizer/pattern.h b/mindspore/ccsrc/frontend/optimizer/pattern.h index af539bd6ef4..780d772abcb 100644 --- a/mindspore/ccsrc/frontend/optimizer/pattern.h +++ b/mindspore/ccsrc/frontend/optimizer/pattern.h @@ -18,8 +18,8 @@ #include #include #include -#include +#include "utils/hash_map.h" #include "base/base.h" #include "ir/anf.h" #include "ir/tensor.h" @@ -48,7 +48,7 @@ class Imm; using ImmPtr = std::shared_ptr; struct PatternHasher; struct PatternEqual; -using PatternNodeMap = std::unordered_map; +using PatternNodeMap = mindspore::HashMap; class Pattern : public Base { public: diff --git a/mindspore/ccsrc/frontend/optimizer/py_pass.cc b/mindspore/ccsrc/frontend/optimizer/py_pass.cc index 7904df5c039..de705eaf2fc 100644 --- a/mindspore/ccsrc/frontend/optimizer/py_pass.cc +++ b/mindspore/ccsrc/frontend/optimizer/py_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except 
in compliance with the License. @@ -14,10 +14,10 @@ * limitations under the License. */ #include "frontend/optimizer/py_pass.h" -#include #include #include +#include "utils/hash_set.h" #include "ir/func_graph.h" #include "ir/manager.h" #include "pybind_api/ir/primitive_py.h" diff --git a/mindspore/ccsrc/frontend/optimizer/py_pass.h b/mindspore/ccsrc/frontend/optimizer/py_pass.h index 145f86c4718..eaed2840f9d 100644 --- a/mindspore/ccsrc/frontend/optimizer/py_pass.h +++ b/mindspore/ccsrc/frontend/optimizer/py_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -17,8 +17,8 @@ #define MINDSPORE_CCSRC_FRONTEND_OPTIMIZER_PASS_H_ #include #include -#include +#include "utils/hash_map.h" #include "ir/anf.h" #include "frontend/optimizer/pattern.h" #include "pybind_api/api_register.h" @@ -29,7 +29,7 @@ namespace opt { namespace python_pass { class PythonPass; using PythonPassPtr = std::shared_ptr; -using NodeEquiv = std::unordered_map; +using NodeEquiv = mindspore::HashMap; using NodeEquivPtr = std::shared_ptr; class PythonPass { diff --git a/mindspore/ccsrc/frontend/optimizer/py_pass_manager.cc b/mindspore/ccsrc/frontend/optimizer/py_pass_manager.cc index 2765991d7b2..82fb11a9b9e 100644 --- a/mindspore/ccsrc/frontend/optimizer/py_pass_manager.cc +++ b/mindspore/ccsrc/frontend/optimizer/py_pass_manager.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -25,7 +25,7 @@ namespace mindspore { namespace opt { namespace python_pass { PyPassManagerPtr PyPassManager::global_instance = nullptr; -std::unordered_map PyPassManager::phase_to_group_; +mindspore::HashMap PyPassManager::phase_to_group_; PassGroupPtr PyPassManager::GetPassGroup(Phase phase) { auto pm = phase_to_group_.find(phase); diff --git a/mindspore/ccsrc/frontend/optimizer/py_pass_manager.h b/mindspore/ccsrc/frontend/optimizer/py_pass_manager.h index 3acd87aa885..02d11f0650e 100644 --- a/mindspore/ccsrc/frontend/optimizer/py_pass_manager.h +++ b/mindspore/ccsrc/frontend/optimizer/py_pass_manager.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
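Note that the phase_to_group_ table changed above is always consulted with find() rather than operator[], so looking up an unregistered phase cannot insert an empty group as a side effect. A minimal sketch of that idiom, with hypothetical names and std::unordered_map standing in for the drop-in replacement type:

    #include <memory>
    #include <unordered_map>

    enum class Phase { kResolve, kOpt };
    struct PassGroup {};
    using PassGroupPtr = std::shared_ptr<PassGroup>;

    // One process-wide registry, as with PyPassManager::phase_to_group_ above.
    static std::unordered_map<Phase, PassGroupPtr> g_phase_to_group;

    PassGroupPtr GetPassGroup(Phase phase) {
      auto it = g_phase_to_group.find(phase);  // find() never inserts on a miss
      return it == g_phase_to_group.end() ? nullptr : it->second;
    }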
@@ -19,8 +19,8 @@ #include #include #include -#include +#include "utils/hash_map.h" #include "ir/anf.h" #include "ir/func_graph.h" #include "pybind_api/ir/primitive_py.h" @@ -75,7 +75,7 @@ class PyPassManager { bool should_reopt_ = true; MatchResultPtr res_; pipeline::ResourcePtr resource_; - static std::unordered_map phase_to_group_; + static mindspore::HashMap phase_to_group_; }; } // namespace python_pass } // namespace opt diff --git a/mindspore/ccsrc/frontend/optimizer/recompute.cc b/mindspore/ccsrc/frontend/optimizer/recompute.cc index 48831566cf3..e284256f8b5 100644 --- a/mindspore/ccsrc/frontend/optimizer/recompute.cc +++ b/mindspore/ccsrc/frontend/optimizer/recompute.cc @@ -20,9 +20,9 @@ #include #include #include -#include -#include #include +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "ir/func_graph.h" #include "mindspore/core/base/core_ops.h" #include "utils/utils.h" @@ -33,7 +33,7 @@ namespace { constexpr auto kGradientsFlag = "Gradients"; const int64_t fusion_id_increasement_size = 2000; bool CanNotRecomputed(const CNodePtr &node) { - static std::unordered_set not_recomputed_op_list{ + static mindspore::HashSet not_recomputed_op_list{ prim::kPrimDropoutGenMask, prim::kPrimLoad, prim::kPrimTupleGetItem, prim::kPrimSend, prim::kPrimReceive}; return std::any_of(not_recomputed_op_list.begin(), not_recomputed_op_list.end(), @@ -95,7 +95,7 @@ std::vector FindCandidateRecomputedNodes(const FuncGraphManagerPtr &mn } const auto &node_index_set = output_set_iter->second; if (!std::any_of(node_index_set.begin(), node_index_set.end(), - [](const std::pair &node_index) { return IsBpropNode(node_index.first); })) { + [](const auto &node_index) { return IsBpropNode(node_index.first); })) { continue; } // Check inputs. @@ -108,7 +108,7 @@ std::vector FindCandidateRecomputedNodes(const FuncGraphManagerPtr &mn return candidate_recomputed_nodes; } -void GetMaxSubGraph(const FuncGraphManagerPtr &mng, std::unordered_set *recomputed_nodes, bool get_inputs, +void GetMaxSubGraph(const FuncGraphManagerPtr &mng, mindspore::HashSet *recomputed_nodes, bool get_inputs, bool get_outputs) { MS_EXCEPTION_IF_NULL(mng); MS_EXCEPTION_IF_NULL(recomputed_nodes); @@ -160,9 +160,9 @@ void GetMaxSubGraph(const FuncGraphManagerPtr &mng, std::unordered_set } void GetOriginRecomputeAndTargetNodes(const FuncGraphManagerPtr &mng, - const std::unordered_set &max_recomputed_sub_graph, - std::unordered_set *recompute_nodes, - std::unordered_set *target_nodes) { + const mindspore::HashSet &max_recomputed_sub_graph, + mindspore::HashSet *recompute_nodes, + mindspore::HashSet *target_nodes) { MS_EXCEPTION_IF_NULL(mng); MS_EXCEPTION_IF_NULL(recompute_nodes); MS_EXCEPTION_IF_NULL(target_nodes); @@ -189,8 +189,8 @@ void GetOriginRecomputeAndTargetNodes(const FuncGraphManagerPtr &mng, } std::vector GetFirstTargetInputs(const std::vector &origin_nodes_topological, - const std::unordered_set &recomputed_origin_nodes, - const std::unordered_set &target_nodes) { + const mindspore::HashSet &recomputed_origin_nodes, + const mindspore::HashSet &target_nodes) { std::vector first_target_inputs; for (const auto &node : origin_nodes_topological) { MS_EXCEPTION_IF_NULL(node); @@ -212,7 +212,7 @@ std::vector GetFirstTargetInputs(const std::vector &origin return first_target_inputs; } -bool HasGradInputs(const AnfNodePtr &node, std::unordered_map *has_grad_inputs_map) { +bool HasGradInputs(const AnfNodePtr &node, mindspore::HashMap *has_grad_inputs_map) { MS_EXCEPTION_IF_NULL(node); 
MS_EXCEPTION_IF_NULL(has_grad_inputs_map); if (has_grad_inputs_map->find(node) != has_grad_inputs_map->end()) { @@ -220,7 +220,7 @@ bool HasGradInputs(const AnfNodePtr &node, std::unordered_map } auto cnode = node->cast(); if (cnode == nullptr) { - has_grad_inputs_map->insert(std::make_pair(node, false)); + (void)has_grad_inputs_map->emplace(node, false); return false; } const auto &inputs = cnode->inputs(); @@ -230,11 +230,11 @@ bool HasGradInputs(const AnfNodePtr &node, std::unordered_map continue; } if (IsBpropNode(inputs[i]) || HasGradInputs(inputs[i], has_grad_inputs_map)) { - has_grad_inputs_map->insert(std::make_pair(node, true)); + (void)has_grad_inputs_map->emplace(node, true); return true; } } - has_grad_inputs_map->insert(std::make_pair(node, false)); + (void)has_grad_inputs_map->emplace(node, false); return false; } @@ -277,7 +277,7 @@ void SetRecomputedAttr(const FuncGraphPtr &graph, const std::vector &o MS_EXCEPTION_IF_NULL(graph); auto mng = graph->manager(); MS_EXCEPTION_IF_NULL(mng); - std::unordered_map has_grad_inputs_map; + mindspore::HashMap has_grad_inputs_map; for (const auto &node : origin_nodes_topological) { MS_EXCEPTION_IF_NULL(node); // The node may be set the non-recomputed before such as the cell outputs. @@ -336,8 +336,8 @@ CNodePtr CreateNewRecomputedNode(const FuncGraphPtr &graph, const CNodePtr &orig CNodePtr NewRecomputedNode(const FuncGraphPtr &graph, const CNodePtr &origin_node, const std::vector &first_target_inputs, - const std::unordered_set &recomputed_origin_nodes, - std::unordered_map *origin_to_recomputed_nodes) { + const mindspore::HashSet &recomputed_origin_nodes, + mindspore::HashMap *origin_to_recomputed_nodes) { MS_EXCEPTION_IF_NULL(graph); MS_EXCEPTION_IF_NULL(origin_node); MS_EXCEPTION_IF_NULL(origin_to_recomputed_nodes); @@ -400,14 +400,14 @@ CNodePtr NewRecomputedNode(const FuncGraphPtr &graph, const CNodePtr &origin_nod new_inputs[1] = depend_node; } auto recomputed_node = CreateNewRecomputedNode(graph, origin_node, new_inputs); - origin_to_recomputed_nodes->insert(std::make_pair(origin_node, recomputed_node)); + (void)origin_to_recomputed_nodes->emplace(origin_node, recomputed_node); return recomputed_node; } -void DuplicateRecomputedNodes(const FuncGraphPtr &graph, const std::unordered_set &target_nodes, - const std::unordered_set &origin_recomputed_nodes, +void DuplicateRecomputedNodes(const FuncGraphPtr &graph, const mindspore::HashSet &target_nodes, + const mindspore::HashSet &origin_recomputed_nodes, const std::vector &first_target_inputs, - std::unordered_map *origin_to_recomputed_nodes) { + mindspore::HashMap *origin_to_recomputed_nodes) { MS_EXCEPTION_IF_NULL(graph); auto mng = graph->manager(); MS_EXCEPTION_IF_NULL(mng); @@ -449,18 +449,18 @@ void InsertRecomputedNodes(const FuncGraphPtr &graph) { SetRecomputedAttr(graph, origin_nodes_topological); // Get candidate origin recomputed nodes which have no grad inputs and output to at least one grad node directly. std::vector candidate_recomputed_nodes = FindCandidateRecomputedNodes(mng, origin_nodes_topological); - std::unordered_set visited_nodes; + mindspore::HashSet visited_nodes; for (const auto &candidate_recomputed_node : candidate_recomputed_nodes) { if (visited_nodes.find(candidate_recomputed_node) != visited_nodes.end()) { continue; } - std::unordered_set max_recomputed_sub_graph = {candidate_recomputed_node}; + mindspore::HashSet max_recomputed_sub_graph = {candidate_recomputed_node}; // Get max continuous recomputed sub-graph. 
   GetMaxSubGraph(mng, &max_recomputed_sub_graph, true, true);
   visited_nodes.insert(max_recomputed_sub_graph.begin(), max_recomputed_sub_graph.end());
   // Get the origin recomputed nodes which directly output to the grad nodes.
-  std::unordered_set origin_recomputed_nodes;
-  std::unordered_set target_nodes;
+  mindspore::HashSet origin_recomputed_nodes;
+  mindspore::HashSet target_nodes;
   GetOriginRecomputeAndTargetNodes(mng, max_recomputed_sub_graph, &origin_recomputed_nodes, &target_nodes);
   // Also get the inputs of origin recomputed nodes which eventually output to the grad nodes.
   GetMaxSubGraph(mng, &origin_recomputed_nodes, true, false);
@@ -469,7 +469,7 @@ void InsertRecomputedNodes(const FuncGraphPtr &graph) {
   // not be executed until these inputs are ready.
   std::vector first_target_inputs =
     GetFirstTargetInputs(origin_nodes_topological, origin_recomputed_nodes, target_nodes);
-  std::unordered_map origin_to_recomputed_nodes;
+  mindspore::HashMap origin_to_recomputed_nodes;
   // Begin duplicate origin recomputed nodes with each target node.
   DuplicateRecomputedNodes(graph, target_nodes, origin_recomputed_nodes, first_target_inputs,
                            &origin_to_recomputed_nodes);
diff --git a/mindspore/ccsrc/frontend/parallel/allreduce_fusion/allreduce_fusion.cc b/mindspore/ccsrc/frontend/parallel/allreduce_fusion/allreduce_fusion.cc
index 2f52768a326..cc79cd5a681 100644
--- a/mindspore/ccsrc/frontend/parallel/allreduce_fusion/allreduce_fusion.cc
+++ b/mindspore/ccsrc/frontend/parallel/allreduce_fusion/allreduce_fusion.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -18,7 +18,7 @@
 #include
 #include
 #include
-#include <unordered_set>
+#include "utils/hash_set.h"
 #include "ir/func_graph.h"
 #include "frontend/parallel/costmodel_context.h"
 #include "frontend/parallel/graph_util/node_info.h"
@@ -28,7 +28,7 @@
 namespace mindspore {
 namespace parallel {
-std::unordered_set FindCNodesWithPara(const AnfNodePtr &para, uint64_t recursive_times = 0) {
+mindspore::HashSet FindCNodesWithPara(const AnfNodePtr &para, uint64_t recursive_times = 0) {
   if (recursive_times > MAX_RECURSIVE_CALL_TIMES) {
     MS_LOG(EXCEPTION) << "FindCNodesWithPara exceeds max recursive call times! Max recursive call times is "
                       << MAX_RECURSIVE_CALL_TIMES;
@@ -38,7 +38,7 @@ std::unordered_set FindCNodesWithPara(const AnfNodePtr &para, uint64_t
   FuncGraphManagerPtr manager = para->func_graph()->manager();
   MS_EXCEPTION_IF_NULL(manager);
   auto node_set = manager->node_users()[para];
-  std::unordered_set cnode_set;
+  mindspore::HashSet cnode_set;
   for (auto &node_pair : node_set) {
     auto cnode = node_pair.first->cast();
     MS_EXCEPTION_IF_NULL(cnode);
@@ -89,7 +89,7 @@ CNodeCostMap AllreduceFusion::FindCNode(const AnfNodePtr &from, uint64_t recursi
                   << MAX_RECURSIVE_CALL_TIMES;
   }
   MS_EXCEPTION_IF_NULL(from);
-  std::unordered_map cnode_dist;
+  mindspore::HashMap cnode_dist;
   if (!from->isa()) {
     return cnode_dist;
   }
@@ -130,7 +130,7 @@ CNodeCostMap AllreduceFusion::FindNextCNodes(const CNodePtr &from, uint64_t recu
                   << MAX_RECURSIVE_CALL_TIMES;
   }
   const auto &from_inputs = from->inputs();
-  std::unordered_map dist_map;
+  mindspore::HashMap dist_map;
   MS_LOG(DEBUG) << "from cnode " << from->DebugString() << " has " << from_inputs.size() << " inputs";
   for (auto &input_node : from_inputs) {
     auto cnode_dist = FindCNode(input_node, recursive_times + 1);
@@ -142,7 +142,7 @@ CNodeCostMap AllreduceFusion::FindNextCNodes(const CNodePtr &from, uint64_t recu
 }

 Status AllreduceFusion::AddEdgeToGraph() {
-  std::unordered_map cnode_state_map;
+  mindspore::HashMap cnode_state_map;
   const auto &cnodes = allreduce_graph_.cnode_set();
   for (auto &cnode : cnodes) {
     cnode_state_map[cnode] = 0;
diff --git a/mindspore/ccsrc/frontend/parallel/allreduce_fusion/allreduce_fusion.h b/mindspore/ccsrc/frontend/parallel/allreduce_fusion/allreduce_fusion.h
index ee992053b99..c5a0196af00 100644
--- a/mindspore/ccsrc/frontend/parallel/allreduce_fusion/allreduce_fusion.h
+++ b/mindspore/ccsrc/frontend/parallel/allreduce_fusion/allreduce_fusion.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -17,8 +17,8 @@
 #ifndef MINDSPORE_CCSRC_FRONTEND_PARALLEL_ALLREDUCE_FUSION_ALLREDUCE_FUSION_H_
 #define MINDSPORE_CCSRC_FRONTEND_PARALLEL_ALLREDUCE_FUSION_ALLREDUCE_FUSION_H_

-#include <unordered_map>
 #include
+#include "utils/hash_map.h"
 #include "ir/anf.h"
 #include "frontend/parallel/allreduce_fusion/allreduce_graph.h"
 #include "frontend/parallel/status.h"
@@ -27,7 +27,7 @@
 namespace mindspore {
 namespace parallel {
-using CNodeCostMap = std::unordered_map;
+using CNodeCostMap = mindspore::HashMap;

 constexpr int64_t DEFAULT_COST_MODEL_ALLREDUCE_FUSION_ALGORITHM = 0;
 constexpr int64_t DEFAULT_COST_MODEL_ALLREDUCE_FUSION_TIMES = 0;
diff --git a/mindspore/ccsrc/frontend/parallel/allreduce_fusion/allreduce_graph.cc b/mindspore/ccsrc/frontend/parallel/allreduce_fusion/allreduce_graph.cc
index be0f2c26e76..379f3319e31 100644
--- a/mindspore/ccsrc/frontend/parallel/allreduce_fusion/allreduce_graph.cc
+++ b/mindspore/ccsrc/frontend/parallel/allreduce_fusion/allreduce_graph.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -173,7 +173,7 @@ void AllreduceGraph::SortArnode() {
 }

 Status AllreduceGraph::RemoveExtraParas() {
-  std::unordered_set para_map;
+  mindspore::HashSet para_map;
   for (auto &node : arnode_vec_) {
     for (auto &para : node.paras()) {
       auto emplac_result = para_map.emplace(para);
diff --git a/mindspore/ccsrc/frontend/parallel/allreduce_fusion/allreduce_graph.h b/mindspore/ccsrc/frontend/parallel/allreduce_fusion/allreduce_graph.h
index 89081ed1893..9b9a5df74e8 100644
--- a/mindspore/ccsrc/frontend/parallel/allreduce_fusion/allreduce_graph.h
+++ b/mindspore/ccsrc/frontend/parallel/allreduce_fusion/allreduce_graph.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -19,10 +19,10 @@
 #include
 #include
-#include <unordered_map>
-#include <unordered_set>
 #include
 #include
+#include "utils/hash_map.h"
+#include "utils/hash_set.h"
 #include "ir/anf.h"
 #include "frontend/parallel/allreduce_fusion/allreduce_node.h"
 #include "frontend/parallel/status.h"
@@ -60,7 +60,7 @@ class AllreduceGraph {
   void PrintAllredueGraphInfo() const;
   void PrintArnodeVec() const;
   void PrintArnodeSet() const;
-  const std::unordered_set &cnode_set() const { return cnode_set_; }
+  const mindspore::HashSet &cnode_set() const { return cnode_set_; }
   CNodePtr head_cnode() const { return head_cnode_; }
   Status set_head_cnode(const CNodePtr &node);
   double max() const { return max_; }
@@ -69,14 +69,14 @@
   CNodePtr head_cnode_;
   std::set arnode_set_;
   std::vector arnode_vec_;
-  std::unordered_set cnode_set_;
+  mindspore::HashSet cnode_set_;
   // If One ParameterPtr is used by multiple CNode, the last node for backward computation is saved.
-  std::unordered_map> para_cnode_map_;
+  mindspore::HashMap> para_cnode_map_;
   // One ParameterPtr may be used by multiple CNode
-  std::unordered_map> para_cnodeset_map_;
+  mindspore::HashMap> para_cnodeset_map_;
   // Multiple Parameter may be inputs to the same CNode
-  std::unordered_map> cnode_paraset_map_;
-  std::unordered_map cnode_arnode_map_;
+  mindspore::HashMap> cnode_paraset_map_;
+  mindspore::HashMap cnode_arnode_map_;
   double max_;
 };
 }  // namespace parallel
diff --git a/mindspore/ccsrc/frontend/parallel/allreduce_fusion/allreduce_node.h b/mindspore/ccsrc/frontend/parallel/allreduce_fusion/allreduce_node.h
index 6741461b243..6b98faff995 100644
--- a/mindspore/ccsrc/frontend/parallel/allreduce_fusion/allreduce_node.h
+++ b/mindspore/ccsrc/frontend/parallel/allreduce_fusion/allreduce_node.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
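The lambda rewrites in recompute.cc above (an explicit std::pair parameter becoming const auto &) are what make the container swap safe: a replacement hash map is free to expose its own pair type as value_type, so a lambda that hard-codes std::pair may no longer bind to the map's elements. Deducing the element type keeps the predicate container-agnostic. A minimal sketch, with an illustrative function that is not part of this patch:

    #include <algorithm>
    #include <string>
    #include <unordered_map>

    // Works for any map-like container: `const auto &` deduces the element type
    // (std::pair<const K, V>, a vendor pair type, ...) instead of spelling it out.
    template <typename Map>
    bool AnyEmptyValue(const Map &m) {
      return std::any_of(m.begin(), m.end(), [](const auto &kv) { return kv.second.empty(); });
    }

    // Usage:
    //   std::unordered_map<int, std::string> m{{1, "a"}, {2, ""}};
    //   bool b = AnyEmptyValue(m);  // true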
@@ -18,9 +18,9 @@ #define MINDSPORE_CCSRC_FRONTEND_PARALLEL_ALLREDUCE_FUSION_ALLREDUCE_NODE_H_ #include -#include -#include #include +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "ir/anf.h" #include "frontend/parallel/status.h" @@ -36,7 +36,7 @@ class AllreduceNode { Status Init(const CNodePtr &cnode_ptr); Status AddPara(const AnfNodePtr &node_ptr); Status RemovePara(const AnfNodePtr &node_ptr); - const std::unordered_set ¶s() const { return paras_; } + const mindspore::HashSet ¶s() const { return paras_; } double curr_para_size() const { return curr_para_size_; } virtual ~AllreduceNode() = default; // Add previous node @@ -55,8 +55,8 @@ class AllreduceNode { CNodePtr cnode_ptr_; std::vector prev_; std::vector next_; - std::unordered_set paras_; - std::unordered_map para_size_map_; + mindspore::HashSet paras_; + mindspore::HashMap para_size_map_; double curr_para_size_; double depend_feat_size_; }; diff --git a/mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc b/mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc index 88b0dbf76cf..284fc88725e 100644 --- a/mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc +++ b/mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -21,21 +21,21 @@ #include #include #include -#include -#include #include #include +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "backend/optimizer/common/helper.h" #include "frontend/optimizer/optimizer.h" #include "ir/func_graph.h" #include "utils/cache_embedding_hashmap_struct.h" namespace mindspore { namespace parallel { -using ParamMap = std::unordered_map; -using ParamSet = std::unordered_set; +using ParamMap = mindspore::HashMap; +using ParamSet = mindspore::HashSet; using NodePairList = std::vector>; -using AnfMap = std::unordered_map; -using AnfSet = std::unordered_set; +using AnfMap = mindspore::HashMap; +using AnfSet = mindspore::HashSet; ParamMap AddCacheParameters(const FuncGraphPtr &graph, const ParamSet ¶meter_cache_enable_set) { ParamMap cache_host_params_map; diff --git a/mindspore/ccsrc/frontend/parallel/device_manager.cc b/mindspore/ccsrc/frontend/parallel/device_manager.cc index b63525ee5a2..f2d3ed6bfab 100644 --- a/mindspore/ccsrc/frontend/parallel/device_manager.cc +++ b/mindspore/ccsrc/frontend/parallel/device_manager.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,9 +18,9 @@ #include #include -#include #include +#include "utils/hash_set.h" #include "frontend/parallel/step_parallel.h" #include "utils/log_adapter.h" @@ -321,7 +321,7 @@ Group DeviceManager::CreateGroup(const std::string &group_name, // Create the group with only the given devices' ranks. 
Group DeviceManager::CreateGroup(const RankList &dev_ranks) { - std::unordered_set rank_set(dev_ranks.begin(), dev_ranks.end()); + mindspore::HashSet rank_set(dev_ranks.begin(), dev_ranks.end()); if (dev_ranks.size() != rank_set.size()) { MS_LOG(EXCEPTION) << "Invalid dev ranks(" << dev_ranks << "), it has the Duplicate elements in list"; } diff --git a/mindspore/ccsrc/frontend/parallel/graph_util/generate_graph.cc b/mindspore/ccsrc/frontend/parallel/graph_util/generate_graph.cc index 1c8c363a87e..252035c1281 100644 --- a/mindspore/ccsrc/frontend/parallel/graph_util/generate_graph.cc +++ b/mindspore/ccsrc/frontend/parallel/graph_util/generate_graph.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -77,7 +77,7 @@ AnfNodePtr ValuePtrToAnfNodePtr(const ValuePtr &value_ptr) { return value_node->cast(); } -static std::unordered_map int_tensor_map = {}; +static mindspore::HashMap int_tensor_map = {}; AnfNodePtr CreateInt32Tensor(int64_t value) { auto it = int_tensor_map.find(value); if (it != int_tensor_map.end()) { diff --git a/mindspore/ccsrc/frontend/parallel/graph_util/generate_graph.h b/mindspore/ccsrc/frontend/parallel/graph_util/generate_graph.h index 12c0c6bc157..08bc6a95145 100644 --- a/mindspore/ccsrc/frontend/parallel/graph_util/generate_graph.h +++ b/mindspore/ccsrc/frontend/parallel/graph_util/generate_graph.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,10 +20,10 @@ #include #include #include -#include #include #include +#include "utils/hash_map.h" #include "frontend/optimizer/opt.h" #include "frontend/parallel/strategy.h" #include "frontend/parallel/tensor_layout/tensor_redistribution.h" @@ -46,7 +46,7 @@ std::string HashInstanceName(const std::string &name); class GenerateGraph { public: - explicit GenerateGraph(std::unordered_map origin_attrs) + explicit GenerateGraph(mindspore::HashMap origin_attrs) : name_idx_(0), origin_attrs_(origin_attrs) {} Status Init(const CNodePtr &cnode); ~GenerateGraph() = default; @@ -63,7 +63,7 @@ class GenerateGraph { AnfNodePtr virtual_input_node_; std::string instance_name_base_; int64_t name_idx_; - std::unordered_map origin_attrs_; + mindspore::HashMap origin_attrs_; }; } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/frontend/parallel/graph_util/node_info.cc b/mindspore/ccsrc/frontend/parallel/graph_util/node_info.cc index 3e5ec8c4877..6481501d56f 100644 --- a/mindspore/ccsrc/frontend/parallel/graph_util/node_info.cc +++ b/mindspore/ccsrc/frontend/parallel/graph_util/node_info.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
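CreateGroup above validates dev_ranks by building a hash set from the whole range and comparing sizes: a set keeps one copy of each value, so any size mismatch means a repeated rank. A self-contained sketch of the check, with illustrative names:

    #include <cstdint>
    #include <unordered_set>
    #include <vector>

    // A size mismatch between the range and the deduplicating set means at
    // least one element occurred more than once.
    bool HasDuplicateRanks(const std::vector<int64_t> &ranks) {
      std::unordered_set<int64_t> unique(ranks.begin(), ranks.end());
      return unique.size() != ranks.size();
    }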
@@ -278,7 +278,7 @@ bool AnfNodeIsPrimitive(const AnfNodePtr &anf_node, const std::string &prim_name return false; } -bool FindReshape(const CNodePtr &cnode, std::unordered_set *op_cache) { +bool FindReshape(const CNodePtr &cnode, mindspore::HashSet *op_cache) { if ((cnode == nullptr) || !IsValueNode(cnode->input(0))) { return false; } @@ -404,7 +404,7 @@ bool FindReshapeNextNodeStraCosts(const CNodePtr &cnode, OperatorInfoPtr *next_o return false; } -void SetUserAttrs(const std::unordered_map &origin_prim_attrs, const PrimitivePtr &self_prim) { +void SetUserAttrs(const mindspore::HashMap &origin_prim_attrs, const PrimitivePtr &self_prim) { MS_EXCEPTION_IF_NULL(self_prim); for (auto attr_name : filter_attrs) { auto iter = origin_prim_attrs.find(attr_name); diff --git a/mindspore/ccsrc/frontend/parallel/graph_util/node_info.h b/mindspore/ccsrc/frontend/parallel/graph_util/node_info.h index c4de59657e8..ec9800ff326 100644 --- a/mindspore/ccsrc/frontend/parallel/graph_util/node_info.h +++ b/mindspore/ccsrc/frontend/parallel/graph_util/node_info.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,8 +20,8 @@ #include #include #include -#include -#include +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "base/base.h" #include "ir/anf.h" #include "frontend/parallel/ops_info/operator_info.h" @@ -45,14 +45,14 @@ std::vector FindParameterByRefKeyNode(const AnfNodePtr &node, const bool AnfNodeIsPrimitive(const AnfNodePtr &anf_node, const std::string &prim_name); -bool FindReshape(const CNodePtr &cnode, std::unordered_set *op_cache); +bool FindReshape(const CNodePtr &cnode, mindspore::HashSet *op_cache); bool FindReshapePreNodeStraCosts(const AnfNodePtr &node, OperatorInfoPtr *pre_operator_info, int64_t *out_index, size_t curr_depth); bool FindReshapeNextNodeStraCosts(const CNodePtr &cnode, OperatorInfoPtr *next_operator_info, int64_t *in_index, bool *is_next_reshape, size_t curr_depth); -void SetUserAttrs(const std::unordered_map &origin_prim_attrs, const PrimitivePtr &self_prim); +void SetUserAttrs(const mindspore::HashMap &origin_prim_attrs, const PrimitivePtr &self_prim); } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/activation_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/activation_info.h index 77e8c528646..cae8ee8ede0 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/activation_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/activation_info.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
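FindReshape above takes op_cache by pointer so that a single set, owned by the driving pass (see ReshapeCostCompute later in this patch), persists across calls and each operator is handled at most once. A sketch of the idiom with a stand-in key type:

    #include <cstdint>
    #include <unordered_set>

    // The caller owns the cache and threads it through every call; emplace()
    // records the key so later calls with the same id bail out early.
    bool ProcessOnce(int64_t op_id, std::unordered_set<int64_t> *op_cache) {
      if (op_cache->find(op_id) != op_cache->end()) {
        return false;  // already handled by an earlier call
      }
      (void)op_cache->emplace(op_id);
      return true;  // first encounter
    }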
@@ -20,9 +20,9 @@ #include #include #include -#include #include +#include "utils/hash_map.h" #include "frontend/parallel/auto_parallel/operator_costmodel.h" #include "frontend/parallel/ops_info/operator_info.h" #include "frontend/parallel/strategy.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/arithmetic_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/arithmetic_info.h index b2832f7c38a..12473a6524e 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/arithmetic_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/arithmetic_info.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,9 +19,9 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "ir/value.h" #include "frontend/parallel/auto_parallel/operator_costmodel.h" #include "frontend/parallel/ops_info/operator_info.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/batch_parallel_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/batch_parallel_info.h index 97cd5c08c11..5820da33923 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/batch_parallel_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/batch_parallel_info.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,8 +19,8 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "ir/value.h" #include "frontend/parallel/ops_info/operator_info.h" #include "frontend/parallel/strategy.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/batchnorm_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/batchnorm_info.h index 5f9c4d7bc5f..dab442a31f3 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/batchnorm_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/batchnorm_info.h @@ -19,9 +19,9 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "ir/value.h" #include "frontend/parallel/auto_parallel/operator_costmodel.h" #include "frontend/parallel/ops_info/operator_info.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/bias_add_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/bias_add_info.h index b7316d0d90e..aca044679f8 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/bias_add_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/bias_add_info.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
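Every ops_info header in this stretch makes the same two-line change: drop the standard include, pull in "utils/hash_map.h", and the mindspore::HashMap spelling does the rest. The swap stays mechanical because the new name is a template alias with an std-compatible interface. A sketch of what such a shim can look like; this is illustrative only, not the actual contents of utils/hash_map.h:

    // Illustrative shim only.
    #include <functional>
    #include <unordered_map>

    namespace mindspore {
    // An alias with the same template parameters as std::unordered_map lets
    // call sites switch implementations by changing only the alias target.
    template <typename K, typename V, typename Hash = std::hash<K>,
              typename Equal = std::equal_to<K>>
    using HashMap = std::unordered_map<K, V, Hash, Equal>;
    }  // namespace mindspore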
@@ -20,9 +20,9 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "ir/value.h" #include "frontend/parallel/auto_parallel/operator_costmodel.h" #include "frontend/parallel/ops_info/operator_info.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/broadcast_to_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/broadcast_to_info.h index cc3f6f7d006..74374b1f2fa 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/broadcast_to_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/broadcast_to_info.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,9 +19,9 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "ir/value.h" #include "frontend/parallel/auto_parallel/operator_costmodel.h" #include "frontend/parallel/ops_info/operator_info.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/comparison_function_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/comparison_function_info.h index e457e5aa5f1..464c1a7f5b6 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/comparison_function_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/comparison_function_info.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,8 +19,8 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "ir/value.h" #include "frontend/parallel/auto_parallel/operator_costmodel.h" #include "frontend/parallel/ops_info/arithmetic_info.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/concat_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/concat_info.h index 3e33e7c244e..cdc9d0d9b1e 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/concat_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/concat_info.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -19,9 +19,9 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "ir/value.h" #include "frontend/parallel/auto_parallel/operator_costmodel.h" #include "frontend/parallel/ops_info/operator_info.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/conv2d_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/conv2d_info.h index c5f2f5eabb0..920ed2c5598 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/conv2d_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/conv2d_info.h @@ -19,9 +19,9 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "ir/value.h" #include "frontend/parallel/graph_util/generate_graph.h" #include "frontend/parallel/auto_parallel/operator_costmodel.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/dropout_do_mask_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/dropout_do_mask_info.h index c7c0091d8b5..b5455a0f714 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/dropout_do_mask_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/dropout_do_mask_info.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,9 +19,9 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "ir/value.h" #include "frontend/parallel/auto_parallel/operator_costmodel.h" #include "frontend/parallel/ops_info/operator_info.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/dsd_matmul_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/dsd_matmul_info.h index 07788229db8..c24c7a1d467 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/dsd_matmul_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/dsd_matmul_info.h @@ -19,9 +19,9 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "ir/value.h" #include "frontend/parallel/ops_info/operator_info.h" #include "frontend/parallel/strategy.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/elementary_function_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/elementary_function_info.h index 79f1fe4beeb..4ceaf23da1d 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/elementary_function_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/elementary_function_info.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,9 +18,9 @@ #define MINDSPORE_CCSRC_FRONTEND_PARALLEL_OPS_INFO_ELEMENTARY_FUNCTION_INFO_H_ #include -#include #include #include +#include "utils/hash_map.h" #include "ir/value.h" #include "frontend/parallel/auto_parallel/operator_costmodel.h" #include "frontend/parallel/ops_info/activation_info.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/gather_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/gather_info.h index 726e650f29b..28605046a64 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/gather_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/gather_info.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -19,9 +19,9 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "ir/value.h" #include "frontend/parallel/auto_parallel/operator_costmodel.h" #include "frontend/parallel/ops_info/operator_info.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/gatherd_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/gatherd_info.h index 9e063041e41..93933275a55 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/gatherd_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/gatherd_info.h @@ -19,9 +19,9 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "ir/value.h" #include "frontend/parallel/auto_parallel/operator_costmodel.h" #include "frontend/parallel/ops_info/operator_info.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/gathernd_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/gathernd_info.h index a67ba225039..e43c2940715 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/gathernd_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/gathernd_info.h @@ -19,9 +19,9 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "ir/value.h" #include "frontend/parallel/auto_parallel/operator_costmodel.h" #include "frontend/parallel/ops_info/operator_info.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/get_next_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/get_next_info.h index 5a996cf7e78..b8a795f2050 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/get_next_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/get_next_info.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,9 +19,9 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "frontend/parallel/auto_parallel/operator_costmodel.h" #include "frontend/parallel/ops_info/operator_info.h" #include "frontend/parallel/strategy.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/l2_normalize_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/l2_normalize_info.h index 0197923de2d..5c2323b9e2f 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/l2_normalize_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/l2_normalize_info.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,9 +19,9 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "ir/value.h" #include "frontend/parallel/auto_parallel/operator_costmodel.h" #include "frontend/parallel/ops_info/activation_info.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/layer_norm_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/layer_norm_info.h index b6c6b4dea53..505c18cd382 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/layer_norm_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/layer_norm_info.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -19,8 +19,8 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "ir/value.h" #include "frontend/parallel/auto_parallel/operator_costmodel.h" #include "frontend/parallel/ops_info/operator_info.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/loss_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/loss_info.h index 600937e5937..847139d6ad8 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/loss_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/loss_info.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,9 +19,9 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "ir/value.h" #include "frontend/parallel/ops_info/activation_info.h" #include "frontend/parallel/ops_info/operator_info.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/matmul_dds_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/matmul_dds_info.h index 5852a7aaf78..5fc860aa90d 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/matmul_dds_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/matmul_dds_info.h @@ -19,9 +19,9 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "ir/value.h" #include "frontend/parallel/ops_info/operator_info.h" #include "frontend/parallel/strategy.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/matmul_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/matmul_info.h index eec3ad4f7c9..33221cae587 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/matmul_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/matmul_info.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,9 +19,9 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "utils/ms_utils.h" #include "ir/value.h" #include "frontend/parallel/auto_parallel/operator_costmodel.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/maxpool_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/maxpool_info.h index 27379f79c26..b7ad23e3ee7 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/maxpool_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/maxpool_info.h @@ -19,9 +19,9 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "ir/value.h" #include "frontend/parallel/auto_parallel/operator_costmodel.h" #include "frontend/parallel/ops_info/operator_info.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/onehot_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/onehot_info.h index 665c43186d4..ff862e9c2f8 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/onehot_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/onehot_info.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -19,9 +19,9 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "ir/value.h" #include "frontend/parallel/auto_parallel/operator_costmodel.h" #include "frontend/parallel/ops_info/operator_info.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/operator_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/operator_info.h index 1764b2a5c7c..3c45f343fde 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/operator_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/operator_info.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -21,10 +21,10 @@ #include #include #include -#include #include #include +#include "utils/hash_map.h" #include "utils/ms_utils.h" #include "base/base.h" #include "frontend/parallel/auto_parallel/costmodel.h" @@ -47,7 +47,7 @@ using VirtualDivOp = OperatorVector; using TensorMaps = std::vector; using TensorLayouts = std::vector; using different_type = std::vector::difference_type; -using PrimitiveAttrs = std::unordered_map; +using PrimitiveAttrs = mindspore::HashMap; using ReplaceGraphPtr = std::shared_ptr>, AnfNodePtr>>; class Edge; @@ -188,7 +188,7 @@ class OperatorInfo { const std::string &type() const { return type_; } void set_last_node_flag(const bool &is_last_node) { is_last_node_ = is_last_node; } const bool &is_last_node() const { return is_last_node_; } - const std::unordered_map &attrs() const { return attrs_; } + const mindspore::HashMap &attrs() const { return attrs_; } void set_stage_id(int32_t stage_id) { stage_id_ = stage_id; } int32_t stage_id() const { return stage_id_; } Status CreateGroupByTensorMap(const Shape &tensor_map, std::vector *group); @@ -238,7 +238,7 @@ class OperatorInfo { std::string name_; Shapes inputs_shape_; Shapes outputs_shape_; - std::unordered_map attrs_; + mindspore::HashMap attrs_; std::vector input_value_; TypePtr outputs_dtype_; diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/pack_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/pack_info.h index 7efa08268e9..6c160854680 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/pack_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/pack_info.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,9 +19,9 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "ir/value.h" #include "frontend/parallel/auto_parallel/operator_costmodel.h" #include "frontend/parallel/ops_info/operator_info.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/prelu_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/prelu_info.h index 1bc475ba635..09f2ca1c81a 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/prelu_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/prelu_info.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
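operator_info.h above keeps each operator's attribute table in a hash map member (the PrimitiveAttrs alias) and hands it out by const reference, so callers can look up or iterate without copying the table. A reduced sketch with stand-in types:

    #include <memory>
    #include <string>
    #include <unordered_map>
    #include <utility>

    struct Value {};
    using ValuePtr = std::shared_ptr<Value>;
    using Attrs = std::unordered_map<std::string, ValuePtr>;  // cf. PrimitiveAttrs

    class Operator {
     public:
      explicit Operator(Attrs attrs) : attrs_(std::move(attrs)) {}
      // Const reference: read access without copying the whole table.
      const Attrs &attrs() const { return attrs_; }

     private:
      Attrs attrs_;
    };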
@@ -19,9 +19,9 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "ir/value.h" #include "frontend/parallel/ops_info/operator_info.h" #include "frontend/parallel/strategy.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/range_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/range_info.h index 2c738fde278..09ccc6d74e5 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/range_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/range_info.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,9 +19,9 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "utils/ms_utils.h" #include "ir/value.h" #include "frontend/parallel/auto_parallel/operator_costmodel.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/reduce_method_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/reduce_method_info.h index f3a11261201..d66df623d8a 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/reduce_method_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/reduce_method_info.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,9 +19,9 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "ir/tensor.h" #include "ir/value.h" #include "frontend/parallel/auto_parallel/operator_costmodel.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/reluv2_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/reluv2_info.h index a21e33829e0..039ee20214c 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/reluv2_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/reluv2_info.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,9 +20,9 @@ #include #include #include -#include #include +#include "utils/hash_map.h" #include "frontend/parallel/auto_parallel/operator_costmodel.h" #include "frontend/parallel/ops_info/operator_info.h" #include "frontend/parallel/strategy.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.h index 83358fef0e6..c5aa9209036 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/reshape_info.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -21,9 +21,9 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "frontend/parallel/ops_info/operator_info.h" #include "frontend/parallel/strategy.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/resizebilinear_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/resizebilinear_info.h index 6159da5b67c..173004b84eb 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/resizebilinear_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/resizebilinear_info.h @@ -19,9 +19,9 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "ir/value.h" #include "frontend/parallel/auto_parallel/operator_costmodel.h" #include "frontend/parallel/ops_info/operator_info.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/scatter_update_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/scatter_update_info.h index 608877db224..bf07b5fa0a6 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/scatter_update_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/scatter_update_info.h @@ -19,9 +19,9 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "ir/value.h" #include "frontend/parallel/auto_parallel/operator_costmodel.h" #include "frontend/parallel/ops_info/operator_info.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/select_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/select_info.h index 0379f54d850..0665cfaec73 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/select_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/select_info.h @@ -19,9 +19,9 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "ir/value.h" #include "frontend/parallel/auto_parallel/operator_costmodel.h" #include "frontend/parallel/ops_info/operator_info.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/slice_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/slice_info.h index 2635f7b4f41..654343aa972 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/slice_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/slice_info.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,9 +20,9 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "ir/value.h" #include "frontend/parallel/auto_parallel/operator_costmodel.h" #include "frontend/parallel/ops_info/operator_info.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/strided_slice_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/strided_slice_info.h index 65424f8c2c6..99d7a82d9fd 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/strided_slice_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/strided_slice_info.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -20,9 +20,9 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "ir/value.h" #include "frontend/parallel/auto_parallel/operator_costmodel.h" #include "frontend/parallel/ops_info/operator_info.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/tensordot_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/tensordot_info.h index 39ecb952af2..176c31f0f88 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/tensordot_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/tensordot_info.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,9 +19,9 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "utils/ms_utils.h" #include "ir/value.h" #include "frontend/parallel/auto_parallel/operator_costmodel.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/tile_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/tile_info.h index 19178608d0f..8c9cc26e06c 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/tile_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/tile_info.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,9 +20,9 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "ir/value.h" #include "frontend/parallel/auto_parallel/operator_costmodel.h" #include "frontend/parallel/ops_info/operator_info.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/topk_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/topk_info.h index 6161daf81a3..51e6d1b63c5 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/topk_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/topk_info.h @@ -19,9 +19,9 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "ir/value.h" #include "frontend/parallel/auto_parallel/operator_costmodel.h" #include "frontend/parallel/ops_info/operator_info.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.h index 3cf5bf94a6b..cab263410e6 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/transpose_info.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -19,9 +19,9 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "ir/value.h" #include "frontend/parallel/ops_info/operator_info.h" #include "frontend/parallel/strategy.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/uniform_real_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/uniform_real_info.h index c188932f546..75c5d73e2f7 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/uniform_real_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/uniform_real_info.h @@ -19,9 +19,9 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "ir/value.h" #include "frontend/parallel/auto_parallel/operator_costmodel.h" #include "frontend/parallel/ops_info/operator_info.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/unique_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/unique_info.h index 5056c73ab1c..f29f766855a 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/unique_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/unique_info.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,9 +19,9 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "frontend/parallel/auto_parallel/operator_costmodel.h" #include "frontend/parallel/ops_info/operator_info.h" #include "frontend/parallel/strategy.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/unsorted_segment_op_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/unsorted_segment_op_info.h index ad66da36622..676a272852c 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/unsorted_segment_op_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/unsorted_segment_op_info.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,9 +19,9 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "frontend/parallel/auto_parallel/operator_costmodel.h" #include "frontend/parallel/ops_info/operator_info.h" #include "frontend/parallel/strategy.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/virtual_dataset_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/virtual_dataset_info.h index 941af83dfd1..dd51a00d5c9 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/virtual_dataset_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/virtual_dataset_info.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -19,9 +19,9 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "ir/value.h" #include "frontend/parallel/ops_info/operator_info.h" #include "frontend/parallel/strategy.h" diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/virtual_output_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/virtual_output_info.h index 20ac228056d..eb56a8bc1d3 100644 --- a/mindspore/ccsrc/frontend/parallel/ops_info/virtual_output_info.h +++ b/mindspore/ccsrc/frontend/parallel/ops_info/virtual_output_info.h @@ -19,9 +19,9 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "ir/value.h" #include "frontend/parallel/ops_info/operator_info.h" #include "frontend/parallel/ops_info/virtual_dataset_info.h" diff --git a/mindspore/ccsrc/frontend/parallel/parameter_manager.cc b/mindspore/ccsrc/frontend/parallel/parameter_manager.cc index eec9b6f2ae1..7499145c6be 100644 --- a/mindspore/ccsrc/frontend/parallel/parameter_manager.cc +++ b/mindspore/ccsrc/frontend/parallel/parameter_manager.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -24,9 +24,9 @@ #include #include #include -#include #include +#include "utils/hash_map.h" #include "base/core_ops.h" #include "frontend/operator/ops.h" #include "frontend/optimizer/optimizer.h" diff --git a/mindspore/ccsrc/frontend/parallel/pipeline_transformer/pipeline_transformer.cc b/mindspore/ccsrc/frontend/parallel/pipeline_transformer/pipeline_transformer.cc index 4c4135a52c4..ac71e2010fa 100644 --- a/mindspore/ccsrc/frontend/parallel/pipeline_transformer/pipeline_transformer.cc +++ b/mindspore/ccsrc/frontend/parallel/pipeline_transformer/pipeline_transformer.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,13 +14,13 @@ * limitations under the License. 
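[Editor's example] The header hunks above are all mechanical: drop the standard-library hash container include, add "utils/hash_map.h" (or "utils/hash_set.h"), and extend the copyright year. A minimal sketch of what such an alias header can look like follows; the include path and the default hasher choice are assumptions for illustration, not the verbatim contents of utils/hash_map.h.

  // Illustrative sketch of utils/hash_map.h: it only needs to publish
  // mindspore::HashMap as an alias template over robin_hood::unordered_map.
  #include <functional>
  #include "robin_hood/robin_hood.h"  // include path assumed for illustration

  namespace mindspore {
  template <typename K, typename V,
            typename Hash = robin_hood::hash<K>,
            typename Equal = std::equal_to<K>>
  using HashMap = robin_hood::unordered_map<K, V, Hash, Equal>;
  }  // namespace mindspore

utils/hash_set.h would follow the same shape over robin_hood::unordered_set.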
*/ -#include #include #include #include #include #include #include +#include "utils/hash_map.h" #include "frontend/parallel/pipeline_transformer/pipeline_transformer.h" #include "frontend/parallel/auto_parallel/graph_costmodel.h" #include "frontend/parallel/ops_info/ops_utils.h" @@ -40,10 +40,10 @@ namespace mindspore { namespace parallel { -std::unordered_map> parameter_color_map; +mindspore::HashMap> parameter_color_map; // map -std::unordered_map send_tag_map; -std::unordered_map recv_tag_map; +mindspore::HashMap send_tag_map; +mindspore::HashMap recv_tag_map; const std::set WHITE_LIST = {prim::kPrimTupleGetItem, prim::kPrimMakeTuple, prim::kPrimCast}; static bool IsInWhiteList(const CNodePtr &cnode) { diff --git a/mindspore/ccsrc/frontend/parallel/step_auto_parallel.cc b/mindspore/ccsrc/frontend/parallel/step_auto_parallel.cc index d8200dc9ee6..aa965ccce5d 100644 --- a/mindspore/ccsrc/frontend/parallel/step_auto_parallel.cc +++ b/mindspore/ccsrc/frontend/parallel/step_auto_parallel.cc @@ -23,11 +23,11 @@ #include #include #include -#include #include #include -#include +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "base/core_ops.h" #include "frontend/optimizer/opt.h" #include "frontend/optimizer/optimizer.h" @@ -248,7 +248,7 @@ void InitCostGraph() { } void SetStrategyToOperator(const OperatorInfoPtr &operator_info, const PrimitivePtr &prim, - std::unordered_map attrs, bool, StrategyMap *stra_map, + mindspore::HashMap attrs, bool, StrategyMap *stra_map, const std::string &strategy_key_name) { // In this case, the configured strategy should be extracted to help setting cost StrategyPtr strategyPtr; @@ -811,7 +811,7 @@ void AugmentCostGraph(const std::vector &all_nodes) { Shapes inputs_shape = {shape}; Shapes outputs_shape = {shape}; // 2) init the attr - std::unordered_map attr = {}; + mindspore::HashMap attr = {}; // Create the TmpIdentity instance tmp_identity_ptr = std::make_shared(inputs_shape, outputs_shape, attr); @@ -878,7 +878,7 @@ void AugmentCostGraph(const std::vector &all_nodes) { } void ReshapeCostCompute(const std::vector &all_nodes) { - std::unordered_set op_cache; + mindspore::HashSet op_cache; for (auto node : all_nodes) { auto cnode = node->cast(); if (!FindReshape(cnode, &op_cache)) { diff --git a/mindspore/ccsrc/frontend/parallel/step_parallel.cc b/mindspore/ccsrc/frontend/parallel/step_parallel.cc index aaf2bda33dc..2ccf75ea287 100644 --- a/mindspore/ccsrc/frontend/parallel/step_parallel.cc +++ b/mindspore/ccsrc/frontend/parallel/step_parallel.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
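[Editor's example] After the alias swap, call sites keep the familiar unordered-map surface: find, emplace, erase, operator[], and range-for all behave as before, which is why most hunks in this patch touch only the type name. A small self-contained sketch; the names here are illustrative, not taken from the sources above.

  #include <cstdint>
  #include <iostream>
  #include <string>
  #include "utils/hash_map.h"

  int main() {
    mindspore::HashMap<std::string, int64_t> tag_map;
    tag_map.emplace("send", 1);        // emplace preferred over insert(std::make_pair(...))
    tag_map["recv"] = 2;               // operator[] works as with std::unordered_map
    if (auto it = tag_map.find("send"); it != tag_map.end()) {
      std::cout << it->first << " -> " << it->second << "\n";
    }
    for (const auto &kv : tag_map) {   // iteration order is unspecified, as with std
      std::cout << kv.first << "=" << kv.second << "\n";
    }
    return 0;
  }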
@@ -24,9 +24,9 @@ #include #include #include -#include #include +#include "utils/hash_map.h" #include "base/core_ops.h" #include "frontend/operator/ops.h" #include "frontend/optimizer/optimizer.h" @@ -487,12 +487,12 @@ void Redistribution(const std::pair &node_pair, const Opera } } -bool StrategyFound(const std::unordered_map &attrs) { +bool StrategyFound(const mindspore::HashMap &attrs) { auto iter = attrs.find(IN_STRATEGY); return !((iter == attrs.end()) || (iter->second->type_name() == NONE)); } -bool AttrFound(const std::unordered_map &attrs, const std::string &target) { +bool AttrFound(const mindspore::HashMap &attrs, const std::string &target) { auto iter = attrs.find(target); return !((iter == attrs.end()) || (iter->second->type_name() == NONE)); } @@ -797,7 +797,7 @@ void StepReplaceGraph(const ReplaceGraphPtr &replace_graph, const CNodePtr &node // However, the segment_sum operation needs two inputs, To solve this // We maintain a dict to count the times of the same operations, // and bind the inputs according to the times of the op appears. - std::unordered_map input_map = {}; + mindspore::HashMap input_map = {}; static int appear_count = 0; for (auto &replace_input : replace_graph->first) { auto pre_node = node->input(LongToSize(replace_input.second)); diff --git a/mindspore/ccsrc/frontend/parallel/step_parallel.h b/mindspore/ccsrc/frontend/parallel/step_parallel.h index 141d863616c..9653ea1b458 100644 --- a/mindspore/ccsrc/frontend/parallel/step_parallel.h +++ b/mindspore/ccsrc/frontend/parallel/step_parallel.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
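[Editor's example] The input_map comment in StepReplaceGraph above describes a small counting pattern: when the same operation appears more than once among the replacement inputs, the map remembers how many times it has been seen so each occurrence can be bound to the right input slot. A stripped-down sketch of that idea, with types simplified to std::string for illustration:

  #include <iostream>
  #include <string>
  #include <vector>
  #include "utils/hash_map.h"

  int main() {
    const std::vector<std::string> inputs = {"segment_sum", "cast", "segment_sum"};
    mindspore::HashMap<std::string, int> appear_count;
    for (const auto &name : inputs) {
      // operator[] value-initializes the counter on first use, then we bump it.
      const int nth = ++appear_count[name];
      std::cout << name << " occurrence #" << nth << "\n";
    }
    return 0;
  }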
@@ -23,9 +23,9 @@ #include #include #include -#include #include +#include "utils/hash_map.h" #include "frontend/optimizer/opt.h" #include "frontend/parallel/strategy.h" #include "frontend/parallel/tensor_layout/tensor_redistribution.h" @@ -70,9 +70,9 @@ void Redistribution(const std::pair &node_pair, const Opera const CNodePtr &middle_node, int64_t index, TensorRedistribution tensor_redistribution, const CNodePtr &pre_node); -bool StrategyFound(const std::unordered_map &attrs); +bool StrategyFound(const mindspore::HashMap &attrs); -bool AttrFound(const std::unordered_map &attrs, const std::string &target); +bool AttrFound(const mindspore::HashMap &attrs, const std::string &target); AnfNodePtr GetAccuGrad(const std::vector ¶meters, const std::string &weight_name); diff --git a/mindspore/ccsrc/frontend/parallel/step_parallel_utils.cc b/mindspore/ccsrc/frontend/parallel/step_parallel_utils.cc index 495a68cdde9..b1020ab3337 100644 --- a/mindspore/ccsrc/frontend/parallel/step_parallel_utils.cc +++ b/mindspore/ccsrc/frontend/parallel/step_parallel_utils.cc @@ -23,9 +23,9 @@ #include #include #include -#include #include +#include "utils/hash_map.h" #include "base/core_ops.h" #include "frontend/operator/ops.h" #include "frontend/optimizer/optimizer.h" diff --git a/mindspore/ccsrc/frontend/parallel/strategy_checkpoint/parallel_strategy_checkpoint.h b/mindspore/ccsrc/frontend/parallel/strategy_checkpoint/parallel_strategy_checkpoint.h index 38fce9282b0..b59a9531e02 100644 --- a/mindspore/ccsrc/frontend/parallel/strategy_checkpoint/parallel_strategy_checkpoint.h +++ b/mindspore/ccsrc/frontend/parallel/strategy_checkpoint/parallel_strategy_checkpoint.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
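[Editor's example] StrategyFound and AttrFound, declared above and defined in the step_parallel.cc hunk before it, share one lookup idiom: a single find, then a combined check that the key exists and its value is not the NONE placeholder. Rewriting that shape on a plain string map makes the logic easier to see; the "None" sentinel below is a stand-in for the real ValuePtr type_name() check.

  #include <iostream>
  #include <string>
  #include "utils/hash_map.h"

  using AttrMap = mindspore::HashMap<std::string, std::string>;

  // Mirrors the shape of StrategyFound/AttrFound: present and not the NONE sentinel.
  bool AttrFound(const AttrMap &attrs, const std::string &target) {
    auto iter = attrs.find(target);
    return !(iter == attrs.end() || iter->second == "None");
  }

  int main() {
    AttrMap attrs{{"in_strategy", "((4, 1),)"}, {"out_strategy", "None"}};
    std::cout << std::boolalpha
              << AttrFound(attrs, "in_strategy") << "\n"    // true
              << AttrFound(attrs, "out_strategy") << "\n"   // false: NONE sentinel
              << AttrFound(attrs, "missing") << "\n";       // false: absent
    return 0;
  }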
@@ -18,10 +18,10 @@ #define MINDSPORE_CCSRC_FRONTEND_PARALLEL_STRATEGY_CHEKCPOINT_PARALLEL_STRATEGY_CHECKPOINT_H_ #include -#include #include #include #include +#include "utils/hash_map.h" #include "frontend/parallel/ops_info/ops_utils.h" #include "frontend/parallel/strategy.h" #include "frontend/parallel/context.h" @@ -30,11 +30,11 @@ namespace mindspore { namespace parallel { -using StrategyMap = std::unordered_map; +using StrategyMap = mindspore::HashMap; using TensorLayoutPtr = std::shared_ptr; -using TensorInfoMap = std::unordered_map; +using TensorInfoMap = mindspore::HashMap; using ParameterMap = std::vector>; -using ManualShapeMap = std::unordered_map>>; +using ManualShapeMap = mindspore::HashMap>>; using GroupInfoMap = std::vector>>; class StrategyCheckpoint { public: diff --git a/mindspore/ccsrc/frontend/parallel/tensor_layout/redistribution_operator_infer.cc b/mindspore/ccsrc/frontend/parallel/tensor_layout/redistribution_operator_infer.cc index f1a8a77347d..ef7bfad927d 100644 --- a/mindspore/ccsrc/frontend/parallel/tensor_layout/redistribution_operator_infer.cc +++ b/mindspore/ccsrc/frontend/parallel/tensor_layout/redistribution_operator_infer.cc @@ -101,7 +101,7 @@ Status RedistributionOperatorInfer::InferSplitByAxis() { int64_t in_dim = iter->second; int64_t out_dim = out_tensor_map_.GetDimByIdx(index); if (in_dim == out_dim) { - (void)map_.erase(iter++); + iter = map_.erase(iter); continue; } if (in_dim == NONE && @@ -112,7 +112,7 @@ Status RedistributionOperatorInfer::InferSplitByAxis() { MS_LOG(ERROR) << "Insert SplitByAxis Error!"; return Status::FAILED; } - (void)map_.erase(iter++); + iter = map_.erase(iter); } else { (void)++iter; } @@ -123,10 +123,10 @@ Status RedistributionOperatorInfer::InferSplitByAxis() { Status RedistributionOperatorInfer::InferPermuteByAxis() { for (auto iter = map_.begin(); iter != map_.end();) { uint64_t index = iter->first; - int64_t in_dim = map_[index]; + int64_t in_dim = iter->second; int64_t out_dim = out_tensor_map_.GetDimByIdx(index); if (in_dim == out_dim) { - (void)map_.erase(iter++); + iter = map_.erase(iter); continue; } if (in_dim == NONE && @@ -154,7 +154,7 @@ Status RedistributionOperatorInfer::InferPermuteByAxis() { return Status::FAILED; } } - (void)map_.erase(iter++); + iter = map_.erase(iter); map_[LongToSize(cat_dim)] = NONE; } else { (void)++iter; @@ -166,7 +166,7 @@ Status RedistributionOperatorInfer::InferPermuteByAxis() { Status RedistributionOperatorInfer::InferConcatByAxis() { for (auto iter = map_.begin(); iter != map_.end();) { uint64_t index = iter->first; - int64_t in_dim = map_[index]; + int64_t in_dim = iter->second; int64_t out_dim = out_tensor_map_.GetDimByIdx(index); if (in_dim != NONE && out_tensor_map_.GetIndexByValue(in_dim) == NONE) { Args args = {SizeToLong(index), in_dim, dev_mat_.GetDimByReverseIdx(LongToSize(in_dim))}; @@ -175,9 +175,9 @@ Status RedistributionOperatorInfer::InferConcatByAxis() { return Status::FAILED; } if (out_dim == NONE) { - (void)map_.erase(iter++); + iter = map_.erase(iter); } else { - map_[index] = NONE; + iter->second = NONE; (void)++iter; } } else { diff --git a/mindspore/ccsrc/frontend/parallel/tensor_layout/redistribution_operator_infer.h b/mindspore/ccsrc/frontend/parallel/tensor_layout/redistribution_operator_infer.h index ed7541bc630..89101bbabf3 100644 --- a/mindspore/ccsrc/frontend/parallel/tensor_layout/redistribution_operator_infer.h +++ b/mindspore/ccsrc/frontend/parallel/tensor_layout/redistribution_operator_infer.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei 
Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,10 +19,10 @@ #include #include -#include #include #include +#include "utils/hash_map.h" #include "frontend/parallel/tensor_layout/construct_operator.h" #include "frontend/parallel/tensor_layout/redistribution_layout_transfer.h" #include "utils/convert_utils.h" @@ -31,7 +31,7 @@ namespace parallel { using DeviceArrangement = Shape; using TensorMap = Shape; using TensorShape = Shape; -using RedistributionOperatorMap = std::unordered_map; +using RedistributionOperatorMap = mindspore::HashMap; using OperatorR = std::pair; using OperatorC = std::pair; using OperatorList = std::vector; diff --git a/mindspore/ccsrc/pipeline/jit/parse/data_converter.cc b/mindspore/ccsrc/pipeline/jit/parse/data_converter.cc index beeb5237df3..1211c386985 100644 --- a/mindspore/ccsrc/pipeline/jit/parse/data_converter.cc +++ b/mindspore/ccsrc/pipeline/jit/parse/data_converter.cc @@ -1,7 +1,7 @@ /** * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/). * - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -17,11 +17,11 @@ */ #include "pipeline/jit/parse/data_converter.h" -#include #include #include #include #include +#include "utils/hash_map.h" #include "pipeline/jit/parse/resolve.h" #include "pipeline/jit/parse/python_adapter.h" #include "frontend/operator/ops.h" @@ -340,8 +340,8 @@ ValuePtr ConvertCellObjToFuncGraph(const py::object &obj) { FuncGraphPtr bprop_graph = enable_bprop_debug ? 
ConvertToBpropCut(obj) : ConvertToFuncGraph(obj, PYTHON_MOD_GET_BPROP_METHOD); if (bprop_graph != nullptr) { - (void)func_graph->transforms().insert(std::make_pair(CUSTOM_BPROP_NAME, FuncGraphTransform(bprop_graph))); - (void)bprop_graph->transforms().insert(std::make_pair("primal", FuncGraphTransform(func_graph))); + (void)func_graph->transforms().emplace(CUSTOM_BPROP_NAME, FuncGraphTransform(bprop_graph)); + (void)bprop_graph->transforms().emplace("primal", FuncGraphTransform(func_graph)); func_graph->set_flag(FUNC_GRAPH_FLAG_DEFER_INLINE, true); } } @@ -575,16 +575,16 @@ FuncGraphPtr ConvertToFuncGraph(const py::object &obj, const std::string &python } namespace data_converter { -static std::unordered_map object_map_; +static mindspore::HashMap object_map_; -static std::unordered_map> object_graphs_map_; +static mindspore::HashMap> object_graphs_map_; void SetObjGraphValue(const std::string &obj_key, const FuncGraphPtr &data) { object_graphs_map_[obj_key].push_back(data); MS_LOG(DEBUG) << "Set func graph size:" << object_graphs_map_.size(); } -const std::unordered_map> &GetObjGraphs() { +const mindspore::HashMap> &GetObjGraphs() { MS_LOG(DEBUG) << "Obj size:" << object_graphs_map_.size(); return object_graphs_map_; } @@ -687,7 +687,7 @@ void ClearObjectCache() { } } // namespace data_converter -static std::unordered_map g_dataClassToClass = {}; +static mindspore::HashMap g_dataClassToClass = {}; // Parse dataclass to mindspore Class type ClassPtr ParseDataClass(const py::object &cls_obj) { @@ -709,7 +709,7 @@ ClassPtr ParseDataClass(const py::object &cls_obj) { attributes.push_back(std::make_pair(py::cast(item.first), type_value)); } - std::unordered_map methods_map; + mindspore::HashMap methods_map; py::dict methods = python_adapter::CallPyModFn(mod, PYTHON_MOD_GET_DATACLASS_METHODS, cls_obj); for (auto &item : methods) { auto fun_name = item.first.cast(); diff --git a/mindspore/ccsrc/pipeline/jit/parse/data_converter.h b/mindspore/ccsrc/pipeline/jit/parse/data_converter.h index 8bb8b4ee501..8543b0c5301 100644 --- a/mindspore/ccsrc/pipeline/jit/parse/data_converter.h +++ b/mindspore/ccsrc/pipeline/jit/parse/data_converter.h @@ -1,7 +1,7 @@ /** * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/). * - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
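[Editor's example] The redistribution_operator_infer.cc hunks above fix the most important usage bug this migration surfaces: map.erase(iter++) relies on the incremented iterator staying usable after the element is gone, which std::unordered_map happens to tolerate but robin_hood does not guarantee; the portable form is iter = map.erase(iter). In the same spirit, writing map_[index] = NONE while holding iter was replaced by iter->second = NONE, avoiding a redundant lookup through a subscript that could insert and invalidate the live iterator. A sketch of the safe loop:

  #include <iostream>
  #include "utils/hash_map.h"

  int main() {
    mindspore::HashMap<int, int> m{{0, 0}, {1, -1}, {2, 2}, {3, -1}};
    // Erase entries with value -1; keep iterating safely over the rest.
    for (auto iter = m.begin(); iter != m.end();) {
      if (iter->second == -1) {
        iter = m.erase(iter);   // erase returns the next valid iterator
      } else {
        iter->second = 0;       // mutate through the iterator, no extra lookup
        ++iter;
      }
    }
    std::cout << m.size() << "\n";  // 2
    return 0;
  }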
@@ -23,7 +23,7 @@ #include #include #include -#include +#include "utils/hash_map.h" #include "pipeline/jit/parse/parse_base.h" #include "pipeline/jit/parse/python_adapter.h" #include "utils/log_adapter.h" @@ -37,7 +37,7 @@ bool GetObjectValue(const std::string &obj_key, ValuePtr *const data); void SetObjGraphValue(const std::string &obj_key, const FuncGraphPtr &data); -const std::unordered_map> &GetObjGraphs(); +const mindspore::HashMap> &GetObjGraphs(); std::vector GetObjKey(const py::object &obj); ResolveTypeDef GetObjType(const py::object &obj); diff --git a/mindspore/ccsrc/pipeline/jit/parse/function_block.h b/mindspore/ccsrc/pipeline/jit/parse/function_block.h index a9fdda650c7..0c7785b83de 100644 --- a/mindspore/ccsrc/pipeline/jit/parse/function_block.h +++ b/mindspore/ccsrc/pipeline/jit/parse/function_block.h @@ -23,11 +23,11 @@ #include #include #include -#include #include #include #include +#include "utils/hash_map.h" #include "ir/meta_func_graph.h" #include "pipeline/jit/parse/parse_base.h" #include "utils/log_adapter.h" @@ -79,7 +79,7 @@ class FunctionBlock : public std::enable_shared_from_this { AnfNodePtr HandleBuiltinNamespaceInfo(const py::tuple &namespace_info); AnfNodePtr MakeInterpret(const std::string &script_text, const AnfNodePtr &global_dict_node, const AnfNodePtr &local_dict_node, const AnfNodePtr &orig_node); - const std::unordered_map &removable_phis() const { return removable_phis_; } + const mindspore::HashMap &removable_phis() const { return removable_phis_; } void FindIsolatedNodes(); void AddIsolatedNode(const AnfNodePtr &target); void AttachIsolatedNodesBeforeReturn(); @@ -151,19 +151,19 @@ class FunctionBlock : public std::enable_shared_from_this { std::map jumps_; // Keep all removable phis which will be removed in one pass. - std::unordered_map removable_phis_; + mindspore::HashMap removable_phis_; // Keep the map for the resolve node to the removable phi node. // For the case that ReadVariable returns a phi node although this phi node // generated in the prev block is identified as removable. The other blocks // should find this phi node. - std::unordered_map resolve_to_removable_phis_; + mindspore::HashMap resolve_to_removable_phis_; // Hold declared global variables in function std::set global_vars_; // Keep new made resolve symbol for the variable not found in vars_. - std::unordered_map var_to_resolve_; + mindspore::HashMap var_to_resolve_; // Collect all python symbols in the block. // We treat both global symbols and local symbols declared previously as global symbols. 
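[Editor's example] GetObjGraphs and removable_phis() above both hand out const references to internal maps rather than copies; with tables that can hold thousands of graph nodes, this avoids a full copy of the container at every call. The pattern reduced to essentials, with class and field names made up for illustration:

  #include <iostream>
  #include <string>
  #include "utils/hash_map.h"

  class Registry {
   public:
    void Add(const std::string &key, int value) { items_.emplace(key, value); }
    // Read-only view of the internal map: no copy, caller cannot mutate.
    const mindspore::HashMap<std::string, int> &items() const { return items_; }

   private:
    mindspore::HashMap<std::string, int> items_;
  };

  int main() {
    Registry r;
    r.Add("phi0", 1);
    const auto &view = r.items();  // bind by reference; copying would defeat the purpose
    std::cout << view.size() << "\n";
    return 0;
  }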
diff --git a/mindspore/ccsrc/pipeline/jit/parse/parse.cc b/mindspore/ccsrc/pipeline/jit/parse/parse.cc index d5822fc6dae..61e39d20967 100644 --- a/mindspore/ccsrc/pipeline/jit/parse/parse.cc +++ b/mindspore/ccsrc/pipeline/jit/parse/parse.cc @@ -21,9 +21,9 @@ #include #include #include -#include #include #include +#include "utils/hash_map.h" #include "pybind_api/pybind_patch.h" #include "pipeline/jit/parse/resolve.h" #include "pipeline/jit/parse/data_converter.h" @@ -1920,7 +1920,7 @@ FunctionBlockPtr Parser::ParsePass(const FunctionBlockPtr &block, const py::obje return block; } -AnfNodePtr FindPhis(const std::unordered_map &removable_phis, const AnfNodePtr &node) { +AnfNodePtr FindPhis(const mindspore::HashMap &removable_phis, const AnfNodePtr &node) { MS_EXCEPTION_IF_NULL(node); const auto &inp = node->cast(); const auto &iter = removable_phis.find(inp); @@ -1932,13 +1932,13 @@ AnfNodePtr FindPhis(const std::unordered_map &removabl void Parser::RemoveUnnecessaryPhis() { // Merge all removable phis to one map; - std::unordered_map removable_phis; + mindspore::HashMap removable_phis; std::vector phis; for (FunctionBlockPtr &block : func_block_list_) { MS_EXCEPTION_IF_NULL(block); removable_phis.insert(block->removable_phis().begin(), block->removable_phis().end()); std::transform(block->removable_phis().begin(), block->removable_phis().end(), std::back_inserter(phis), - [](const std::pair &pair) { return pair.first; }); + [](const auto &pair) { return pair.first; }); } if (removable_phis.empty()) { return; diff --git a/mindspore/ccsrc/pipeline/jit/parse/parse_dynamic.cc b/mindspore/ccsrc/pipeline/jit/parse/parse_dynamic.cc index 548fdd856bc..581c85c5671 100644 --- a/mindspore/ccsrc/pipeline/jit/parse/parse_dynamic.cc +++ b/mindspore/ccsrc/pipeline/jit/parse/parse_dynamic.cc @@ -16,16 +16,16 @@ * limitations under the License. 
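[Editor's example] The RemoveUnnecessaryPhis hunk above swaps a lambda written against a concrete std::pair type for [](const auto &pair). This is more than style: a map's element type is pair<const Key, Value>, and robin_hood stores its own pair type rather than std::pair, so a lambda spelled against the wrong pair type forces a temporary conversion per element (or fails to compile); const auto & binds to whatever the container actually yields. Sketch:

  #include <algorithm>
  #include <iostream>
  #include <iterator>
  #include <string>
  #include <vector>
  #include "utils/hash_map.h"

  int main() {
    mindspore::HashMap<std::string, int> phis{{"phi0", 0}, {"phi1", 1}};
    std::vector<std::string> keys;
    // const auto & binds to the map's real element type (robin_hood uses its
    // own pair type), so no per-element temporary std::pair is materialized.
    std::transform(phis.begin(), phis.end(), std::back_inserter(keys),
                   [](const auto &pair) { return pair.first; });
    std::cout << keys.size() << "\n";  // 2
    return 0;
  }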
*/ -#include #include #include #include #include +#include "utils/hash_set.h" #include "pipeline/jit/parse/parse_dynamic.h" #include "mindspore/core/ir/cell.h" namespace mindspore::parse { -static std::unordered_set cell_input_args_ = {}; +static mindspore::HashSet cell_input_args_ = {}; static const std::set ignore_judge_dynamic_cell = { "Cell mindspore.nn.layer.basic.Dense", "Cell mindspore.nn.probability.distribution.normal.Normal", "Cell src.transformer.create_attn_mask.CreateAttentionMaskFromInputMask", "Cell mindspore.nn.layer.math.MatMul"}; diff --git a/mindspore/ccsrc/pipeline/jit/pass.cc b/mindspore/ccsrc/pipeline/jit/pass.cc index ff04ccd9b92..9497411d8cd 100644 --- a/mindspore/ccsrc/pipeline/jit/pass.cc +++ b/mindspore/ccsrc/pipeline/jit/pass.cc @@ -19,9 +19,9 @@ #include #include #include -#include #include +#include "utils/hash_map.h" #include "ir/func_graph_cloner.h" #include "pipeline/jit/parse/parse_base.h" #include "pipeline/jit/resource.h" @@ -523,7 +523,7 @@ OptPassGroupMap GetAfterRecomputePass(const opt::irpass::OptimizeIRPassLib &) { return map; } -static std::unordered_map> g_pass_opts = {}; +static mindspore::HashMap> g_pass_opts = {}; void InitOpt(const ResourcePtr &res) { if (g_pass_opts.size() == 0) { diff --git a/mindspore/ccsrc/pipeline/jit/pipeline.cc b/mindspore/ccsrc/pipeline/jit/pipeline.cc index dbfa3d11771..52a586f89a3 100644 --- a/mindspore/ccsrc/pipeline/jit/pipeline.cc +++ b/mindspore/ccsrc/pipeline/jit/pipeline.cc @@ -21,11 +21,12 @@ #include #include #include -#include #include #include #include +#include +#include "utils/hash_map.h" #include "pybind_api/pybind_patch.h" #include "ir/param_info.h" #include "pipeline/jit/pass.h" diff --git a/mindspore/ccsrc/pipeline/jit/pipeline.h b/mindspore/ccsrc/pipeline/jit/pipeline.h index 08e9f55e4ae..016c7adec16 100644 --- a/mindspore/ccsrc/pipeline/jit/pipeline.h +++ b/mindspore/ccsrc/pipeline/jit/pipeline.h @@ -21,9 +21,9 @@ #include #include #include -#include #include #include +#include #include "pybind11/pybind11.h" diff --git a/mindspore/ccsrc/pipeline/jit/pipeline_ge.cc b/mindspore/ccsrc/pipeline/jit/pipeline_ge.cc index 98bb35afb7c..183855c1194 100644 --- a/mindspore/ccsrc/pipeline/jit/pipeline_ge.cc +++ b/mindspore/ccsrc/pipeline/jit/pipeline_ge.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,10 +18,10 @@ #include #include -#include #include #include +#include "utils/hash_map.h" #include "debug/anf_ir_dump.h" #include "ir/tensor.h" #include "transform/graph_ir/convert.h" diff --git a/mindspore/ccsrc/pipeline/jit/pipeline_ge.h b/mindspore/ccsrc/pipeline/jit/pipeline_ge.h index 7054d2ecf4f..de83e4326e1 100644 --- a/mindspore/ccsrc/pipeline/jit/pipeline_ge.h +++ b/mindspore/ccsrc/pipeline/jit/pipeline_ge.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
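[Editor's example] parse_dynamic.cc above keeps a mindspore::HashSet of cell input argument ids; the set is only ever asked "have we seen this id before?", which is the textbook HashSet use. A minimal sketch, with the argument strings made up:

  #include <iostream>
  #include <string>
  #include "utils/hash_set.h"

  int main() {
    mindspore::HashSet<std::string> cell_input_args;
    cell_input_args.insert("x");
    cell_input_args.insert("y");
    // count() is the portable membership test across std and robin_hood sets.
    std::cout << std::boolalpha
              << (cell_input_args.count("x") > 0) << "\n"   // true
              << (cell_input_args.count("z") > 0) << "\n";  // false
    return 0;
  }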
@@ -21,10 +21,10 @@ #include #include #include -#include #include #include +#include "utils/hash_map.h" #include "pybind11/pybind11.h" #include "pipeline/jit/base.h" #include "frontend/operator/ops.h" diff --git a/mindspore/ccsrc/pipeline/jit/remove_value_node_dup.h b/mindspore/ccsrc/pipeline/jit/remove_value_node_dup.h index 4c7dcb84f36..1027b479e74 100644 --- a/mindspore/ccsrc/pipeline/jit/remove_value_node_dup.h +++ b/mindspore/ccsrc/pipeline/jit/remove_value_node_dup.h @@ -17,15 +17,15 @@ #ifndef MINDSPORE_CCSRC_PIPELINE_JIT_REMOVE_VALUE_NODE_DUP_H_ #define MINDSPORE_CCSRC_PIPELINE_JIT_REMOVE_VALUE_NODE_DUP_H_ -#include #include +#include "utils/hash_map.h" #include "base/base.h" #include "ir/manager.h" namespace mindspore { namespace pipeline { -using HashCache = std::unordered_map>; -using HashValue = std::unordered_map; +using HashCache = mindspore::HashMap>; +using HashValue = mindspore::HashMap; void TryToDoReplace(FuncGraphManager *manager, const AnfNodePtr &node, HashCache *hash_cache, HashValue *hash_value); } // namespace pipeline diff --git a/mindspore/ccsrc/pipeline/jit/resource.cc b/mindspore/ccsrc/pipeline/jit/resource.cc index 9bc8e9fe06e..12685b2e1a6 100644 --- a/mindspore/ccsrc/pipeline/jit/resource.cc +++ b/mindspore/ccsrc/pipeline/jit/resource.cc @@ -260,7 +260,7 @@ Resource::Resource(const py::object &obj) Resource::~Resource() { MS_LOG(DEBUG) << "Resource clear"; - std::unordered_map().swap(results_); + mindspore::HashMap().swap(results_); // If exit normally, these global variables will be cleaned // in Resource::Clean call by MsPipeline::Compile, but if exit with MS_LOGEXCEPTION, // these global variables may not being cleaned, it may diff --git a/mindspore/ccsrc/pipeline/jit/resource.h b/mindspore/ccsrc/pipeline/jit/resource.h index 840eb3b4d73..9371b424b04 100644 --- a/mindspore/ccsrc/pipeline/jit/resource.h +++ b/mindspore/ccsrc/pipeline/jit/resource.h @@ -20,10 +20,10 @@ #include #include #include -#include #include -#include +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "pybind11/pybind11.h" #include "pybind11/stl.h" @@ -47,7 +47,7 @@ const char kPynativeGraphId[] = "graph_id"; class InferenceResource; -using BuiltInTypeMap = std::unordered_map>; +using BuiltInTypeMap = mindspore::HashMap>; BuiltInTypeMap &GetMethodMap(); diff --git a/mindspore/ccsrc/pipeline/jit/resource_base.h b/mindspore/ccsrc/pipeline/jit/resource_base.h index 8fdbd467ead..8b3edb26598 100644 --- a/mindspore/ccsrc/pipeline/jit/resource_base.h +++ b/mindspore/ccsrc/pipeline/jit/resource_base.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,9 +20,9 @@ #include #include #include -#include #include +#include "utils/hash_map.h" #include "utils/any.h" #include "ir/manager.h" @@ -38,7 +38,7 @@ class ResourceBase { // set a manager defined outside which will not manage the graphs. 
void set_manager(const FuncGraphManagerPtr &manager) { manager_ = manager; } - std::unordered_map &results() { return results_; } + mindspore::HashMap &results() { return results_; } void SetResult(const std::string &key, const Any &value) { results_[key] = value; } @@ -53,7 +53,7 @@ class ResourceBase { protected: FuncGraphManagerPtr manager_; - std::unordered_map results_; + mindspore::HashMap results_; }; using ResourceBasePtr = std::shared_ptr; diff --git a/mindspore/ccsrc/pipeline/jit/static_analysis/auto_monad.cc b/mindspore/ccsrc/pipeline/jit/static_analysis/auto_monad.cc index 44f00f08e6b..6a1b4091d14 100644 --- a/mindspore/ccsrc/pipeline/jit/static_analysis/auto_monad.cc +++ b/mindspore/ccsrc/pipeline/jit/static_analysis/auto_monad.cc @@ -15,19 +15,19 @@ */ #include "pipeline/jit/static_analysis/auto_monad.h" -#include -#include #include -#include #include #include #include +#include #include #include "pipeline/jit/parse/resolve.h" #include "frontend/operator/ops.h" #include "frontend/operator/composite/multitype_funcgraph.h" #include "utils/flags.h" #include "utils/utils.h" +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "utils/ordered_map.h" #include "utils/ordered_set.h" #include "base/core_ops.h" @@ -207,9 +207,9 @@ prim::MultitypeFuncGraphPtr GetFuncMultitypeFuncGraph(const CNodePtr &cnode) { // -------------------------------------------------------------------- // SCC (Strongly Connected Components) related types. // -------------------------------------------------------------------- -using SccVector = std::set; +using SccVector = mindspore::HashSet; using SccPtr = std::shared_ptr; -using SccMap = std::unordered_map; +using SccMap = mindspore::HashMap; // --------------------------------------------------------------------- // SccFinder find SCCs using Tarjan's algorithm. @@ -219,7 +219,7 @@ class SccFinder { explicit SccFinder(const FuncGraphPtr &root) : root_(root) {} ~SccFinder() = default; void Run() { (void)Search(root_); } - const SccMap &scc_map() const { return scc_map_; } + SccMap scc_map() { return std::move(scc_map_); } private: // Save state of a func graph. @@ -232,14 +232,14 @@ class SccFinder { }; // Search SCCs from the given graph. - const State &Search(FuncGraphPtr graph) { + State &Search(FuncGraphPtr graph) { // Create graph state, set it as visited. MS_EXCEPTION_IF_NULL(graph); - auto [inserted, ok] = visited_.emplace(graph, State(index_++)); + auto [inserted, ok] = visited_.emplace(graph, std::make_unique(index_++)); if (!ok) { MS_LOG(EXCEPTION) << "Already visited: " << graph->ToString(); } - auto &state = inserted->second; + auto &state = *(inserted->second); // Push visited graph to stack. stack_.push(graph); state.in_stack = true; @@ -251,9 +251,9 @@ class SccFinder { // Successor graph has not yet been visited, recurse on it. auto &sg_state = Search(sg); state.lowlink = std::min(state.lowlink, sg_state.lowlink); - } else if (iter->second.in_stack) { + } else if (iter->second->in_stack) { // Successor graph is in stack and hence in the current SCC. - state.lowlink = std::min(state.lowlink, iter->second.index); + state.lowlink = std::min(state.lowlink, iter->second->index); } } // If index == lowlink, this means it is the root of SCC. @@ -267,7 +267,7 @@ class SccFinder { if (found == visited_.end()) { MS_LOG(EXCEPTION) << "Unexpected graph: " << g->ToString(); } - found->second.in_stack = false; + found->second->in_stack = false; // Add graph to SCC, and create the map from graph to SCC. 
scc->insert(g); scc_map_.emplace(g, scc); @@ -290,7 +290,7 @@ class SccFinder { size_t index_ = 1; // Visited graphs and their states. - std::unordered_map visited_; + mindspore::HashMap> visited_; // The stack for Tarjan algorithm. std::stack stack_; @@ -957,7 +957,7 @@ class SideEffectFinder { } // Gets SCC that the given graph belongs to. - const SccPtr &GetScc(const FuncGraphPtr &func_graph) const { + SccPtr GetScc(const FuncGraphPtr &func_graph) const { auto found = scc_map_.find(func_graph); if (found == scc_map_.end()) { MS_LOG(EXCEPTION) << "SCC not found for " << (func_graph ? func_graph->ToString() : "FG(null)"); @@ -983,7 +983,7 @@ class SideEffectFinder { return effect_info; } // Get SCC that this graph belongs to. - auto &scc = GetScc(func_graph); + auto scc = GetScc(func_graph); MS_EXCEPTION_IF_NULL(scc); // To prevent SCC members be visited again, we set effect info // to 'kDetecting' state before start to check cnodes. @@ -1118,14 +1118,14 @@ class SideEffectFinder { // Map graph to its caller cnodes, so that we can add monad inputs to the // caller cnode when we late found that the graph added monad parameters. - std::map> graph_callers_; + mindspore::HashMap> graph_callers_; // Current high order func caller cnode. CNodePtr caller_ = nullptr; // Save switch caller cnodes and their branches, so that we can check and // update monad parameters for branches according the caller inputs. - std::map switch_calls_; + mindspore::HashMap switch_calls_; // switch_layer_calls save all switch_layer calls, so that // we can check whether monad argument should be added for them. diff --git a/mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc b/mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc index dab4d7496f0..852e3faa6e7 100644 --- a/mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc +++ b/mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc @@ -18,8 +18,8 @@ #include #include -#include +#include "utils/hash_set.h" #include "ir/func_graph_cloner.h" #include "abstract/utils.h" #include "debug/trace.h" diff --git a/mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.h b/mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.h index 602d012c93d..a5585000258 100644 --- a/mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.h +++ b/mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.h @@ -21,10 +21,11 @@ #include #include -#include #include #include +#include +#include "utils/hash_map.h" #include "pipeline/jit/static_analysis/static_analysis.h" #include "pipeline/jit/static_analysis/async_eval_result.h" #include "utils/ms_context.h" diff --git a/mindspore/ccsrc/pipeline/jit/static_analysis/order_enforce.cc b/mindspore/ccsrc/pipeline/jit/static_analysis/order_enforce.cc index 3e20cacc066..6552f4cc372 100644 --- a/mindspore/ccsrc/pipeline/jit/static_analysis/order_enforce.cc +++ b/mindspore/ccsrc/pipeline/jit/static_analysis/order_enforce.cc @@ -19,9 +19,9 @@ #include #include #include -#include -#include #include +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "base/core_ops.h" namespace mindspore::pipeline { @@ -203,7 +203,7 @@ class OrderEnforcer { for (auto &load : loads) { // Find user nodes of the Load. auto load_users = FindLoadUsers(load); - std::unordered_set real_users; + mindspore::HashSet real_users; for (auto &load_user : load_users) { // Check the special operator, only one level of user is considered for now. 
if (IsSpecialPrimitive(load_user)) { @@ -240,7 +240,7 @@ class OrderEnforcer { } // Add load users as input edges of the update_state node. - void AddInputEdges(const CNodePtr &update_state, const std::unordered_set &load_users) { + void AddInputEdges(const CNodePtr &update_state, const mindspore::HashSet &load_users) { auto sorted_load_users = SortLoadUsers(load_users); for (auto &load_user : sorted_load_users) { if (IsPrimitiveCNode(load_user, prim::kPrimMakeTuple) || IsPrimitiveCNode(load_user, prim::kPrimUpdateState)) { @@ -256,7 +256,7 @@ class OrderEnforcer { } // Sort load users by their topo sort order. - std::vector SortLoadUsers(const std::unordered_set &load_users) { + std::vector SortLoadUsers(const mindspore::HashSet &load_users) { std::vector vec{load_users.begin(), load_users.end()}; std::sort(vec.begin(), vec.end(), [this](const AnfNodePtr &a, const AnfNodePtr &b) { return IsBefore(a, b); }); return vec; @@ -309,13 +309,13 @@ class OrderEnforcer { using PredFunc = std::function; // Find user nodes for the given node. - std::unordered_set FindNodeUsers(const AnfNodePtr &node, PredFunc pred = nullptr) { + mindspore::HashSet FindNodeUsers(const AnfNodePtr &node, PredFunc pred = nullptr) { auto &node_users = manager_->node_users(); auto iter = node_users.find(node); if (iter == node_users.end()) { return {}; } - std::unordered_set users; + mindspore::HashSet users; for (auto &user : iter->second) { auto &user_node = user.first; if (pred == nullptr || pred(user_node)) { @@ -326,7 +326,7 @@ class OrderEnforcer { } // Find Load or parameter users as the candidate nodes to enforce order of execution. - std::unordered_set FindLoadUsers(const AnfNodePtr &load_or_param) { + mindspore::HashSet FindLoadUsers(const AnfNodePtr &load_or_param) { return FindNodeUsers(load_or_param, [this](const AnfNodePtr &user_node) { // Skip processed nodes. return processed_nodes_.find(user_node) == processed_nodes_.end(); @@ -334,7 +334,7 @@ class OrderEnforcer { } // Find Load nodes for a parameter. - std::unordered_set FindLoadNodes(const AnfNodePtr ¶m) { + mindspore::HashSet FindLoadNodes(const AnfNodePtr ¶m) { return FindNodeUsers(param, [this](const AnfNodePtr &user_node) { // Search for Load nodes only. 
return IsPrimitiveCNode(user_node, prim::kPrimLoad); @@ -343,8 +343,8 @@ class OrderEnforcer { const FuncGraphPtr &func_graph_; FuncGraphManagerPtr manager_; - std::unordered_map topo_sort_map_; - std::unordered_set processed_nodes_; + mindspore::HashMap topo_sort_map_; + mindspore::HashSet processed_nodes_; }; } // namespace diff --git a/mindspore/ccsrc/pipeline/jit/static_analysis/prim.cc b/mindspore/ccsrc/pipeline/jit/static_analysis/prim.cc index 8fda744c46e..1c252c02d2d 100644 --- a/mindspore/ccsrc/pipeline/jit/static_analysis/prim.cc +++ b/mindspore/ccsrc/pipeline/jit/static_analysis/prim.cc @@ -23,8 +23,8 @@ #include #include #include -#include +#include "utils/hash_set.h" #include "frontend/operator/cc_implementations.h" #include "frontend/operator/ops.h" #include "frontend/operator/composite/do_signature.h" @@ -48,7 +48,7 @@ namespace mindspore { namespace abstract { using mindspore::parse::PyObjectWrapper; -std::unordered_set prims_to_skip_undetermined_infer{ +mindspore::HashSet prims_to_skip_undetermined_infer{ "MakeTuple", "make_list", "Switch", "env_setitem", "env_getitem", "Load", "UpdateState"}; EvalResultPtr DoSignatureEvaluator::Run(AnalysisEnginePtr engine, const ConfigPtrList &args_conf_list, @@ -1455,29 +1455,30 @@ struct PrimitiveImplInferValue { bool in_white_list_; // true if this Primitive in white list, else false. }; -using PrimitiveToImplMap = std::unordered_map; +using PrimitiveToImplMap = mindspore::HashMap; PrimitiveToImplMap &GetUniformPrimitiveToImplMap() { - static PrimitiveToImplMap uniform_prim_implement_map = { - {prim::kPrimScalarAdd, {prim::ScalarAdd, true, nullptr, true}}, - {prim::kPrimScalarSub, {prim::ScalarSub, true, nullptr, true}}, - {prim::kPrimScalarMul, {prim::ScalarMul, true, nullptr, true}}, - {prim::kPrimScalarDiv, {prim::ScalarDiv, true, nullptr, true}}, - {prim::kPrimScalarMod, {prim::ScalarMod, true, nullptr, true}}, - {prim::kPrimScalarPow, {prim::ScalarPow, true, nullptr, true}}, - {prim::kPrimScalarFloordiv, {prim::ScalarFloordiv, true, nullptr, true}}, - {prim::kPrimScalarUadd, {prim::ScalarUAdd, true, nullptr, true}}, - {prim::kPrimScalarUsub, {prim::ScalarUSub, true, nullptr, true}}, - {prim::kPrimScalarLog, {prim::ScalarLog, true, nullptr, true}}, - {prim::kPrimScalarEq, {prim::ScalarEq, true, std::make_shared(), true}}, - {prim::kPrimScalarLt, {prim::ScalarLt, true, std::make_shared(), true}}, - {prim::kPrimScalarGt, {prim::ScalarGt, true, std::make_shared(), true}}, - {prim::kPrimScalarNe, {prim::ScalarNe, true, std::make_shared(), true}}, - {prim::kPrimScalarLe, {prim::ScalarLe, true, std::make_shared(), true}}, - {prim::kPrimScalarGe, {prim::ScalarGe, true, std::make_shared(), true}}, - {prim::kPrimBoolNot, {prim::BoolNot, true, std::make_shared(), true}}, - {prim::kPrimBoolAnd, {prim::BoolAnd, true, std::make_shared(), true}}, - {prim::kPrimBoolEq, {prim::BoolEq, true, std::make_shared(), true}}, - {prim::kPrimBoolOr, {prim::BoolOr, true, std::make_shared(), true}}, + using R = PrimitiveToImplMap::mapped_type; + static PrimitiveToImplMap uniform_prim_implement_map{ + {prim::kPrimScalarAdd, R{prim::ScalarAdd, true, nullptr, true}}, + {prim::kPrimScalarSub, R{prim::ScalarSub, true, nullptr, true}}, + {prim::kPrimScalarMul, R{prim::ScalarMul, true, nullptr, true}}, + {prim::kPrimScalarDiv, R{prim::ScalarDiv, true, nullptr, true}}, + {prim::kPrimScalarMod, R{prim::ScalarMod, true, nullptr, true}}, + {prim::kPrimScalarPow, R{prim::ScalarPow, true, nullptr, true}}, + {prim::kPrimScalarFloordiv, R{prim::ScalarFloordiv, true, 
nullptr, true}}, + {prim::kPrimScalarUadd, R{prim::ScalarUAdd, true, nullptr, true}}, + {prim::kPrimScalarUsub, R{prim::ScalarUSub, true, nullptr, true}}, + {prim::kPrimScalarLog, R{prim::ScalarLog, true, nullptr, true}}, + {prim::kPrimScalarEq, R{prim::ScalarEq, true, std::make_shared(), true}}, + {prim::kPrimScalarLt, R{prim::ScalarLt, true, std::make_shared(), true}}, + {prim::kPrimScalarGt, R{prim::ScalarGt, true, std::make_shared(), true}}, + {prim::kPrimScalarNe, R{prim::ScalarNe, true, std::make_shared(), true}}, + {prim::kPrimScalarLe, R{prim::ScalarLe, true, std::make_shared(), true}}, + {prim::kPrimScalarGe, R{prim::ScalarGe, true, std::make_shared(), true}}, + {prim::kPrimBoolNot, R{prim::BoolNot, true, std::make_shared(), true}}, + {prim::kPrimBoolAnd, R{prim::BoolAnd, true, std::make_shared(), true}}, + {prim::kPrimBoolEq, R{prim::BoolEq, true, std::make_shared(), true}}, + {prim::kPrimBoolOr, R{prim::BoolOr, true, std::make_shared(), true}}, }; return uniform_prim_implement_map; } diff --git a/mindspore/ccsrc/pipeline/jit/static_analysis/prim.h b/mindspore/ccsrc/pipeline/jit/static_analysis/prim.h index 1d9738a4a4b..2ac1977e55a 100644 --- a/mindspore/ccsrc/pipeline/jit/static_analysis/prim.h +++ b/mindspore/ccsrc/pipeline/jit/static_analysis/prim.h @@ -22,9 +22,9 @@ #include #include #include -#include #include +#include "utils/hash_map.h" #include "pipeline/jit/static_analysis/evaluator.h" #include "abstract/primitive_infer_map.h" @@ -164,7 +164,7 @@ class UniformPrimEvaluator final : public TrivialPrimEvaluator { const std::size_t nargs_; const TypePtr return_value_type_; const TypePtr specify_out_type_; - std::unordered_map>, TypeHasher, TypeEqual> type_map_; + mindspore::HashMap>, TypeHasher, TypeEqual> type_map_; }; PrimEvaluatorMap &GetPrimEvaluatorConstructors(); diff --git a/mindspore/ccsrc/pipeline/jit/static_analysis/program_specialize.cc b/mindspore/ccsrc/pipeline/jit/static_analysis/program_specialize.cc index c62fa3d8b3d..f0667b628d1 100644 --- a/mindspore/ccsrc/pipeline/jit/static_analysis/program_specialize.cc +++ b/mindspore/ccsrc/pipeline/jit/static_analysis/program_specialize.cc @@ -20,6 +20,7 @@ #include #include +#include #include "frontend/operator/ops.h" #include "frontend/operator/composite/do_signature.h" #include "abstract/abstract_function.h" diff --git a/mindspore/ccsrc/pipeline/jit/static_analysis/program_specialize.h b/mindspore/ccsrc/pipeline/jit/static_analysis/program_specialize.h index f67fc647e38..6a1e552301b 100644 --- a/mindspore/ccsrc/pipeline/jit/static_analysis/program_specialize.h +++ b/mindspore/ccsrc/pipeline/jit/static_analysis/program_specialize.h @@ -22,11 +22,12 @@ #include #include #include -#include -#include #include #include +#include +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "ir/anf.h" #include "ir/func_graph_cloner.h" #include "pipeline/jit/static_analysis/evaluator.h" @@ -52,7 +53,7 @@ class ProgramSpecializer { ~ProgramSpecializer() = default; // Run the program specializer on the topmost graph in the given context. 
FuncGraphPtr Run(const FuncGraphPtr &fg, const AnalysisContextPtr &context); - const std::unordered_set &seen() const { return seen_; } + const mindspore::HashSet &seen() const { return seen_; } void AddSeen(const AnfNodePtr &node) { (void)seen_.insert(node); } std::shared_ptr GetFuncGraphSpecializer(const AnalysisContextPtr &context); @@ -65,7 +66,7 @@ class ProgramSpecializer { private: std::shared_ptr engine_; - std::unordered_set seen_; + mindspore::HashSet seen_; FuncGraphManagerPtr mng_; std::unordered_map, ContextHasher, ContextEqual> specializations_; @@ -90,8 +91,8 @@ class FuncGraphSpecializer : public std::enable_shared_from_this engine_; ClonerPtr cloner_; std::vector todo_; - std::unordered_set marked_; - std::unordered_map evalcaches_; + mindspore::HashSet marked_; + mindspore::HashMap evalcaches_; void FirstPass(); void SecondPass(); diff --git a/mindspore/ccsrc/pipeline/jit/static_analysis/static_analysis.cc b/mindspore/ccsrc/pipeline/jit/static_analysis/static_analysis.cc index 308cc05c2ba..23f1af1459a 100644 --- a/mindspore/ccsrc/pipeline/jit/static_analysis/static_analysis.cc +++ b/mindspore/ccsrc/pipeline/jit/static_analysis/static_analysis.cc @@ -98,26 +98,6 @@ AbstractBasePtr IntermediateJoin(const AbstractBasePtr &arg1, const AbstractBase return nullptr; } -std::size_t AnfNodeConfigHasher::operator()(const AnfNodeConfigPtr conf) const { - MS_EXCEPTION_IF_NULL(conf); - MS_EXCEPTION_IF_NULL(conf->node()); - std::size_t hash_value = conf->node()->hash(); - if (!conf->context()->IsDummyContext()) { - hash_value = hash_combine(hash_value, std::hash{}(conf->context().get())); - } - return hash_value; -} - -bool AnfNodeConfigEqual::operator()(const AnfNodeConfigPtr lhs, const AnfNodeConfigPtr rhs) const { - if (lhs == nullptr || rhs == nullptr) { - return false; - } - if (lhs == rhs) { - return true; - } - return (*lhs == *rhs); -} - AnalysisResult AnalysisEngine::Run(const FuncGraphPtr &func_graph, const AbstractBasePtrList &args_spec_list) { StaticAnalysisException::Instance().ClearException(); AnalysisResult result; @@ -652,7 +632,7 @@ EvaluatorPtr AnalysisEngine::HandleNestedRecursion(const std::vector undetermined_evals; + mindspore::HashSet undetermined_evals; for (auto r_it = eval_trace_.rbegin(); r_it != latest_entry_iter; r_it++) { undetermined_evals.insert(*r_it); } diff --git a/mindspore/ccsrc/pipeline/jit/static_analysis/static_analysis.h b/mindspore/ccsrc/pipeline/jit/static_analysis/static_analysis.h index 10177265f97..a6915cbb20a 100644 --- a/mindspore/ccsrc/pipeline/jit/static_analysis/static_analysis.h +++ b/mindspore/ccsrc/pipeline/jit/static_analysis/static_analysis.h @@ -22,18 +22,19 @@ #include #include #include -#include #include #include #include #include -#include #include +#include +#include #ifdef DEBUG #include #endif +#include "utils/hash_map.h" #include "utils/log_adapter.h" #include "ir/anf.h" #include "pybind_api/ir/primitive_py.h" @@ -56,7 +57,7 @@ size_t StackFrameDepth(); size_t StackFrameMaxDepth(); // define attribute value map -using AttrValueMap = std::unordered_map; +using AttrValueMap = mindspore::HashMap; using AttrValueMapPtr = std::shared_ptr; // the class to save evaluated result: abstract value and modified attribute @@ -124,15 +125,22 @@ class AnfNodeConfig : public Config { EvalResultPtr ObtainEvalResult() override; - AnalysisContextPtr context() const { return context_; } + const AnalysisContextPtr &context() const { return context_; } - AnfNodePtr node() const { return node_; } + const AnfNodePtr &node() const { return node_; 
} - FuncGraphPtr func_graph() const { return func_graph_; } + const FuncGraphPtr &func_graph() const { return func_graph_; } AnalysisEnginePtr engine() const { return engine_.lock(); } - // used by unordered_map; + size_t hash() const { + std::size_t node_hash = PointerHash{}(node_); + if (context_->IsDummyContext()) { + return node_hash; + } + return hash_combine(node_hash, PointerHash{}(context_)); + } + bool operator==(const AnfNodeConfig &other) const { // compare node with pointer, context with pointer except DummyContext as it's created by make_shared; // context should not be nullptr; @@ -140,7 +148,7 @@ class AnfNodeConfig : public Config { return false; } if (context_->IsDummyContext() && other.context_->IsDummyContext()) { - return true; + return node_ == other.node_; } // Don't check `func_graph_` equality. return context_ == other.context_; @@ -168,11 +176,22 @@ class AnfNodeConfig : public Config { using AnfNodeConfigPtr = std::shared_ptr; struct AnfNodeConfigHasher { - std::size_t operator()(const AnfNodeConfigPtr conf) const; + std::size_t operator()(const AnfNodeConfigPtr &conf) const { + MS_EXCEPTION_IF_NULL(conf); + return conf->hash(); + } }; struct AnfNodeConfigEqual { - bool operator()(const AnfNodeConfigPtr lhs, const AnfNodeConfigPtr rhs) const; + bool operator()(const AnfNodeConfigPtr &lhs, const AnfNodeConfigPtr &rhs) const { + if (lhs == nullptr || rhs == nullptr) { + return false; + } + if (lhs == rhs) { + return true; + } + return (*lhs == *rhs); + } }; class VirtualConfig : public Config { @@ -189,7 +208,7 @@ class VirtualConfig : public Config { AbstractBasePtr abstract_; }; -using PrimEvaluatorMap = std::unordered_map; +using PrimEvaluatorMap = mindspore::HashMap; using AnfNodeConfigMap = std::unordered_map; @@ -283,7 +302,7 @@ class AnalysisEngine : public std::enable_shared_from_this { AnalysisContextPtr root_context() const { return root_context_; } void set_root_context(const AnalysisContextPtr &context) { root_context_ = context; } - std::unordered_map prim_py_evaluators_; + mindspore::HashMap prim_py_evaluators_; bool enable_recursive_eval() const { return enable_recursive_eval_; } static EvalResultPtr ProcessEvalResults(const AbstractBasePtrList &out_specs, const AnfNodePtr &node); diff --git a/mindspore/ccsrc/pipeline/jit/validator.h b/mindspore/ccsrc/pipeline/jit/validator.h index 1ccc58c8fdd..aba85f27f6c 100644 --- a/mindspore/ccsrc/pipeline/jit/validator.h +++ b/mindspore/ccsrc/pipeline/jit/validator.h @@ -1,7 +1,7 @@ /** * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/). * - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -22,7 +22,7 @@ #include #include #include -#include +#include "utils/hash_set.h" #include "frontend/operator/ops.h" #include "ir/anf.h" #include "utils/misc.h" diff --git a/mindspore/ccsrc/pipeline/pynative/base.h b/mindspore/ccsrc/pipeline/pynative/base.h index 98ae218f471..8f9b5e6ff1b 100644 --- a/mindspore/ccsrc/pipeline/pynative/base.h +++ b/mindspore/ccsrc/pipeline/pynative/base.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
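[Editor's example] Two robin_hood-driven fixes meet in the static-analysis hunks above. First, in auto_monad.cc, SccFinder::visited_ now maps to std::unique_ptr<State> and scc_map()/GetScc return by value: robin_hood gives no reference stability across rehash, so long-lived references must point at heap objects the map merely owns, not at slots inside the table. Second, AnfNodeConfig gains an inline hash() built from pointer hashes plus hash_combine, and the dummy-context equality now also compares the node; returning true unconditionally let distinct configs collide into one cache entry. A reduced hasher/equal pair in the same shape; PointerHash in the patch is presumably a smart-pointer hash, modeled here with std::hash over the raw pointer, and the mixing constant is illustrative:

  #include <functional>
  #include <iostream>
  #include <memory>
  #include "utils/hash_map.h"

  struct Config {
    std::shared_ptr<int> node;
    std::shared_ptr<int> context;
    // Equality must look at every field that feeds the hash.
    bool operator==(const Config &o) const { return node == o.node && context == o.context; }
  };

  struct ConfigHasher {
    std::size_t operator()(const Config &c) const {
      std::size_t h = std::hash<int *>{}(c.node.get());
      // hash_combine-style mixing, same spirit as the patch above.
      return h ^ (std::hash<int *>{}(c.context.get()) + 0x9e3779b9 + (h << 6) + (h >> 2));
    }
  };

  int main() {
    auto n = std::make_shared<int>(0);
    auto ctx = std::make_shared<int>(1);
    // Assumes the HashMap alias forwards a custom hasher, as std::unordered_map does.
    mindspore::HashMap<Config, int, ConfigHasher> cache;
    cache.emplace(Config{n, ctx}, 42);
    std::cout << cache.count(Config{n, ctx}) << "\n";  // 1: same pointers, same slot
    return 0;
  }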
@@ -22,9 +22,9 @@ #include #include #include -#include -#include +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "pybind11/pybind11.h" #include "ir/anf.h" #include "pybind_api/ir/primitive_py.h" diff --git a/mindspore/ccsrc/pipeline/pynative/pynative_cache.h b/mindspore/ccsrc/pipeline/pynative/pynative_cache.h index ef6aabe1e7a..1a19612b3ec 100644 --- a/mindspore/ccsrc/pipeline/pynative/pynative_cache.h +++ b/mindspore/ccsrc/pipeline/pynative/pynative_cache.h @@ -20,13 +20,14 @@ #include #include #include +#include "utils/hash_map.h" #include "ir/anf.h" namespace mindspore::pynative { struct AbsCacheKey { std::string prim_name_; size_t prim_hash_value_; - std::unordered_map prim_attrs_; + mindspore::HashMap prim_attrs_; }; struct AbsCacheKeyHasher { @@ -42,19 +43,18 @@ struct AbsCacheKeyEqual { return false; } - auto all = std::all_of(lk.prim_attrs_.begin(), lk.prim_attrs_.end(), - [&rk](const std::pair &item) -> bool { - auto iter = rk.prim_attrs_.find(item.first); - if (iter == rk.prim_attrs_.end()) { - return false; - } - if (item.second == iter->second) { - return true; - } - MS_EXCEPTION_IF_NULL(item.second); - MS_EXCEPTION_IF_NULL(iter->second); - return *item.second == *iter->second; - }); + auto all = std::all_of(lk.prim_attrs_.begin(), lk.prim_attrs_.end(), [&rk](const auto &item) -> bool { + auto iter = rk.prim_attrs_.find(item.first); + if (iter == rk.prim_attrs_.end()) { + return false; + } + if (item.second == iter->second) { + return true; + } + MS_EXCEPTION_IF_NULL(item.second); + MS_EXCEPTION_IF_NULL(iter->second); + return *item.second == *iter->second; + }); return all; } }; @@ -62,7 +62,7 @@ struct AbsCacheKeyEqual { struct PrimAbsInfo { abstract::AbstractBasePtr abs; bool is_dynamic_shape = false; - std::unordered_map attrs; + mindspore::HashMap attrs; }; using AbstractListMap = std::unordered_map; @@ -81,8 +81,8 @@ using PyObjectIdCache = std::unordered_map dtypes; - std::unordered_map> type_indexes; + mindspore::HashMap> type_indexes; }; -using ImplicitCastCache = std::unordered_map; +using ImplicitCastCache = mindspore::HashMap; } // namespace mindspore::pynative #endif // MINDSPORE_CCSRC_PIPELINE_PYNATIVE_PYNATIVE_ABS_CACHE_H diff --git a/mindspore/ccsrc/pipeline/pynative/pynative_execute.cc b/mindspore/ccsrc/pipeline/pynative/pynative_execute.cc index b1f11f1fe13..76a32c32bdc 100644 --- a/mindspore/ccsrc/pipeline/pynative/pynative_execute.cc +++ b/mindspore/ccsrc/pipeline/pynative/pynative_execute.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
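[Editor's example] AbsCacheKey above carries prim_hash_value_ alongside its attribute map, so the hasher can return a stored hash instead of re-hashing a whole map of attributes on every probe, while the equality functor still compares attributes value-by-value with std::all_of and a generic lambda. The shape of that trade, reduced; field names mirror the patch, the value type is simplified to int:

  #include <algorithm>
  #include <cstddef>
  #include <iostream>
  #include <string>
  #include "utils/hash_map.h"

  struct AbsCacheKey {
    std::string prim_name_;
    std::size_t prim_hash_value_;                     // precomputed once at construction
    mindspore::HashMap<std::string, int> prim_attrs_;
  };

  struct AbsCacheKeyHasher {
    std::size_t operator()(const AbsCacheKey &k) const { return k.prim_hash_value_; }
  };

  struct AbsCacheKeyEqual {
    bool operator()(const AbsCacheKey &lk, const AbsCacheKey &rk) const {
      if (lk.prim_attrs_.size() != rk.prim_attrs_.size()) return false;
      // Generic lambda: binds to the map's real element type, as in the patch.
      return std::all_of(lk.prim_attrs_.begin(), lk.prim_attrs_.end(), [&rk](const auto &item) {
        auto iter = rk.prim_attrs_.find(item.first);
        return iter != rk.prim_attrs_.end() && iter->second == item.second;
      });
    }
  };

  int main() {
    AbsCacheKey a{"Add", 7, {{"axis", 0}}};
    AbsCacheKey b{"Add", 7, {{"axis", 0}}};
    std::cout << std::boolalpha << AbsCacheKeyEqual{}(a, b) << "\n";  // true
    return 0;
  }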
@@ -20,9 +20,10 @@ #include #include #include -#include #include +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "debug/trace.h" #include "debug/anf_ir_dump.h" #include "pybind_api/api_register.h" @@ -178,11 +179,11 @@ std::string GetId(const py::handle &obj) { } if (py::isinstance(obj) || py::isinstance(obj)) { - const auto &it = g_pyobj_id_cache.find(obj); + auto it = g_pyobj_id_cache.find(obj); if (it == g_pyobj_id_cache.end()) { - auto &&id = GetPyObjId(obj); - g_pyobj_id_cache[obj] = id; - return std::move(id); + auto id = GetPyObjId(obj); + g_pyobj_id_cache.emplace(obj, id); + return id; } else { return it->second; } @@ -192,7 +193,7 @@ std::string GetId(const py::handle &obj) { } void GetTypeIndex(const std::vector &dtypes, - std::unordered_map> *type_indexes) { + mindspore::HashMap> *type_indexes) { MS_EXCEPTION_IF_NULL(type_indexes); for (size_t i = 0; i < dtypes.size(); ++i) { auto it = type_indexes->find(dtypes[i]); @@ -224,8 +225,8 @@ TypeId JudgeMaxType(TypeId max_type, bool has_scalar_float32, bool has_scalar_in } void GetDstType(const py::tuple &py_args, - const std::unordered_map> &type_indexes, - std::unordered_map *dst_type) { + const mindspore::HashMap> &type_indexes, + mindspore::HashMap *dst_type) { for (auto it = type_indexes.begin(); it != type_indexes.end(); (void)++it) { const auto &type = it->first; const auto &indexes = it->second; @@ -393,7 +394,7 @@ py::list FilterTensorArgs(const py::args &args, bool has_sens = false) { } bool RunOpConvertConstInputToAttr(const py::object &input_object, size_t input_index, const PrimitivePtr &op_prim, - const std::unordered_set &input_attrs) { + const mindspore::HashSet &input_attrs) { MS_EXCEPTION_IF_NULL(op_prim); const auto &input_names_value = op_prim->GetAttr(kAttrInputNames); if (input_names_value == nullptr) { @@ -512,7 +513,7 @@ void ConstructInputTensor(const OpExecInfoPtr &op_run_info, std::vector bool reg_exist = false; if (op_run_info->op_name == prim::kPrimCustom->name()) { // Custom op needs to set reg dynamically - std::unordered_set attr_indexes; + mindspore::HashSet attr_indexes; opt::GetCustomOpAttrIndex(op_prim, &attr_indexes); if (!attr_indexes.empty()) { reg_exist = true; @@ -1238,7 +1239,7 @@ py::object ForwardExecutor::DoParamMixPrecisionCastTuple(bool *is_cast, const py } void ForwardExecutor::DoSignatureCast(const PrimitivePyPtr &prim, - const std::unordered_map &dst_type, + const mindspore::HashMap &dst_type, const std::vector &dtypes, const OpExecInfoPtr &op_exec_info) { MS_EXCEPTION_IF_NULL(prim); @@ -1353,10 +1354,10 @@ void ForwardExecutor::SetImplicitCast(const OpExecInfoPtr &op_exec_info) { << "signature size " << sig_size; } std::vector dtypes; - std::unordered_map> type_indexes; + mindspore::HashMap> type_indexes; bool has_dtype_sig = GetSignatureType(op_exec_info->py_primitive, &dtypes); if (has_dtype_sig) { - std::unordered_map dst_type; + mindspore::HashMap dst_type; GetTypeIndex(dtypes, &type_indexes); GetDstType(op_exec_info->op_inputs, type_indexes, &dst_type); DoSignatureCast(op_exec_info->py_primitive, dst_type, dtypes, op_exec_info); @@ -1369,7 +1370,7 @@ void ForwardExecutor::SetImplicitCast(const OpExecInfoPtr &op_exec_info) { return; } MS_LOG(DEBUG) << "Do signature for " << op_exec_info->op_name << " with cache"; - std::unordered_map dst_type; + mindspore::HashMap dst_type; GetDstType(op_exec_info->op_inputs, it->second.type_indexes, &dst_type); DoSignatureCast(op_exec_info->py_primitive, dst_type, it->second.dtypes, op_exec_info); } @@ -1834,7 +1835,7 @@ void 
GradExecutor::UpdateForwardTensorInfoInBpropGraph(const OpExecInfoPtr &op_e void GradExecutor::SaveForwardTensorInfoInBpropGraph(const pipeline::ResourcePtr &resource) const { MS_EXCEPTION_IF_NULL(resource); // Get all tensors id of forward op - std::unordered_set forward_op_tensor_id; + mindspore::HashSet forward_op_tensor_id; const auto &op_info_with_tensor_id = top_cell()->op_info_with_tensor_id(); for (const auto &record : op_info_with_tensor_id) { std::for_each(record.second.begin(), record.second.end(), @@ -2939,8 +2940,8 @@ void GradExecutor::DoParameterReplace(const FuncGraphPtr &first_grad_fg, const p auto second_graph_info = top_cell()->graph_info_map().at(second_df_builder); MS_EXCEPTION_IF_NULL(second_graph_info); - std::unordered_set params_weights_set; - std::unordered_set params_inputs_set; + mindspore::HashSet params_weights_set; + mindspore::HashSet params_inputs_set; for (const auto &sec : second_graph_info->params) { if (sec.second->has_default()) { params_weights_set.emplace(sec.first); diff --git a/mindspore/ccsrc/pipeline/pynative/pynative_execute.h b/mindspore/ccsrc/pipeline/pynative/pynative_execute.h index 8747abfbc0e..96937c48c1c 100644 --- a/mindspore/ccsrc/pipeline/pynative/pynative_execute.h +++ b/mindspore/ccsrc/pipeline/pynative/pynative_execute.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -21,13 +21,13 @@ #include #include #include -#include -#include #include #include #include #include +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "pybind11/pybind11.h" #include "pybind11/numpy.h" #include "pybind_api/ir/base_ref_py.h" @@ -41,9 +41,9 @@ namespace mindspore::pynative { namespace py = pybind11; -using OpInfoWithTensorId = std::unordered_map>; -using TensorIdWithTensorObject = std::unordered_map>; -using OpInfoWithMsFuncForwardTensors = std::unordered_map>; +using OpInfoWithTensorId = mindspore::HashMap>; +using TensorIdWithTensorObject = mindspore::HashMap>; +using OpInfoWithMsFuncForwardTensors = mindspore::HashMap>; py::object RealRunOp(const py::args &args); @@ -51,7 +51,7 @@ struct GraphInfo { std::string cell_id; AnfNodePtr output; OrderedMap params; // hold input parameters and cell weights - std::unordered_map>> node_map; + mindspore::HashMap>> node_map; GraphInfo() = default; explicit GraphInfo(std::string id) : cell_id(std::move((id))) {} }; @@ -99,7 +99,7 @@ class TopCellInfo { std::string &all_op_info() { return all_op_info_; } const std::string &grad_operation() const { return grad_operation_; } void set_grad_operation(const std::string &grad_operation) { grad_operation_ = grad_operation; } - std::unordered_set &sub_cell_list() { return sub_cell_list_; } + mindspore::HashSet &sub_cell_list() { return sub_cell_list_; } bool IsSubCell(const std::string &cell_id) const; OrderedMap &graph_info_map() { return graph_info_map_; } OpInfoWithTensorId &op_info_with_tensor_id() { return op_info_with_tensor_id_; } @@ -138,7 +138,7 @@ class TopCellInfo { std::string all_op_info_; std::string grad_operation_; OrderedMap graph_info_map_; - std::unordered_set sub_cell_list_; + mindspore::HashSet sub_cell_list_; OpInfoWithTensorId op_info_with_tensor_id_; TensorIdWithTensorObject tensor_id_with_tensor_object_; OpInfoWithMsFuncForwardTensors op_info_with_ms_func_forward_tensors_; @@ -310,7 +310,7 @@ class GradExecutor { // Use vector 
for keep order std::vector top_cell_list_; // Record all top cell which has been ran - std::unordered_map already_run_top_cell_; + mindspore::HashMap already_run_top_cell_; // Use vector for keep order ForwardExecutorWeakPtr forward_executor_; }; @@ -327,7 +327,7 @@ class ForwardExecutor { void RunOpInner(py::object *ret, const OpExecInfoPtr &op_exec_info); OpExecInfoPtr GenerateOpExecInfo(const py::args &args); void set_grad_executor(const GradExecutorPtr &grad_executor) { grad_executor_ = GradExecutorWeakPtr(grad_executor); } - std::unordered_map &node_abs_map() { return node_abs_map_; } + mindspore::HashMap &node_abs_map() { return node_abs_map_; } void ClearRes(); CNodePtr ConstructForwardGraph(const OpExecInfoPtr &op_exec_info); void set_lazy_build(bool lazy_build) { lazy_build_ = lazy_build; } @@ -356,14 +356,14 @@ class ForwardExecutor { size_t index); py::object DoAutoCastTuple(const py::tuple &tuple, const TypeId &type_id, const std::string &op_name, size_t index); py::object DoAutoCast(const py::object &arg, const TypeId &type_id, const std::string &op_name, size_t index); - void DoSignatureCast(const PrimitivePyPtr &prim, const std::unordered_map &dst_type, + void DoSignatureCast(const PrimitivePyPtr &prim, const mindspore::HashMap &dst_type, const std::vector &dtypes, const OpExecInfoPtr &op_exec_info); private: GradExecutorWeakPtr grad_executor_; PrimAbsCache prim_abs_list_; ImplicitCastCache implicit_cast_map_; - std::unordered_map node_abs_map_; + mindspore::HashMap node_abs_map_; bool lazy_build_{false}; }; diff --git a/mindspore/ccsrc/pipeline/pynative/pynative_execute_ge.cc b/mindspore/ccsrc/pipeline/pynative/pynative_execute_ge.cc index 68eeef62430..e6c3f375fba 100644 --- a/mindspore/ccsrc/pipeline/pynative/pynative_execute_ge.cc +++ b/mindspore/ccsrc/pipeline/pynative/pynative_execute_ge.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
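Note: the `mindspore::HashMap` / `mindspore::HashSet` names used throughout these hunks come from the new alias headers being included above. Below is a minimal sketch of what those headers plausibly contain; the guard macro name is hypothetical (standing in for whatever the robin-hood build option actually defines), and the default template arguments follow robin-hood's published signatures.

```cpp
// utils/hash_map.h / utils/hash_set.h -- illustrative sketch only.
// ENABLE_ROBIN_HOOD is a hypothetical macro name for the real build flag.
#include <functional>
#ifdef ENABLE_ROBIN_HOOD
#include "robin_hood.h"  // path depends on the vendored include setup
namespace mindspore {
template <typename K, typename V, typename Hash = robin_hood::hash<K>,
          typename Equal = std::equal_to<K>>
using HashMap = robin_hood::unordered_map<K, V, Hash, Equal>;
template <typename T, typename Hash = robin_hood::hash<T>,
          typename Equal = std::equal_to<T>>
using HashSet = robin_hood::unordered_set<T, Hash, Equal>;
}  // namespace mindspore
#else
#include <unordered_map>
#include <unordered_set>
namespace mindspore {
template <typename K, typename V, typename Hash = std::hash<K>,
          typename Equal = std::equal_to<K>>
using HashMap = std::unordered_map<K, V, Hash, Equal>;
template <typename T, typename Hash = std::hash<T>,
          typename Equal = std::equal_to<T>>
using HashSet = std::unordered_set<T, Hash, Equal>;
}  // namespace mindspore
#endif
```

Because the alias swaps the whole container type rather than wrapping it, a header and its matching .cc must always change together, which is why the hunks below come in declaration/definition pairs.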
@@ -19,8 +19,8 @@ #include #include #include -#include +#include "utils/hash_set.h" #include "utils/any.h" #include "utils/utils.h" #include "utils/ms_context.h" @@ -119,7 +119,7 @@ bool SetInputsForSingleOpGraph(const OpExecInfoPtr &op_exec_info, const std::vec } bool BuildSingleOpGraph(const OpExecInfoPtr &op_exec_info, const std::vector &inputs, - const std::unordered_map &attrs, const GeGraphPtr &graph) { + const mindspore::HashMap &attrs, const GeGraphPtr &graph) { MS_EXCEPTION_IF_NULL(op_exec_info); std::string op_name = op_exec_info->op_name; auto op_inputs = op_exec_info->op_inputs; @@ -190,7 +190,7 @@ void ToTensorPtr(const OpExecInfoPtr op_exec_info, std::vector *con PynativeStatusCode ConvertAttributes(const OpExecInfoPtr &op_exec_info, const std::vector &inputs) { MS_EXCEPTION_IF_NULL(op_exec_info); auto op_attrs = op_exec_info->op_attrs; - std::unordered_map attrs{}; + mindspore::HashMap attrs{}; for (auto &item : op_attrs) { if (!py::isinstance(item.first)) { diff --git a/mindspore/ccsrc/pipeline/pynative/pynative_execute_ge.h b/mindspore/ccsrc/pipeline/pynative/pynative_execute_ge.h index b8459db6876..1ee8da873b4 100644 --- a/mindspore/ccsrc/pipeline/pynative/pynative_execute_ge.h +++ b/mindspore/ccsrc/pipeline/pynative/pynative_execute_ge.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -21,8 +21,8 @@ #include #include #include -#include +#include "utils/hash_map.h" #include "pipeline/pynative/base.h" #include "transform/graph_ir/convert.h" #include "transform/graph_ir/graph_runner.h" @@ -37,7 +37,7 @@ using GeGraphPtr = std::shared_ptr; namespace mindspore { namespace pynative { bool BuildSingleOpGraph(const OpExecInfoPtr &op_exec_info, const std::vector &inputs, - const std::unordered_map &attrs, const GeGraphPtr &graph); + const mindspore::HashMap &attrs, const GeGraphPtr &graph); py::object RunOpInGE(const OpExecInfoPtr &op_exec_info, PynativeStatusCode *status); } // namespace pynative diff --git a/mindspore/ccsrc/pipeline/pynative/pynative_profiling.h b/mindspore/ccsrc/pipeline/pynative/pynative_profiling.h index c95ff5f49df..882552651e3 100644 --- a/mindspore/ccsrc/pipeline/pynative/pynative_profiling.h +++ b/mindspore/ccsrc/pipeline/pynative/pynative_profiling.h @@ -21,7 +21,6 @@ #include #include #include -#include namespace mindspore { class PynativeProfiler { diff --git a/mindspore/ccsrc/profiler/device/profiling.h b/mindspore/ccsrc/profiler/device/profiling.h index c4af655fd01..fa46b7ab9c4 100644 --- a/mindspore/ccsrc/profiler/device/profiling.h +++ b/mindspore/ccsrc/profiler/device/profiling.h @@ -25,6 +25,7 @@ #include #include #include +#include "utils/hash_map.h" namespace mindspore { namespace profiler { diff --git a/mindspore/ccsrc/ps/core/abstract_node.cc b/mindspore/ccsrc/ps/core/abstract_node.cc index 9c54c9b44a8..832b7da7357 100644 --- a/mindspore/ccsrc/ps/core/abstract_node.cc +++ b/mindspore/ccsrc/ps/core/abstract_node.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
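Note on the paired `BuildSingleOpGraph` edits in this file and its header below: `mindspore::HashMap` is a distinct type, not a typedef of `std::unordered_map`, so a stale declaration would define a different overload and the call sites would fail to link against the new definition. A minimal sketch of the paired change, with stand-ins for every type that was stripped in extraction:

```cpp
#include <memory>
#include <string>
#include "utils/hash_map.h"

class Value {};                           // stand-in for mindspore's Value
using ValuePtr = std::shared_ptr<Value>;  // assumed mapped type of attrs

// header -- declaration
bool BuildSingleOpGraphSketch(const mindspore::HashMap<std::string, ValuePtr> &attrs);

// .cc -- the definition must spell the same container type
bool BuildSingleOpGraphSketch(const mindspore::HashMap<std::string, ValuePtr> &attrs) {
  return !attrs.empty();
}
```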
@@ -1128,7 +1128,7 @@ void AbstractNode::OnCustomEventCallback(const uint32_t &event) { } } -bool AbstractNode::IsWorkerOrServer0(const std::unordered_map &info) { +bool AbstractNode::IsWorkerOrServer0(const mindspore::HashMap &info) { for (const auto &it : info) { if (it.second.is_alive == true && it.second.node_role_ == NodeRole::WORKER) { return true; diff --git a/mindspore/ccsrc/ps/core/abstract_node.h b/mindspore/ccsrc/ps/core/abstract_node.h index 16bc9b30500..78a04960c46 100644 --- a/mindspore/ccsrc/ps/core/abstract_node.h +++ b/mindspore/ccsrc/ps/core/abstract_node.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -22,8 +22,8 @@ #include #include #include -#include +#include "utils/hash_map.h" #include "ps/core/node.h" #include "ps/core/communicator/message.h" #include "ps/core/follower_scaler.h" @@ -208,7 +208,7 @@ class AbstractNode : public Node { // Trigger the callback corresponding to the custom event. void OnCustomEventCallback(const uint32_t &event); - bool IsWorkerOrServer0(const std::unordered_map &info); + bool IsWorkerOrServer0(const mindspore::HashMap &info); void CreateTcpServer(); @@ -229,13 +229,13 @@ class AbstractNode : public Node { std::condition_variable receive_cond_; // the key is rank_id, the value is rank_id's expected request_id - std::unordered_map expected_rank_request_ids_; + mindspore::HashMap expected_rank_request_ids_; // the key is rank_id, the value is rank_id's actual request_id - std::unordered_map actual_rank_request_ids_; + mindspore::HashMap actual_rank_request_ids_; std::mutex rank_request_ids_mutex; timeval scheduler_time_{0, 0}; - std::unordered_map handlers_; - std::unordered_map server_handler_; + mindspore::HashMap handlers_; + mindspore::HashMap server_handler_; // Workers and servers launch the server to process command: FINISH,SCALE_OUT,SCALE_IN,SEND_METADATA std::shared_ptr server_; @@ -269,10 +269,10 @@ class AbstractNode : public Node { uint16_t scheduler_port_; // Synchronize all node metadata from the scheduler. 
- std::unordered_map all_nodes_info_; + mindspore::HashMap all_nodes_info_; RequestHandler request_handler_; - std::unordered_map> communicators_; + mindspore::HashMap> communicators_; std::mutex communicator_mutex_; }; } // namespace core diff --git a/mindspore/ccsrc/ps/core/communicator/communicator_base.h b/mindspore/ccsrc/ps/core/communicator/communicator_base.h index c01aec8fbbc..60b80c208f9 100644 --- a/mindspore/ccsrc/ps/core/communicator/communicator_base.h +++ b/mindspore/ccsrc/ps/core/communicator/communicator_base.h @@ -19,10 +19,10 @@ #include #include -#include #include #include +#include "utils/hash_map.h" #include "ps/core/communicator/message_handler.h" #include "utils/log_adapter.h" #include "ps/core/communicator/http_message_handler.h" @@ -87,7 +87,7 @@ class CommunicatorBase { bool running() const; protected: - std::unordered_map msg_callbacks_; + mindspore::HashMap msg_callbacks_; std::thread running_thread_; bool running_; }; diff --git a/mindspore/ccsrc/ps/core/communicator/http_communicator.h b/mindspore/ccsrc/ps/core/communicator/http_communicator.h index c5385cf63ca..064f123da4c 100644 --- a/mindspore/ccsrc/ps/core/communicator/http_communicator.h +++ b/mindspore/ccsrc/ps/core/communicator/http_communicator.h @@ -19,7 +19,7 @@ #include #include -#include +#include "utils/hash_map.h" #include "ps/core/communicator/http_server.h" #include "ps/core/communicator/http_message_handler.h" #include "ps/core/communicator/task_executor.h" @@ -46,7 +46,7 @@ class HttpCommunicator : public CommunicatorBase { private: std::shared_ptr task_executor_; std::shared_ptr http_server_; - std::unordered_map http_msg_callbacks_; + mindspore::HashMap http_msg_callbacks_; std::string ip_; uint16_t port_; diff --git a/mindspore/ccsrc/ps/core/communicator/http_request_handler.cc b/mindspore/ccsrc/ps/core/communicator/http_request_handler.cc index 191907b88d5..3bb2ef2a08e 100644 --- a/mindspore/ccsrc/ps/core/communicator/http_request_handler.cc +++ b/mindspore/ccsrc/ps/core/communicator/http_request_handler.cc @@ -26,7 +26,7 @@ HttpRequestHandler::~HttpRequestHandler() { } } -bool HttpRequestHandler::Initialize(int fd, const std::unordered_map &handlers) { +bool HttpRequestHandler::Initialize(int fd, const mindspore::HashMap &handlers) { evbase_ = event_base_new(); MS_EXCEPTION_IF_NULL(evbase_); struct evhttp *http = evhttp_new(evbase_); diff --git a/mindspore/ccsrc/ps/core/communicator/http_request_handler.h b/mindspore/ccsrc/ps/core/communicator/http_request_handler.h index 43048525fd9..f905e0e62f4 100644 --- a/mindspore/ccsrc/ps/core/communicator/http_request_handler.h +++ b/mindspore/ccsrc/ps/core/communicator/http_request_handler.h @@ -25,8 +25,8 @@ #include #include -#include +#include "utils/hash_map.h" #include "utils/log_adapter.h" #include "ps/core/communicator/http_message_handler.h" #include "ps/core/communicator/ssl_http.h" @@ -46,7 +46,7 @@ class HttpRequestHandler { HttpRequestHandler() : evbase_(nullptr) {} virtual ~HttpRequestHandler(); - bool Initialize(int fd, const std::unordered_map &handlers); + bool Initialize(int fd, const mindspore::HashMap &handlers); void Run(); bool Stop(); static bufferevent *BuffereventCallback(event_base *base, void *arg); diff --git a/mindspore/ccsrc/ps/core/communicator/http_server.h b/mindspore/ccsrc/ps/core/communicator/http_server.h index 096dc83adca..e161ac4e6eb 100644 --- a/mindspore/ccsrc/ps/core/communicator/http_server.h +++ b/mindspore/ccsrc/ps/core/communicator/http_server.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies 
Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -17,8 +17,6 @@ #ifndef MINDSPORE_CCSRC_PS_CORE_COMMUNICATOR_HTTP_SERVER_H_ #define MINDSPORE_CCSRC_PS_CORE_COMMUNICATOR_HTTP_SERVER_H_ -#include "ps/core/communicator/http_message_handler.h" - #include #include #include @@ -36,9 +34,10 @@ #include #include #include -#include #include +#include "utils/hash_map.h" +#include "ps/core/communicator/http_message_handler.h" #include "ps/core/communicator/http_request_handler.h" namespace mindspore { @@ -78,7 +77,7 @@ class HttpServer { std::vector> worker_threads_; std::vector> http_request_handlers; int32_t backlog_; - std::unordered_map request_handlers_; + mindspore::HashMap request_handlers_; int fd_; }; } // namespace core diff --git a/mindspore/ccsrc/ps/core/communicator/tcp_communicator.h b/mindspore/ccsrc/ps/core/communicator/tcp_communicator.h index 61bacc42ab4..f81c57d37b0 100644 --- a/mindspore/ccsrc/ps/core/communicator/tcp_communicator.h +++ b/mindspore/ccsrc/ps/core/communicator/tcp_communicator.h @@ -21,7 +21,7 @@ #include #include #include -#include +#include "utils/hash_map.h" #include "proto/ps.pb.h" #include "ps/core/server_node.h" #include "ps/core/cluster_metadata.h" @@ -36,7 +36,7 @@ namespace mindspore { namespace ps { namespace core { -const std::unordered_map kUserCommandToMsgType = { +const mindspore::HashMap kUserCommandToMsgType = { {TcpUserCommand::kPush, "push"}, {TcpUserCommand::kPull, "pull"}, {TcpUserCommand::kCount, "count"}, diff --git a/mindspore/ccsrc/ps/core/configuration.h b/mindspore/ccsrc/ps/core/configuration.h index 21047f7544e..4a92de9847f 100644 --- a/mindspore/ccsrc/ps/core/configuration.h +++ b/mindspore/ccsrc/ps/core/configuration.h @@ -25,8 +25,8 @@ #include #include #include -#include +#include "utils/hash_map.h" #include "ps/constants.h" #include "utils/log_adapter.h" diff --git a/mindspore/ccsrc/ps/core/file_configuration.h b/mindspore/ccsrc/ps/core/file_configuration.h index 2a2564e9203..223f7ecceaa 100644 --- a/mindspore/ccsrc/ps/core/file_configuration.h +++ b/mindspore/ccsrc/ps/core/file_configuration.h @@ -25,8 +25,8 @@ #include #include #include -#include +#include "utils/hash_map.h" #include "ps/constants.h" #include "utils/log_adapter.h" #include "ps/core/comm_util.h" diff --git a/mindspore/ccsrc/ps/core/node.cc b/mindspore/ccsrc/ps/core/node.cc index e5a0f90c9a1..b74e4a26c43 100644 --- a/mindspore/ccsrc/ps/core/node.cc +++ b/mindspore/ccsrc/ps/core/node.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
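The `kUserCommandToMsgType` constant in tcp_communicator.h above keeps its brace initializer across the swap. Reconstructed with the key/value types implied by the entries (the template arguments themselves were lost in extraction; the enum here is a stand-in):

```cpp
#include <string>
#include "utils/hash_map.h"

enum class TcpUserCommand { kPush, kPull, kCount };  // stand-in for the real enum

const mindspore::HashMap<TcpUserCommand, std::string> kUserCommandToMsgType = {
    {TcpUserCommand::kPush, "push"},
    {TcpUserCommand::kPull, "pull"},
    {TcpUserCommand::kCount, "count"},
};

std::string MsgTypeOf(TcpUserCommand cmd) {
  // lookup reads the same as on std::unordered_map; robin_hood hashes
  // enum keys out of the box
  auto iter = kUserCommandToMsgType.find(cmd);
  return iter == kUserCommandToMsgType.end() ? std::string() : iter->second;
}
```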
@@ -159,8 +159,8 @@ void Node::ProcessSendDataResp(const std::shared_ptr &meta, const P if (it != receive_messages_.end()) { it->second[rank_id] = received_data; } else { - std::unordered_map res; - (void)res.insert(std::make_pair(rank_id, received_data)); + mindspore::HashMap res; + (void)res.emplace(rank_id, received_data); receive_messages_[request_id] = res; } } else { @@ -176,8 +176,8 @@ void Node::ProcessSendDataResp(const std::shared_ptr &meta, const P if (it != workder_receive_messages_.end()) { it->second[rank_id] = received_data; } else { - std::unordered_map res; - (void)res.insert(std::make_pair(rank_id, received_data)); + mindspore::HashMap res; + (void)res.emplace(rank_id, received_data); workder_receive_messages_[request_id] = res; } } diff --git a/mindspore/ccsrc/ps/core/node.h b/mindspore/ccsrc/ps/core/node.h index cceca75848d..d0da5291f7b 100644 --- a/mindspore/ccsrc/ps/core/node.h +++ b/mindspore/ccsrc/ps/core/node.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -24,13 +24,13 @@ #include #include #include -#include #include #include #include #include #include +#include "utils/hash_map.h" #include "ps/core/cluster_metadata.h" #include "ps/core/cluster_config.h" #include "ps/ps_context.h" @@ -108,7 +108,7 @@ class Node { std::mutex finish_mutex_; // the key is: request_id, the value is: - std::unordered_map> message_tracker_; + mindspore::HashMap> message_tracker_; std::mutex message_tracker_mutex_; std::condition_variable message_tracker_cond_; @@ -128,13 +128,13 @@ class Node { std::mutex client_mutex_; // the key is: request_id - std::unordered_map message_callbacks_; + mindspore::HashMap message_callbacks_; std::mutex message_callbacks_mutex_; // the key is: request_id, the value is: - std::unordered_map> receive_messages_; + mindspore::HashMap> receive_messages_; // the key is: request_id, the value is: - std::unordered_map> workder_receive_messages_; + mindspore::HashMap> workder_receive_messages_; std::map, bool> receive_messages_done_; std::mutex receive_messages_mutex_; }; diff --git a/mindspore/ccsrc/ps/core/node_manager.cc b/mindspore/ccsrc/ps/core/node_manager.cc index 410f15de710..4783b0b055b 100644 --- a/mindspore/ccsrc/ps/core/node_manager.cc +++ b/mindspore/ccsrc/ps/core/node_manager.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
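The `insert(std::make_pair(...))` to `emplace(...)` rewrite in `Node::ProcessSendDataResp` above, isolated. The element types are assumptions (rank ids are `uint32_t` elsewhere in ps/core, and the payload a shared pointer); the point is the call shape:

```cpp
#include <cstdint>
#include <memory>
#include <vector>
#include "utils/hash_map.h"

using VectorPtr = std::shared_ptr<std::vector<unsigned char>>;  // assumed payload type

void Record(mindspore::HashMap<uint32_t, VectorPtr> *res, uint32_t rank_id,
            const VectorPtr &received_data) {
  // before: builds a temporary std::pair, then copies/moves it into the table
  // (void)res->insert(std::make_pair(rank_id, received_data));
  // after: forwards both arguments directly to the stored pair's constructor
  (void)res->emplace(rank_id, received_data);
}
```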
@@ -223,9 +223,9 @@ bool NodeManager::IsAllNodesScaleOutDone() const { bool NodeManager::IsAllNodesScaleInDone() const { return SizeToInt(scale_in_done_nodes_id_.size()) == total_node_num_; } -const std::unordered_map &NodeManager::nodes_info() const { return nodes_info_; } +const mindspore::HashMap &NodeManager::nodes_info() const { return nodes_info_; } -const std::unordered_map &NodeManager::registered_nodes_info() const { +const mindspore::HashMap &NodeManager::registered_nodes_info() const { return registered_nodes_info_; } diff --git a/mindspore/ccsrc/ps/core/node_manager.h b/mindspore/ccsrc/ps/core/node_manager.h index fd0b297a2cf..5b787f61139 100644 --- a/mindspore/ccsrc/ps/core/node_manager.h +++ b/mindspore/ccsrc/ps/core/node_manager.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -26,13 +26,13 @@ #include #include #include -#include #include #include -#include #include #include +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "ps/core/node.h" #include "utils/log_adapter.h" #include "utils/convert_utils_base.h" @@ -86,8 +86,8 @@ class NodeManager { // nodes and Determine whether the nodes are equal to total_node_num_. bool IsAllNodesScaleInDone() const; - const std::unordered_map &nodes_info() const; - const std::unordered_map ®istered_nodes_info() const; + const mindspore::HashMap &nodes_info() const; + const mindspore::HashMap ®istered_nodes_info() const; // After all the nodes are registered successfully, the nodes info can be updated. void UpdateNodesInfo(); @@ -124,22 +124,22 @@ class NodeManager { std::atomic next_server_rank_id_; // Whenever a node is registered, it will be stored in this map. - std::unordered_map registered_nodes_info_; + mindspore::HashMap registered_nodes_info_; // When all nodes are registered successfully, then all nodes info will be stored in this map. In other words, the // nodes_info_ is a snapshot of the registered_nodes_info_. - std::unordered_map nodes_info_; + mindspore::HashMap nodes_info_; std::mutex assign_rank_id_mutex_; std::mutex heartbeat_mutex_; - std::unordered_map heartbeats_; + mindspore::HashMap heartbeats_; // timeout nodes - std::unordered_map timeout_nodes_info_; - std::unordered_set finish_nodes_id_; + mindspore::HashMap timeout_nodes_info_; + mindspore::HashSet finish_nodes_id_; // The scheduler aggregates scale_out_done messages from workers/servers - std::unordered_set scale_out_done_nodes_id_; + mindspore::HashSet scale_out_done_nodes_id_; // The scheduler aggregates scale_in_done messages from workers/servers - std::unordered_set scale_in_done_nodes_id_; + mindspore::HashSet scale_in_done_nodes_id_; // Cluster metadata information can be dynamically changed std::unique_ptr meta_data_; diff --git a/mindspore/ccsrc/ps/core/scheduler_node.cc b/mindspore/ccsrc/ps/core/scheduler_node.cc index e8bed92d202..2e16e0edb83 100644 --- a/mindspore/ccsrc/ps/core/scheduler_node.cc +++ b/mindspore/ccsrc/ps/core/scheduler_node.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
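Since `NodeManager::nodes_info()` and `registered_nodes_info()` above now return a different container type, only call sites that spell out `std::unordered_map` need touching; those written with `auto` compile unchanged. A sketch under stand-in types:

```cpp
#include <string>
#include "utils/hash_map.h"

struct NodeInfo {};  // stand-in for ps::core::NodeInfo

class NodeManagerSketch {
 public:
  const mindspore::HashMap<std::string, NodeInfo> &nodes_info() const { return nodes_info_; }

 private:
  mindspore::HashMap<std::string, NodeInfo> nodes_info_;
};

void Dump(const NodeManagerSketch &node_manager) {
  const auto &infos = node_manager.nodes_info();  // unaffected by the container swap
  for (const auto &kv : infos) {
    (void)kv.first;  // node id; kv.second is the NodeInfo
  }
}
```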
@@ -15,6 +15,8 @@ */ #include "ps/core/scheduler_node.h" +#include +#include namespace mindspore { namespace ps { @@ -635,7 +637,7 @@ void SchedulerNode::ProcessScaleIn(const std::shared_ptr &re MS_LOG(WARNING) << "The scale in node ids:" << scale_in_node_ids_; - std::unordered_map scale_in_nodes; + mindspore::HashMap scale_in_nodes; int32_t scale_worker_num = 0; int32_t scale_server_num = 0; @@ -709,7 +711,7 @@ void SchedulerNode::ProcessGetNodesInfo(const std::shared_ptrAddRespString(js.dump()); @@ -763,7 +765,7 @@ void SchedulerNode::ProcessNewInstance(const std::shared_ptr uint64_t request_id = AddMessageTrack(node_manager_.server_num()); - std::unordered_map outputs; + mindspore::HashMap outputs; set_message_callback(request_id, [&]() { receive_messages_mutex_.lock(); @@ -817,7 +819,7 @@ void SchedulerNode::ProcessQueryInstance(const std::shared_ptr outputs; + mindspore::HashMap outputs; set_message_callback(request_id, [&]() { receive_messages_mutex_.lock(); @@ -871,7 +873,7 @@ void SchedulerNode::ProcessEnableFLS(const std::shared_ptr & uint64_t request_id = AddMessageTrack(node_manager_.server_num()); - std::unordered_map outputs; + mindspore::HashMap outputs; set_message_callback(request_id, [&]() { receive_messages_mutex_.lock(); @@ -927,7 +929,7 @@ void SchedulerNode::ProcessDisableFLS(const std::shared_ptr uint64_t request_id = AddMessageTrack(node_manager_.server_num()); - std::unordered_map outputs; + mindspore::HashMap outputs; set_message_callback(request_id, [&]() { receive_messages_mutex_.lock(); diff --git a/mindspore/ccsrc/ps/core/scheduler_node.h b/mindspore/ccsrc/ps/core/scheduler_node.h index 868b4fcfa8d..b70f40fd886 100644 --- a/mindspore/ccsrc/ps/core/scheduler_node.h +++ b/mindspore/ccsrc/ps/core/scheduler_node.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -25,8 +25,8 @@ #include #include #include -#include +#include "utils/hash_map.h" #include "ps/core/cluster_config.h" #include "ps/ps_context.h" #include "ps/core/communicator/tcp_client.h" @@ -147,7 +147,7 @@ class SchedulerNode : public Node { std::shared_ptr server_; std::unique_ptr scheduler_thread_; std::unique_ptr update_state_thread_; - std::unordered_map handlers_; + mindspore::HashMap handlers_; NodeManager node_manager_; @@ -155,14 +155,14 @@ class SchedulerNode : public Node { std::unique_ptr restful_thread_; std::shared_ptr http_server_; - std::unordered_map> connected_nodes_; + mindspore::HashMap> connected_nodes_; std::unique_ptr client_thread_; std::atomic is_client_started_; std::unique_ptr leader_scaler_; - std::unordered_map callbacks_; + mindspore::HashMap callbacks_; // Used to persist and obtain metadata information for scheduler. std::unique_ptr scheduler_recovery_; diff --git a/mindspore/ccsrc/ps/core/server_node.h b/mindspore/ccsrc/ps/core/server_node.h index 81521c180ab..73cc8e35286 100644 --- a/mindspore/ccsrc/ps/core/server_node.h +++ b/mindspore/ccsrc/ps/core/server_node.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -24,8 +24,8 @@ #include #include #include -#include +#include "utils/hash_map.h" #include "ps/core/cluster_metadata.h" #include "ps/core/cluster_config.h" #include "ps/ps_context.h" diff --git a/mindspore/ccsrc/ps/parameter_server.cc b/mindspore/ccsrc/ps/parameter_server.cc index 91a092801ea..323657ee451 100644 --- a/mindspore/ccsrc/ps/parameter_server.cc +++ b/mindspore/ccsrc/ps/parameter_server.cc @@ -504,7 +504,7 @@ void ParameterServer::GetEmbeddingTableParamPtr() { MS_EXCEPTION_IF_NULL(embedding_table); if (embedding_table->isa()) { MS_LOG(INFO) << "Embedding table name is " << embedding_table->fullname_with_scope() << ", key is " << count; - embedding_tables_.insert(std::make_pair(count, embedding_table->cast())); + (void)embedding_tables_.emplace(count, embedding_table->cast()); count++; } } diff --git a/mindspore/ccsrc/ps/parameter_server.h b/mindspore/ccsrc/ps/parameter_server.h index b84b5fdd103..9bc7750d169 100644 --- a/mindspore/ccsrc/ps/parameter_server.h +++ b/mindspore/ccsrc/ps/parameter_server.h @@ -18,7 +18,6 @@ #define MINDSPORE_CCSRC_PS_PARAMETER_SERVER_H_ #include -#include #include #include #include @@ -34,6 +33,7 @@ #include #include +#include "utils/hash_map.h" #include "ir/func_graph.h" #include "backend/session/session_basic.h" #include "backend/session/anf_runtime_algorithm.h" @@ -111,11 +111,11 @@ class ParameterServer { private: ParameterServer *ps_; typedef void (ServerHandler::*RequestHandler)(const DataPtr &data, size_t size, const VectorPtr &res); - std::unordered_map handlers_; - std::unordered_map commands_; - std::unordered_map init_weights_; - std::unordered_map init_weight_to_optim_; - std::unordered_map init_optim_info_; + mindspore::HashMap handlers_; + mindspore::HashMap commands_; + mindspore::HashMap init_weights_; + mindspore::HashMap init_weight_to_optim_; + mindspore::HashMap init_optim_info_; }; bool Init(const FuncGraphPtr &func_graph); @@ -156,19 +156,19 @@ class ParameterServer { // Used to cache embedding table parameter, key: parameter name, value: parameter node pointer std::map embedding_parameter_tables_; - std::unordered_map> optimizers_; - std::unordered_map optim_inputs_shape_; - std::unordered_map original_optim_inputs_shape_; - std::unordered_map> optim_infos_; - std::unordered_map> optim_info_builders_; - std::unordered_map weight_key_to_optims_; - std::unordered_map weight_key_to_optim_op_; - std::unordered_map weights_; - std::unordered_map is_embedding_; - std::unordered_map grads_; - std::unordered_map grads_accum_counter_; - std::unordered_map> embedding_lookup_ops_; - std::unordered_map tokens_; + mindspore::HashMap> optimizers_; + mindspore::HashMap optim_inputs_shape_; + mindspore::HashMap original_optim_inputs_shape_; + mindspore::HashMap> optim_infos_; + mindspore::HashMap> optim_info_builders_; + mindspore::HashMap weight_key_to_optims_; + mindspore::HashMap weight_key_to_optim_op_; + mindspore::HashMap weights_; + mindspore::HashMap is_embedding_; + mindspore::HashMap grads_; + mindspore::HashMap grads_accum_counter_; + mindspore::HashMap> embedding_lookup_ops_; + mindspore::HashMap tokens_; std::mutex mutex_; std::condition_variable apply_grads_cv_; diff --git a/mindspore/ccsrc/ps/ps_cache/embedding_hash_map.h b/mindspore/ccsrc/ps/ps_cache/embedding_hash_map.h index d2a1482c733..d920344fafd 100644 --- a/mindspore/ccsrc/ps/ps_cache/embedding_hash_map.h +++ b/mindspore/ccsrc/ps/ps_cache/embedding_hash_map.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies 
Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -21,7 +21,7 @@ #include #include #include -#include +#include "utils/hash_map.h" #include "utils/convert_utils_base.h" namespace mindspore { @@ -63,7 +63,7 @@ class EmbeddingHashMap { const size_t graph_running_step, size_t *const swap_out_size, bool *const need_wait_graph); size_t hash_step(const int hash_index) const { return hash_map_elements_[hash_index].step_; } void set_hash_step(const int hash_index, const size_t step) { hash_map_elements_[hash_index].set_step(step); } - const std::unordered_map &hash_id_to_index() const { return hash_id_to_index_; } + const mindspore::HashMap &hash_id_to_index() const { return hash_id_to_index_; } size_t hash_capacity() const { return hash_capacity_; } void DumpHashMap(); void Reset(); @@ -74,7 +74,7 @@ class EmbeddingHashMap { size_t hash_count_; size_t hash_capacity_; std::vector hash_map_elements_; - std::unordered_map hash_id_to_index_; + mindspore::HashMap hash_id_to_index_; size_t current_pos_; size_t current_batch_start_pos_; size_t graph_running_index_num_; diff --git a/mindspore/ccsrc/ps/util.cc b/mindspore/ccsrc/ps/util.cc index 4d19b4657b5..f2968ea97e3 100644 --- a/mindspore/ccsrc/ps/util.cc +++ b/mindspore/ccsrc/ps/util.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,30 +15,30 @@ */ #include "ps/util.h" -#include #include #include +#include "utils/hash_map.h" #include "ps/constants.h" #include "ps/ps_context.h" #include "utils/ms_utils.h" namespace mindspore { namespace ps { -std::unordered_map Util::optimizer_to_ids{ +mindspore::HashMap Util::optimizer_to_ids{ {kApplyMomentum, 0}, {kSparseAdam, 1}, {kSparseLazyAdam, 2}, {kSparseFtrl, 3}, }; -std::unordered_map Util::id_to_optimizers{ +mindspore::HashMap Util::id_to_optimizers{ {0, kApplyMomentum}, {1, kSparseAdam}, {2, kSparseLazyAdam}, {3, kSparseFtrl}, }; -std::unordered_map Util::id_to_optimizer_nodes{ +mindspore::HashMap Util::id_to_optimizer_nodes{ {0, kApplyMomentumOp}, {1, kSparseAdamOp}, {2, kSparseLazyAdamOp}, diff --git a/mindspore/ccsrc/ps/util.h b/mindspore/ccsrc/ps/util.h index ebadc534299..2ed3c0fe75b 100644 --- a/mindspore/ccsrc/ps/util.h +++ b/mindspore/ccsrc/ps/util.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
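The `Util` statics above keep the usual declaration-in-header / definition-in-.cc split. Reconstructed with assumed template arguments `<std::string, int64_t>` and illustrative constant values (both were stripped or live in ps/constants.h):

```cpp
#include <cstdint>
#include <string>
#include "utils/hash_map.h"

// stand-ins for the constants in ps/constants.h; the string values here
// are illustrative only
const char kApplyMomentum[] = "ApplyMomentum";
const char kSparseAdam[] = "SparseAdam";
const char kSparseLazyAdam[] = "SparseLazyAdam";
const char kSparseFtrl[] = "SparseFtrl";

struct UtilSketch {
  static mindspore::HashMap<std::string, int64_t> optimizer_to_ids;
};

mindspore::HashMap<std::string, int64_t> UtilSketch::optimizer_to_ids{
    {kApplyMomentum, 0},
    {kSparseAdam, 1},
    {kSparseLazyAdam, 2},
    {kSparseFtrl, 3},
};
```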
@@ -20,7 +20,7 @@ #include #include #include -#include +#include "utils/hash_map.h" #include "frontend/optimizer/optimizer.h" #include "backend/session/anf_runtime_algorithm.h" #include "backend/kernel_compiler/common_utils.h" @@ -61,9 +61,9 @@ class Util { const std::string &fused_cnode_name); static kernel::KernelBuildInfoPtr GenerateKernelBuildInfo(const std::vector &node_list); - static std::unordered_map optimizer_to_ids; - static std::unordered_map id_to_optimizers; - static std::unordered_map id_to_optimizer_nodes; + static mindspore::HashMap optimizer_to_ids; + static mindspore::HashMap id_to_optimizers; + static mindspore::HashMap id_to_optimizer_nodes; static int64_t rank_id_; }; } // namespace ps diff --git a/mindspore/ccsrc/ps/worker.cc b/mindspore/ccsrc/ps/worker.cc index b1fcdce6d15..1c107a88b7e 100644 --- a/mindspore/ccsrc/ps/worker.cc +++ b/mindspore/ccsrc/ps/worker.cc @@ -312,7 +312,7 @@ bool Worker::DoPSEmbeddingLookup(const Key &key, const std::vector &lookup_ return false; } int64_t single_id_len = SizeToLong(lookup_result->size() / lookup_ids.size()); - std::unordered_map>> id_addr_map; + mindspore::HashMap>> id_addr_map; std::shared_ptr> values = std::make_shared>(); std::shared_ptr> keys = std::make_shared>(); int64_t value_offset = 0; @@ -534,7 +534,7 @@ bool Worker::IsReadyForPull(const Key &key) { } } -void Worker::PrepareSparseGradient(const size_t, const size_t, const std::unordered_set &distinct_ids, +void Worker::PrepareSparseGradient(const size_t, const size_t, const mindspore::HashSet &distinct_ids, const std::vector> &indice_to_grads, const int *all_indice, const size_t segment_size, float *gradient, int *indices) { MS_EXCEPTION_IF_NULL(all_indice); @@ -696,7 +696,7 @@ void Worker::LookupIdPartitioner(const EmbeddingTableLookup &send, PartitionEmbe const EmbeddingTableShardMetadata &range = ranges[i]; const auto &begin = range.begin(); const auto &end = range.end(); - std::unordered_set unique_ids; + mindspore::HashSet unique_ids; auto &kvs = partition->at(i).second; kvs.set_key(key); @@ -780,7 +780,7 @@ void Worker::SparsePartitioner(const KVMessage &send, PartitionKVMessages *parti // Prepare the sparse gradient and indice std::vector indice_ids; - std::unordered_set distinct_ids; + mindspore::HashSet distinct_ids; for (size_t j = 0; j < indice_size; j++) { size_t indice = static_cast(indice_data[j]); if (indice >= begin && indice <= end) { diff --git a/mindspore/ccsrc/ps/worker.h b/mindspore/ccsrc/ps/worker.h index 1b2325f4497..dbd6878d7c6 100644 --- a/mindspore/ccsrc/ps/worker.h +++ b/mindspore/ccsrc/ps/worker.h @@ -26,9 +26,9 @@ #include #include #include -#include -#include +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "utils/log_adapter.h" #include "ir/tensor.h" #include "ps/util.h" @@ -95,7 +95,7 @@ class Worker { void InitPSParamData(const std::vector &keys, void *const origin_addr, size_t size); bool IsReadyForPush(const Key &key); bool IsReadyForPull(const Key &key); - void PrepareSparseGradient(const size_t begin, const size_t end, const std::unordered_set &distinct_ids, + void PrepareSparseGradient(const size_t begin, const size_t end, const mindspore::HashSet &distinct_ids, const std::vector> &indice_to_grads, const int *all_indice, const size_t segment_size, float *gradient, int *indices); void BuildSparseValue(const std::vector &lengths, const size_t grad_index, const size_t indice_index, @@ -143,10 +143,10 @@ class Worker { KVPartitioner worker_init_embedding_partitioner_; KVPartitioner update_embedding_partitioner_; 
KVPartitioner broadcast_partitioner_; - std::unordered_map key_to_server_id_; - std::unordered_map embedding_row_cnt_; + mindspore::HashMap key_to_server_id_; + mindspore::HashMap embedding_row_cnt_; - std::unordered_map>> embedding_table_ranges_; + mindspore::HashMap>> embedding_table_ranges_; }; } // namespace ps } // namespace mindspore diff --git a/mindspore/ccsrc/pybind_api/ir/primitive_py.h b/mindspore/ccsrc/pybind_api/ir/primitive_py.h index a5a0e65ab37..4be06d5ad29 100644 --- a/mindspore/ccsrc/pybind_api/ir/primitive_py.h +++ b/mindspore/ccsrc/pybind_api/ir/primitive_py.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -21,9 +21,9 @@ #include #include #include -#include #include +#include "utils/hash_map.h" #include "abstract/abstract_value.h" #include "frontend/parallel/ops_info/operator_info.h" #include "ir/primitive.h" @@ -109,7 +109,7 @@ class PrimitivePyAdapter { friend PrimitivePy; std::string name_; PrimitivePyWeakPtr attached_primitive_; - std::unordered_map attrs_; + mindspore::HashMap attrs_; PrimType prim_type_{kPrimTypeBuiltIn}; bool is_const_prim_{false}; std::vector const_input_indexes_; diff --git a/mindspore/ccsrc/runtime/framework/actor/actor_common.h b/mindspore/ccsrc/runtime/framework/actor/actor_common.h index c60382b53c2..d27c119ebf9 100644 --- a/mindspore/ccsrc/runtime/framework/actor/actor_common.h +++ b/mindspore/ccsrc/runtime/framework/actor/actor_common.h @@ -19,10 +19,10 @@ #include #include -#include #include #include #include +#include "utils/hash_map.h" #include "mindrt/include/actor/op_actor.h" #include "runtime/device/device_address.h" #include "backend/session/anf_runtime_algorithm.h" diff --git a/mindspore/ccsrc/runtime/framework/actor/control_flow/entrance_actor.h b/mindspore/ccsrc/runtime/framework/actor/control_flow/entrance_actor.h index 3db793866fc..9bfdbc7e741 100644 --- a/mindspore/ccsrc/runtime/framework/actor/control_flow/entrance_actor.h +++ b/mindspore/ccsrc/runtime/framework/actor/control_flow/entrance_actor.h @@ -20,10 +20,10 @@ #include #include #include -#include #include #include #include +#include "utils/hash_map.h" #include "runtime/framework/actor/actor_common.h" #include "runtime/framework/actor/control_flow/control_actor.h" @@ -62,7 +62,7 @@ class EntranceActor : public ControlActor { bool is_actor_ready_{true}; // Input data with branch id. - std::unordered_map> input_op_data_with_branch_id_; + mindspore::HashMap> input_op_data_with_branch_id_; // Call nodes are used to record the caller of the subgraph, and are used to connect the data arrow // and branch id arrow in the link process. 
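Worker's partitioners above use their sets (`unique_ids`, `distinct_ids`) purely for de-duplication. A minimal sketch of that pattern, assuming `int` elements (the hunk's template arguments were stripped in extraction):

```cpp
#include <vector>
#include "utils/hash_set.h"

std::vector<int> DistinctIds(const std::vector<int> &indice_ids) {
  mindspore::HashSet<int> distinct_ids;
  for (int id : indice_ids) {
    (void)distinct_ids.insert(id);  // repeated ids are dropped
  }
  return {distinct_ids.begin(), distinct_ids.end()};  // order unspecified
}
```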
diff --git a/mindspore/ccsrc/runtime/framework/actor/control_flow/exit_actor.h b/mindspore/ccsrc/runtime/framework/actor/control_flow/exit_actor.h index fedec1e87cf..ccd09b91272 100644 --- a/mindspore/ccsrc/runtime/framework/actor/control_flow/exit_actor.h +++ b/mindspore/ccsrc/runtime/framework/actor/control_flow/exit_actor.h @@ -20,9 +20,9 @@ #include #include #include -#include #include #include +#include "utils/hash_map.h" #include "runtime/framework/actor/actor_common.h" #include "runtime/framework/actor/control_flow/control_actor.h" @@ -41,13 +41,13 @@ class ExitActor : public ControlActor { void Init() override; - const std::unordered_map> &output_branch_control_arrows() const { + const mindspore::HashMap> &output_branch_control_arrows() const { return output_branch_control_arrows_; } - const std::unordered_map> &output_branch_data_arrows() const { + const mindspore::HashMap> &output_branch_data_arrows() const { return output_branch_data_arrows_; } - const std::unordered_map> &output_branch_partial_arrows() const { + const mindspore::HashMap> &output_branch_partial_arrows() const { return output_branch_partial_arrows_; } @@ -61,9 +61,9 @@ class ExitActor : public ControlActor { // Exit actor will send to different actors according to different callers, so the output data, control, // and partial arrows will have branch. - std::unordered_map> output_branch_data_arrows_; - std::unordered_map> output_branch_control_arrows_; - std::unordered_map> output_branch_partial_arrows_; + mindspore::HashMap> output_branch_data_arrows_; + mindspore::HashMap> output_branch_control_arrows_; + mindspore::HashMap> output_branch_partial_arrows_; // The exit actor needs to create a new device address and take out the ptr from the device tensor come from // the kernel actor. These new created device tensors are stored in the created device tensors. @@ -73,7 +73,7 @@ class ExitActor : public ControlActor { std::vector is_need_copy_device_tensors_; // Output data. // The output branch data corresponds to the output_data_arrows_ one by one. - std::unordered_map>>> output_branch_data_; + mindspore::HashMap>>> output_branch_data_; }; using ExitActorPtr = std::shared_ptr; diff --git a/mindspore/ccsrc/runtime/framework/actor/control_flow/gather_actor.h b/mindspore/ccsrc/runtime/framework/actor/control_flow/gather_actor.h index 03c2e7566d8..9ff48dd652f 100644 --- a/mindspore/ccsrc/runtime/framework/actor/control_flow/gather_actor.h +++ b/mindspore/ccsrc/runtime/framework/actor/control_flow/gather_actor.h @@ -20,11 +20,10 @@ #include #include #include -#include -#include #include #include #include +#include "utils/hash_map.h" #include "runtime/framework/actor/actor_common.h" #include "runtime/framework/actor/control_flow/control_actor.h" @@ -37,7 +36,7 @@ class GatherActor : public ControlActor { public: GatherActor(const std::string &name, const std::vector ¶meters, const AnfNodePtr &node); ~GatherActor() override = default; - const std::unordered_map> &output_data_with_branch_id_arrows() const { + const mindspore::HashMap> &output_data_with_branch_id_arrows() const { return output_data_with_branch_id_arrows_; } @@ -49,7 +48,7 @@ class GatherActor : public ControlActor { friend class ControlNodeScheduler; // There will be multiple output branches for gather actor according the funcgraph in partial. 
- std::unordered_map> output_data_with_branch_id_arrows_; + mindspore::HashMap> output_data_with_branch_id_arrows_; }; using GatherActorPtr = std::shared_ptr; diff --git a/mindspore/ccsrc/runtime/framework/actor/control_flow/stack_actor.h b/mindspore/ccsrc/runtime/framework/actor/control_flow/stack_actor.h index 4efd50d0d6a..c4738221373 100644 --- a/mindspore/ccsrc/runtime/framework/actor/control_flow/stack_actor.h +++ b/mindspore/ccsrc/runtime/framework/actor/control_flow/stack_actor.h @@ -20,8 +20,8 @@ #include #include #include -#include #include +#include "utils/hash_map.h" #include "runtime/framework/actor/actor_common.h" #include "runtime/framework/actor/control_flow/control_actor.h" @@ -58,7 +58,7 @@ class StackActor : public ControlActor { // The input data records that the stack actor is copied from the input nodes and needs to be stored in the // device tensor in the stack. - std::unordered_map>> input_parameter_data_; + mindspore::HashMap>> input_parameter_data_; // Input parameter data num represents the number of actor's input come from funcgraph itself, these inputs // will be ranked at the front of input. size_t input_parameter_data_num_{0}; diff --git a/mindspore/ccsrc/runtime/framework/actor/control_flow/switch_actor.h b/mindspore/ccsrc/runtime/framework/actor/control_flow/switch_actor.h index 3ec965e2cdd..c2c9600b0be 100644 --- a/mindspore/ccsrc/runtime/framework/actor/control_flow/switch_actor.h +++ b/mindspore/ccsrc/runtime/framework/actor/control_flow/switch_actor.h @@ -19,7 +19,6 @@ #include #include -#include #include #include #include "runtime/framework/actor/actor_common.h" diff --git a/mindspore/ccsrc/runtime/framework/actor/copy_actor.h b/mindspore/ccsrc/runtime/framework/actor/copy_actor.h index 39333f9a5fe..e5aa3e0d563 100644 --- a/mindspore/ccsrc/runtime/framework/actor/copy_actor.h +++ b/mindspore/ccsrc/runtime/framework/actor/copy_actor.h @@ -21,7 +21,7 @@ #include #include #include -#include +#include "utils/hash_map.h" #include "runtime/framework/actor/actor_common.h" #include "runtime/framework/actor/memory_aware_actor.h" #include "runtime/hardware/device_context.h" diff --git a/mindspore/ccsrc/runtime/framework/actor/data_prepare_actor.h b/mindspore/ccsrc/runtime/framework/actor/data_prepare_actor.h index 78638b86252..56b253af0c3 100644 --- a/mindspore/ccsrc/runtime/framework/actor/data_prepare_actor.h +++ b/mindspore/ccsrc/runtime/framework/actor/data_prepare_actor.h @@ -21,8 +21,8 @@ #include #include #include -#include #include +#include "utils/hash_map.h" #include "runtime/framework/graph_compiler.h" #include "runtime/framework/actor/actor_common.h" #include "runtime/framework/actor/data_source_actor.h" diff --git a/mindspore/ccsrc/runtime/framework/actor/data_source_actor.h b/mindspore/ccsrc/runtime/framework/actor/data_source_actor.h index 48f99e6fedd..1ca63efa1c9 100644 --- a/mindspore/ccsrc/runtime/framework/actor/data_source_actor.h +++ b/mindspore/ccsrc/runtime/framework/actor/data_source_actor.h @@ -20,9 +20,9 @@ #include #include #include -#include #include #include +#include "utils/hash_map.h" #include "runtime/framework/actor/actor_common.h" #include "runtime/framework/actor/debug_aware_actor.h" #include "runtime/hardware/device_context.h" @@ -142,7 +142,7 @@ class HostQueueDataSourceActor : public DataSourceActor { std::vector data_nodes_; // The location of the data node in the data source actor. 
- std::unordered_map data_node_position_map_; + mindspore::HashMap data_node_position_map_; }; using DataSourceActorPtr = std::shared_ptr; diff --git a/mindspore/ccsrc/runtime/framework/actor/kernel_actor.h b/mindspore/ccsrc/runtime/framework/actor/kernel_actor.h index 993dd33a07c..1ced3d83054 100644 --- a/mindspore/ccsrc/runtime/framework/actor/kernel_actor.h +++ b/mindspore/ccsrc/runtime/framework/actor/kernel_actor.h @@ -21,7 +21,7 @@ #include #include #include -#include +#include "utils/hash_map.h" #include "runtime/framework/actor/actor_common.h" #include "runtime/framework/actor/debug_aware_actor.h" #include "runtime/hardware/device_context.h" diff --git a/mindspore/ccsrc/runtime/framework/actor/loop_count_actor.h b/mindspore/ccsrc/runtime/framework/actor/loop_count_actor.h index 46bb10a1a22..f695738cd36 100644 --- a/mindspore/ccsrc/runtime/framework/actor/loop_count_actor.h +++ b/mindspore/ccsrc/runtime/framework/actor/loop_count_actor.h @@ -20,9 +20,9 @@ #include #include #include -#include #include #include +#include "utils/hash_map.h" #include "runtime/framework/actor/actor_common.h" #include "runtime/framework/actor/debug_aware_actor.h" #include "runtime/framework/device_tensor_store.h" diff --git a/mindspore/ccsrc/runtime/framework/actor/memory_manager_actor.h b/mindspore/ccsrc/runtime/framework/actor/memory_manager_actor.h index f1e0e25514f..7782e137a68 100644 --- a/mindspore/ccsrc/runtime/framework/actor/memory_manager_actor.h +++ b/mindspore/ccsrc/runtime/framework/actor/memory_manager_actor.h @@ -21,7 +21,7 @@ #include #include #include -#include +#include "utils/hash_map.h" #include "runtime/framework/actor/actor_common.h" #include "runtime/framework/device_tensor_store.h" #include "runtime/hardware/device_context.h" diff --git a/mindspore/ccsrc/runtime/framework/actor/output_actor.h b/mindspore/ccsrc/runtime/framework/actor/output_actor.h index a02fa517d1b..c0a9e446243 100644 --- a/mindspore/ccsrc/runtime/framework/actor/output_actor.h +++ b/mindspore/ccsrc/runtime/framework/actor/output_actor.h @@ -23,7 +23,7 @@ #include #include #include -#include +#include "utils/hash_map.h" #include "runtime/framework/control_node_parser.h" #include "runtime/framework/device_tensor_store.h" #include "runtime/framework/actor/actor_common.h" diff --git a/mindspore/ccsrc/runtime/framework/control_node_parser.cc b/mindspore/ccsrc/runtime/framework/control_node_parser.cc index b4865e2a87f..0c1d81efa10 100644 --- a/mindspore/ccsrc/runtime/framework/control_node_parser.cc +++ b/mindspore/ccsrc/runtime/framework/control_node_parser.cc @@ -139,14 +139,14 @@ std::vector TopoSortForFuncGraph(const FuncGraphPtr &root, FuncGra auto &sub_edges = iter->second; for (auto sub_iter = sub_edges.begin(); sub_iter != sub_edges.end();) { if (sub_iter->find(node) != sub_iter->end()) { - sub_edges.erase(sub_iter); + sub_iter = sub_edges.erase(sub_iter); } else { ++sub_iter; } } if (sub_edges.empty()) { que.push(iter->first); - edges->erase(iter++); + iter = edges->erase(iter); } else { ++iter; } @@ -381,7 +381,7 @@ void ControlNodeParser::ParseDeviceContextForFuncGraph(const std::vector &kernel_graphs, const std::vector &device_contexts, const FuncGraphToKernelGraph &func_graph_to_kernel_graphs) { - std::unordered_map kernel_graph_to_device_context; + mindspore::HashMap kernel_graph_to_device_context; for (size_t i = 0; i < kernel_graphs.size(); ++i) { kernel_graph_to_device_context[kernel_graphs[i]] = device_contexts[i]; } @@ -392,7 +392,7 @@ void ControlNodeParser::ParseDeviceContextForFuncGraph(const 
std::vectorparameters(); std::vector parameter_device_contexts(front_parameters.size(), default_context); - std::unordered_map front_parameter_to_device_context; + mindspore::HashMap front_parameter_to_device_context; for (const auto &kernel_graph : func_graph_to_kernel_graph.second) { const auto &backend_parameters = kernel_graph->parameters(); @@ -691,7 +691,7 @@ void ControlNodeParser::FetchFrontValueNode(DeviceContext *default_context) { } void ControlNodeParser::ParseFormalToRealParameter(const std::vector &control_nodes) { - std::unordered_map> formal_to_real_parameters; + mindspore::HashMap> formal_to_real_parameters; // The actual parameters of the function are divided into two parts: // 1. Input of partial node. diff --git a/mindspore/ccsrc/runtime/framework/control_node_parser.h b/mindspore/ccsrc/runtime/framework/control_node_parser.h index 137c1b2fb78..09788b96bf1 100644 --- a/mindspore/ccsrc/runtime/framework/control_node_parser.h +++ b/mindspore/ccsrc/runtime/framework/control_node_parser.h @@ -24,8 +24,8 @@ #include #include #include -#include #include +#include "utils/hash_map.h" #include "runtime/hardware/device_context.h" #include "backend/session/kernel_graph.h" @@ -60,15 +60,15 @@ const char kStackActorNameSuffix[] = "_StackActor"; using FrontToBackendNodeWithContext = std::map>>; using FrontToBackendKernelWithContext = std::map>; -using FuncGraphToKernelGraph = std::unordered_map>; -using HostParameterToWeight = std::unordered_map>; +using FuncGraphToKernelGraph = mindspore::HashMap>; +using HostParameterToWeight = mindspore::HashMap>; using NodeWithDeviceContext = std::set>; -using RealToFormalNode = std::unordered_map>; -using FormalToRealParameter = std::unordered_map>; -using RealToFormalParameter = std::unordered_map>; +using RealToFormalNode = mindspore::HashMap>; +using FormalToRealParameter = mindspore::HashMap>; +using RealToFormalParameter = mindspore::HashMap>; using KernelBuildInfoBuilder = kernel::KernelBuildInfo::KernelBuildInfoBuilder; -using FrontNodeToKernelGraph = std::unordered_map; -using FuncGraphCallRelation = std::unordered_map>>; +using FrontNodeToKernelGraph = mindspore::HashMap; +using FuncGraphCallRelation = mindspore::HashMap>>; // Check whether the parameter is a weight. In the control flow, weight is passed to the subgraph, and in the subgraph, // it is determined whether it is a weight. @@ -200,12 +200,12 @@ class ControlNodeParser { // different places. Therefore, a branch id is created for each funcgraph. When funcgraph is called, the branch // id needs to be sent to the gather actor corresponding to the funcgraph, and the gather will send the branch id // to its output switch actor. - std::unordered_map call_node_to_branch_id_; - std::unordered_map> call_node_to_func_graphs_; + mindspore::HashMap call_node_to_branch_id_; + mindspore::HashMap> call_node_to_func_graphs_; // host parameter to weights records the weights in the subgraph corresponding to the node in the root funcgraph. // When initializing the weights, all related weights need to be recorded as the same device tensor. HostParameterToWeight host_parameter_to_weights_; - std::unordered_map sub_front_node_to_root_front_node_; + mindspore::HashMap sub_front_node_to_root_front_node_; // The front value node saves all value nodes that are not in the kernel graph. These nodes are generally the // input of the control node. NodeWithDeviceContext front_value_nodes_; @@ -215,18 +215,18 @@ class ControlNodeParser { // The kernel graph of call exists in the front input node. 
// In the scene of funcgrarph recursive call, general input and call input are passed recursively, so a gather actor // is created for kernel graph which has a call input. - std::unordered_map call_input_kernel_graphs_; + mindspore::HashMap call_input_kernel_graphs_; // The dependency between kernel and call node in auto monad. - std::unordered_map kernel_to_call_nodes_; + mindspore::HashMap kernel_to_call_nodes_; // Control nodes without a control node input in the topological sorting of funcgraph. - std::unordered_map> func_graph_to_first_control_nodes_; + mindspore::HashMap> func_graph_to_first_control_nodes_; // In heterogeneous scenario, each parameter has its own device context type, so the device context corresponding // to the type needs to be parsed in advance so that it can add some copy operation in the scheduler. // 1. The device context type of the formal parameters of funcgraph. - std::unordered_map> func_graph_to_device_contexts_; + mindspore::HashMap> func_graph_to_device_contexts_; // 2. The device context type of the control node inputs. - std::unordered_map> control_node_to_device_contexts_; + mindspore::HashMap> control_node_to_device_contexts_; // Is control flow enable. bool is_inited_{false}; diff --git a/mindspore/ccsrc/runtime/framework/device_tensor_store.h b/mindspore/ccsrc/runtime/framework/device_tensor_store.h index de03a7d52bd..7bb7ff7a95b 100644 --- a/mindspore/ccsrc/runtime/framework/device_tensor_store.h +++ b/mindspore/ccsrc/runtime/framework/device_tensor_store.h @@ -18,8 +18,8 @@ #define MINDSPORE_CCSRC_RUNTIME_FRAMEWORK_DEVICE_TENSOR_STORE_H_ #include -#include #include +#include "utils/hash_map.h" #include "utils/ms_utils.h" #include "runtime/device/device_address.h" @@ -100,7 +100,7 @@ class DeviceTensorStore { // The data storage of device tensor. Key is the anf node, value is the vector which may contains the device // tensors from different devices. - std::unordered_map> device_tensors_; + mindspore::HashMap> device_tensors_; }; } // namespace runtime } // namespace mindspore diff --git a/mindspore/ccsrc/runtime/framework/graph_compiler.h b/mindspore/ccsrc/runtime/framework/graph_compiler.h index 5bddf8ee3b4..1db5d1438a9 100644 --- a/mindspore/ccsrc/runtime/framework/graph_compiler.h +++ b/mindspore/ccsrc/runtime/framework/graph_compiler.h @@ -20,9 +20,9 @@ #include #include #include -#include #include #include +#include "utils/hash_map.h" #include "runtime/hardware/device_context.h" #include "runtime/framework/actor/actor_common.h" #include "runtime/framework/control_node_parser.h" @@ -187,9 +187,9 @@ class GraphCompiler { void CreateDeviceAddressWithoutWorkspace(const KernelGraphPtr &graph, const DeviceContext *device_context) const; // Single op kernel graph cache for PyNative mode. - std::unordered_map run_op_graphs_; + mindspore::HashMap run_op_graphs_; // Single op kernel graph output nodes cache for PyNative mode. - std::unordered_map> run_op_graph_output_nodes_; + mindspore::HashMap> run_op_graph_output_nodes_; // The member variable 'session_' will be removed after removing session module. // Now all the GraphCompiler share the same 'session_'. 
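The `TopoSortForFuncGraph` hunk above carries one of the real bug fixes in this patch rather than a rename: `container.erase(iter++)` is safe on `std::unordered_map` (erasing one element leaves other iterators valid) but not on the robin-hood-backed alias, whose erase can relocate surviving elements and thereby invalidates the pre-incremented iterator. The portable idiom is to continue from erase's return value:

```cpp
#include <string>
#include "utils/hash_map.h"

void EraseNegative(mindspore::HashMap<std::string, int> *m) {
  for (auto iter = m->begin(); iter != m->end();) {
    if (iter->second < 0) {
      iter = m->erase(iter);  // valid on both std:: and robin_hood containers
    } else {
      ++iter;
    }
  }
}
```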
diff --git a/mindspore/ccsrc/runtime/framework/graph_scheduler.cc b/mindspore/ccsrc/runtime/framework/graph_scheduler.cc index 42e602f8082..437b05dd650 100644 --- a/mindspore/ccsrc/runtime/framework/graph_scheduler.cc +++ b/mindspore/ccsrc/runtime/framework/graph_scheduler.cc @@ -551,7 +551,7 @@ std::vector GraphScheduler::BuildDataSourceActor(const Graph std::vector data_source_actors; HostQueueDSActorPtr host_queue_ds_actor = nullptr; size_t data_node_position = 0; - std::unordered_map front_node_position_temp_map; + mindspore::HashMap front_node_position_temp_map; for (size_t i = 0; i < graph_compiler_info.graphs_.size(); ++i) { const auto &graph = graph_compiler_info.graphs_[i]; @@ -890,7 +890,7 @@ void GraphScheduler::LinkDataArrowInNonSinkMode(const KernelGraphPtr &graph, MS_EXCEPTION_IF_NULL(auto_monad_actors); MS_EXCEPTION_IF_NULL(communication_nodes); - const std::unordered_set auto_monad_prims = { + const mindspore::HashSet auto_monad_prims = { prim::kPrimDepend, prim::kPrimUpdateState, prim::kPrimLoad}; auto &execution_order = graph->execution_order(); // Foreach the execution order to link the actors. @@ -1173,7 +1173,7 @@ void GraphScheduler::LinkControlArrowByAutoMonad(AbstractActor *to_actor, const return; } - const std::unordered_set recursion_prims = { + const mindspore::HashSet recursion_prims = { prim::kPrimDepend, prim::kPrimUpdateState, prim::kPrimLoad, prim::kPrimMakeTuple}; // Get the real depend input by monad node which needs to link the control arrow. std::vector real_depend_inputs; diff --git a/mindspore/ccsrc/runtime/framework/graph_scheduler.h b/mindspore/ccsrc/runtime/framework/graph_scheduler.h index e5b79612e6c..5a37f998175 100644 --- a/mindspore/ccsrc/runtime/framework/graph_scheduler.h +++ b/mindspore/ccsrc/runtime/framework/graph_scheduler.h @@ -21,12 +21,12 @@ #include #include #include -#include -#include #include #include #include #include +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "runtime/framework/control_node_scheduler.h" #include "runtime/framework/actor/actor_set.h" #include "runtime/framework/graph_compiler.h" @@ -180,7 +180,7 @@ class GraphScheduler { void DumpDeviceTensorStore(const GraphCompilerInfo &graph_compiler_info, std::ofstream &ofs) const; // The global maps, only be cleared in the deconstruction. - std::unordered_map actors_; + mindspore::HashMap actors_; // The local maps and vectors, will be cleared at the end of each graph transform: // 1.The second element of pair represents the output index of op actor corresponding to the graph output front node. 
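The two constant primitive sets in graph_scheduler.cc above are plain membership tables. A sketch with an assumed `PrimitivePtr` element type (a `shared_ptr`; robin_hood provides hash specializations for std smart pointers, hashing the raw pointer, so no custom hasher is needed):

```cpp
#include <memory>
#include "utils/hash_set.h"

struct Primitive {};  // stand-in for mindspore's Primitive
using PrimitivePtr = std::shared_ptr<Primitive>;

bool IsAutoMonadPrim(const mindspore::HashSet<PrimitivePtr> &auto_monad_prims,
                     const PrimitivePtr &prim) {
  return auto_monad_prims.find(prim) != auto_monad_prims.end();
}
```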
diff --git a/mindspore/ccsrc/transform/express_ir/mindir_exporter.cc b/mindspore/ccsrc/transform/express_ir/mindir_exporter.cc index 43864196b80..72d4df14c44 100644 --- a/mindspore/ccsrc/transform/express_ir/mindir_exporter.cc +++ b/mindspore/ccsrc/transform/express_ir/mindir_exporter.cc @@ -16,11 +16,11 @@ #include #include -#include #include #include #include +#include "utils/hash_map.h" #include "ir/tensor.h" #include "ir/param_info.h" #include "ir/func_graph.h" @@ -36,7 +36,7 @@ using FloatPtr = std::shared_ptr; using IntPtr = std::shared_ptr; using UIntPtr = std::shared_ptr; // anf type to mindir type map -static std::unordered_map g_data_type_map = { +static mindspore::HashMap g_data_type_map = { {kNumberTypeBool, mind_ir::TensorProto_DataType_BOOL}, {kNumberTypeInt8, mind_ir::TensorProto_DataType_INT8}, {kNumberTypeInt16, mind_ir::TensorProto_DataType_INT16}, @@ -52,21 +52,21 @@ static std::unordered_map g_data_type_map = {kObjectTypeString, mind_ir::TensorProto_DataType_STRING}, }; -static std::unordered_map g_data_bits_int_map = { +static mindspore::HashMap g_data_bits_int_map = { {8, mind_ir::TensorProto_DataType_INT8}, {16, mind_ir::TensorProto_DataType_INT16}, {32, mind_ir::TensorProto_DataType_INT32}, {64, mind_ir::TensorProto_DataType_INT64}, }; -static std::unordered_map g_data_bits_uint_map = { +static mindspore::HashMap g_data_bits_uint_map = { {8, mind_ir::TensorProto_DataType_UINT8}, {16, mind_ir::TensorProto_DataType_UINT16}, {32, mind_ir::TensorProto_DataType_UINT32}, {64, mind_ir::TensorProto_DataType_UINT64}, }; -static std::unordered_map g_data_bits_float_map = { +static mindspore::HashMap g_data_bits_float_map = { {16, mind_ir::TensorProto_DataType_FLOAT16}, {32, mind_ir::TensorProto_DataType_FLOAT}, {64, mind_ir::TensorProto_DataType_FLOAT64}, diff --git a/mindspore/ccsrc/transform/express_ir/onnx_exporter.cc b/mindspore/ccsrc/transform/express_ir/onnx_exporter.cc index 21aec0b71e9..60f1384b702 100644 --- a/mindspore/ccsrc/transform/express_ir/onnx_exporter.cc +++ b/mindspore/ccsrc/transform/express_ir/onnx_exporter.cc @@ -17,11 +17,11 @@ #include #include #include -#include #include #include #include +#include "utils/hash_map.h" #include "ir/tensor.h" #include "ir/param_info.h" #include "ir/func_graph.h" @@ -353,14 +353,14 @@ class OpConvertRegistry { return registry; } - static const std::unordered_map &GetOpConvertMap() { return GetSingleton().op_map_; } + static const mindspore::HashMap &GetOpConvertMap() { return GetSingleton().op_map_; } void Clear() noexcept { op_map_.clear(); } private: OpConvertRegistry() {} - std::unordered_map op_map_; + mindspore::HashMap op_map_; }; class OnnxExporter { @@ -385,8 +385,8 @@ class OnnxExporter { void SetTensorProtoInfo(const ParameterPtr ¶m, onnx::TensorProto *tensor_proto); void MatchAndMark(const FuncGraphPtr &func_graph, const std::vector &nodes, - std::unordered_map *op_merged_infos_ptr); - void MatchAndMarkCNode(const CNodePtr &cnode, std::unordered_map *op_merged_infos_ptr); + mindspore::HashMap *op_merged_infos_ptr); + void MatchAndMarkCNode(const CNodePtr &cnode, mindspore::HashMap *op_merged_infos_ptr); void ExportNodes(const FuncGraphPtr &func_graph, std::map *node_map_ptr, onnx::GraphProto *graph_proto); @@ -540,7 +540,7 @@ void OnnxExporter::ExportParameters(const FuncGraphPtr &func_graph, onnx::GraphP onnx::TensorProto_DataType OnnxExporter::GetOnnxDataType(TypeId type_id) { // clang-format off - static std::unordered_map type_map = { + static mindspore::HashMap type_map = { {kNumberTypeBool, 
onnx::TensorProto_DataType_BOOL}, {kNumberTypeInt8, onnx::TensorProto_DataType_INT8}, {kNumberTypeInt16, onnx::TensorProto_DataType_INT16}, @@ -602,7 +602,7 @@ void OnnxExporter::SetTensorProtoInfo(const ParameterPtr ¶m, onnx::TensorPro } void OnnxExporter::MatchAndMark(const FuncGraphPtr &func_graph, const std::vector &nodes, - std::unordered_map *op_merged_infos_ptr) { + mindspore::HashMap *op_merged_infos_ptr) { auto &op_merged_infos = *op_merged_infos_ptr; for (auto &node : nodes) { @@ -631,7 +631,7 @@ void OnnxExporter::MatchAndMark(const FuncGraphPtr &func_graph, const std::vecto } void OnnxExporter::MatchAndMarkCNode(const CNodePtr &cnode, - std::unordered_map *op_merged_infos_ptr) { + mindspore::HashMap *op_merged_infos_ptr) { auto &op_merged_infos = *op_merged_infos_ptr; // MindSpore Conv + BiasAdd --> ONNX Conv if (cnode->IsApply(std::make_shared("BiasAdd")) && IsPrimitiveCNode(cnode->input(1), prim::kPrimConv2D)) { @@ -675,7 +675,7 @@ void OnnxExporter::ExportNodes(const FuncGraphPtr &func_graph, std::map nodes = TopoSort(func_graph->get_return(), SuccIncoming, AlwaysInclude); - std::unordered_map op_merged_infos; + mindspore::HashMap op_merged_infos; MatchAndMark(func_graph, nodes, &op_merged_infos); int count = -1; for (const AnfNodePtr &node : nodes) { diff --git a/mindspore/ccsrc/transform/graph_ir/convert.cc b/mindspore/ccsrc/transform/graph_ir/convert.cc index 4afb92bf456..4a12ba43251 100644 --- a/mindspore/ccsrc/transform/graph_ir/convert.cc +++ b/mindspore/ccsrc/transform/graph_ir/convert.cc @@ -431,7 +431,7 @@ void DfGraphConvertor::BuildSaveCheckpointGraph() { size_t index = 0; string name; - int32_t count_size = std::count_if(vars_.begin(), vars_.end(), [](const std::pair &it) { + int32_t count_size = std::count_if(vars_.begin(), vars_.end(), [](const auto &it) { return (it.second == nullptr || it.first.find("/") != std::string::npos); }); @@ -609,7 +609,7 @@ void DfGraphConvertor::TraceOutputFromTupleGetItem(const AnfNodePtr &anf_out) { auto op = handle.op; if (op != nullptr) { MS_LOG(INFO) << "op name: " << op->GetName() << ", op type: " << op->GetOpType() << ", out_name: " << handle.out; - graph_outputs_.emplace_back(std::make_pair(*op, handle.out)); + graph_outputs_.emplace_back(*op, handle.out); } else { MS_LOG(EXCEPTION) << "tuple_getitem: " << anf_out->fullname_with_scope() << " is not converted"; } @@ -628,7 +628,7 @@ void DfGraphConvertor::TraceOutput(const AnfNodePtr node) { if (node->isa()) { auto op = Convert(anf_out); if (op != nullptr) { - graph_outputs_.emplace_back(std::make_pair(*op, "")); + graph_outputs_.emplace_back(*op, ""); AddGraphConstInput(op); } return; @@ -678,7 +678,7 @@ void DfGraphConvertor::TraceOutput(const AnfNodePtr node) { } } MS_LOG(INFO) << "Add graph output: " << anf_out->fullname_with_scope() << ":" << index; - graph_outputs_.emplace_back(make_pair(*op, index)); + graph_outputs_.emplace_back(*op, index); } } } @@ -693,13 +693,13 @@ void DfGraphConvertor::TraceOutputFromParameter(const AnfNodePtr &anf_out) { OutHandler handle = it->second; auto op = handle.op; MS_LOG(INFO) << "op name: " << op->GetName() << ", op type: " << op->GetOpType() << ", out_name: " << handle.out; - graph_outputs_.emplace_back(make_pair(*op, handle.out)); + graph_outputs_.emplace_back(*op, handle.out); } else { // common parameter case auto op = Convert(anf_out); if (op != nullptr) { MS_LOG(INFO) << "op name: " << op->GetName() << ", op type: " << op->GetOpType(); - graph_outputs_.emplace_back(std::make_pair(*op, "")); + graph_outputs_.emplace_back(*op, 
""); } } } @@ -1840,7 +1840,7 @@ OperatorPtr DfGraphConvertor::ConvertCNode(const CNodePtr node) { (void)adpt->setAttr(op, node); // add into cache - (void)op_cache_.insert(std::make_pair(node.get(), op)); + (void)op_cache_.emplace(node.get(), op); DrawCNode(node, adpt); diff --git a/mindspore/ccsrc/transform/graph_ir/convert.h b/mindspore/ccsrc/transform/graph_ir/convert.h index 340a51d0d82..90167f8c7fc 100644 --- a/mindspore/ccsrc/transform/graph_ir/convert.h +++ b/mindspore/ccsrc/transform/graph_ir/convert.h @@ -23,13 +23,13 @@ #include #include #include -#include #include #include #include #include #include +#include "utils/hash_map.h" #include "ir/anf.h" #include "ir/func_graph.h" #include "transform/graph_ir/util.h" @@ -140,7 +140,7 @@ class DfGraphConvertor { std::ostringstream init_sout_; std::ostringstream checkpoint_sout_; std::ostringstream restore_checkpoint_sout_; - std::unordered_map op_draw_name_; + mindspore::HashMap op_draw_name_; std::map param_format_; AnfNodePtr TraceTupleGetItem(const CNodePtr &node, uint64_t *index); @@ -198,17 +198,17 @@ class DfGraphConvertor { std::shared_ptr save_ckp_graph_{nullptr}; std::shared_ptr restore_ckp_graph_{nullptr}; std::shared_ptr broadcast_graph_{nullptr}; - std::unordered_map branches_map_; - std::unordered_map op_cache_; - std::unordered_map> control_edge_cache_; - std::unordered_map> monad_control_edge_cache_; + mindspore::HashMap branches_map_; + mindspore::HashMap op_cache_; + mindspore::HashMap> control_edge_cache_; + mindspore::HashMap> monad_control_edge_cache_; /* record "tuple_getitem"<->"out_handler" mapping */ - std::unordered_map out_handle_cache_; + mindspore::HashMap out_handle_cache_; /* record "make_tuple"<->"out_handler vector" mapping */ - std::unordered_map>> tuple_out_handle_cache_; - std::unordered_map>> case_input_handle_cache_; - std::unordered_map params_; - std::unordered_map vars_; + mindspore::HashMap>> tuple_out_handle_cache_; + mindspore::HashMap>> case_input_handle_cache_; + mindspore::HashMap params_; + mindspore::HashMap vars_; std::vector> graph_outputs_; std::vector graph_const_inputs_; std::vector init_ops_; diff --git a/mindspore/ccsrc/transform/graph_ir/io_format_map.cc b/mindspore/ccsrc/transform/graph_ir/io_format_map.cc index 77eefeb6918..5c99ca25f0e 100644 --- a/mindspore/ccsrc/transform/graph_ir/io_format_map.cc +++ b/mindspore/ccsrc/transform/graph_ir/io_format_map.cc @@ -18,7 +18,7 @@ namespace mindspore { namespace transform { -std::unordered_map IOFormatMap::io_format_map_ = {{"BasicLSTMCell", "ND"}, +mindspore::HashMap IOFormatMap::io_format_map_ = {{"BasicLSTMCell", "ND"}, {"BasicLSTMCellInputGrad", "ND"}, {"BasicLSTMCellCStateGrad", "ND"}, {"Dequant", "ND"}, @@ -37,6 +37,6 @@ std::unordered_map IOFormatMap::io_format_map_ = {{"Ba {"Conv3DBackpropFilter", "format"}, {"Conv3DBackpropInput", "format"}, {"Conv3DTranspose", "format"}}; -std::unordered_map &IOFormatMap::get() { return io_format_map_; } +mindspore::HashMap &IOFormatMap::get() { return io_format_map_; } } // namespace transform } // namespace mindspore diff --git a/mindspore/ccsrc/transform/graph_ir/io_format_map.h b/mindspore/ccsrc/transform/graph_ir/io_format_map.h index c53a79103de..17d9935d1d6 100644 --- a/mindspore/ccsrc/transform/graph_ir/io_format_map.h +++ b/mindspore/ccsrc/transform/graph_ir/io_format_map.h @@ -16,17 +16,17 @@ #ifndef MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_IO_FORMAT_MAP_H_ #define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_IO_FORMAT_MAP_H_ -#include #include +#include "utils/hash_map.h" namespace mindspore { 
namespace transform { class IOFormatMap { public: - static std::unordered_map &get(); + static mindspore::HashMap &get(); private: - static std::unordered_map io_format_map_; + static mindspore::HashMap io_format_map_; }; } // namespace transform } // namespace mindspore diff --git a/mindspore/ccsrc/transform/graph_ir/op_adapter.cc b/mindspore/ccsrc/transform/graph_ir/op_adapter.cc index c93fad4cf0c..ee4c11afd04 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_adapter.cc +++ b/mindspore/ccsrc/transform/graph_ir/op_adapter.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -33,7 +33,7 @@ Status OpAdapterImpl::GenerateCustomOpInputMap(const CusOperatorPtr &op, const P MS_EXCEPTION_IF_NULL(op); MS_EXCEPTION_IF_NULL(prim); // Create the map of custom op from input index to input name. - std::unordered_map input_map; + mindspore::HashMap input_map; auto value = prim->GetAttr("input_names"); if (value == nullptr) { (*cus_output_map_)[prim->name()] = input_map; @@ -57,7 +57,7 @@ Status OpAdapterImpl::GenerateCustomOpOutputMap(const CusOperatorPtr &op, const MS_EXCEPTION_IF_NULL(op); MS_EXCEPTION_IF_NULL(prim); // Create the map of custom op from output index to output name. - std::unordered_map output_map; + mindspore::HashMap output_map; auto value = prim->GetAttr("output_names"); if (value == nullptr) { // generate a empty output_map for it @@ -127,7 +127,7 @@ Status OpAdapterImpl::SetCustomOpInput(const CusOperatorPtr &op, int index, cons if (it == cus_input_map_->end()) { return NOT_FOUND; } - std::unordered_map &input_map = it->second; + mindspore::HashMap &input_map = it->second; if ((input_map.find(index) != input_map.end())) { MS_LOG(DEBUG) << "Link op " << input->GetName() << " to " << op->GetName() << ":" << input_map[index]; @@ -164,7 +164,7 @@ Status OpAdapterImpl::SetCustomOpInput(const CusOperatorPtr &op, int index, cons return NOT_FOUND; } - std::unordered_map &input_map = it->second; + mindspore::HashMap &input_map = it->second; if ((handle.op != nullptr) && (input_map.find(index) != input_map.end())) { if (handle.out.empty()) { MS_LOG(DEBUG) << "Link op " << handle.op->GetName() << " to " << op->GetName() << ":" << input_map[index]; @@ -249,7 +249,7 @@ OutHandler OpAdapterImpl::getCustomOutput(const OperatorPtr &op, int index) { return OutHandler(); } - std::unordered_map &output_map = it->second; + mindspore::HashMap &output_map = it->second; if ((output_map.find(index) != output_map.end())) { return OutHandler(op, output_map[index]); @@ -293,7 +293,7 @@ Status OpAdapterImpl::UpdateSingleOutputDesc(const OperatorPtr &op, const abstra } auto cus_op = std::dynamic_pointer_cast(op); MS_EXCEPTION_IF_NULL(cus_op); - std::unordered_map output_map = (*cus_output_map_)[op->GetOpType()]; + mindspore::HashMap output_map = (*cus_output_map_)[op->GetOpType()]; (void)cus_op->UpdateOutputDesc(output_map[0], *desc); } else { if (output_map_.empty()) { @@ -435,7 +435,7 @@ void OpAdapterImpl::UpdateCustomOpInputDesc(const CusOperatorPtr &op, const AnfN return; } - std::unordered_map &input_map = (*cus_input_map_)[op->GetOpType()]; + mindspore::HashMap &input_map = (*cus_input_map_)[op->GetOpType()]; auto inputs = node->cast()->inputs(); for (size_t i = 1; i < inputs.size(); ++i) { if (input_map.find(i) != input_map.end()) { diff --git a/mindspore/ccsrc/transform/graph_ir/op_adapter.h 
b/mindspore/ccsrc/transform/graph_ir/op_adapter.h index d54cd4ed743..e59798f4c3e 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_adapter.h +++ b/mindspore/ccsrc/transform/graph_ir/op_adapter.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,26 +20,26 @@ #include #include #include -#include +#include "utils/hash_map.h" #include "transform/graph_ir/op_adapter_util.h" #include "utils/utils.h" namespace mindspore { namespace transform { class OpAdapterImpl { public: - OpAdapterImpl(const std::unordered_map &input_map, - const std::unordered_map &dyn_input_map, - const std::unordered_map &output_map, - const std::unordered_map &dyn_output_map, - const std::unordered_map &dyn_subgraph_map, - const std::unordered_map &attr_map, - const std::unordered_map &enum_map, - const std::unordered_map &input_attr_map, - std::unordered_map> *cus_input_map, - std::unordered_map> *cus_output_map, - std::unordered_map *extra_attr, - std::unordered_map *name_counts, BaseOpAdapter *adpt) + OpAdapterImpl(const mindspore::HashMap &input_map, + const mindspore::HashMap &dyn_input_map, + const mindspore::HashMap &output_map, + const mindspore::HashMap &dyn_output_map, + const mindspore::HashMap &dyn_subgraph_map, + const mindspore::HashMap &attr_map, + const mindspore::HashMap &enum_map, + const mindspore::HashMap &input_attr_map, + mindspore::HashMap> *cus_input_map, + mindspore::HashMap> *cus_output_map, + mindspore::HashMap *extra_attr, + mindspore::HashMap *name_counts, BaseOpAdapter *adpt) : input_map_(input_map), dyn_input_map_(dyn_input_map), output_map_(output_map), @@ -95,18 +95,18 @@ class OpAdapterImpl { int setAttr(const OperatorPtr &op, const AnfNodePtr &node); private: - const std::unordered_map &input_map_; - const std::unordered_map &dyn_input_map_; - const std::unordered_map &output_map_; - const std::unordered_map &dyn_output_map_; - const std::unordered_map &dyn_subgraph_map_; - const std::unordered_map &attr_map_; - const std::unordered_map &enum_map_; - const std::unordered_map &input_attr_map_; - std::unordered_map> *const cus_input_map_; - std::unordered_map> *const cus_output_map_; - std::unordered_map *const extra_attr_; - std::unordered_map *const name_counts_; + const mindspore::HashMap &input_map_; + const mindspore::HashMap &dyn_input_map_; + const mindspore::HashMap &output_map_; + const mindspore::HashMap &dyn_output_map_; + const mindspore::HashMap &dyn_subgraph_map_; + const mindspore::HashMap &attr_map_; + const mindspore::HashMap &enum_map_; + const mindspore::HashMap &input_attr_map_; + mindspore::HashMap> *const cus_input_map_; + mindspore::HashMap> *const cus_output_map_; + mindspore::HashMap *const extra_attr_; + mindspore::HashMap *const name_counts_; BaseOpAdapter *const adpt_; }; @@ -183,11 +183,11 @@ class OpAdapter : public BaseOpAdapter { OperatorPtr generate(const std::string &op_name) override { return std::make_shared(op_name); } - const std::unordered_map &getInputMap() override { return input_map_; } - const std::unordered_map &getInputAttrMap() override { return input_attr_map_; } - const std::unordered_map &getDynInputMap() override { return dyn_input_map_; } - const std::unordered_map &getOutputMap() override { return output_map_; } - const std::unordered_map &getDynSubgraphMap() override { return dyn_subgraph_map_; } + const mindspore::HashMap 
&getInputMap() override { return input_map_; } + const mindspore::HashMap &getInputAttrMap() override { return input_attr_map_; } + const mindspore::HashMap &getDynInputMap() override { return dyn_input_map_; } + const mindspore::HashMap &getOutputMap() override { return output_map_; } + const mindspore::HashMap &getDynSubgraphMap() override { return dyn_subgraph_map_; } Status SetOpSubgraphFunc(const OperatorPtr &op, int index, std::shared_ptr> branches) { return impl_->SetOpSubgraphFunc(op, index, branches); @@ -279,7 +279,7 @@ class OpAdapter : public BaseOpAdapter { int setAttr(const OperatorPtr &op, const AnfNodePtr &node) override { return impl_->setAttr(op, node); } - std::unordered_map GetExtraAttr() override { return extra_attr_; } + mindspore::HashMap GetExtraAttr() override { return extra_attr_; } private: template @@ -423,42 +423,42 @@ class OpAdapter : public BaseOpAdapter { return ConvertAnyUtil(value, anyTraitsValue); } - static const std::unordered_map input_map_; - static const std::unordered_map dyn_input_map_; - static const std::unordered_map output_map_; - static const std::unordered_map dyn_output_map_; - static const std::unordered_map dyn_subgraph_map_; - static const std::unordered_map attr_map_; - static const std::unordered_map enum_map_; + static const mindspore::HashMap input_map_; + static const mindspore::HashMap dyn_input_map_; + static const mindspore::HashMap output_map_; + static const mindspore::HashMap dyn_output_map_; + static const mindspore::HashMap dyn_subgraph_map_; + static const mindspore::HashMap attr_map_; + static const mindspore::HashMap enum_map_; // convert input from anf graph to Attr in Operators - static const std::unordered_map input_attr_map_; - static std::unordered_map> cus_input_map_; - static std::unordered_map> cus_output_map_; - std::unordered_map extra_attr_; - std::unordered_map name_counts_; + static const mindspore::HashMap input_attr_map_; + static mindspore::HashMap> cus_input_map_; + static mindspore::HashMap> cus_output_map_; + mindspore::HashMap extra_attr_; + mindspore::HashMap name_counts_; const std::shared_ptr impl_; }; template -const std::unordered_map OpAdapter::input_map_; +const mindspore::HashMap OpAdapter::input_map_; template -const std::unordered_map OpAdapter::dyn_input_map_; +const mindspore::HashMap OpAdapter::dyn_input_map_; template -const std::unordered_map OpAdapter::output_map_; +const mindspore::HashMap OpAdapter::output_map_; template -const std::unordered_map OpAdapter::dyn_output_map_; +const mindspore::HashMap OpAdapter::dyn_output_map_; template -const std::unordered_map OpAdapter::dyn_subgraph_map_; +const mindspore::HashMap OpAdapter::dyn_subgraph_map_; template -const std::unordered_map OpAdapter::attr_map_; +const mindspore::HashMap OpAdapter::attr_map_; template -const std::unordered_map OpAdapter::enum_map_; +const mindspore::HashMap OpAdapter::enum_map_; template -const std::unordered_map OpAdapter::input_attr_map_; +const mindspore::HashMap OpAdapter::input_attr_map_; template -std::unordered_map> OpAdapter::cus_input_map_; +mindspore::HashMap> OpAdapter::cus_input_map_; template -std::unordered_map> OpAdapter::cus_output_map_; +mindspore::HashMap> OpAdapter::cus_output_map_; // specialization for method } // namespace transform diff --git a/mindspore/ccsrc/transform/graph_ir/op_adapter_base.h b/mindspore/ccsrc/transform/graph_ir/op_adapter_base.h index 947c81941fe..98755d4e11c 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_adapter_base.h +++ 
b/mindspore/ccsrc/transform/graph_ir/op_adapter_base.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -17,13 +17,13 @@ #ifndef MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_ADAPTER_BASE_H_ #define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_ADAPTER_BASE_H_ -#include #include #include #include #include #include +#include "utils/hash_map.h" #include "transform/graph_ir/util.h" #include "ir/anf.h" #include "ir/primitive.h" @@ -139,7 +139,7 @@ class BaseOpAdapter { virtual int setAttr(const OperatorPtr &op, const std::string &attrKey, const ValuePtr &attrValue) = 0; virtual int setAttr(const OperatorPtr &op, const PrimitivePtr &prim) = 0; virtual int setAttr(const OperatorPtr &op, const AnfNodePtr &node) = 0; - virtual std::unordered_map GetExtraAttr() = 0; + virtual mindspore::HashMap GetExtraAttr() = 0; template ::value>::type> int setAttr(const OperatorPtr &op, const std::string &attrKey, const std::shared_ptr &attrValue) { return setAttr(op, attrKey, MakeValue(attrValue)); @@ -151,11 +151,11 @@ class BaseOpAdapter { virtual OutHandler getOutput(const OperatorPtr &op, int index) = 0; virtual void updateOutputDesc(const OperatorPtr &op, const abstract::BaseShapePtr &shp, const TypePtr &type, const AnfNodePtr &node) = 0; - virtual const std::unordered_map &getInputMap() = 0; - virtual const std::unordered_map &getInputAttrMap() = 0; - virtual const std::unordered_map &getDynInputMap() = 0; - virtual const std::unordered_map &getOutputMap() = 0; - virtual const std::unordered_map &getDynSubgraphMap() = 0; + virtual const mindspore::HashMap &getInputMap() = 0; + virtual const mindspore::HashMap &getInputAttrMap() = 0; + virtual const mindspore::HashMap &getDynInputMap() = 0; + virtual const mindspore::HashMap &getOutputMap() = 0; + virtual const mindspore::HashMap &getDynSubgraphMap() = 0; void AddAttrToDrawGraph(const std::string &attr_str) { attrs_vec_.push_back(attr_str); } const std::vector &GetAttrsFromDrawGraph() const { return attrs_vec_; } void clearAttrVect() { attrs_vec_.clear(); } @@ -193,7 +193,7 @@ struct AnyTraits { using type = int64_t; }; -using ExtraAttr = std::unordered_map; +using ExtraAttr = mindspore::HashMap; } // namespace transform } // namespace mindspore #endif // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_ADAPTER_BASE_H_ diff --git a/mindspore/ccsrc/transform/graph_ir/op_adapter_map.cc b/mindspore/ccsrc/transform/graph_ir/op_adapter_map.cc index d13c148f342..9b9cf599200 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_adapter_map.cc +++ b/mindspore/ccsrc/transform/graph_ir/op_adapter_map.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -20,13 +20,13 @@ namespace mindspore { namespace transform { template <> -std::unordered_map> OpAdapter::cus_input_map_{}; +mindspore::HashMap> OpAdapter::cus_input_map_{}; template <> -std::unordered_map> OpAdapter::cus_output_map_{}; +mindspore::HashMap> OpAdapter::cus_output_map_{}; -std::unordered_map OpAdapterMap::adpt_map_ = { +mindspore::HashMap OpAdapterMap::adpt_map_ = { {kNameCustomOp, std::make_shared(std::make_shared>())}}; -std::unordered_map &OpAdapterMap::get() { return adpt_map_; } +mindspore::HashMap &OpAdapterMap::get() { return adpt_map_; } } // namespace transform } // namespace mindspore diff --git a/mindspore/ccsrc/transform/graph_ir/op_adapter_map.h b/mindspore/ccsrc/transform/graph_ir/op_adapter_map.h index 1d2428fc2b6..89879737f9d 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_adapter_map.h +++ b/mindspore/ccsrc/transform/graph_ir/op_adapter_map.h @@ -18,7 +18,7 @@ #define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_ADAPTER_MAP_H_ #include -#include +#include "utils/hash_map.h" #include "transform/graph_ir/op_adapter_desc.h" namespace mindspore { @@ -343,10 +343,10 @@ constexpr const char kNameFillV1[] = "FillV1"; class OpAdapterMap { public: - static std::unordered_map &get(); + static mindspore::HashMap &get(); private: - static std::unordered_map adpt_map_; + static mindspore::HashMap adpt_map_; }; } // namespace transform } // namespace mindspore diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/array_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/array_ops_declare.h index 4251f8c7447..487265ce6e2 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/array_ops_declare.h +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/array_ops_declare.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,7 +18,7 @@ #define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_ARRAY_OPS_DECLARE_H_ #include -#include +#include "utils/hash_map.h" #include "transform/graph_ir/op_declare/op_declare_macro.h" #include "ops/array_ops.h" diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/control_flow_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/control_flow_ops_declare.h index bbbe81d96cb..f42e50e1980 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/control_flow_ops_declare.h +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/control_flow_ops_declare.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -18,7 +18,7 @@ #define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_CONTROL_FLOW_OPS_DECLARE_H_ #include -#include +#include "utils/hash_map.h" #include "transform/graph_ir/op_declare/op_declare_macro.h" #include "ops/control_flow_ops.h" diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/ctc_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/ctc_ops_declare.h index 074b19592a3..46adf3517da 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/ctc_ops_declare.h +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/ctc_ops_declare.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,7 +18,7 @@ #define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_CTC_OPS_DECLARE_H_ #include -#include +#include "utils/hash_map.h" #include "transform/graph_ir/op_declare/op_declare_macro.h" #include "ops/ctc_ops.h" diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/elewise_calculation_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/elewise_calculation_ops_declare.h index 1b3e38756f7..f8b22c4b193 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/elewise_calculation_ops_declare.h +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/elewise_calculation_ops_declare.h @@ -18,7 +18,7 @@ #define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_ELEWISE_CALCULATION_OPS_DECLARE_H_ #include -#include +#include "utils/hash_map.h" #include "transform/graph_ir/op_declare/op_declare_macro.h" #include "ops/elewise_calculation_ops.h" diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/functional_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/functional_ops_declare.h index 025b0d0c60b..213288b0da0 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/functional_ops_declare.h +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/functional_ops_declare.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,7 +18,7 @@ #define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_FUNCTIONAL_OPS_DECLARE_H_ #include -#include +#include "utils/hash_map.h" #include "transform/graph_ir/op_declare/op_declare_macro.h" #include "ops/functional_ops.h" diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/hcom_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/hcom_ops_declare.h index 635b7b1e22f..38829a679fd 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/hcom_ops_declare.h +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/hcom_ops_declare.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -18,7 +18,7 @@ #define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_HCOM_OPS_DECLARE_H_ #include -#include +#include "utils/hash_map.h" #include "transform/graph_ir/op_declare/op_declare_macro.h" #include "ops/hcom_ops.h" diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/image_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/image_ops_declare.h index 985796b2b31..9bb3148d100 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/image_ops_declare.h +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/image_ops_declare.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,7 +18,7 @@ #define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_IMAGE_OPS_DECLARE_H_ #include -#include +#include "utils/hash_map.h" #include "transform/graph_ir/op_declare/op_declare_macro.h" #include "ops/image_ops.h" diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/logging_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/logging_ops_declare.h index 3110e96cc25..75d7de1d6f6 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/logging_ops_declare.h +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/logging_ops_declare.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,7 +18,7 @@ #define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_LOGGING_OPS_DECLARE_H_ #include -#include +#include "utils/hash_map.h" #include "transform/graph_ir/op_declare/op_declare_macro.h" #include "ops/logging_ops.h" diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/math_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/math_ops_declare.h index cb5239b053b..d40b5484934 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/math_ops_declare.h +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/math_ops_declare.h @@ -18,7 +18,7 @@ #define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_MATH_OPS_DECLARE_H_ #include -#include +#include "utils/hash_map.h" #include "transform/graph_ir/op_declare/op_declare_macro.h" #include "ops/math_ops.h" diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/matrix_calculation_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/matrix_calculation_ops_declare.h index 7c930d42544..9a9f7723e3e 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/matrix_calculation_ops_declare.h +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/matrix_calculation_ops_declare.h @@ -18,7 +18,7 @@ #define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_MATRIX_CALCULATION_OPS_DECLARE_H_ #include -#include +#include "utils/hash_map.h" #include "transform/graph_ir/op_declare/op_declare_macro.h" #include "ops/matrix_calculation_ops.h" diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/nn_batch_norm_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/nn_batch_norm_ops_declare.h index f1bf33cfdc8..db71da36664 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/nn_batch_norm_ops_declare.h +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/nn_batch_norm_ops_declare.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the 
Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,7 +18,7 @@ #define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_NN_BATCH_NORM_OPS_DECLARE_H_ #include -#include +#include "utils/hash_map.h" #include "transform/graph_ir/op_declare/op_declare_macro.h" #include "ops/nn_batch_norm_ops.h" diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/nn_calculation_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/nn_calculation_ops_declare.h index 867e30290e7..299d8f0ee3c 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/nn_calculation_ops_declare.h +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/nn_calculation_ops_declare.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,7 +18,7 @@ #define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_NN_CALCULATION_OPS_DECLARE_H_ #include -#include +#include "utils/hash_map.h" #include "transform/graph_ir/op_declare/op_declare_macro.h" #include "ops/nn_calculation_ops.h" diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/nn_detect_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/nn_detect_ops_declare.h index f7c4af37e42..4ca6d89005e 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/nn_detect_ops_declare.h +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/nn_detect_ops_declare.h @@ -18,7 +18,7 @@ #define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_NN_DETECT_OPS_DECLARE_H_ #include -#include +#include "utils/hash_map.h" #include "transform/graph_ir/op_declare/op_declare_macro.h" #include "ops/nn_detect_ops.h" diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/nn_norm_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/nn_norm_ops_declare.h index 6fe446506b7..23bea22e03e 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/nn_norm_ops_declare.h +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/nn_norm_ops_declare.h @@ -18,7 +18,7 @@ #define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_IMAGE_OPS_DECLARE_H_ #include -#include +#include "utils/hash_map.h" #include "transform/graph_ir/op_declare/op_declare_macro.h" #include "ops/nn_norm_ops.h" diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/nn_pooling_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/nn_pooling_ops_declare.h index abfe614e6b6..1b7b55ee1b1 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/nn_pooling_ops_declare.h +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/nn_pooling_ops_declare.h @@ -18,7 +18,7 @@ #define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_NN_POOLING_OPS_DECLARE_H_ #include -#include +#include "utils/hash_map.h" #include "transform/graph_ir/op_declare/op_declare_macro.h" #include "ops/nn_ops.h" #include "ops/nn_pooling_ops.h" diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/nn_training_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/nn_training_ops_declare.h index 4451d76aa91..932cdb79fc5 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/nn_training_ops_declare.h +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/nn_training_ops_declare.h @@ -18,7 +18,7 @@ #define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_NN_TRAINING_OPS_DECLARE_H_ #include -#include +#include "utils/hash_map.h" #include "transform/graph_ir/op_declare/op_declare_macro.h" #include 
"ops/nn_training_ops.h" diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/nonlinear_fuc_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/nonlinear_fuc_ops_declare.h index 54f391cad4f..2aa41b6958d 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/nonlinear_fuc_ops_declare.h +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/nonlinear_fuc_ops_declare.h @@ -18,7 +18,7 @@ #define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_NONLINEAR_FUC_OPS_DECLARE_H_ #include -#include +#include "utils/hash_map.h" #include "ops/nonlinear_fuc_ops.h" #include "transform/graph_ir/op_declare/op_declare_macro.h" diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/npu_loss_scale_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/npu_loss_scale_ops_declare.h index 7bf5986c5cc..39734bb9f8d 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/npu_loss_scale_ops_declare.h +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/npu_loss_scale_ops_declare.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,7 +18,7 @@ #define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_NPU_LOSS_SCALE_OPS_DECLARE_H_ #include -#include +#include "utils/hash_map.h" #include "transform/graph_ir/op_declare/op_declare_macro.h" #include "ops/npu_loss_scale_ops.h" diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/op_declare_macro.h b/mindspore/ccsrc/transform/graph_ir/op_declare/op_declare_macro.h index f43ef550fb7..8e205da8e57 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/op_declare_macro.h +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/op_declare_macro.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -18,8 +18,8 @@ #define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_MACRO_H_ #include -#include #include +#include "utils/hash_map.h" #include "transform/graph_ir/op_adapter.h" #include "transform/graph_ir/op_adapter_map.h" #include "mindspore/core/base/core_ops.h" @@ -28,38 +28,38 @@ namespace mindspore::transform { #define DECLARE_OP_ADAPTER(T) \ using T = ge::op::T; \ template <> \ - const std::unordered_map OpAdapter::input_map_; \ + const mindspore::HashMap OpAdapter::input_map_; \ template <> \ - const std::unordered_map OpAdapter::attr_map_; + const mindspore::HashMap OpAdapter::attr_map_; #define DECLARE_OP_USE_OUTPUT(T) \ template <> \ - const std::unordered_map OpAdapter::output_map_; + const mindspore::HashMap OpAdapter::output_map_; #define DECLARE_OP_USE_ENUM(T) \ template <> \ - const std::unordered_map OpAdapter::enum_map_{}; + const mindspore::HashMap OpAdapter::enum_map_{}; #define DECLARE_OP_USE_INPUT_ATTR(T) \ template <> \ - const std::unordered_map OpAdapter::input_attr_map_; + const mindspore::HashMap OpAdapter::input_attr_map_; #define DECLARE_OP_USE_DYN_INPUT(T) \ template <> \ - const std::unordered_map OpAdapter::dyn_input_map_; + const mindspore::HashMap OpAdapter::dyn_input_map_; #define DECLARE_OP_USE_DYN_SUBGRAPH(T) \ template <> \ - const std::unordered_map OpAdapter::dyn_subgraph_map_; + const mindspore::HashMap OpAdapter::dyn_subgraph_map_; #define DECLARE_OP_USE_DYN_OUTPUT(T) \ template <> \ - const std::unordered_map OpAdapter::dyn_output_map_; + const mindspore::HashMap OpAdapter::dyn_output_map_; #define INPUT_MAP(T) \ template <> \ - const std::unordered_map OpAdapter::input_map_ -#define EMPTY_INPUT_MAP std::unordered_map() + const mindspore::HashMap OpAdapter::input_map_ +#define EMPTY_INPUT_MAP mindspore::HashMap() #define INPUT_DESC(name) \ { \ #name, \ @@ -79,7 +79,7 @@ namespace mindspore::transform { #define DYN_INPUT_MAP(T) \ template <> \ - const std::unordered_map OpAdapter::dyn_input_map_ + const mindspore::HashMap OpAdapter::dyn_input_map_ #define DYN_INPUT_DESC(name) \ { \ #name, \ @@ -99,7 +99,7 @@ namespace mindspore::transform { #define DYN_SUBGRAPH_MAP(T) \ template <> \ - const std::unordered_map OpAdapter::dyn_subgraph_map_ + const mindspore::HashMap OpAdapter::dyn_subgraph_map_ #define DYN_SUBGRAPH_DESC(name) \ { \ #name, \ @@ -115,8 +115,8 @@ namespace mindspore::transform { #define ATTR_MAP(T) \ template <> \ - const std::unordered_map OpAdapter::attr_map_ -#define EMPTY_ATTR_MAP std::unordered_map() + const mindspore::HashMap OpAdapter::attr_map_ +#define EMPTY_ATTR_MAP mindspore::HashMap() #define ATTR_DESC(name, ...) 
\ { \ #name, \ @@ -128,11 +128,11 @@ namespace mindspore::transform { #define INPUT_ATTR_MAP(T) \ template <> \ - const std::unordered_map OpAdapter::input_attr_map_ + const mindspore::HashMap OpAdapter::input_attr_map_ #define OUTPUT_MAP(T) \ template <> \ - const std::unordered_map OpAdapter::output_map_ + const mindspore::HashMap OpAdapter::output_map_ #define OUTPUT_DESC(name) \ { \ #name, \ @@ -144,7 +144,7 @@ namespace mindspore::transform { #define DYN_OUTPUT_MAP(T) \ template <> \ - const std::unordered_map OpAdapter::dyn_output_map_ + const mindspore::HashMap OpAdapter::dyn_output_map_ #define DYN_OUTPUT_DESC(name) \ { \ diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/pad_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/pad_ops_declare.h index 70bacd01669..2768c3b5b3b 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/pad_ops_declare.h +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/pad_ops_declare.h @@ -18,7 +18,7 @@ #define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_PAD_OPS_DECLARE_H_ #include -#include +#include "utils/hash_map.h" #include "transform/graph_ir/op_declare/op_declare_macro.h" #include "ops/pad_ops.h" diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/quantize_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/quantize_ops_declare.h index 60cb9a84c86..c1ec358971e 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/quantize_ops_declare.h +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/quantize_ops_declare.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -18,7 +18,7 @@ #define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_QUANTIZE_OPS_DECLARE_H_ #include -#include +#include "utils/hash_map.h" #include "transform/graph_ir/op_declare/op_declare_macro.h" #include "ops/quantize_ops.h" diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/random_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/random_ops_declare.h index 29232a46ed9..dc654c8489f 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/random_ops_declare.h +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/random_ops_declare.h @@ -18,7 +18,7 @@ #define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_RANDOM_OPS_DECLARE_H_ #include -#include +#include "utils/hash_map.h" #include "transform/graph_ir/op_declare/op_declare_macro.h" #include "ops/random_ops.h" diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/reduce_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/reduce_ops_declare.h index da609c24ba7..f09e77b33bf 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/reduce_ops_declare.h +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/reduce_ops_declare.h @@ -18,7 +18,7 @@ #define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_REDUCE_OPS_DECLARE_H_ #include -#include +#include "utils/hash_map.h" #include "transform/graph_ir/op_declare/op_declare_macro.h" #include "ops/reduce_ops.h" diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/rnn_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/rnn_declare.h index 525725ff54d..125bb50a3bd 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/rnn_declare.h +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/rnn_declare.h @@ -18,7 +18,7 @@ #define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_RNN_DECLARE_H_ #include -#include +#include "utils/hash_map.h" #include "ops/rnn.h" #include "transform/graph_ir/op_declare/op_declare_macro.h" diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/rpn_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/rpn_ops_declare.h index 97928a4cad4..b0916368722 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/rpn_ops_declare.h +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/rpn_ops_declare.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -18,7 +18,7 @@ #define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_RPN_OPS_DECLARE_H_ #include -#include +#include "utils/hash_map.h" #include "transform/graph_ir/op_declare/op_declare_macro.h" #include "ops/rpn_ops.h" diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/selection_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/selection_ops_declare.h index 4a94bfe692b..e4c898a4dde 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/selection_ops_declare.h +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/selection_ops_declare.h @@ -18,7 +18,7 @@ #define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_SELECTION_OPS_DECLARE_H_ #include -#include +#include "utils/hash_map.h" #include "transform/graph_ir/op_declare/op_declare_macro.h" #include "ops/selection_ops.h" diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/split_combination_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/split_combination_ops_declare.h index 94d637cb111..9b45c2d080d 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/split_combination_ops_declare.h +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/split_combination_ops_declare.h @@ -18,7 +18,7 @@ #define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_SPLIT_COMBINATION_OPS_DECLARE_H_ #include -#include +#include "utils/hash_map.h" #include "transform/graph_ir/op_declare/op_declare_macro.h" #include "ops/split_combination_ops.h" diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/state_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/state_ops_declare.h index 6c7c0c639f0..33d169a74ec 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/state_ops_declare.h +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/state_ops_declare.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,7 +18,7 @@ #define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_STATE_OPS_DECLARE_H_ #include -#include +#include "utils/hash_map.h" #include "transform/graph_ir/op_declare/op_declare_macro.h" #include "ops/state_ops.h" diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/transformation_ops_declare.h b/mindspore/ccsrc/transform/graph_ir/op_declare/transformation_ops_declare.h index df4edd464c7..453a908b8c4 100644 --- a/mindspore/ccsrc/transform/graph_ir/op_declare/transformation_ops_declare.h +++ b/mindspore/ccsrc/transform/graph_ir/op_declare/transformation_ops_declare.h @@ -18,7 +18,7 @@ #define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_TRANSFORMATION_OPS_DECLARE_H_ #include -#include +#include "utils/hash_map.h" #include "transform/graph_ir/op_declare/op_declare_macro.h" #include "ops/transformation_ops.h" diff --git a/mindspore/ccsrc/transform/graph_ir/types.h b/mindspore/ccsrc/transform/graph_ir/types.h index 0b76393231e..b91c0899c94 100644 --- a/mindspore/ccsrc/transform/graph_ir/types.h +++ b/mindspore/ccsrc/transform/graph_ir/types.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -21,7 +21,7 @@ #include #include #include -#include +#include "utils/hash_map.h" #include "ir/anf.h" #include "ir/dtype.h" #include "ir/tensor.h" @@ -52,7 +52,7 @@ using Operator = ge::Operator; using OperatorPtr = std::shared_ptr; using DfGraph = ge::Graph; using DfGraphPtr = std::shared_ptr; -using TensorMap = std::unordered_map>; +using TensorMap = mindspore::HashMap>; } // namespace transform } // namespace mindspore diff --git a/mindspore/ccsrc/utils/convert_utils.cc b/mindspore/ccsrc/utils/convert_utils.cc index cfac0ad9ffa..1a0490bfffc 100644 --- a/mindspore/ccsrc/utils/convert_utils.cc +++ b/mindspore/ccsrc/utils/convert_utils.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -166,7 +166,7 @@ bool SameNode(const AnfNodePtr &node1, const AnfNodePtr &node2, FuncGraphPairMap bool SameSubgraph(const AnfNodePtr &root1, const AnfNodePtr &root2, FuncGraphPairMapEquiv *equiv_func_graph, NodeMapEquiv *const equiv_node) { - std::unordered_set done; + mindspore::HashSet done; std::stack> todo; todo.push(std::make_pair(root1, root2)); diff --git a/mindspore/ccsrc/utils/convert_utils.h b/mindspore/ccsrc/utils/convert_utils.h index 28679487a9e..f953bc281f4 100644 --- a/mindspore/ccsrc/utils/convert_utils.h +++ b/mindspore/ccsrc/utils/convert_utils.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -23,9 +23,9 @@ #include #include #include -#include -#include +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "utils/convert_utils_base.h" #include "utils/any.h" #include "base/base_ref.h" @@ -56,8 +56,8 @@ struct PairHasher { enum EquivState { kNotEquiv = 0, kEquiv = 1, kPending = 2 }; -using FuncGraphPairMapEquiv = std::unordered_map, EquivState, PairHasher>; -using NodeMapEquiv = std::unordered_map; +using FuncGraphPairMapEquiv = mindspore::HashMap, EquivState, PairHasher>; +using NodeMapEquiv = mindspore::HashMap; bool Isomorphic(const FuncGraphPtr &g1, const FuncGraphPtr &g2, FuncGraphPairMapEquiv *equiv_func_graph, NodeMapEquiv *equiv_node); @@ -83,14 +83,14 @@ size_t CountValueNum(const ValueTuplePtr &value_tuple); // sparse_attr_map converts CNode{kPrimSparseGetAttr, SparseTensor} // to CNode{kPrimTupleGetItem, SparseTensor, int64_t(index)}, used // in backend common optimization pass: sparse_process.cc -const std::unordered_map sparse_attr_map = {{prim::kPrimCSRTensorGetIndptr->name(), 0}, +const mindspore::HashMap sparse_attr_map = {{prim::kPrimCSRTensorGetIndptr->name(), 0}, {prim::kPrimCSRTensorGetIndices->name(), 1}, {prim::kPrimCSRTensorGetValues->name(), 2}, {prim::kPrimCSRTensorGetDenseShape->name(), 3}}; // make_sparse_set records all make_sparse primitives, and tries to replace // make_sparse to make_tuple, used in backend common optimization pass: // sparse_process.cc -const std::unordered_set make_sparse_set = { +const mindspore::HashSet make_sparse_set = { {prim::kPrimMakeCSRTensor->name()}, {prim::kPrimMakeSparseTensor->name()}, {prim::kPrimMakeRowTensor->name()}}; } // namespace mindspore diff --git a/mindspore/ccsrc/vm/backend.h b/mindspore/ccsrc/vm/backend.h index d4d7042c803..125947011a4 100644 --- a/mindspore/ccsrc/vm/backend.h 
+++ b/mindspore/ccsrc/vm/backend.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,11 +19,11 @@ #include #include #include -#include #include #include #include +#include "utils/hash_map.h" #include "utils/contract.h" #include "ir/anf.h" #include "vm/segment_runner.h" @@ -97,7 +97,7 @@ class MsBackend : public Backend { session::SessionPtr other_sess_; std::string target_device_; std::string other_device_; - std::unordered_map graph_id_map_; + mindspore::HashMap graph_id_map_; }; class MindRTBackend : public Backend { @@ -181,7 +181,7 @@ class MindRTBackend : public Backend { std::map graph_info_to_device_context_; std::vector control_nodes_; - std::unordered_map> actor_to_graph_compiler_info_; + mindspore::HashMap> actor_to_graph_compiler_info_; // Cache output tensor ref count of kernels for back propagation graph in PyNative mode. std::map> cnode_ref_counts_; diff --git a/mindspore/ccsrc/vm/segment_runner.cc b/mindspore/ccsrc/vm/segment_runner.cc index fbe11c7e747..19431b3d8bf 100644 --- a/mindspore/ccsrc/vm/segment_runner.cc +++ b/mindspore/ccsrc/vm/segment_runner.cc @@ -22,12 +22,12 @@ #include #include #include -#include #include -#include #include #include +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "utils/log_adapter.h" #include "utils/utils.h" #include "ir/manager.h" @@ -43,7 +43,7 @@ namespace { // users: dict mapping each node to its users (globally) // seen: set of nodes that are part of the segment AnfNodePtrList GetOutput(const AnfNodePtrList &nodes, const NodeUsersMap &users, - const std::unordered_set &seen) { + const mindspore::HashSet &seen) { AnfNodePtrList output; if (users.size() == 0) { return output; @@ -138,9 +138,10 @@ std::tuple TransformSegmentToAnfGr eqv[n]->set_abstract(n->abstract()); eqv[n]->set_kernel_info(n->kernel_info_ptr()); } - std::unordered_set eqv_keys; - (void)std::transform(std::begin(eqv), std::end(eqv), std::inserter(eqv_keys, eqv_keys.end()), - [](const std::pair &elem) -> AnfNodePtr { return elem.first; }); + mindspore::HashSet eqv_keys; + for (auto &e : eqv) { + eqv_keys.emplace(e.first); + } auto mgr = lst[0]->func_graph()->manager(); MS_EXCEPTION_IF_NULL(mgr); auto outputs = GetOutput(lst, mgr->node_users(), eqv_keys); diff --git a/mindspore/ccsrc/vm/segment_runner.h b/mindspore/ccsrc/vm/segment_runner.h index 841d3ce4e9b..1407c81412a 100644 --- a/mindspore/ccsrc/vm/segment_runner.h +++ b/mindspore/ccsrc/vm/segment_runner.h @@ -1,7 +1,7 @@ /** * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/). * - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,11 +20,11 @@ #define MINDSPORE_CCSRC_VM_SEGMENT_RUNNER_H_ #include -#include #include #include #include +#include "utils/hash_map.h" #include "ir/anf.h" #include "vm/vmimpl.h" #include "vm/graph_partition.h" diff --git a/mindspore/ccsrc/vm/transform.h b/mindspore/ccsrc/vm/transform.h index 4562331d105..cc456c32363 100644 --- a/mindspore/ccsrc/vm/transform.h +++ b/mindspore/ccsrc/vm/transform.h @@ -1,7 +1,7 @@ /** * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/). 
* - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -23,9 +23,9 @@ #include #include #include -#include #include +#include "utils/hash_map.h" #include "vm/vm.h" #include "ir/anf.h" #include "frontend/operator/ops.h" @@ -103,7 +103,7 @@ class CompileGraph { int64_t height_{0}; int64_t max_height_{0}; - std::unordered_map slots_; + mindspore::HashMap slots_; InstSet inst_; }; @@ -127,7 +127,7 @@ class CompileGraphs { protected: InstSet insts_; - std::unordered_map mapping_; + mindspore::HashMap mapping_; CompileGraphPtr transform_; BackendPtr backend_; }; diff --git a/mindspore/ccsrc/vm/vm.h b/mindspore/ccsrc/vm/vm.h index 067b59f0bf4..ea5bcb01d8b 100644 --- a/mindspore/ccsrc/vm/vm.h +++ b/mindspore/ccsrc/vm/vm.h @@ -1,7 +1,7 @@ /** * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/). * - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -27,8 +27,8 @@ #include #include #include -#include +#include "utils/hash_map.h" #include "pybind11/pybind11.h" #include "ir/anf.h" diff --git a/mindspore/ccsrc/vm/vmimpl.h b/mindspore/ccsrc/vm/vmimpl.h index f0b746a6652..0505c118b64 100644 --- a/mindspore/ccsrc/vm/vmimpl.h +++ b/mindspore/ccsrc/vm/vmimpl.h @@ -1,7 +1,7 @@ /** * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/). * - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,10 +20,10 @@ #define MINDSPORE_CCSRC_VM_VMIMPL_H_ #include -#include #include #include +#include "utils/hash_map.h" #include "ir/anf.h" #include "ir/manager.h" #include "ir/tensor.h" @@ -33,10 +33,10 @@ namespace mindspore { namespace compile { using AnfNodePtrList = std::vector; -using AnfNodePtrToBaseRefMap = std::unordered_map; -using AnfNodePtrToAnfNodePtrMap = std::unordered_map; +using AnfNodePtrToBaseRefMap = mindspore::HashMap; +using AnfNodePtrToAnfNodePtrMap = mindspore::HashMap; -using FuncGraphPtrToBaseRefMap = std::unordered_map; +using FuncGraphPtrToBaseRefMap = mindspore::HashMap; using TensorList = std::vector; diff --git a/mindspore/core/abstract/abstract_value.cc b/mindspore/core/abstract/abstract_value.cc index 70055af2b70..5f082771599 100644 --- a/mindspore/core/abstract/abstract_value.cc +++ b/mindspore/core/abstract/abstract_value.cc @@ -1,7 +1,7 @@ /** * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/). * - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -21,6 +21,7 @@ #include #include +#include "utils/hash_map.h" #include "utils/symbolic.h" #include "abstract/utils.h" #include "utils/ms_context.h" @@ -917,7 +918,7 @@ ValuePtr AbstractClass::RealBuildValue() const { auto type = BuildType(); MS_EXCEPTION_IF_NULL(type); auto cls = type->cast(); - std::unordered_map attributes_value_map; + mindspore::HashMap attributes_value_map; for (const auto &attr : attributes_) { MS_EXCEPTION_IF_NULL(attr.second); ValuePtr value = attr.second->BuildValue(); diff --git a/mindspore/core/abstract/abstract_value.h b/mindspore/core/abstract/abstract_value.h index 7c19c87a72f..36321e02541 100644 --- a/mindspore/core/abstract/abstract_value.h +++ b/mindspore/core/abstract/abstract_value.h @@ -22,13 +22,13 @@ #include #include #include -#include #include #include "utils/log_adapter.h" #include "utils/hashing.h" #include "utils/any.h" #include "utils/flags.h" +#include "utils/hash_map.h" #include "base/base.h" #include "ir/dtype.h" #include "ir/value.h" @@ -864,7 +864,7 @@ class MS_CORE_API AbstractClass final : public AbstractBase { /// \param[in] attributes The abstracts of the attributes of the class. /// \param[in] methods The methods of the class. AbstractClass(const Named &tag, const std::vector &attributes, - const std::unordered_map &methods) + const mindspore::HashMap &methods) : attributes_(attributes), tag_(tag), methods_(methods) {} /// \brief Destructor of AbstractClass. @@ -890,7 +890,7 @@ class MS_CORE_API AbstractClass final : public AbstractBase { /// \brief get the methods of the class. /// /// \return A map of the method names and methods. - std::unordered_map methods() { return methods_; } + mindspore::HashMap methods() { return methods_; } /// \brief Get a attribute by name. /// @@ -923,7 +923,7 @@ class MS_CORE_API AbstractClass final : public AbstractBase { private: std::vector attributes_; Named tag_; - std::unordered_map methods_; + mindspore::HashMap methods_; }; using AbstractClassPtr = std::shared_ptr; diff --git a/mindspore/core/abstract/analysis_context.h b/mindspore/core/abstract/analysis_context.h index 926697b5759..45e03fd03b6 100644 --- a/mindspore/core/abstract/analysis_context.h +++ b/mindspore/core/abstract/analysis_context.h @@ -1,7 +1,7 @@ /** * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/). * - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -21,9 +21,10 @@ #include #include -#include #include +#include +#include "utils/hash_map.h" #include "abstract/abstract_value.h" #include "ir/meta_func_graph.h" @@ -69,10 +70,10 @@ class AnalysisContext { AbstractBasePtrList args_spec_list_; // Record all created context for each func graph. // `extant_context_cache_` is copied from its parent context. - std::unordered_map extant_context_cache_; + mindspore::HashMap extant_context_cache_; // Record all created child contexts from this context. // Like: key: [func_graph & arguments], value: [child_context] - std::unordered_map children_cache_; + mindspore::HashMap children_cache_; // There may may be shared_ptr loop like: // FuncGraphAbstactClosur->AnalysisContext->children_cache_->ArgsSpec->FuncGraphAbstactClosur. 
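[Editor's note] The segment_runner.cc hunk above replaces a std::transform writing through std::inserter with a plain range-for that calls emplace. A self-contained before/after sketch, using standard containers so it compiles on its own:

// Two equivalent ways to collect the keys of a map into a set. The patch
// prefers the explicit loop: std::inserter assigns by calling the hinted
// insert(pos, value) overload, which not every hash-set implementation
// provides, while the loop only needs emplace().
#include <algorithm>
#include <iterator>
#include <unordered_map>
#include <unordered_set>

int main() {
  const std::unordered_map<int, int> eqv{{1, 10}, {2, 20}, {3, 30}};

  // Before: transform + inserter.
  std::unordered_set<int> keys_before;
  std::transform(eqv.begin(), eqv.end(),
                 std::inserter(keys_before, keys_before.end()),
                 [](const auto &elem) { return elem.first; });

  // After: explicit range-for with emplace, as in segment_runner.cc.
  std::unordered_set<int> keys_after;
  for (const auto &elem : eqv) {
    keys_after.emplace(elem.first);
  }

  return keys_before == keys_after ? 0 : 1;
}

The loop form is also arguably clearer for a simple key-copy, independent of the container swap.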
diff --git a/mindspore/core/abstract/dshape.h b/mindspore/core/abstract/dshape.h index e4d72950825..6500b587112 100644 --- a/mindspore/core/abstract/dshape.h +++ b/mindspore/core/abstract/dshape.h @@ -22,7 +22,6 @@ #include #include #include -#include #include #include #include diff --git a/mindspore/core/abstract/primitive_infer_map.cc b/mindspore/core/abstract/primitive_infer_map.cc index fbd7bc17501..804271cf1a5 100644 --- a/mindspore/core/abstract/primitive_infer_map.cc +++ b/mindspore/core/abstract/primitive_infer_map.cc @@ -17,7 +17,6 @@ */ #include "abstract/primitive_infer_map.h" -#include #include #include #include "ops/exp.h" @@ -41,64 +40,67 @@ namespace mindspore { namespace abstract { std::vector GetDependsFormMap(const CNodePtr &cnode) { - const auto kOneHot = prim::kPrimOneHot->name(); - const auto kDropoutGenMask = prim::kPrimDropoutGenMask->name(); - const auto kTranspose = prim::kPrimTranspose->name(); - const auto kStridedSlice = prim::kPrimStridedSlice->name(); - const auto kStridedSliceGrad = prim::kPrimStridedSliceGrad->name(); - const auto kReduceSum = prim::kPrimReduceSum->name(); - const auto kDynamicBroadcastTo = prim::kPrimDynamicBroadcastTo->name(); - const auto kUnsortedSegmentSum = prim::kPrimUnsortedSegmentSum->name(); - const auto kUnsortedSegmentMin = prim::kPrimUnsortedSegmentMin->name(); - const auto kUnsortedSegmentMax = prim::kPrimUnsortedSegmentMax->name(); - const auto kGather = prim::kPrimGather->name(); - const auto kGatherV2 = prim::kPrimGatherV2->name(); - const auto kDynamicShape = prim::kPrimDynamicShape->name(); - const auto kRange = prim::kPrimRange->name(); - - const auto kConv2DBackpropFilter = prim::kPrimConv2DBackpropFilter->name(); - const auto kConv2DBackpropInput = prim::kPrimConv2DBackpropInput->name(); - const auto kTile = prim::kPrimTile->name(); - const auto kSlice = prim::kPrimSlice->name(); - const auto kSliceGrad = prim::kPrimSliceGrad->name(); - const auto kReshape = prim::kPrimReshape->name(); - const auto kDynamicReshape = prim::kPrimDynamicReshape->name(); - // common dynamic shape depends - static std::map> dynamic_shape_depends = {{kUnsortedSegmentSum, {2}}, - {kUnsortedSegmentMin, {2}}, - {kUnsortedSegmentMax, {2}}, - {kGather, {2}}, - {kGatherV2, {2}}, - {kDynamicShape, {0}}, - {kRange, {0, 1, 2}}, - {kConv2DBackpropFilter, {2}}, - {kConv2DBackpropInput, {2}}, - {kOneHot, {1, 3}}, - {kDropoutGenMask, {0}}, - {kStridedSlice, {1, 2, 3}}, - {kStridedSliceGrad, {1, 2, 3, 4}}, - {kTile, {1}}, - {kReshape, {1}}, - {kDynamicReshape, {1}}, - {kSlice, {1, 2}}, - {kSliceGrad, {2, 3}}, - {kDynamicBroadcastTo, {1}}}; - - auto ms_context = MsContext::GetInstance(); - MS_EXCEPTION_IF_NULL(ms_context); - auto device = ms_context->get_param(MS_CTX_DEVICE_TARGET); - if (device == kAscendDevice) { - (void)dynamic_shape_depends.insert({kReduceSum, {1}}); - (void)dynamic_shape_depends.insert({kTranspose, {1}}); - } + using ShapeVec = std::vector; + using PrimShapeDependMap = mindspore::HashMap; + static const auto &kOneHot = prim::kPrimOneHot->name(); + static const auto &kDropoutGenMask = prim::kPrimDropoutGenMask->name(); + static const auto &kTranspose = prim::kPrimTranspose->name(); + static const auto &kStridedSlice = prim::kPrimStridedSlice->name(); + static const auto &kStridedSliceGrad = prim::kPrimStridedSliceGrad->name(); + static const auto &kReduceSum = prim::kPrimReduceSum->name(); + static const auto &kDynamicBroadcastTo = prim::kPrimDynamicBroadcastTo->name(); + static const auto &kUnsortedSegmentSum = 
prim::kPrimUnsortedSegmentSum->name(); + static const auto &kUnsortedSegmentMin = prim::kPrimUnsortedSegmentMin->name(); + static const auto &kUnsortedSegmentMax = prim::kPrimUnsortedSegmentMax->name(); + static const auto &kGather = prim::kPrimGather->name(); + static const auto &kGatherV2 = prim::kPrimGatherV2->name(); + static const auto &kDynamicShape = prim::kPrimDynamicShape->name(); + static const auto &kRange = prim::kPrimRange->name(); + static const auto &kConv2DBackpropFilter = prim::kPrimConv2DBackpropFilter->name(); + static const auto &kConv2DBackpropInput = prim::kPrimConv2DBackpropInput->name(); + static const auto &kTile = prim::kPrimTile->name(); + static const auto &kSlice = prim::kPrimSlice->name(); + static const auto &kSliceGrad = prim::kPrimSliceGrad->name(); + static const auto &kReshape = prim::kPrimReshape->name(); + static const auto &kDynamicReshape = prim::kPrimDynamicReshape->name(); + // Common dynamic shape depends. + static const PrimShapeDependMap dynamic_shape_depends{{kUnsortedSegmentSum, ShapeVec{2}}, + {kUnsortedSegmentMin, ShapeVec{2}}, + {kUnsortedSegmentMax, ShapeVec{2}}, + {kGather, ShapeVec{2}}, + {kGatherV2, ShapeVec{2}}, + {kDynamicShape, ShapeVec{0}}, + {kRange, ShapeVec{0, 1, 2}}, + {kConv2DBackpropFilter, ShapeVec{2}}, + {kConv2DBackpropInput, ShapeVec{2}}, + {kOneHot, ShapeVec{1, 3}}, + {kDropoutGenMask, ShapeVec{0}}, + {kStridedSlice, ShapeVec{1, 2, 3}}, + {kStridedSliceGrad, ShapeVec{1, 2, 3, 4}}, + {kTile, ShapeVec{1}}, + {kReshape, ShapeVec{1}}, + {kDynamicReshape, ShapeVec{1}}, + {kSlice, ShapeVec{1, 2}}, + {kSliceGrad, ShapeVec{2, 3}}, + {kDynamicBroadcastTo, ShapeVec{1}}}; MS_EXCEPTION_IF_NULL(cnode); if (cnode->inputs().empty()) { MS_LOG(EXCEPTION) << "Invalid inputs"; } - auto primitive = GetValueNode(cnode->inputs()[0]); + auto primitive = GetValueNode(cnode->input(0)); MS_EXCEPTION_IF_NULL(primitive); - auto iter = dynamic_shape_depends.find(primitive->ToString()); + auto prim_name = primitive->ToString(); + + auto ms_context = MsContext::GetInstance(); + MS_EXCEPTION_IF_NULL(ms_context); + auto device = ms_context->get_param(MS_CTX_DEVICE_TARGET); + // Special dynamic shape depends for Ascend. 
+ if (device == kAscendDevice && (prim_name == kReduceSum || prim_name == kTranspose)) { + return {1}; + } + + auto iter = dynamic_shape_depends.find(prim_name); if (iter != dynamic_shape_depends.end()) { int64_t cnode_input_size = SizeToLong(cnode->inputs().size()); std::vector res; @@ -111,178 +113,180 @@ std::vector GetDependsFormMap(const CNodePtr &cnode) { } PrimitiveEvalImplMap &GetPrimitiveToEvalImplMap() { - static PrimitiveEvalImplMap prim_eval_implement_map = { + using R = PrimitiveEvalImplMap::mapped_type; + static PrimitiveEvalImplMap prim_eval_implement_map{ // Statements - {prim::kPrimReturn, {InferImplReturn, nullptr, true}}, - {prim::kPrimSwitch, {InferImplSwitch, nullptr, true}}, - {prim::kPrimSwitchLayer, {InferImplSwitchLayer, nullptr, true}}, - {prim::kPrimIs_, {InferImplIs_, nullptr, true}}, - {prim::kPrimIsNot, {InferImplIsNot, nullptr, true}}, - {prim::kPrimInDict, {InferImplInDict, nullptr, true}}, - {prim::kPrimNotInDict, {InferImplNotInDict, nullptr, true}}, - {prim::kPrimIsConsant, {InferImplIsConstant, nullptr, true}}, + {prim::kPrimReturn, R{InferImplReturn, nullptr, true}}, + {prim::kPrimSwitch, R{InferImplSwitch, nullptr, true}}, + {prim::kPrimSwitchLayer, R{InferImplSwitchLayer, nullptr, true}}, + {prim::kPrimIs_, R{InferImplIs_, nullptr, true}}, + {prim::kPrimIsNot, R{InferImplIsNot, nullptr, true}}, + {prim::kPrimInDict, R{InferImplInDict, nullptr, true}}, + {prim::kPrimNotInDict, R{InferImplNotInDict, nullptr, true}}, + {prim::kPrimIsConsant, R{InferImplIsConstant, nullptr, true}}, // Maths - {prim::kPrimMatMul, {InferImplMatMul, nullptr, true}}, - {prim::kPrimBatchMatMul, {InferImplBatchMatMul, nullptr, true}}, - {prim::kPrimMaximumGrad, {InferImplMinOrMaxGrad, nullptr, true}}, - {prim::kPrimMinimumGrad, {InferImplMinOrMaxGrad, nullptr, true}}, - {prim::kPrimSqrt, {InferImplSqrt, nullptr, true}}, - {prim::kPrimReal, {InferImplReal, nullptr, true}}, + {prim::kPrimMatMul, R{InferImplMatMul, nullptr, true}}, + {prim::kPrimBatchMatMul, R{InferImplBatchMatMul, nullptr, true}}, + {prim::kPrimMaximumGrad, R{InferImplMinOrMaxGrad, nullptr, true}}, + {prim::kPrimMinimumGrad, R{InferImplMinOrMaxGrad, nullptr, true}}, + {prim::kPrimSqrt, R{InferImplSqrt, nullptr, true}}, + {prim::kPrimReal, R{InferImplReal, nullptr, true}}, // Array - {prim::kPrimRange, {InferImplRange, nullptr, true}}, - {prim::kPrimScalarToArray, {InferImplScalarToArray, nullptr, true}}, - {prim::kPrimArrayToScalar, {InferImplArrayToScalar, nullptr, true}}, - {prim::kPrimBroadcastShape, {InferImplBroadCastShape, nullptr, true}}, - {prim::kPrimUnique, {InferImplUnique, nullptr, true}}, - {prim::kPrimUniqueGrad, {InferImplUniqueGrad, nullptr, true}}, - {prim::kPrimEmbeddingLookup, {InferImplEmbeddingLookup, nullptr, true}}, - {prim::kPrimSparseGatherV2, {InferImplGatherV2, nullptr, true}}, - {prim::kPrimUnsortedSegmentMax, {InferImplUnsortedSegmentMax, nullptr, true}}, - {prim::kPrimUnsortedSegmentMin, {InferImplUnsortedSegmentMin, nullptr, true}}, - {prim::kPrimScatterAdd, {InferImplScatterAdd, nullptr, true}}, - {prim::kPrimScatterSub, {InferImplScatterSub, nullptr, true}}, - {prim::kPrimScatterElements, {InferImplScatterElements, nullptr, true}}, - {prim::kPrimSubAndFilter, {InferImplSubAndFilter, nullptr, true}}, - {prim::kPrimScatterUpdate, {InferImplScatterUpdate, nullptr, true}}, - {prim::kPrimMapCacheIdx, {InferImplMapCacheIdx, nullptr, true}}, - {prim::kPrimDynamicAssign, {InferImplDynamicAssign, nullptr, true}}, - {prim::kPrimCacheSwapTable, {InferImplCacheSwapTable, nullptr, true}}, - 
{prim::kPrimUpdateCache, {InferImplUpdateCache, nullptr, true}}, - {prim::kPrimComputeAccidentalHits, {InferImplComputeAccidentalHits, nullptr, true}}, - {prim::kPrimDynamicStitch, {InferImplDynamicStitch, nullptr, true}}, - {prim::kPrimPadAndShift, {InferImplPadAndShift, nullptr, true}}, - {prim::kPrimDynamicShape, {InferImplDynamicShape, nullptr, true}}, - {prim::kPrimDynamicReshape, {InferImplReshape, nullptr, true}}, - {prim::kPrimMapUniform, {InferImplMapUniform, nullptr, true}}, - {prim::kPrimSplit, {InferImplSplit, nullptr, true}}, - {prim::kPrimSequenceMask, {InferImplSequenceMask, nullptr, true}}, - {prim::kPrimSort, {InferImplSort, nullptr, true}}, - {prim::kPrimMaskedSelect, {InferImplMaskedSelect, nullptr, true}}, - {prim::kPrimTensorCopySlices, {InferImplTensorCopySlices, nullptr, true}}, - {prim::kPrimNonZero, {InferImplNonZero, nullptr, true}}, + {prim::kPrimRange, R{InferImplRange, nullptr, true}}, + {prim::kPrimScalarToArray, R{InferImplScalarToArray, nullptr, true}}, + {prim::kPrimArrayToScalar, R{InferImplArrayToScalar, nullptr, true}}, + {prim::kPrimBroadcastShape, R{InferImplBroadCastShape, nullptr, true}}, + {prim::kPrimUnique, R{InferImplUnique, nullptr, true}}, + {prim::kPrimUniqueGrad, R{InferImplUniqueGrad, nullptr, true}}, + {prim::kPrimEmbeddingLookup, R{InferImplEmbeddingLookup, nullptr, true}}, + {prim::kPrimSparseGatherV2, R{InferImplGatherV2, nullptr, true}}, + {prim::kPrimUnsortedSegmentMax, R{InferImplUnsortedSegmentMax, nullptr, true}}, + {prim::kPrimUnsortedSegmentMin, R{InferImplUnsortedSegmentMin, nullptr, true}}, + {prim::kPrimScatterAdd, R{InferImplScatterAdd, nullptr, true}}, + {prim::kPrimScatterSub, R{InferImplScatterSub, nullptr, true}}, + {prim::kPrimScatterElements, R{InferImplScatterElements, nullptr, true}}, + {prim::kPrimSubAndFilter, R{InferImplSubAndFilter, nullptr, true}}, + {prim::kPrimScatterUpdate, R{InferImplScatterUpdate, nullptr, true}}, + {prim::kPrimMapCacheIdx, R{InferImplMapCacheIdx, nullptr, true}}, + {prim::kPrimDynamicAssign, R{InferImplDynamicAssign, nullptr, true}}, + {prim::kPrimCacheSwapTable, R{InferImplCacheSwapTable, nullptr, true}}, + {prim::kPrimUpdateCache, R{InferImplUpdateCache, nullptr, true}}, + {prim::kPrimComputeAccidentalHits, R{InferImplComputeAccidentalHits, nullptr, true}}, + {prim::kPrimDynamicStitch, R{InferImplDynamicStitch, nullptr, true}}, + {prim::kPrimPadAndShift, R{InferImplPadAndShift, nullptr, true}}, + {prim::kPrimDynamicShape, R{InferImplDynamicShape, nullptr, true}}, + {prim::kPrimDynamicReshape, R{InferImplReshape, nullptr, true}}, + {prim::kPrimMapUniform, R{InferImplMapUniform, nullptr, true}}, + {prim::kPrimSplit, R{InferImplSplit, nullptr, true}}, + {prim::kPrimSequenceMask, R{InferImplSequenceMask, nullptr, true}}, + {prim::kPrimSort, R{InferImplSort, nullptr, true}}, + {prim::kPrimMaskedSelect, R{InferImplMaskedSelect, nullptr, true}}, + {prim::kPrimTensorCopySlices, R{InferImplTensorCopySlices, nullptr, true}}, + {prim::kPrimNonZero, R{InferImplNonZero, nullptr, true}}, // Structure - {prim::kPrimMakeTuple, {InferImplMakeTuple, nullptr, true}}, - {prim::kPrimMakeList, {InferImplMakeList, nullptr, true}}, - {prim::kPrimMakeDict, {InferImplMakeDict, nullptr, true}}, - {prim::kPrimMakeKeywordArg, {InferImplMakeKwarg, nullptr, true}}, - {prim::kPrimExtractKeywordArg, {InferImplExtractKwarg, nullptr, true}}, - {prim::kPrimTupleGetItem, {InferImplTupleGetItem, nullptr, true}}, - {prim::kPrimListGetItem, {InferImplListGetItem, nullptr, true}}, - {prim::kPrimTupleSetItem, 
{InferImplTupleSetItem, nullptr, true}}, - {prim::kPrimListSetItem, {InferImplListSetItem, nullptr, true}}, - {prim::kPrimDictGetItem, {InferImplDictGetItem, nullptr, true}}, - {prim::kPrimDictSetItem, {InferImplDictSetItem, nullptr, true}}, - {prim::kPrimDictGetKeys, {InferImplDictGetKeys, nullptr, true}}, - {prim::kPrimDictGetValues, {InferImplDictGetValues, nullptr, true}}, - {prim::kPrimDictItems, {InferImplDictItems, nullptr, true}}, - {prim::kPrimListAppend, {InferImplListAppend, nullptr, true}}, - {prim::kPrimTupleLen, {InferImplTupleLen, nullptr, true}}, - {prim::kPrimListLen, {InferImplListLen, nullptr, true}}, - {prim::kPrimArrayLen, {InferImplArrayLen, nullptr, true}}, + {prim::kPrimMakeTuple, R{InferImplMakeTuple, nullptr, true}}, + {prim::kPrimMakeList, R{InferImplMakeList, nullptr, true}}, + {prim::kPrimMakeDict, R{InferImplMakeDict, nullptr, true}}, + {prim::kPrimMakeKeywordArg, R{InferImplMakeKwarg, nullptr, true}}, + {prim::kPrimExtractKeywordArg, R{InferImplExtractKwarg, nullptr, true}}, + {prim::kPrimTupleGetItem, R{InferImplTupleGetItem, nullptr, true}}, + {prim::kPrimListGetItem, R{InferImplListGetItem, nullptr, true}}, + {prim::kPrimTupleSetItem, R{InferImplTupleSetItem, nullptr, true}}, + {prim::kPrimListSetItem, R{InferImplListSetItem, nullptr, true}}, + {prim::kPrimDictGetItem, R{InferImplDictGetItem, nullptr, true}}, + {prim::kPrimDictSetItem, R{InferImplDictSetItem, nullptr, true}}, + {prim::kPrimDictGetKeys, R{InferImplDictGetKeys, nullptr, true}}, + {prim::kPrimDictGetValues, R{InferImplDictGetValues, nullptr, true}}, + {prim::kPrimDictItems, R{InferImplDictItems, nullptr, true}}, + {prim::kPrimListAppend, R{InferImplListAppend, nullptr, true}}, + {prim::kPrimTupleLen, R{InferImplTupleLen, nullptr, true}}, + {prim::kPrimListLen, R{InferImplListLen, nullptr, true}}, + {prim::kPrimArrayLen, R{InferImplArrayLen, nullptr, true}}, // NN - {prim::kPrimPooling, {InferImplPooling, nullptr, true}}, - {prim::kPrimPoolingGrad, {InferImplPoolingGrad, nullptr, true}}, - {prim::kPrimBatchNorm, {InferImplBatchNorm, nullptr, true}}, - {prim::kPrimConv2D, {InferImplConv2D, nullptr, true}}, - {prim::kPrimBpropCut, {InferImplBpropCut, nullptr, true}}, - {prim::kPrimDropout, {InferImplDropout, nullptr, true}}, - {prim::kPrimSparseApplyFtrl, {InferImplSparseApplyFtrl, nullptr, true}}, - {prim::kPrimSparseApplyProximalAdagrad, {InferImplSparseApplyProximalAdagrad, nullptr, true}}, - {prim::kPrimSGD, {InferImplSGD, nullptr, true}}, - {prim::kPrimCTCGreedyDecoder, {InferImplCTCGreedyDecoder, nullptr, true}}, - {prim::kPrimHSigmoid, {InferImplHSigmoid, nullptr, true}}, - {prim::kPrimHSigmoidGrad, {InferImplHSigmoidGrad, nullptr, true}}, + {prim::kPrimPooling, R{InferImplPooling, nullptr, true}}, + {prim::kPrimPoolingGrad, R{InferImplPoolingGrad, nullptr, true}}, + {prim::kPrimBatchNorm, R{InferImplBatchNorm, nullptr, true}}, + {prim::kPrimConv2D, R{InferImplConv2D, nullptr, true}}, + {prim::kPrimBpropCut, R{InferImplBpropCut, nullptr, true}}, + {prim::kPrimDropout, R{InferImplDropout, nullptr, true}}, + {prim::kPrimSparseApplyFtrl, R{InferImplSparseApplyFtrl, nullptr, true}}, + {prim::kPrimSparseApplyProximalAdagrad, R{InferImplSparseApplyProximalAdagrad, nullptr, true}}, + {prim::kPrimSGD, R{InferImplSGD, nullptr, true}}, + {prim::kPrimCTCGreedyDecoder, R{InferImplCTCGreedyDecoder, nullptr, true}}, + {prim::kPrimHSigmoid, R{InferImplHSigmoid, nullptr, true}}, + {prim::kPrimHSigmoidGrad, R{InferImplHSigmoidGrad, nullptr, true}}, // Others - {prim::kPrimIdentity, {InferImplIdentity, 
nullptr, true}}, - {prim::kPrimLoad, {InferImplLoad, nullptr, true}}, + {prim::kPrimIdentity, R{InferImplIdentity, nullptr, true}}, + {prim::kPrimLoad, R{InferImplLoad, nullptr, true}}, // Set impl to null as it will use PartialEvaluator; - {prim::kPrimPartial, {nullptr, nullptr, true}}, - {prim::kPrimEnvGetItem, {InferImplEnvGetItem, nullptr, true}}, - {prim::kPrimEnvSetItem, {InferImplEnvSetItem, nullptr, true}}, - {prim::kPrimEnvAdd, {InferImplEnvAdd, nullptr, true}}, - {prim::kPrimMakeRefKey, {InferImplMakeRefKey, nullptr, true}}, - {prim::kPrimMakeRef, {InferImplMakeRef, nullptr, true}}, - {prim::kPrimGetRefKey, {InferImplGetRefKey, nullptr, true}}, - {prim::kPrimGetRefValue, {InferImplGetRefValue, nullptr, true}}, - {prim::kPrimStateSetItem, {InferImplStateSetItem, nullptr, true}}, - {prim::kPrimDepend, {InferImplDepend, nullptr, true}}, - {prim::kPrimUpdateState, {InferImplUpdateState, nullptr, true}}, + {prim::kPrimPartial, R{nullptr, nullptr, true}}, + {prim::kPrimEnvGetItem, R{InferImplEnvGetItem, nullptr, true}}, + {prim::kPrimEnvSetItem, R{InferImplEnvSetItem, nullptr, true}}, + {prim::kPrimEnvAdd, R{InferImplEnvAdd, nullptr, true}}, + {prim::kPrimMakeRefKey, R{InferImplMakeRefKey, nullptr, true}}, + {prim::kPrimMakeRef, R{InferImplMakeRef, nullptr, true}}, + {prim::kPrimGetRefKey, R{InferImplGetRefKey, nullptr, true}}, + {prim::kPrimGetRefValue, R{InferImplGetRefValue, nullptr, true}}, + {prim::kPrimStateSetItem, R{InferImplStateSetItem, nullptr, true}}, + {prim::kPrimDepend, R{InferImplDepend, nullptr, true}}, + {prim::kPrimUpdateState, R{InferImplUpdateState, nullptr, true}}, // Debug - {prim::kPrimDebug, {InferImplDebug, nullptr, true}}, + {prim::kPrimDebug, R{InferImplDebug, nullptr, true}}, // Dynamic shape testing - {prim::kPrimGpuConvertToDynamicShape, {InferImplGpuConvertToDynamicShape, nullptr, true}}, + {prim::kPrimGpuConvertToDynamicShape, R{InferImplGpuConvertToDynamicShape, nullptr, true}}, // SparseTensor - {prim::kPrimMakeSparseTensor, {InferImplMakeSparseTensor, nullptr, true}}, - {prim::kPrimSparseTensorGetValues, {InferImplSparseTensorGetValues, nullptr, true}}, - {prim::kPrimSparseTensorGetIndices, {InferImplSparseTensorGetIndices, nullptr, true}}, - {prim::kPrimSparseTensorGetDenseShape, {InferImplSparseTensorGetDenseShape, nullptr, true}}, + {prim::kPrimMakeSparseTensor, R{InferImplMakeSparseTensor, nullptr, true}}, + {prim::kPrimSparseTensorGetValues, R{InferImplSparseTensorGetValues, nullptr, true}}, + {prim::kPrimSparseTensorGetIndices, R{InferImplSparseTensorGetIndices, nullptr, true}}, + {prim::kPrimSparseTensorGetDenseShape, R{InferImplSparseTensorGetDenseShape, nullptr, true}}, // RowTensor - {prim::kPrimMakeRowTensor, {InferImplMakeRowTensor, nullptr, true}}, - {prim::kPrimRowTensorGetValues, {InferImplRowTensorGetValues, nullptr, true}}, - {prim::kPrimRowTensorGetIndices, {InferImplRowTensorGetIndices, nullptr, true}}, - {prim::kPrimRowTensorGetDenseShape, {InferImplRowTensorGetDenseShape, nullptr, true}}, - {prim::kPrimRowTensorAdd, {InferImplRowTensorAdd, nullptr, false}}, + {prim::kPrimMakeRowTensor, R{InferImplMakeRowTensor, nullptr, true}}, + {prim::kPrimRowTensorGetValues, R{InferImplRowTensorGetValues, nullptr, true}}, + {prim::kPrimRowTensorGetIndices, R{InferImplRowTensorGetIndices, nullptr, true}}, + {prim::kPrimRowTensorGetDenseShape, R{InferImplRowTensorGetDenseShape, nullptr, true}}, + {prim::kPrimRowTensorAdd, R{InferImplRowTensorAdd, nullptr, false}}, // CSRTensor - {prim::kPrimMakeCSRTensor, {InferImplMakeCSRTensor, nullptr, 
true}}, - {prim::kPrimCSRTensorGetValues, {InferImplCSRTensorGetValues, nullptr, true}}, - {prim::kPrimCSRTensorGetIndptr, {InferImplCSRTensorGetIndptr, nullptr, true}}, - {prim::kPrimCSRTensorGetIndices, {InferImplCSRTensorGetIndices, nullptr, true}}, - {prim::kPrimCSRTensorGetDenseShape, {InferImplCSRTensorGetDenseShape, nullptr, true}}, + {prim::kPrimMakeCSRTensor, R{InferImplMakeCSRTensor, nullptr, true}}, + {prim::kPrimCSRTensorGetValues, R{InferImplCSRTensorGetValues, nullptr, true}}, + {prim::kPrimCSRTensorGetIndptr, R{InferImplCSRTensorGetIndptr, nullptr, true}}, + {prim::kPrimCSRTensorGetIndices, R{InferImplCSRTensorGetIndices, nullptr, true}}, + {prim::kPrimCSRTensorGetDenseShape, R{InferImplCSRTensorGetDenseShape, nullptr, true}}, // Comm Ops - {prim::kPrimAllSwap, {InferImplAllSwap, nullptr, true}}, - {prim::kPrimMemCpyAsync, {InferImplMemCpyAsync, nullptr, true}}, - {prim::kPrimFusedPushWeight, {nullptr, nullptr, true}}, - {prim::kPrimFusedPullWeight, {nullptr, nullptr, true}}, + {prim::kPrimAllSwap, R{InferImplAllSwap, nullptr, true}}, + {prim::kPrimMemCpyAsync, R{InferImplMemCpyAsync, nullptr, true}}, + {prim::kPrimFusedPushWeight, R{nullptr, nullptr, true}}, + {prim::kPrimFusedPullWeight, R{nullptr, nullptr, true}}, }; return prim_eval_implement_map; } PrimitiveEvalImplMap &GetPrimitiveToBackendEvalImplMap() { + using R = PrimitiveEvalImplMap::mapped_type; static PrimitiveEvalImplMap prim_backend_eval_implement_map = { - {prim::kPrimMul, {ops::MulInfer, nullptr, true}}, - {prim::kPrimAdd, {ops::AddInfer, nullptr, false}}, - {prim::kPrimSqrtGrad, {InferImplSqrtGrad, nullptr, true}}, - {prim::kPrimSub, {ops::SubInfer, nullptr, false}}, - {prim::kPrimNeg, {ops::NegInfer, nullptr, false}}, - {prim::kPrimTile, {ops::TileInfer, nullptr, true}}, - {prim::kPrimEqual, {ops::EqualInfer, nullptr, true}}, - {prim::kPrimNotEqual, {ops::NotEqualInfer, nullptr, true}}, - {prim::kPrimLog, {ops::LogInfer, nullptr, true}}, - {prim::kPrimReciprocal, {ops::ReciprocalInfer, nullptr, true}}, - {prim::kPrimReduceSum, {ops::ReduceSumInfer, nullptr, true}}, - {prim::kPrimReduceMean, {InferImplReduceFunc, nullptr, true}}, - {prim::kPrimReduceAll, {InferImplReduceFunc, nullptr, true}}, - {prim::kPrimReduceAny, {InferImplReduceFunc, nullptr, true}}, - {prim::kPrimReduceMax, {InferImplReduceFunc, nullptr, true}}, - {prim::kPrimReduceMin, {InferImplReduceFunc, nullptr, true}}, - {prim::kPrimBiasAddGrad, {InferImplBiasAddGrad, nullptr, true}}, - {prim::kPrimReduceScatter, {InferImplReduceScatter, nullptr, true}}, - {prim::kPrimCast, {InferImplCast, nullptr, true}}, - {prim::kPrimExp, {ops::ExpInfer, nullptr, true}}, - {prim::kPrimExpandDims, {InferImplExpandDims, nullptr, true}}, - {prim::kPrimAllReduce, {InferImplAllReduce, nullptr, true}}, - {prim::kPrimBroadcast, {InferImplBroadcast, nullptr, true}}, - {prim::kPrimAllGather, {InferImplAllGather, nullptr, true}}, - {prim::kPrimMinimum, {InferImplMinimum, nullptr, true}}, - {prim::kPrimDivNoNan, {InferImplDivNoNan, nullptr, true}}, - {prim::kPrimLinSpace, {InferImplLinSpace, nullptr, true}}, + {prim::kPrimMul, R{ops::MulInfer, nullptr, true}}, + {prim::kPrimAdd, R{ops::AddInfer, nullptr, false}}, + {prim::kPrimSqrtGrad, R{InferImplSqrtGrad, nullptr, true}}, + {prim::kPrimSub, R{ops::SubInfer, nullptr, false}}, + {prim::kPrimNeg, R{ops::NegInfer, nullptr, false}}, + {prim::kPrimTile, R{ops::TileInfer, nullptr, true}}, + {prim::kPrimEqual, R{ops::EqualInfer, nullptr, true}}, + {prim::kPrimNotEqual, R{ops::NotEqualInfer, nullptr, true}}, + {prim::kPrimLog, 
R{ops::LogInfer, nullptr, true}}, + {prim::kPrimReciprocal, R{ops::ReciprocalInfer, nullptr, true}}, + {prim::kPrimReduceSum, R{ops::ReduceSumInfer, nullptr, true}}, + {prim::kPrimReduceMean, R{InferImplReduceFunc, nullptr, true}}, + {prim::kPrimReduceAll, R{InferImplReduceFunc, nullptr, true}}, + {prim::kPrimReduceAny, R{InferImplReduceFunc, nullptr, true}}, + {prim::kPrimReduceMax, R{InferImplReduceFunc, nullptr, true}}, + {prim::kPrimReduceMin, R{InferImplReduceFunc, nullptr, true}}, + {prim::kPrimBiasAddGrad, R{InferImplBiasAddGrad, nullptr, true}}, + {prim::kPrimReduceScatter, R{InferImplReduceScatter, nullptr, true}}, + {prim::kPrimCast, R{InferImplCast, nullptr, true}}, + {prim::kPrimExp, R{ops::ExpInfer, nullptr, true}}, + {prim::kPrimExpandDims, R{InferImplExpandDims, nullptr, true}}, + {prim::kPrimAllReduce, R{InferImplAllReduce, nullptr, true}}, + {prim::kPrimBroadcast, R{InferImplBroadcast, nullptr, true}}, + {prim::kPrimAllGather, R{InferImplAllGather, nullptr, true}}, + {prim::kPrimMinimum, R{InferImplMinimum, nullptr, true}}, + {prim::kPrimDivNoNan, R{InferImplDivNoNan, nullptr, true}}, + {prim::kPrimLinSpace, R{InferImplLinSpace, nullptr, true}}, - {prim::kPrimLess, {InferImplLess, nullptr, true}}, - {prim::kPrimStack, {InferImplStack, nullptr, true}}, - {prim::kPrimPad, {InferImplPad, nullptr, true}}, - {prim::kPrimUnsortedSegmentSum, {InferImplUnsortedSegmentSum, nullptr, true}}, - {prim::kPrimDiv, {InferImplDiv, nullptr, true}}, - {prim::kPrimRealDiv, {ops::RealDivInfer, nullptr, false}}, - {prim::kPrimTranspose, {InferImplTranspose, nullptr, true}}, - {prim::kPrimStridedSlice, {ops::StridedSliceInfer, nullptr, true}}, - {prim::kPrimSlice, {ops::SliceInfer, nullptr, true}}, - {prim::kPrimSliceGrad, {ops::SliceGradInfer, nullptr, true}}, - {prim::kPrimReshape, {InferImplReshape, nullptr, true}}, - {prim::kPrimConcat, {InferImplConcat, nullptr, true}}, - {prim::kPrimConcatOffset, {InferImplConcatOffset, nullptr, true}}, - {prim::kPrimArgMaxWithValue, {InferImplArgMaxWithValue, nullptr, true}}, - {prim::kPrimFusedSparseAdam, {InferImplFusedSparseAdam, nullptr, true}}, - {prim::kPrimTransData, {InferImplTransData, nullptr, true}}, + {prim::kPrimLess, R{InferImplLess, nullptr, true}}, + {prim::kPrimStack, R{InferImplStack, nullptr, true}}, + {prim::kPrimPad, R{InferImplPad, nullptr, true}}, + {prim::kPrimUnsortedSegmentSum, R{InferImplUnsortedSegmentSum, nullptr, true}}, + {prim::kPrimDiv, R{InferImplDiv, nullptr, true}}, + {prim::kPrimRealDiv, R{ops::RealDivInfer, nullptr, false}}, + {prim::kPrimTranspose, R{InferImplTranspose, nullptr, true}}, + {prim::kPrimStridedSlice, R{ops::StridedSliceInfer, nullptr, true}}, + {prim::kPrimSlice, R{ops::SliceInfer, nullptr, true}}, + {prim::kPrimSliceGrad, R{ops::SliceGradInfer, nullptr, true}}, + {prim::kPrimReshape, R{InferImplReshape, nullptr, true}}, + {prim::kPrimConcat, R{InferImplConcat, nullptr, true}}, + {prim::kPrimConcatOffset, R{InferImplConcatOffset, nullptr, true}}, + {prim::kPrimArgMaxWithValue, R{InferImplArgMaxWithValue, nullptr, true}}, + {prim::kPrimFusedSparseAdam, R{InferImplFusedSparseAdam, nullptr, true}}, + {prim::kPrimTransData, R{InferImplTransData, nullptr, true}}, }; return prim_backend_eval_implement_map; } diff --git a/mindspore/core/abstract/primitive_infer_map.h b/mindspore/core/abstract/primitive_infer_map.h index 15a34c32b0c..bd92bde1996 100644 --- a/mindspore/core/abstract/primitive_infer_map.h +++ b/mindspore/core/abstract/primitive_infer_map.h @@ -1,7 +1,7 @@ /** * This is the C++ adaptation and 
derivative work of Myia (https://github.com/mila-iqia/myia/). * - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -17,9 +17,10 @@ */ #ifndef MINDSPORE_CORE_ABSTRACT_PRIMITIVE_INFER_MAP_H_ #define MINDSPORE_CORE_ABSTRACT_PRIMITIVE_INFER_MAP_H_ -#include + #include #include +#include "utils/hash_map.h" #include "ir/primitive.h" #include "ops/primitive_c.h" #include "base/core_ops.h" @@ -41,7 +42,7 @@ struct StandardPrimitiveImplReg { }; using PrimitiveEvalImplMap = - std::unordered_map; + mindspore::HashMap; PrimitiveEvalImplMap &GetPrimitiveToEvalImplMap(); diff --git a/mindspore/core/api/ir/func_graph_manager.h b/mindspore/core/api/ir/func_graph_manager.h index de3b7c75ba1..6d64112ffa6 100644 --- a/mindspore/core/api/ir/func_graph_manager.h +++ b/mindspore/core/api/ir/func_graph_manager.h @@ -19,10 +19,10 @@ #include #include -#include #include "utils/visible.h" #include "utils/compact_set.h" +#include "utils/hash_map.h" #include "utils/hashing.h" #include "ir/anf.h" @@ -35,7 +35,7 @@ class FuncGraphManager; using FuncGraphManagerPtr = std::shared_ptr; using AnfNodeIndexSet = CompactSet>; -using NodeUsersMap = std::unordered_map>; +using NodeUsersMap = mindspore::HashMap>; /// \brief FuncGraphManager defines interface for function graph management. class MS_CORE_API FuncGraphManager { diff --git a/mindspore/core/base/base.h b/mindspore/core/base/base.h index 1bfddec763a..486541c3d37 100644 --- a/mindspore/core/base/base.h +++ b/mindspore/core/base/base.h @@ -23,7 +23,6 @@ #include #include #include -#include #include #include #include diff --git a/mindspore/core/base/core_ops.h b/mindspore/core/base/core_ops.h index 1ecba43723c..ea2ebdf1e84 100644 --- a/mindspore/core/base/core_ops.h +++ b/mindspore/core/base/core_ops.h @@ -20,15 +20,15 @@ #include #include #include -#include #include "ir/anf.h" #include "ir/primitive.h" #include "utils/flags.h" +#include "utils/hash_map.h" namespace mindspore { namespace prim { inline const ValuePtr kValueOne = std::make_shared(1); -inline const std::unordered_map kSideEffectPropagate = { +inline const mindspore::HashMap kSideEffectPropagate = { {mindspore::GRAPH_FLAG_SIDE_EFFECT_PROPAGATE, kValueOne}, }; diff --git a/mindspore/core/base/user_data.h b/mindspore/core/base/user_data.h index 6912d0767da..45835dc4d80 100644 --- a/mindspore/core/base/user_data.h +++ b/mindspore/core/base/user_data.h @@ -19,7 +19,7 @@ #include #include -#include +#include "utils/hash_map.h" namespace mindspore { class UserData { @@ -45,7 +45,7 @@ class UserData { bool has(const std::string &key) const { return data_.find(key) != data_.end(); } private: - std::map> data_; + mindspore::HashMap> data_; }; } // namespace mindspore diff --git a/mindspore/core/ir/anf.cc b/mindspore/core/ir/anf.cc index b2d659d456f..eda7866f10e 100644 --- a/mindspore/core/ir/anf.cc +++ b/mindspore/core/ir/anf.cc @@ -22,8 +22,8 @@ #include #include #include -#include +#include "utils/hash_map.h" #include "base/core_ops.h" #include "ir/func_graph.h" #include "ir/primitive.h" @@ -373,7 +373,7 @@ size_t NewSeenGeneration() { } namespace id_generator { -static std::unordered_map node_ids; +static mindspore::HashMap node_ids; std::string get_id(const AnfNodePtr &node) { auto type_name = node->type_name(); if (node_ids.find(type_name) == node_ids.end()) { diff --git a/mindspore/core/ir/anf.h b/mindspore/core/ir/anf.h 
index e6ddef161d3..f1489db7ce9 100644 --- a/mindspore/core/ir/anf.h +++ b/mindspore/core/ir/anf.h @@ -23,11 +23,11 @@ #include #include #include -#include -#include #include #include +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "base/base.h" #include "base/user_data.h" #include "base/effect_info.h" @@ -36,6 +36,7 @@ #include "ir/primal_attr.h" #include "ir/primal_debug_info.h" #include "utils/info.h" +#include "utils/hashing.h" #include "utils/ms_utils.h" // A MindSpore ANF IR defined here. @@ -76,7 +77,7 @@ class Primitive; using PrimitivePtr = std::shared_ptr; struct PrimitiveHasher; struct PrimitiveEqual; -using PrimitiveSet = std::unordered_set; +using PrimitiveSet = mindspore::HashSet; class BaseRef; @@ -114,7 +115,6 @@ class MS_CORE_API AnfNode : public Base { intermediate_abstract_(nullptr), debug_info_(std::move(debug_info)), fullname_with_scope_(""), - hash_(std::hash()), scope_(ScopeManager::GetInstance().GetCurrentScope()), kernel_info_(nullptr), interpret_(false), @@ -224,7 +224,7 @@ class MS_CORE_API AnfNode : public Base { /// \return The shape of the element. BaseShapePtr Shape() const; - std::size_t hash() const override { return this->hash_(this); } + std::size_t hash() const final { return PointerHash{}(this); } /// \brief Obtain detailed information about scope namespace. /// @@ -364,7 +364,6 @@ class MS_CORE_API AnfNode : public Base { std::string fullname_with_scope_; private: - std::hash hash_; ScopePtr scope_; KernelInfoDevicePtr kernel_info_; UserData user_data_; @@ -540,8 +539,8 @@ class MS_CORE_API CNode final : public AnfNode, public EffectInfoHolder { /// \brief Get all attributes of this CNode. /// /// \return Attributes of this CNode. - const std::unordered_map &attrs() const { return attrs_; } - void set_attrs(const std::unordered_map &attrs) { + const mindspore::HashMap &attrs() const { return attrs_; } + void set_attrs(const mindspore::HashMap &attrs) { attrs_.insert(attrs.cbegin(), attrs.cend()); } @@ -579,12 +578,12 @@ class MS_CORE_API CNode final : public AnfNode, public EffectInfoHolder { /// \brief Get the primal attributes of this CNode. /// /// \return The primal attributes. - const std::unordered_map &primal_attrs() const { return primal_attrs_; } + const mindspore::HashMap &primal_attrs() const { return primal_attrs_; } /// \brief Set the primal attributes of this CNode. /// /// \param[in] attrs The primal attributes. - void set_primal_attrs(const std::unordered_map &attrs) { + void set_primal_attrs(const mindspore::HashMap &attrs) { primal_attrs_.insert(attrs.cbegin(), attrs.cend()); } @@ -612,7 +611,7 @@ class MS_CORE_API CNode final : public AnfNode, public EffectInfoHolder { /// /// \param[in] name The name of the attribute. /// \return True if it exists, otherwise false. - bool HasPrimalAttr(const std::string &name) const { return primal_attrs_.find(name) != attrs_.cend(); } + bool HasPrimalAttr(const std::string &name) const { return primal_attrs_.find(name) != primal_attrs_.end(); } /// \brief Get primal debug information. 
/// @@ -688,8 +687,8 @@ class MS_CORE_API CNode final : public AnfNode, public EffectInfoHolder { // output_value_ store cnode value and id in pynative mode std::vector> inputs_value_; std::pair output_value_; - std::unordered_map attrs_; - std::unordered_map primal_attrs_; + mindspore::HashMap attrs_; + mindspore::HashMap primal_attrs_; std::vector primal_debug_infos_; std::vector fused_debug_infos_; ssize_t input_tensor_num_ = -1; @@ -1136,7 +1135,7 @@ namespace id_generator { std::string get_id(const AnfNodePtr &node); void reset_id(); } // namespace id_generator -using TaggedNodeMap = std::unordered_map; +using TaggedNodeMap = mindspore::HashMap; using TaggedGraph = std::pair; std::string GetCNodeTarget(const AnfNodePtr &node); std::string GetOriginNodeTarget(const AnfNodePtr &node); diff --git a/mindspore/core/ir/anf_extends.cc b/mindspore/core/ir/anf_extends.cc index fe225325815..aef7cb05f12 100644 --- a/mindspore/core/ir/anf_extends.cc +++ b/mindspore/core/ir/anf_extends.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,8 +19,8 @@ #include #include #include -#include +#include "utils/hash_map.h" #include "ir/visitor.h" #include "ir/func_graph.h" #include "base/core_ops.h" diff --git a/mindspore/core/ir/cell.cc b/mindspore/core/ir/cell.cc index 8cb339f0d9c..d27f10c21bb 100644 --- a/mindspore/core/ir/cell.cc +++ b/mindspore/core/ir/cell.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -43,7 +43,7 @@ bool Cell::operator==(const Cell &other) const { if (attrs_.size() != other.attrs_.size()) { return false; } - auto all = std::all_of(attrs_.begin(), attrs_.end(), [&other](const std::pair &item) -> bool { + auto all = std::all_of(attrs_.begin(), attrs_.end(), [&other](const auto &item) { if (item.second == nullptr) { return false; } diff --git a/mindspore/core/ir/cell.h b/mindspore/core/ir/cell.h index 535c4b1b80b..21e8f985d41 100644 --- a/mindspore/core/ir/cell.h +++ b/mindspore/core/ir/cell.h @@ -17,12 +17,12 @@ #ifndef MINDSPORE_CCSRC_IR_CELL_H_ #define MINDSPORE_CCSRC_IR_CELL_H_ -#include #include #include #include #include +#include "utils/hash_map.h" #include "abstract/abstract_value.h" #include "utils/misc.h" @@ -52,12 +52,12 @@ class MS_CORE_API Cell final : public Named { /// \brief Obtain all attributes of Cell. /// /// \return All attributes of Cell. - const std::unordered_map &attrs() const { return attrs_; } + const mindspore::HashMap &attrs() const { return attrs_; } /// \brief Set the attributes of Cell. /// /// \param[in] attributes Attributes. - void set_attrs(const std::unordered_map &attrs_input) { attrs_ = attrs_input; } + void set_attrs(const mindspore::HashMap &attrs_input) { attrs_ = attrs_input; } /// \brief Add a new attribute. 
/// @@ -110,7 +110,7 @@ class MS_CORE_API Cell final : public Named { ~Cell() override = default; private: - std::unordered_map attrs_; + mindspore::HashMap attrs_; enum MixedPrecisionType mixed_type_ { kNotSet }; }; diff --git a/mindspore/core/ir/dtype.cc b/mindspore/core/ir/dtype.cc index 594cd5c9734..f1993e88e2f 100644 --- a/mindspore/core/ir/dtype.cc +++ b/mindspore/core/ir/dtype.cc @@ -19,6 +19,7 @@ #include #include #include "utils/log_adapter.h" +#include "utils/ms_utils.h" namespace mindspore { TypePtr Keyword::DeepCopy() const { @@ -89,7 +90,8 @@ bool Slice::operator==(const Type &other) const { return false; } auto other_slice = static_cast(other); - return (*start_ == *other_slice.start_ && *stop_ == *other_slice.stop_ && *step_ == *other_slice.step_); + return common::IsEqual(start_, other_slice.start_) && common::IsEqual(stop_, other_slice.stop_) && + common::IsEqual(step_, other_slice.step_); } std::string Slice::DumpText() const { return ToString(); } diff --git a/mindspore/core/ir/dtype.h b/mindspore/core/ir/dtype.h index e9280c07725..1f1f535e9a6 100644 --- a/mindspore/core/ir/dtype.h +++ b/mindspore/core/ir/dtype.h @@ -26,7 +26,6 @@ #include #include #include -#include #include #include "base/base.h" #include "ir/named.h" diff --git a/mindspore/core/ir/dtype/container.cc b/mindspore/core/ir/dtype/container.cc index 6a3147bd007..01f1a245c6b 100644 --- a/mindspore/core/ir/dtype/container.cc +++ b/mindspore/core/ir/dtype/container.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -92,7 +92,7 @@ bool List::operator==(const Type &other) const { } Class::Class(const Named &tag, const ClassAttrVector &attributes, - const std::unordered_map &methods) + const mindspore::HashMap &methods) : Object(kObjectTypeClass, false), attributes_(attributes), tag_(tag), methods_(methods) {} std::string List::DumpContent(bool is_dumptext) const { diff --git a/mindspore/core/ir/dtype/container.h b/mindspore/core/ir/dtype/container.h index 5f7e90e4ff6..2b4818ffdf0 100644 --- a/mindspore/core/ir/dtype/container.h +++ b/mindspore/core/ir/dtype/container.h @@ -27,8 +27,9 @@ #include #include #include -#include #include + +#include "utils/hash_map.h" #include "base/base.h" #include "ir/named.h" #include "ir/dtype/type.h" @@ -101,7 +102,7 @@ class MS_CORE_API Class final : public Object { /// \param[in] tag Define the tag of Class object. /// \param[in] attributes Define the attributes of Class object. /// \param[in] methods Define the methods of Class object. - Class(const Named &tag, const ClassAttrVector &attributes, const std::unordered_map &methods); + Class(const Named &tag, const ClassAttrVector &attributes, const mindspore::HashMap &methods); /// \brief Destructor of Class. ~Class() override {} @@ -116,7 +117,7 @@ class MS_CORE_API Class final : public Object { /// \brief Set attributes value of Class object. /// /// \param[in] v Define the attributes value to be set. - void set_value(const std::unordered_map &v) { attributes_value_ = v; } + void set_value(const mindspore::HashMap &v) { attributes_value_ = v; } /// \brief Get the tag of Class object. /// @@ -126,12 +127,12 @@ class MS_CORE_API Class final : public Object { /// \brief Get the value of Class object. /// /// \return The attributes value of Class object. 
- std::unordered_map GetValue() { return attributes_value_; } + mindspore::HashMap GetValue() { return attributes_value_; } /// \brief Get the methods of Class object. /// /// \return The methods of Class object. - std::unordered_map methods() { return methods_; } + mindspore::HashMap methods() { return methods_; } /// \brief Get the attributes of Class object. /// @@ -147,9 +148,9 @@ class MS_CORE_API Class final : public Object { /// \return The description of the Class object. std::string DumpContent(bool is_dumptext) const; Named tag_; - std::unordered_map methods_; + mindspore::HashMap methods_; // For AbstractClass build value - std::unordered_map attributes_value_; + mindspore::HashMap attributes_value_; }; using ClassPtr = std::shared_ptr; diff --git a/mindspore/core/ir/dtype/empty.h b/mindspore/core/ir/dtype/empty.h index a2b9689c427..327f4f37c4a 100644 --- a/mindspore/core/ir/dtype/empty.h +++ b/mindspore/core/ir/dtype/empty.h @@ -27,8 +27,9 @@ #include #include #include -#include #include + +#include "utils/hash_map.h" #include "base/base.h" #include "ir/named.h" #include "ir/dtype/type.h" diff --git a/mindspore/core/ir/dtype/number.h b/mindspore/core/ir/dtype/number.h index 07ed3fe31b1..33930ae6b2f 100644 --- a/mindspore/core/ir/dtype/number.h +++ b/mindspore/core/ir/dtype/number.h @@ -27,8 +27,9 @@ #include #include #include -#include #include + +#include "utils/hash_map.h" #include "base/base.h" #include "ir/named.h" #include "ir/dtype/type.h" diff --git a/mindspore/core/ir/dtype/tensor_type.h b/mindspore/core/ir/dtype/tensor_type.h index fe0ead2c043..50be84f7d23 100644 --- a/mindspore/core/ir/dtype/tensor_type.h +++ b/mindspore/core/ir/dtype/tensor_type.h @@ -27,8 +27,9 @@ #include #include #include -#include #include + +#include "utils/hash_map.h" #include "base/base.h" #include "ir/named.h" #include "ir/dtype/type.h" diff --git a/mindspore/core/ir/dtype/type.cc b/mindspore/core/ir/dtype/type.cc index 0e1c0f79ba6..c73e2a97199 100644 --- a/mindspore/core/ir/dtype/type.cc +++ b/mindspore/core/ir/dtype/type.cc @@ -1,7 +1,7 @@ /** * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/). * - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
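The Slice::operator== hunk a little further up (mindspore/core/ir/dtype.cc) is one of the genuine bug fixes in this patch: the old code dereferenced start_/stop_/step_ unconditionally, which crashes when a member is null, and dtype.cc now pulls in "utils/ms_utils.h" for common::IsEqual. A sketch of the assumed IsEqual semantics; the real helper may differ in detail:

#include <memory>

// Two shared_ptr members compare equal when both are null, or when both
// are non-null and the pointees compare equal.
template <typename T>
bool IsEqual(const std::shared_ptr<T> &a, const std::shared_ptr<T> &b) {
  if (a == b) {  // same object, or both null
    return true;
  }
  if (a == nullptr || b == nullptr) {
    return false;
  }
  return *a == *b;  // safe: both sides checked above
}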
@@ -29,7 +29,7 @@ namespace mindspore { #define MS_TYPE2LABLE(type_id) #type_id -static std::unordered_map g_type_2_lable{ +static mindspore::HashMap g_type_2_lable{ {kTypeUnknown, MS_TYPE2LABLE(kTypeUnknown)}, {kMetaTypeType, MS_TYPE2LABLE(kMetaTypeType)}, {kMetaTypeAnything, MS_TYPE2LABLE(kMetaTypeAnything)}, diff --git a/mindspore/core/ir/dtype/type.h b/mindspore/core/ir/dtype/type.h index cd9eed97588..44e1a281950 100644 --- a/mindspore/core/ir/dtype/type.h +++ b/mindspore/core/ir/dtype/type.h @@ -29,9 +29,9 @@ #include #include #include -#include #include +#include "utils/hash_map.h" #include "base/base.h" #include "ir/named.h" #include "ir/dtype/type_id.h" @@ -236,12 +236,12 @@ class MS_CORE_API Object : public Type { // // TypeId name map // -const std::unordered_map type_name_map = { +const mindspore::HashMap type_name_map = { {kNumberTypeBool, "bool_"}, {kNumberTypeInt8, "int8"}, {kNumberTypeUInt8, "uint8"}, {kNumberTypeInt16, "int16"}, {kNumberTypeInt32, "int32"}, {kNumberTypeInt64, "int64"}, {kNumberTypeFloat16, "float16"}, {kNumberTypeFloat32, "float32"}, {kNumberTypeFloat64, "float64"}}; -const std::unordered_map type_priority_map = { +const mindspore::HashMap type_priority_map = { {kNumberTypeBool, 0}, {kNumberTypeUInt8, 1}, {kNumberTypeInt8, 2}, {kNumberTypeInt16, 3}, {kNumberTypeInt32, 4}, {kNumberTypeInt64, 5}, {kNumberTypeFloat16, 6}, {kNumberTypeFloat32, 7}, {kNumberTypeFloat64, 8}}; diff --git a/mindspore/core/ir/dtype_extends.cc b/mindspore/core/ir/dtype_extends.cc index d75e68bf6be..21a083b7444 100644 --- a/mindspore/core/ir/dtype_extends.cc +++ b/mindspore/core/ir/dtype_extends.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
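The CNode::HasPrimalAttr fix earlier in this patch (mindspore/core/ir/anf.h) also deserves a closer look: the old body compared an iterator obtained from primal_attrs_ against attrs_.cend(), that is, against a different container's end. That comparison is undefined behaviour which std::unordered_map's iterators often let slip through unnoticed, but robin-hood's iterators do not. A minimal illustration with stand-in types:

#include <string>

#include "utils/hash_map.h"

using AttrMap = mindspore::HashMap<std::string, int>;

bool HasPrimalAttr(const AttrMap &primal_attrs, const AttrMap &attrs,
                   const std::string &name) {
  (void)attrs;  // kept only to mirror the two-map shape of the original bug
  // Wrong: primal_attrs.find(name) != attrs.end()  (mismatched containers)
  return primal_attrs.find(name) != primal_attrs.end();
}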
@@ -61,7 +61,7 @@ bool TypeListEqual::operator()(TypePtrList const &lhs, TypePtrList const &rhs) c } TypePtr TypeIdToType(TypeId id) { - static std::unordered_map type_id_to_type = { + static mindspore::HashMap type_id_to_type = { {kNumberTypeFloat16, kFloat16}, {kNumberTypeFloat, kFloat32}, {kNumberTypeFloat32, kFloat32}, {kNumberTypeFloat64, kFloat64}, {kNumberTypeComplex64, kComplex64}, {kNumberTypeInt8, kInt8}, {kNumberTypeInt16, kInt16}, {kNumberTypeInt32, kInt32}, {kNumberTypeInt, kInt32}, diff --git a/mindspore/core/ir/func_graph.cc b/mindspore/core/ir/func_graph.cc index 52deb6752ac..9bf15c57962 100644 --- a/mindspore/core/ir/func_graph.cc +++ b/mindspore/core/ir/func_graph.cc @@ -440,7 +440,7 @@ void FuncGraph::DropFuncGraphCNodeIndex(const CNodeIndexPairPtr &pair) { } } -const std::unordered_map &FuncGraph::j_value_nodes() const { return j_value_nodes_; } +const mindspore::HashMap &FuncGraph::j_value_nodes() const { return j_value_nodes_; } void FuncGraph::CopyJValueNodes(const FuncGraphPtr &source) { MS_EXCEPTION_IF_NULL(source); diff --git a/mindspore/core/ir/func_graph.h b/mindspore/core/ir/func_graph.h index 0f1b2b15655..d318d335dd5 100644 --- a/mindspore/core/ir/func_graph.h +++ b/mindspore/core/ir/func_graph.h @@ -25,11 +25,12 @@ #include #include #include -#include -#include #include #include +#include +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "ir/anf.h" #include "ir/manager.h" #include "utils/ordered_set.h" @@ -238,8 +239,8 @@ class FuncGraph : public api::FuncGraph, public FuncGraphBase, public EffectInfo void set_is_bprop(bool is_brop) { is_bprop_ = is_brop; } bool is_bprop() const { return is_bprop_; } - std::unordered_map &attrs() { return attrs_; } - void set_attrs(const std::unordered_map &attrs) { + mindspore::HashMap &attrs() { return attrs_; } + void set_attrs(const mindspore::HashMap &attrs) { for (auto &attr : attrs) { attrs_[attr.first] = attr.second; } @@ -252,8 +253,8 @@ class FuncGraph : public api::FuncGraph, public FuncGraphBase, public EffectInfo ValuePtr get_attr(const std::string &key) const final; void set_attr(const std::string &key, const ValuePtr &value) final { attrs_[key] = value; } - std::unordered_map &transforms() { return transforms_; } - void set_transforms(const std::unordered_map &transforms) { + mindspore::HashMap &transforms() { return transforms_; } + void set_transforms(const mindspore::HashMap &transforms) { transforms_ = transforms; } @@ -312,7 +313,7 @@ class FuncGraph : public api::FuncGraph, public FuncGraphBase, public EffectInfo bool DropFuncGraphUsed(const FuncGraphPtr &fg); // Get all value nodes in the inputs of J directly used by this func graph. 
- const std::unordered_map &j_value_nodes() const; + const mindspore::HashMap &j_value_nodes() const; void CopyJValueNodes(const FuncGraphPtr &source); void ClearJValueNodes(); void AddJValueNode(const AnfNodePtr &value_node, int count = 1); @@ -356,22 +357,22 @@ class FuncGraph : public api::FuncGraph, public FuncGraphBase, public EffectInfo } void GenerateVarParams(const FuncGraphPtr &specialized_graph, int variable_args_count, int pos_args_input_count, std::vector *specialized_parameter_list, - std::unordered_map *repl_nodes) const; + mindspore::HashMap *repl_nodes) const; void GenerateKwParams(const FuncGraphPtr &specialized_graph, const std::vector &kwarg_list, std::vector *specialized_parameter_list, - std::unordered_map *repl_nodes) const; + mindspore::HashMap *repl_nodes) const; void GenerateDefaultValue(const FuncGraphPtr &specialized_graph, const std::vector &specialized_parameter_list, - std::unordered_map *repl_nodes) const; + mindspore::HashMap *repl_nodes) const; const std::vector ¶mter_obj_nodes() const { return paramter_obj_nodes_; } void add_parameter_obj_node(const AnfNodePtr &p) { paramter_obj_nodes_.push_back(p); } - std::unordered_map attrs_; - std::unordered_map transforms_; + mindspore::HashMap attrs_; + mindspore::HashMap transforms_; // Parameter default value. std::map parameter_default_value_; size_t seen_; @@ -415,7 +416,7 @@ class FuncGraph : public api::FuncGraph, public FuncGraphBase, public EffectInfo bool modify_output() const { return modify_output_; } void set_modify_output(bool modify_output) { modify_output_ = modify_output; } - const std::unordered_set &used_forward_nodes() const { return used_forward_nodes_; } + const mindspore::HashSet &used_forward_nodes() const { return used_forward_nodes_; } void set_used_forward_nodes(const std::vector &used_forward_nodes); void ClearUsedForwardNodes() { used_forward_nodes_.clear(); } @@ -443,7 +444,7 @@ class FuncGraph : public api::FuncGraph, public FuncGraphBase, public EffectInfo AnfNodeCounterMap free_variables_; // All value nodes calling J in the function. - std::unordered_map j_value_nodes_; + mindspore::HashMap j_value_nodes_; // All user value nodes of this func graph, recording by CNode and its input's index. CNodeIndexCounterMap func_graph_cnodes_index_; @@ -483,7 +484,7 @@ class FuncGraph : public api::FuncGraph, public FuncGraphBase, public EffectInfo void GenerateKwargReplNode(const FuncGraphPtr &specialized_graph, const std::vector &kwarg_keys_tuple_nodes, const std::vector &kwarg_values_tuple_nodes, - std::unordered_map *repl_nodes) const; + mindspore::HashMap *repl_nodes) const; // CNode order which relates to origin code order. OrderedSet order_; @@ -507,7 +508,7 @@ class FuncGraph : public api::FuncGraph, public FuncGraphBase, public EffectInfo // If the graph is decorated by @ms_function and runs grad process in pynative mode, // forward nodes used in grad graph will be added to output for holding output values. 
bool modify_output_ = false; - std::unordered_set used_forward_nodes_; + mindspore::HashSet used_forward_nodes_; }; inline CNodePtr NewCNode(const std::vector &inputs, const FuncGraphPtr &fg) { diff --git a/mindspore/core/ir/func_graph_cloner.cc b/mindspore/core/ir/func_graph_cloner.cc index 0bc03063f8a..2d2bfb27d6a 100644 --- a/mindspore/core/ir/func_graph_cloner.cc +++ b/mindspore/core/ir/func_graph_cloner.cc @@ -319,7 +319,7 @@ ParameterPtr Cloner::AddParameter(const FuncGraphPtr &func_graph, const AnfNodeP void Cloner::AddParameters(const FuncGraphPtr &func_graph, const AnfNodePtrList ¶ms, AnfNodePtrList *const lift_params, AnfNodePtrList *const input_params) { AnfNodePtrList parameters; - std::unordered_set old_params; + mindspore::HashSet old_params; for (auto ¶m : func_graph->parameters()) { auto iter = repl_node_.find(param); if (iter != repl_node_.end()) { @@ -457,11 +457,11 @@ void Cloner::AddInputs(const FuncGraphPtr &func_graph_user, const FuncGraphPtr & } void Cloner::OrderParameters(const FuncGraphPtr &func_graph, const AnfNodePtrList &inputs, size_t arg_start_index) { - std::unordered_set old_params; + mindspore::HashSet old_params; for (auto ¶m : func_graph->parameters()) { (void)old_params.insert(repl_node_[param]); } - std::unordered_set new_params; + mindspore::HashSet new_params; AnfNodePtrList parameters; // Ignore the 1st and 2nd param of inputs(such as. partial graph) for (size_t i = arg_start_index; i < inputs.size(); ++i) { diff --git a/mindspore/core/ir/func_graph_cloner.h b/mindspore/core/ir/func_graph_cloner.h index 9ac6db1e614..a16cf2c7261 100644 --- a/mindspore/core/ir/func_graph_cloner.h +++ b/mindspore/core/ir/func_graph_cloner.h @@ -21,11 +21,11 @@ #include #include #include -#include -#include #include #include +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "ir/anf.h" #include "ir/func_graph.h" #include "ir/manager.h" @@ -34,7 +34,7 @@ namespace mindspore { class Cloner; using ClonerPtr = std::shared_ptr; -using NodeToNodeMap = std::unordered_map>; +using NodeToNodeMap = mindspore::HashMap>; enum CloneType { kBasic = 0, kInline = 1, kLifting = 2, kDropping = 3 }; @@ -62,7 +62,7 @@ class Cloner { // Map of replicate nodes and graphs const NodeToNodeMap &cloned_nodes() const { return repl_node_; } - const std::unordered_map &cloned_func_graphs() const { return repl_func_graph_; } + const mindspore::HashMap &cloned_func_graphs() const { return repl_func_graph_; } // Scope of cloned graphs void set_scope(const ScopePtr &scope) { scope_ = scope; } @@ -107,16 +107,16 @@ class Cloner { TraceInfoPtr relation_; TraceInfoPtr target_relation_; NodeToNodeMap repl_node_; - std::unordered_map repl_func_graph_; + mindspore::HashMap repl_func_graph_; FuncGraphManagerPtr manager_; FuncGraphSet graph_set_; ScopePtr scope_; CloneType type_; std::vector todo_; - std::unordered_map status_; - std::unordered_map repl_map_node_; - std::unordered_map> repl_map_func_graph_; - std::unordered_map repl_func_graph_params_; + mindspore::HashMap status_; + mindspore::HashMap repl_map_node_; + mindspore::HashMap> repl_map_func_graph_; + mindspore::HashMap repl_func_graph_params_; }; AnfNodePtr InlineClone(const FuncGraphPtr &func_graph, const FuncGraphPtr &target_func_graph, diff --git a/mindspore/core/ir/func_graph_extends.cc b/mindspore/core/ir/func_graph_extends.cc index dc33666cc0f..9c3f3a15141 100644 --- a/mindspore/core/ir/func_graph_extends.cc +++ b/mindspore/core/ir/func_graph_extends.cc @@ -1,7 +1,7 @@ /** * This is the C++ adaptation and derivative work of 
Myia (https://github.com/mila-iqia/myia/). * - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -84,7 +84,7 @@ void FuncGraph::DumpFuncGraph(const std::string &path) { void FuncGraph::GenerateVarParams(const FuncGraphPtr &specialized_graph, int variable_args_count, int pos_args_input_count, std::vector *specialized_parameter_list, - std::unordered_map *repl_nodes) const { + mindspore::HashMap *repl_nodes) const { // if there is variable argument, pass the input arguments that does not match positional args to it as a tuple MS_EXCEPTION_IF_NULL(specialized_graph); if (specialized_graph->has_vararg()) { @@ -120,7 +120,7 @@ void FuncGraph::GenerateVarParams(const FuncGraphPtr &specialized_graph, int var void FuncGraph::GenerateKwParams(const FuncGraphPtr &specialized_graph, const std::vector &kwarg_list, std::vector *specialized_parameter_list, - std::unordered_map *repl_nodes) const { + mindspore::HashMap *repl_nodes) const { std::vector kwarg_keys_tuple_nodes = {NewValueNode(prim::kPrimMakeTuple)}; std::vector kwarg_values_tuple_nodes = {NewValueNode(prim::kPrimMakeTuple)}; @@ -176,7 +176,7 @@ void FuncGraph::GenerateKwParams(const FuncGraphPtr &specialized_graph, void FuncGraph::GenerateKwargReplNode(const FuncGraphPtr &specialized_graph, const std::vector &kwarg_keys_tuple_nodes, const std::vector &kwarg_values_tuple_nodes, - std::unordered_map *repl_nodes) const { + mindspore::HashMap *repl_nodes) const { if (has_kwarg()) { MS_EXCEPTION_IF_NULL(specialized_graph); TraceGuard guard( @@ -203,7 +203,7 @@ bool FuncGraph::NeedGenerate(const std::vector void FuncGraph::GenerateDefaultValue(const FuncGraphPtr &specialized_graph, const std::vector &specialized_parameter_list, - std::unordered_map *repl_nodes) const { + mindspore::HashMap *repl_nodes) const { MS_EXCEPTION_IF_NULL(specialized_graph); for (size_t i = 0; i < specialized_graph->parameters().size() - hyper_param_count(); ++i) { MS_EXCEPTION_IF_NULL(specialized_graph->parameters()[i]); @@ -256,7 +256,7 @@ FuncGraphPtr FuncGraph::GenerateGraph(const AbstractBasePtrList &args_spec_list) int pos_args_count = std::min(pos_args_input_count, this->GetPositionalArgsCount()); int variable_args_count = pos_args_input_count - pos_args_count; std::vector specialized_parameter_list; - std::unordered_map repl_nodes; + mindspore::HashMap repl_nodes; // the parameters that has arg input, copy from original parameters for (size_t i = 0; i < IntToSize(pos_args_count); ++i) { specialized_parameter_list.push_back(specialized_graph->parameters()[i]); diff --git a/mindspore/core/ir/graph_utils.cc b/mindspore/core/ir/graph_utils.cc index cc999a717e2..d458485bc08 100644 --- a/mindspore/core/ir/graph_utils.cc +++ b/mindspore/core/ir/graph_utils.cc @@ -1,7 +1,7 @@ /** * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/). * - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
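The manager.cc hunks just below, like the Cell::operator== hunk earlier, rewrite std::find_if/std::all_of lambdas to take const auto & instead of an explicit std::pair parameter. The reason is that robin_hood::unordered_map's value_type is robin_hood::pair<Key, T>, not std::pair<const Key, T>, so a lambda spelled against std::pair no longer binds; a generic lambda works with either container. A small sketch with illustrative types:

#include <algorithm>
#include <string>

#include "utils/hash_map.h"

bool AnyNonZero(const mindspore::HashMap<std::string, int> &counts) {
  // const auto & binds to robin_hood::pair and std::pair alike.
  return std::any_of(counts.begin(), counts.end(),
                     [](const auto &item) { return item.second != 0; });
}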
@@ -18,8 +18,6 @@ #include "ir/graph_utils.h" -#include -#include #include #include #include @@ -29,6 +27,8 @@ #include #include +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "ir/func_graph.h" #include "utils/log_adapter.h" #include "utils/ms_context.h" diff --git a/mindspore/core/ir/graph_utils.h b/mindspore/core/ir/graph_utils.h index 1750b8843a3..83cd52b5f13 100644 --- a/mindspore/core/ir/graph_utils.h +++ b/mindspore/core/ir/graph_utils.h @@ -1,7 +1,7 @@ /** * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/). * - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,8 +19,6 @@ #ifndef MINDSPORE_CORE_IR_GRAPH_UTILS_H_ #define MINDSPORE_CORE_IR_GRAPH_UTILS_H_ -#include -#include #include #include #include @@ -29,6 +27,8 @@ #include #include +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "ir/anf.h" #include "ir/primitive.h" #include "ir/scalar.h" diff --git a/mindspore/core/ir/graph_utils_extends.cc b/mindspore/core/ir/graph_utils_extends.cc index f106d7f64a3..fc511af2144 100644 --- a/mindspore/core/ir/graph_utils_extends.cc +++ b/mindspore/core/ir/graph_utils_extends.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,8 +16,6 @@ #include "ir/graph_utils.h" -#include -#include #include #include #include @@ -25,6 +23,8 @@ #include #include +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "ir/visitor.h" #include "ir/manager.h" #include "ir/func_graph.h" diff --git a/mindspore/core/ir/manager.cc b/mindspore/core/ir/manager.cc index 96c5598a1d9..0926459eb8b 100644 --- a/mindspore/core/ir/manager.cc +++ b/mindspore/core/ir/manager.cc @@ -843,7 +843,7 @@ void DepComputer::Recompute(const FuncGraphPtr &fg) { } FuncGraphSetPtr FuncGraphParentsTotalComputer::SeekParents( - const FuncGraphPtr &fg, std::unordered_map *seen_fgs) { + const FuncGraphPtr &fg, mindspore::HashMap *seen_fgs) { auto iter = seen_fgs->find(fg); if (iter != seen_fgs->end()) { return iter->second; @@ -874,7 +874,7 @@ FuncGraphSetPtr FuncGraphParentsTotalComputer::SeekParents( void FuncGraphParentsTotalComputer::RealRecompute(FuncGraphPtr fg) { MS_EXCEPTION_IF_NULL(fg); - std::unordered_map seen_fgs; + mindspore::HashMap seen_fgs; fg->seen_ = 1; func_graph_parents_total_analysis_[fg].update(SeekParents(fg, &seen_fgs)); fg->seen_ = 0; @@ -1065,20 +1065,19 @@ bool FuncGraphJTotalComputer::SeekJ(const FuncGraphPtr &fg, size_t seen_num) { // Check J FuncGraph input. const auto &j_values = fg->j_value_nodes(); if (!j_values.empty()) { - auto contains_j = - std::find_if(j_values.begin(), j_values.end(), [seen_num](const std::pair &iter) { - // Check g1->J(fg)->g2->g cycle. - if (IsValueNode(iter.first)) { - auto func_graph = GetValueNode(iter.first); - return func_graph->seen_ != seen_num; - } - if (IsValueNode(iter.first)) { - // Exclude the primitive of J itself. - auto prim = GetValueNode(iter.first); - return prim->name() != prim::kPrimJ->name(); - } - return false; - }); + auto contains_j = std::find_if(j_values.begin(), j_values.end(), [seen_num](const auto &iter) { + // Check g1->J(fg)->g2->g cycle. 
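The include churn above is the mechanical core of the whole patch: drop the standard headers, pull in "utils/hash_map.h" / "utils/hash_set.h", and switch the container type. A minimal before/after sketch (string/int arguments chosen for illustration, not taken from the patch):

  // Before:
  //   #include <unordered_map>
  //   std::unordered_map<std::string, int> counts;
  // After:
  #include <string>
  #include "utils/hash_map.h"

  mindspore::HashMap<std::string, int> counts;  // robin_hood-backed when ENABLE_FAST_HASH_TABLE is on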
+ if (IsValueNode(iter.first)) { + auto func_graph = GetValueNode(iter.first); + return func_graph->seen_ != seen_num; + } + if (IsValueNode(iter.first)) { + // Exclude the primitive of J itself. + auto prim = GetValueNode(iter.first); + return prim->name() != prim::kPrimJ->name(); + } + return false; + }); if (contains_j != j_values.end()) { MS_LOG(DEBUG) << fg->ToString() << " contains J(" << contains_j->first->DebugString() << ")"; return true; @@ -1088,14 +1087,13 @@ bool FuncGraphJTotalComputer::SeekJ(const FuncGraphPtr &fg, size_t seen_num) { // Check J CNode as FV. const auto &fv_nodes = fg->free_variables(); if (!fv_nodes.empty()) { - auto contains_j_cnode = - std::find_if(fv_nodes.begin(), fv_nodes.end(), [seen_num](const std::pair &iter) { - // Check if the FV is a J call CNode. - if (IsPrimitiveCNode(iter.first, prim::kPrimJ)) { - return true; - } - return false; - }); + auto contains_j_cnode = std::find_if(fv_nodes.begin(), fv_nodes.end(), [seen_num](const auto &iter) { + // Check if the FV is a J call CNode. + if (IsPrimitiveCNode(iter.first, prim::kPrimJ)) { + return true; + } + return false; + }); if (contains_j_cnode != fv_nodes.end()) { MS_LOG(DEBUG) << fg->ToString() << " contains FV J(" << contains_j_cnode->first->DebugString() << ")"; return true; diff --git a/mindspore/core/ir/manager.h b/mindspore/core/ir/manager.h index fdd61abcce0..6e7f5f536f0 100644 --- a/mindspore/core/ir/manager.h +++ b/mindspore/core/ir/manager.h @@ -19,8 +19,6 @@ #ifndef MINDSPORE_CORE_IR_MANAGER_H_ #define MINDSPORE_CORE_IR_MANAGER_H_ -#include -#include #include #include #include @@ -33,6 +31,8 @@ #include "utils/any.h" #include "utils/misc.h" #include "utils/signal.h" +#include "utils/hash_map.h" +#include "utils/hash_set.h" #include "utils/ordered_set.h" #include "utils/ordered_map.h" #include "ir/anf.h" @@ -137,7 +137,7 @@ class FuncGraphParentsTotalComputer final : public DepComputer { void RealRecompute(FuncGraphPtr fg) override; private: - FuncGraphSetPtr SeekParents(const FuncGraphPtr &fg, std::unordered_map *seen_fgs); + FuncGraphSetPtr SeekParents(const FuncGraphPtr &fg, mindspore::HashMap *seen_fgs); }; using FuncGraphToFuncGraphMap = OrderedMap; diff --git a/mindspore/core/ir/meta_func_graph.h b/mindspore/core/ir/meta_func_graph.h index 09adc801a1c..ce40ebfc131 100644 --- a/mindspore/core/ir/meta_func_graph.h +++ b/mindspore/core/ir/meta_func_graph.h @@ -1,7 +1,7 @@ /** * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/). * - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
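The two find_if rewrites above replace an explicit const std::pair<...> & lambda parameter with const auto &. The presumed reason: robin_hood's map entries are not guaranteed to be std::pair<const K, V>, so a generic lambda compiles against either backing container. The pattern, reduced to a sketch:

  #include <algorithm>
  #include <string>
  #include "utils/hash_map.h"

  bool HasNonZeroValue(const mindspore::HashMap<std::string, int> &m) {
    // const auto & adapts to whatever entry type the container exposes.
    return std::find_if(m.begin(), m.end(),
                        [](const auto &item) { return item.second != 0; }) != m.end();
  }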
@@ -19,12 +19,12 @@ #ifndef MINDSPORE_CORE_IR_META_FUNC_GRAPH_H_ #define MINDSPORE_CORE_IR_META_FUNC_GRAPH_H_ -#include #include #include #include #include #include +#include #include "ir/dtype.h" #include "ir/anf.h" diff --git a/mindspore/core/ir/primal_attr.h b/mindspore/core/ir/primal_attr.h index ffb4b676569..cded329be11 100644 --- a/mindspore/core/ir/primal_attr.h +++ b/mindspore/core/ir/primal_attr.h @@ -19,7 +19,7 @@ #include #include #include -#include +#include "utils/hash_map.h" #include "ir/anf.h" namespace mindspore { @@ -35,20 +35,20 @@ class PrimalAttrManager { PrimalAttrManager(const PrimalAttrManager &) = delete; PrimalAttrManager &operator=(const PrimalAttrManager &) = delete; ~PrimalAttrManager() = default; - void SetPrimalAttr(const std::unordered_map &primal_attrs) { primal_attrs_ = primal_attrs; } + void SetPrimalAttr(const mindspore::HashMap &primal_attrs) { primal_attrs_ = primal_attrs; } void ClearPrimalAttr() { primal_attrs_.clear(); } - std::unordered_map GetCurrentPrimalAttr() { return primal_attrs_; } + mindspore::HashMap GetCurrentPrimalAttr() { return primal_attrs_; } private: PrimalAttrManager() = default; - std::unordered_map primal_attrs_; + mindspore::HashMap primal_attrs_; }; // PrimalAttrGuard is a class that help generate the back propagation cnode // with specified primal attrs in the current c++ action scope. class PrimalAttrGuard { public: - explicit PrimalAttrGuard(const std::unordered_map &primal_attrs) { + explicit PrimalAttrGuard(const mindspore::HashMap &primal_attrs) { PrimalAttrManager::GetInstance().SetPrimalAttr(primal_attrs); } ~PrimalAttrGuard() { PrimalAttrManager::GetInstance().ClearPrimalAttr(); } diff --git a/mindspore/core/ir/primal_debug_info.h b/mindspore/core/ir/primal_debug_info.h index 932e4217045..f237fb47cc3 100644 --- a/mindspore/core/ir/primal_debug_info.h +++ b/mindspore/core/ir/primal_debug_info.h @@ -20,7 +20,7 @@ #include #include #include -#include +#include "utils/hash_map.h" #include "utils/info.h" namespace mindspore { diff --git a/mindspore/core/ir/primitive.cc b/mindspore/core/ir/primitive.cc index b21b3ec507b..7bc8c8aa462 100644 --- a/mindspore/core/ir/primitive.cc +++ b/mindspore/core/ir/primitive.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
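PrimalAttrGuard above now takes the HashMap alias directly. A short usage sketch; the attribute name and value are hypothetical, only the guard mechanics come from the code above:

  mindspore::HashMap<std::string, ValuePtr> primal_attrs;
  (void)primal_attrs.emplace("recompute", MakeValue(true));  // hypothetical attribute
  {
    PrimalAttrGuard guard(primal_attrs);  // SetPrimalAttr for this scope
    // ... build backprop cnodes that pick up the primal attrs ...
  }  // ~PrimalAttrGuard() runs ClearPrimalAttr()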
@@ -35,7 +35,7 @@ Primitive::Primitive(const std::string &name, const bool is_base, const PrimType is_const_prim_(false), id_(MakeId()) {} -Primitive::Primitive(const std::string &name, const std::unordered_map &attrs) +Primitive::Primitive(const std::string &name, const mindspore::HashMap &attrs) : Named(name), attrs_(attrs), is_base_(true), @@ -76,7 +76,7 @@ bool Primitive::operator==(const Primitive &other) const { if (attrs_.size() != other.attrs_.size()) { return false; } - auto all = std::all_of(attrs_.begin(), attrs_.end(), [&other](const std::pair &item) -> bool { + auto all = std::all_of(attrs_.begin(), attrs_.end(), [&other](const auto &item) { if (item.second == nullptr) { return false; } diff --git a/mindspore/core/ir/primitive.h b/mindspore/core/ir/primitive.h index bc720de6200..598111d433c 100644 --- a/mindspore/core/ir/primitive.h +++ b/mindspore/core/ir/primitive.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -17,12 +17,12 @@ #ifndef MINDSPORE_CORE_IR_PRIMITIVE_H_ #define MINDSPORE_CORE_IR_PRIMITIVE_H_ -#include #include #include #include #include +#include "utils/hash_map.h" #include "ir/dtype/type.h" #include "abstract/abstract_value.h" #include "base/base_ref.h" @@ -46,7 +46,7 @@ class MS_CORE_API Primitive : public Named { /// \param[in] is_base True means the basic Primitive without BProp function inside. /// \param[in] prim_type The type of primitive. explicit Primitive(const std::string &name, const bool is_base = true, const PrimType prim_type = kPrimTypeBuiltIn); - Primitive(const std::string &name, const std::unordered_map &attrs); + Primitive(const std::string &name, const mindspore::HashMap &attrs); /// \brief The constructor for Primitive, create a primitive for another primitive. /// /// \param[in] prim The input primitive. @@ -88,7 +88,7 @@ class MS_CORE_API Primitive : public Named { /// /// \param[in] attrs The attribute map needs to be added in the primitive attribute. /// \return The primitive to which attribute has been added. - Primitive &SetAttrs(const std::unordered_map &attrs) { + Primitive &SetAttrs(const mindspore::HashMap &attrs) { for (auto &attr : attrs) { attrs_[attr.first] = attr.second; } @@ -114,15 +114,15 @@ class MS_CORE_API Primitive : public Named { /// \brief Get Primitive's all attributes. /// /// \return The Primitive's all attribute. - const std::unordered_map &attrs() const { return attrs_; } + const mindspore::HashMap &attrs() const { return attrs_; } /// \brief Get the attributes added in MindSpore renormalize stage. /// /// \return Attributes which have been added in MindSpore renormalize stage. - const std::unordered_map &evaluate_added_attrs() const { return evaluate_added_attrs_; } + const mindspore::HashMap &evaluate_added_attrs() const { return evaluate_added_attrs_; } /// \brief Use add attribute using a map,all elements of the map will be added in the primitive's attribute map. /// /// \param[in] attrs The attribute map needs to be added in the primitive attribute. 
- void set_evaluate_added_attrs(const std::unordered_map &attrs) { + void set_evaluate_added_attrs(const mindspore::HashMap &attrs) { for (auto &attr : attrs) { MS_LOG(DEBUG) << " set evalu attrl " << name() << attr.first; attrs_[attr.first] = attr.second; @@ -220,8 +220,8 @@ class MS_CORE_API Primitive : public Named { uint64_t id() const { return id_; } protected: - std::unordered_map attrs_; - std::unordered_map evaluate_added_attrs_; + mindspore::HashMap attrs_; + mindspore::HashMap evaluate_added_attrs_; private: std::string instance_name_; diff --git a/mindspore/core/load_mindir/anf_model_parser.cc b/mindspore/core/load_mindir/anf_model_parser.cc index 9b1f440fd53..2f354847df4 100644 --- a/mindspore/core/load_mindir/anf_model_parser.cc +++ b/mindspore/core/load_mindir/anf_model_parser.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -22,13 +22,13 @@ #include #include #include -#include #include #include #include "ir/tensor.h" #include "ir/param_info.h" #include "ops/primitive_c.h" #include "abstract/abstract_value.h" +#include "utils/hash_map.h" #include "utils/log_adapter.h" #include "utils/shape_utils.h" #include "utils/check_convert_utils.h" @@ -59,7 +59,7 @@ static std::map kParseTypeSwitchMap{ {"type", FORM_PARSE_TYPE}, {"scalar", FORM_PARSE_SCALAR}, {"tensor", FORM_PARSE_TENSOR}, {"none", FORM_PARSE_NONE}, {"Monad", FORM_PARSE_MONAD}, {"", FORM_PARSE_UNDEFINE}}; -static std::unordered_map kDefaultValueSwitchMap{ +static mindspore::HashMap kDefaultValueSwitchMap{ {mind_ir::TensorProto_DataType_BOOL, kNumberTypeBool}, {mind_ir::TensorProto_DataType_INT8, kNumberTypeInt8}, {mind_ir::TensorProto_DataType_INT16, kNumberTypeInt16}, @@ -77,7 +77,7 @@ static std::unordered_map kDefaultValueSwitchMap{ }; template -std::shared_ptr ParserAttr(const std::string &str, const std::unordered_map &kv) { +std::shared_ptr ParserAttr(const std::string &str, const mindspore::HashMap &kv) { std::stack rules; std::stack
value; int count = 0; @@ -124,7 +124,7 @@ std::shared_ptr ParserAttr(const std::string &str, const std::unordered_map -std::shared_ptr ParserScalarAttrValue(const std::string &attr_name, const std::unordered_map &kv) { +std::shared_ptr ParserScalarAttrValue(const std::string &attr_name, const mindspore::HashMap &kv) { std::string str = attr_name; auto replace = [&](const string &orgStr, const string &newStr) { std::string::size_type pos(0); @@ -144,7 +144,7 @@ std::shared_ptr ParserScalarAttrValue(const std::string &attr_name, const std } std::shared_ptr ParserAttrShape( - const std::string &attr_name, const std::unordered_map &kv) { + const std::string &attr_name, const mindspore::HashMap &kv) { std::string str = attr_name; auto replace = [&](const string &orgStr, const string &newStr) { std::string::size_type pos(0); @@ -522,13 +522,13 @@ ValuePtr MSANFModelParser::ParseAttrInScalarForm(const mind_ir::AttributeProto & } void MSANFModelParser::ObtainCNodeAttrInScalarForm(const mind_ir::AttributeProto &attr_proto, - std::unordered_map *multi_value_map) { + mindspore::HashMap *multi_value_map) { string name; auto func = [&name, &multi_value_map, this](const mind_ir::AttributeProto &attr_proto, int length) -> void { for (int i = 0; i < length; ++i) { auto res = this->ParseAttrInScalarForm(attr_proto, i); name = "value" + std::to_string(i + 1); - multi_value_map->insert(std::pair(name, res)); + (void)multi_value_map->emplace(name, res); } }; func(attr_proto, attr_proto.ints_size()); @@ -617,7 +617,7 @@ bool MSANFModelParser::GetAttrValueForCNode(const PrimitivePtr &prim, const mind std::size_t pos(0); string type = GetTypeString(ref_attr_name, &pos); - std::unordered_map multi_value_map; + mindspore::HashMap multi_value_map; switch (kParseTypeSwitchMap[type]) { case FORM_PARSE_TYPE: { ObtainCNodeAttrInTypeForm(prim, attr_proto); @@ -797,7 +797,7 @@ bool MSANFModelParser::GetAttrValueForValueNode(const std::string &value_node_na } ValueNodePtr new_value_node; - std::unordered_map multi_value_map; + mindspore::HashMap multi_value_map; switch (kParseTypeSwitchMap[type]) { case FORM_PARSE_TYPE: { ObtainValueNodeInTypeForm(value_node_name, attr_proto.tensors(0)); @@ -870,9 +870,9 @@ bool MSANFModelParser::BuildValueNodeForFuncGraph(const mind_ir::NodeProto &node return GetAttrValueForValueNode(value_node_name, attr_proto); } -std::unordered_map MSANFModelParser::GetAbstractForCNode( +mindspore::HashMap MSANFModelParser::GetAbstractForCNode( const mind_ir::AttributeProto &attr_proto) { - std::unordered_map kv; + mindspore::HashMap kv; for (int i = 0; i < attr_proto.tensors_size(); ++i) { ShapeVector shape_vec; const mind_ir::TensorProto &attr_tensor = attr_proto.tensors(i); @@ -887,11 +887,11 @@ std::unordered_map MSANFModelParser::Get auto abs_ref_key = ref_key->ToAbstract(); auto abs_value = tensor_info->ToAbstract()->Broaden()->cast(); auto abs_ref = std::make_shared(abs_ref_key, abs_value); - kv.insert(std::pair(attr_tensor.name(), abs_ref)); + (void)kv.emplace(attr_tensor.name(), abs_ref); } else { auto abstract = tensor_info->ToAbstract(); MS_EXCEPTION_IF_NULL(abstract); - kv.insert(std::pair(attr_tensor.name(), abstract)); + (void)kv.emplace(attr_tensor.name(), abstract); } } return kv; @@ -1009,7 +1009,7 @@ void MSANFModelParser::SetCNodeAbstract(const mind_ir::NodeProto &node_proto, CN return; } - std::unordered_map kv; + mindspore::HashMap kv; string shape_ref_attr_name; bool is_tuple_or_list = false; @@ -1030,7 +1030,7 @@ void MSANFModelParser::SetCNodeAbstract(const mind_ir::NodeProto 
&node_proto, CN if (kv.size() == 0) { SetEmptyTensorProtoCNodeAbstract(cnode_ptr, node_type); } else if (kv.size() == 1 && !is_tuple_or_list) { - std::unordered_map::iterator iter = kv.begin(); + auto iter = kv.begin(); if (iter->second != nullptr) { iter->second->set_value(kAnyValue); cnode_ptr->set_abstract(iter->second); diff --git a/mindspore/core/load_mindir/anf_model_parser.h b/mindspore/core/load_mindir/anf_model_parser.h index 2f098f4c030..5855fff0076 100644 --- a/mindspore/core/load_mindir/anf_model_parser.h +++ b/mindspore/core/load_mindir/anf_model_parser.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,8 +19,8 @@ #include #include -#include #include +#include "utils/hash_map.h" #include "google/protobuf/io/zero_copy_stream_impl.h" #include "ir/func_graph.h" #include "proto/mind_ir.pb.h" @@ -65,7 +65,7 @@ class MSANFModelParser { bool GetAttrValueForCNode(const PrimitivePtr &prim, const mind_ir::AttributeProto &attr_proto); bool ObtainCNodeAttrInTypeForm(const PrimitivePtr &prim, const mind_ir::AttributeProto &attr_proto); void ObtainCNodeAttrInScalarForm(const mind_ir::AttributeProto &attr_proto, - std::unordered_map *multi_value_map); + mindspore::HashMap *multi_value_map); ValuePtr ParseAttrInScalarForm(const mind_ir::AttributeProto &attr_proto, int index); ValuePtr ObtainCNodeAttrInSingleScalarForm(const mind_ir::AttributeProto &attr_proto); bool ObtainCNodeAttrInTensorForm(const PrimitivePtr &prim, const mind_ir::AttributeProto &attr_proto); @@ -81,7 +81,7 @@ class MSANFModelParser { bool ObtainValueNodeInNoneForm(const std::string &value_node_name, const mind_ir::AttributeProto &attr_proto); bool ObtainValueNodeInMonadForm(const std::string &value_node_name, const mind_ir::AttributeProto &attr_proto); bool little_endian() { return little_endian_; } - std::unordered_map GetAbstractForCNode( + mindspore::HashMap GetAbstractForCNode( const mind_ir::AttributeProto &attr_proto); AnfNodePtr GetAnfNode(const std::string &node_name); @@ -92,7 +92,7 @@ class MSANFModelParser { bool is_lite_ = false; bool inc_load_ = false; bool need_renormalize_ = true; - std::unordered_map anfnode_build_map_; + mindspore::HashMap anfnode_build_map_; std::string mindir_path_; const unsigned char *mindir_dec_key_{nullptr}; size_t mindir_key_size_; diff --git a/mindspore/core/mindrt/include/actor/op_actor.h b/mindspore/core/mindrt/include/actor/op_actor.h index 760f7134f1d..469dfe16152 100644 --- a/mindspore/core/mindrt/include/actor/op_actor.h +++ b/mindspore/core/mindrt/include/actor/op_actor.h @@ -21,7 +21,7 @@ #include #include #include -#include +#include "utils/hash_map.h" #include "actor/actor.h" #include "async/uuid_base.h" #include "async/future.h" @@ -110,11 +110,11 @@ class OpActor : public ActorBase { protected: // The op data. - std::unordered_map *>> input_op_datas_; + mindspore::HashMap *>> input_op_datas_; std::vector output_data_arrows_; // The op controls. 
- std::unordered_map> input_op_controls_; + mindspore::HashMap> input_op_controls_; std::vector output_control_arrows_; }; diff --git a/mindspore/core/mindrt/include/actor/switch_actor.h b/mindspore/core/mindrt/include/actor/switch_actor.h index c64a792ecc6..61ce15929a5 100644 --- a/mindspore/core/mindrt/include/actor/switch_actor.h +++ b/mindspore/core/mindrt/include/actor/switch_actor.h @@ -19,7 +19,7 @@ #include #include -#include +#include "utils/hash_map.h" #include "actor/actor.h" #include "actor/op_actor.h" diff --git a/mindspore/core/ops/custom.h b/mindspore/core/ops/custom.h index 4010a5c9f2a..b27adfa5f15 100644 --- a/mindspore/core/ops/custom.h +++ b/mindspore/core/ops/custom.h @@ -21,7 +21,6 @@ #include #include #include -#include #include "ops/primitive_c.h" #include "ops/op_utils.h" #include "ir/anf.h" diff --git a/mindspore/core/utils/counter.h b/mindspore/core/utils/counter.h index 70905120b40..1d71ab67f8d 100644 --- a/mindspore/core/utils/counter.h +++ b/mindspore/core/utils/counter.h @@ -20,8 +20,8 @@ #include #include #include -#include #include +#include "utils/hash_map.h" #include "utils/ordered_map.h" namespace mindspore { @@ -41,7 +41,7 @@ class Counter { struct KeyEqual { bool operator()(const key_type lhs, const key_type rhs) const noexcept { return Equal{}(*lhs, *rhs); } }; - using map_type = std::unordered_map; + using map_type = mindspore::HashMap; public: Counter() = default; diff --git a/mindspore/core/utils/hash_map.h b/mindspore/core/utils/hash_map.h new file mode 100644 index 00000000000..150d5243944 --- /dev/null +++ b/mindspore/core/utils/hash_map.h @@ -0,0 +1,38 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_UTILS_HASH_MAP_H_ +#define MINDSPORE_CORE_UTILS_HASH_MAP_H_ + +#include +#if (ENABLE_FAST_HASH_TABLE) && __has_include("robin_hood/robin_hood.h") +#include "robin_hood/robin_hood.h" + +namespace mindspore { +template , typename KeyEqual = std::equal_to> +using HashMap = robin_hood::unordered_map; + +#else +#include + +namespace mindspore { +template , typename KeyEqual = std::equal_to> +using HashMap = std::unordered_map; + +#endif +} // namespace mindspore + +#endif // MINDSPORE_CORE_UTILS_HASH_MAP_H_ diff --git a/mindspore/core/utils/hash_set.h b/mindspore/core/utils/hash_set.h new file mode 100644 index 00000000000..fbdf3a15907 --- /dev/null +++ b/mindspore/core/utils/hash_set.h @@ -0,0 +1,38 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CORE_UTILS_HASH_SET_H_ +#define MINDSPORE_CORE_UTILS_HASH_SET_H_ + +#include +#if (ENABLE_FAST_HASH_TABLE) && __has_include("robin_hood/robin_hood.h") +#include "robin_hood/robin_hood.h" + +namespace mindspore { +template , typename Equal = std::equal_to> +using HashSet = robin_hood::unordered_set; + +#else +#include + +namespace mindspore { +template , typename Equal = std::equal_to> +using HashSet = std::unordered_set; + +#endif +} // namespace mindspore + +#endif // MINDSPORE_CORE_UTILS_HASH_SET_H_ diff --git a/mindspore/core/utils/interpret_node_recorder.h b/mindspore/core/utils/interpret_node_recorder.h index 75646ca5579..fe7813baecc 100644 --- a/mindspore/core/utils/interpret_node_recorder.h +++ b/mindspore/core/utils/interpret_node_recorder.h @@ -17,7 +17,6 @@ #ifndef MINDSPORE_CORE_UTILS_InterpretNodeRecorder_H_ #define MINDSPORE_CORE_UTILS_InterpretNodeRecorder_H_ -#include #include namespace mindspore { @@ -34,7 +33,7 @@ class InterpretNodeRecorder { void PushLineInfo(const std::string &line) { interpret_nodes_lines_.emplace(line); } - const std::unordered_set &LineInfos() const { return interpret_nodes_lines_; } + const mindspore::HashSet &LineInfos() const { return interpret_nodes_lines_; } void Clear() { interpret_nodes_lines_.clear(); } @@ -43,7 +42,7 @@ class InterpretNodeRecorder { virtual ~InterpretNodeRecorder() = default; private: - std::unordered_set interpret_nodes_lines_; + mindspore::HashSet interpret_nodes_lines_; }; } // namespace mindspore #endif // MINDSPORE_CORE_UTILS_InterpretNodeRecorder_H_ diff --git a/mindspore/core/utils/label.h b/mindspore/core/utils/label.h index 21f3493c367..58c79fbbfc1 100644 --- a/mindspore/core/utils/label.h +++ b/mindspore/core/utils/label.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,8 +19,8 @@ #include #include #include -#include #include +#include "utils/hash_map.h" #include "ir/anf.h" namespace mindspore { diff --git a/mindspore/core/utils/ms_utils.h b/mindspore/core/utils/ms_utils.h index 25c21b9bb68..b9e4630a166 100644 --- a/mindspore/core/utils/ms_utils.h +++ b/mindspore/core/utils/ms_utils.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
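Like hash_map.h, the header above degrades gracefully: if ENABLE_FAST_HASH_TABLE is off or the robin_hood header is missing, mindspore::HashSet is plain std::unordered_set, so call sites compile either way. A minimal usage sketch:

  #include <string>
  #include "utils/hash_set.h"

  mindspore::HashSet<std::string> seen;
  (void)seen.emplace("node_a");
  bool duplicate = !seen.emplace("node_a").second;  // insertion result is false on a duplicate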
@@ -89,6 +89,17 @@ static inline bool IsLittleByteOrder() { } return false; } + +template +bool IsEqual(const std::shared_ptr &a, const std::shared_ptr &b) { + if (a == b) { + return true; + } + if (a == nullptr || b == nullptr) { + return false; + } + return *a == *b; +} } // namespace common } // namespace mindspore diff --git a/mindspore/core/utils/ordered_map.h b/mindspore/core/utils/ordered_map.h index 00e498c60bb..9d1287cd829 100644 --- a/mindspore/core/utils/ordered_map.h +++ b/mindspore/core/utils/ordered_map.h @@ -18,16 +18,16 @@ #define MINDSPORE_CORE_UTILS_ORDERED_MAP_H_ #include -#include #include #include #include #include #include "utils/hashing.h" +#include "utils/hash_map.h" namespace mindspore { // Implementation of OrderedMap that keeps insertion order -// using unordered_map to improve the performance of find/erase, and use list to keep insertion order +// using hash map to improve the performance of find/erase, and use list to keep insertion order template , class Equal = std::equal_to> class OrderedMap { using key_ptr_t = const KeyT *; @@ -47,7 +47,7 @@ class OrderedMap { using const_iterator = typename sequential_type::const_iterator; using reverse_iterator = typename sequential_type::reverse_iterator; using const_reverse_iterator = typename sequential_type::const_reverse_iterator; - using map_type = std::unordered_map; + using map_type = mindspore::HashMap; using value_type = typename sequential_type::value_type; using size_type = typename sequential_type::size_type; @@ -221,7 +221,7 @@ class OrderedMap, ValueT> { using const_iterator = typename sequential_type::const_iterator; using reverse_iterator = typename sequential_type::reverse_iterator; using const_reverse_iterator = typename sequential_type::const_reverse_iterator; - using map_type = std::unordered_map; + using map_type = mindspore::HashMap; using value_type = typename sequential_type::value_type; using size_type = typename sequential_type::size_type; diff --git a/mindspore/core/utils/ordered_set.h b/mindspore/core/utils/ordered_set.h index 6e30bcffbbe..c6b1ced2899 100644 --- a/mindspore/core/utils/ordered_set.h +++ b/mindspore/core/utils/ordered_set.h @@ -18,13 +18,13 @@ #define MINDSPORE_CORE_UTILS_ORDERED_SET_H_ #include -#include #include #include #include #include #include #include "utils/hashing.h" +#include "utils/hash_map.h" namespace mindspore { // Implementation of OrderedSet that keeps insertion order @@ -41,7 +41,7 @@ class OrderedSet { using const_iterator = typename sequential_type::const_iterator; using reverse_iterator = typename sequential_type::reverse_iterator; using const_reverse_iterator = typename sequential_type::const_reverse_iterator; - using map_type = std::unordered_map; + using map_type = mindspore::HashMap; using ordered_set_type = OrderedSet; OrderedSet() = default; @@ -309,7 +309,7 @@ class OrderedSet> { using const_iterator = typename sequential_type::const_iterator; using reverse_iterator = typename sequential_type::reverse_iterator; using const_reverse_iterator = typename sequential_type::const_reverse_iterator; - using map_type = std::unordered_map; + using map_type = mindspore::HashMap; using ordered_set_type = OrderedSet>; OrderedSet() = default; diff --git a/mindspore/core/utils/overload.h b/mindspore/core/utils/overload.h index baeb1346977..aa23b88f1c7 100644 --- a/mindspore/core/utils/overload.h +++ b/mindspore/core/utils/overload.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed 
under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -22,10 +22,10 @@ #include #include #include -#include #include #include #include +#include "utils/hash_map.h" namespace mindspore { template @@ -99,8 +99,8 @@ std::ostream &operator<<(std::ostream &os, const std::pair &pair) { } template -std::ostream &operator<<(std::ostream &os, const std::unordered_map &map) { - os << "[const unordered_map]"; +std::ostream &operator<<(std::ostream &os, const mindspore::HashMap &map) { + os << "[const hash_map]"; return os; } @@ -119,7 +119,7 @@ std::string ToString(const std::vector &vec) { } template -std::string ToString(const std::unordered_map &map) { +std::string ToString(const mindspore::HashMap &map) { std::ostringstream buffer; buffer << map; diff --git a/mindspore/core/utils/symbolic.cc b/mindspore/core/utils/symbolic.cc index 4bc20ff617e..412d34e030d 100644 --- a/mindspore/core/utils/symbolic.cc +++ b/mindspore/core/utils/symbolic.cc @@ -1,7 +1,7 @@ /** * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/). * - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -35,10 +35,9 @@ bool EnvInstance::operator==(const EnvInstance &other) const { if (Len() != other.Len()) { return false; } - bool equal = std::all_of(contents_.begin(), contents_.end(), - [&other](const std::pair &item) -> bool { - return other.contents_.find(item.first) != other.contents_.end(); - }); + bool equal = std::all_of(contents_.begin(), contents_.end(), [&other](const auto &item) { + return other.contents_.find(item.first) != other.contents_.end(); + }); return equal; } bool EnvInstance::operator==(const Value &other) const { diff --git a/mindspore/core/utils/symbolic.h b/mindspore/core/utils/symbolic.h index d6cd7c0bce3..2e4d2505483 100644 --- a/mindspore/core/utils/symbolic.h +++ b/mindspore/core/utils/symbolic.h @@ -1,7 +1,7 @@ /** * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/). * - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,12 +19,12 @@ #ifndef MINDSPORE_CORE_UTILS_SYMBOLIC_H_ #define MINDSPORE_CORE_UTILS_SYMBOLIC_H_ -#include #include #include #include #include +#include "utils/hash_map.h" #include "ir/anf.h" #include "abstract/abstract_value.h" @@ -98,7 +98,7 @@ struct SymbolicKeyInstanceEqual { }; using EnvInstanceContentsMap = - std::unordered_map; + mindspore::HashMap; // Environment mapping keys to values. 
// Keys are SymbolicKeyInstances, which represent nodes in the graph along diff --git a/mindspore/lite/cmake/file_list.cmake b/mindspore/lite/cmake/file_list.cmake index 3c7bff25935..f66c93a62f3 100644 --- a/mindspore/lite/cmake/file_list.cmake +++ b/mindspore/lite/cmake/file_list.cmake @@ -54,6 +54,8 @@ set(UTILS_HEADER ${CORE_DIR}/utils/convert_utils_base.h ${CORE_DIR}/utils/flags.h ${CORE_DIR}/utils/hashing.h + ${CORE_DIR}/utils/hash_map.h + ${CORE_DIR}/utils/hash_set.h ${CORE_DIR}/utils/info.h ${CORE_DIR}/utils/label.h ${CORE_DIR}/utils/log_adapter.h @@ -67,4 +69,4 @@ set(UTILS_HEADER ${CORE_DIR}/utils/signal.h ${CORE_DIR}/utils/trace_info.h ${CORE_DIR}/utils/visible.h - ) \ No newline at end of file + ) diff --git a/scripts/build/build_mindspore.sh b/scripts/build/build_mindspore.sh index ecee047a97e..b6260cf3ef8 100755 --- a/scripts/build/build_mindspore.sh +++ b/scripts/build/build_mindspore.sh @@ -101,6 +101,11 @@ build_mindspore() if [[ "X$ENABLE_TRT" == "Xon" ]]; then CMAKE_ARGS="${CMAKE_ARGS} -DTENSORRT_HOME=${TENSORRT_HOME}" fi + if [[ "X$ENABLE_FAST_HASH_TABLE" == "Xon" ]]; then + CMAKE_ARGS="${CMAKE_ARGS} -DENABLE_FAST_HASH_TABLE=ON" + else + CMAKE_ARGS="${CMAKE_ARGS} -DENABLE_FAST_HASH_TABLE=OFF" + fi echo "${CMAKE_ARGS}" if [[ "X$INC_BUILD" = "Xoff" ]]; then cmake ${CMAKE_ARGS} ${BASEPATH} diff --git a/scripts/build/default_options.sh b/scripts/build/default_options.sh index f2bfb38b794..a5a3b41d0db 100755 --- a/scripts/build/default_options.sh +++ b/scripts/build/default_options.sh @@ -63,4 +63,5 @@ init_default_options() export USER_ENABLE_DUMP_IR=false export USER_ENABLE_DEBUGGER=false export ENABLE_SYM_FILE="off" + export ENABLE_FAST_HASH_TABLE="on" } diff --git a/scripts/build/option_proc_mindspore.sh b/scripts/build/option_proc_mindspore.sh index 2c517308f17..3211a1f32f1 100755 --- a/scripts/build/option_proc_mindspore.sh +++ b/scripts/build/option_proc_mindspore.sh @@ -73,6 +73,12 @@ build_option_proc_upper_s() echo "enable download from gitee" } +build_option_proc_upper_f() +{ + check_on_off $OPTARG F + export ENABLE_FAST_HASH_TABLE="$OPTARG" +} + build_option_proc_z() { eval ARG=\$\{$OPTIND\} @@ -86,4 +92,4 @@ build_option_proc_z() if [[ "X$OPTARG" == "Xoff" ]]; then export COMPILE_MINDDATA="off" fi -} \ No newline at end of file +} diff --git a/scripts/build/process_options.sh b/scripts/build/process_options.sh index 8a486c963a2..8330498b9e4 100755 --- a/scripts/build/process_options.sh +++ b/scripts/build/process_options.sh @@ -20,7 +20,7 @@ set -e process_options() { # Process the options - while getopts 'drvj:c:t:hb:s:a:g:p:ie:m:l:I:RP:D:zM:V:K:B:En:A:S:k:W:H:L:y' opt + while getopts 'drvj:c:t:hb:s:a:g:p:ie:m:l:I:RP:D:zM:V:K:B:En:A:S:k:W:F:H:L:y' opt do CASE_SENSIVE_ARG=${OPTARG} OPTARG=$(echo ${OPTARG} | tr '[A-Z]' '[a-z]') @@ -97,6 +97,8 @@ process_options() build_option_proc_upper_a ;; W) build_option_proc_upper_w ;; + F) + build_option_proc_upper_f ;; H) check_on_off $OPTARG H export ENABLE_HIDDEN="$OPTARG" @@ -111,4 +113,4 @@ process_options() exit 1 esac done -} \ No newline at end of file +} diff --git a/scripts/build/usage.sh b/scripts/build/usage.sh index e50eab2aaca..8b844c93ae1 100755 --- a/scripts/build/usage.sh +++ b/scripts/build/usage.sh @@ -24,7 +24,7 @@ usage() echo " [-P on|off] [-z [on|off]] [-M on|off] [-V 10.1|11.1|310|910] [-I arm64|arm32|x86_64] [-K on|off] \\" echo " [-B on|off] [-E] [-l on|off] [-n full|lite|off] [-H on|off] \\" echo " [-A on|off] [-S on|off] [-k on|off] [-W sse|neon|avx|avx512|off] \\" - echo " [-L Tensor-RT path] [-y 
on|off] \\" + echo " [-L Tensor-RT path] [-y on|off] [-F on|off] \\" echo "" echo "Options:" echo " -d Debug mode" @@ -62,4 +62,5 @@ usage() echo " -H Enable hidden" echo " -L Link and specify Tensor-RT library path, default disable Tensor-RT lib linking" echo " -y Compile the symbol table switch and save the symbol table to the directory output" + echo " -F Use fast hash table in mindspore compiler, default on" } diff --git a/tests/ut/cpp/abstract/abstract_test.cc b/tests/ut/cpp/abstract/abstract_test.cc index 59f9bf0c0d5..5cfd19c6b77 100644 --- a/tests/ut/cpp/abstract/abstract_test.cc +++ b/tests/ut/cpp/abstract/abstract_test.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -65,7 +65,7 @@ TEST_F(TestAbstract, TestParseDataClass) { } } - std::unordered_map methods = cls->methods(); + auto methods = cls->methods(); ASSERT_EQ(methods.size(), 4); int counts = 0; for (auto &v : methods) { diff --git a/tests/ut/cpp/operator/ops_test.cc b/tests/ut/cpp/operator/ops_test.cc index f3dc363879b..22d77698789 100644 --- a/tests/ut/cpp/operator/ops_test.cc +++ b/tests/ut/cpp/operator/ops_test.cc @@ -432,7 +432,7 @@ TEST_F(TestOps, GetConv2DPrimPyTest) { py::object conv2d_pyobj = parse::python_adapter::GetPyFn("gtest_input.pynative", "conv2d_prim"); py::dict opAttrs = py::getattr(conv2d_pyobj, "attrs"); - std::unordered_map attrs{}; + mindspore::HashMap attrs{}; for (auto item : opAttrs) { if (!py::isinstance(item.first)) { MS_LOG(EXCEPTION) << "type error in py dict convert"; diff --git a/tests/ut/cpp/optimizer/clean_test.cc b/tests/ut/cpp/optimizer/clean_test.cc index 545d1e4d398..58e38461de2 100644 --- a/tests/ut/cpp/optimizer/clean_test.cc +++ b/tests/ut/cpp/optimizer/clean_test.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
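The test updates here ride on the same rewrite seen in anf_model_parser.cc above: insert(std::pair(key, value)) becomes emplace(key, value). Reduced to a sketch with illustrative types:

  #include <string>
  #include "utils/hash_map.h"

  mindspore::HashMap<std::string, int> m;
  // Before: m.insert(std::pair<std::string, int>("k", 1));
  (void)m.emplace("k", 1);  // constructs the entry in place; (void) drops the <iterator, bool> result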
@@ -106,7 +106,7 @@ TEST_F(TestClean, TestEraseClassGetAttr) { if (IsValueNode(input0)) { std::vector attr = {{"x", std::make_shared(kFloat64)}, {"y", std::make_shared(kFloat64)}}; - std::unordered_map methods; + mindspore::HashMap methods; AbstractBasePtr abs_ptr = std::make_shared(Named("Point"), attr, methods); node->set_abstract(abs_ptr); } @@ -141,7 +141,7 @@ TEST_F(TestClean, TestEraseClassMakeRecord) { para2->set_abstract(std::make_shared(kAnyValue, kInt64)); std::vector attr = {{"x", std::make_shared(kAnyValue, kInt64)}, {"y", std::make_shared(kAnyValue, kInt64)}}; - std::unordered_map methods; + mindspore::HashMap methods; AbstractBasePtr abs_ptr = std::make_shared(Named("Point"), attr, methods); auto cons_class = NewValueNode(abs_ptr->BuildValue()); cons_class->set_abstract(abs_ptr); @@ -176,7 +176,7 @@ TEST_F(TestClean, TestEraseClassPartial) { std::vector attr = {{"x", std::make_shared(kAnyValue, kInt64)}, {"y", std::make_shared(kAnyValue, kInt64)}}; - std::unordered_map methods; + mindspore::HashMap methods; AbstractBasePtr abs_ptr = std::make_shared(Named("Point"), attr, methods); auto cons_class = NewValueNode(abs_ptr->BuildValue()); cons_class->set_abstract(abs_ptr); diff --git a/tests/ut/cpp/parallel/auto_parallel/dp_algo_test.cc b/tests/ut/cpp/parallel/auto_parallel/dp_algo_test.cc index ceab5a433bc..3ce0b12fbd4 100644 --- a/tests/ut/cpp/parallel/auto_parallel/dp_algo_test.cc +++ b/tests/ut/cpp/parallel/auto_parallel/dp_algo_test.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -172,7 +172,7 @@ void TestDPAlgo::SetUp() { // matmul0 ValuePtr transpose_a_0 = MakeValue(false); ValuePtr transpose_b_0 = MakeValue(false); - std::unordered_map attr_0 = {{"transpose_a", transpose_a_0}, {"transpose_b", transpose_b_0}}; + mindspore::HashMap attr_0 = {{"transpose_a", transpose_a_0}, {"transpose_b", transpose_b_0}}; Shapes inputs_shape_0 = {{128, 1024}, {1024, 4096}}; Shapes outputs_shape_0 = {{4096, 1024}}; matmul0 = std::make_shared("matmul_info", inputs_shape_0, outputs_shape_0, attr_0); @@ -182,7 +182,7 @@ void TestDPAlgo::SetUp() { // matmul1 ValuePtr transpose_a_1 = MakeValue(false); ValuePtr transpose_b_1 = MakeValue(false); - std::unordered_map attr_1 = {{"transpose_a", transpose_a_1}, {"transpose_b", transpose_b_1}}; + mindspore::HashMap attr_1 = {{"transpose_a", transpose_a_1}, {"transpose_b", transpose_b_1}}; Shapes inputs_shape_1 = {{128, 1024}, {1024, 4096}}; Shapes outputs_shape_1 = {{128, 4096}}; matmul1 = std::make_shared("matmul_info", inputs_shape_1, outputs_shape_1, attr_1); @@ -192,7 +192,7 @@ void TestDPAlgo::SetUp() { // matmul2 ValuePtr transpose_a_2 = MakeValue(false); ValuePtr transpose_b_2 = MakeValue(false); - std::unordered_map attr_2 = {{"transpose_a", transpose_a_2}, {"transpose_b", transpose_b_2}}; + mindspore::HashMap attr_2 = {{"transpose_a", transpose_a_2}, {"transpose_b", transpose_b_2}}; Shapes inputs_shape_2 = {{128, 4096}, {4096, 1024}}; Shapes outputs_shape_2 = {{128, 1024}}; matmul2 = std::make_shared("matmul_info", inputs_shape_2, outputs_shape_2, attr_2); @@ -202,7 +202,7 @@ void TestDPAlgo::SetUp() { // matmul3 ValuePtr transpose_a_3 = MakeValue(false); ValuePtr transpose_b_3 = MakeValue(false); - std::unordered_map attr_3 = {{"transpose_a", transpose_a_3}, {"transpose_b", transpose_b_3}}; + mindspore::HashMap attr_3 = 
{{"transpose_a", transpose_a_3}, {"transpose_b", transpose_b_3}}; Shapes inputs_shape_3 = {{1024, 128}, {128, 4096}}; Shapes outputs_shape_3 = {{1024, 4096}}; matmul3 = std::make_shared("matmul_info", inputs_shape_3, outputs_shape_3, attr_3); @@ -212,7 +212,7 @@ void TestDPAlgo::SetUp() { // matmul4 ValuePtr transpose_a_4 = MakeValue(false); ValuePtr transpose_b_4 = MakeValue(false); - std::unordered_map attr_4 = {{"transpose_a", transpose_a_4}, {"transpose_b", transpose_b_4}}; + mindspore::HashMap attr_4 = {{"transpose_a", transpose_a_4}, {"transpose_b", transpose_b_4}}; Shapes inputs_shape_4 = {{128, 1024}, {1024, 4096}}; Shapes outputs_shape_4 = {{128, 4096}}; matmul4 = std::make_shared("matmul_info", inputs_shape_4, outputs_shape_4, attr_4); @@ -222,7 +222,7 @@ void TestDPAlgo::SetUp() { // matmul5 ValuePtr transpose_a_5 = MakeValue(false); ValuePtr transpose_b_5 = MakeValue(false); - std::unordered_map attr_5 = {{"transpose_a", transpose_a_5}, {"transpose_b", transpose_b_5}}; + mindspore::HashMap attr_5 = {{"transpose_a", transpose_a_5}, {"transpose_b", transpose_b_5}}; Shapes inputs_shape_5 = {{128, 4096}, {4096, 4096}}; Shapes outputs_shape_5 = {{128, 4096}}; matmul5 = std::make_shared("matmul_info", inputs_shape_5, outputs_shape_5, attr_5); @@ -232,7 +232,7 @@ void TestDPAlgo::SetUp() { // matmul6 ValuePtr transpose_a_6 = MakeValue(false); ValuePtr transpose_b_6 = MakeValue(false); - std::unordered_map attr_6 = {{"transpose_a", transpose_a_6}, {"transpose_b", transpose_b_6}}; + mindspore::HashMap attr_6 = {{"transpose_a", transpose_a_6}, {"transpose_b", transpose_b_6}}; Shapes inputs_shape_6 = {{4096, 128}, {128, 1024}}; Shapes outputs_shape_6 = {{4096, 1024}}; matmul6 = std::make_shared("matmul_info", inputs_shape_6, outputs_shape_6, attr_6); @@ -242,7 +242,7 @@ void TestDPAlgo::SetUp() { // matmul7 ValuePtr transpose_a_7 = MakeValue(false); ValuePtr transpose_b_7 = MakeValue(true); - std::unordered_map attr_7 = {{"transpose_a", transpose_a_7}, {"transpose_b", transpose_b_7}}; + mindspore::HashMap attr_7 = {{"transpose_a", transpose_a_7}, {"transpose_b", transpose_b_7}}; Shapes inputs_shape_7 = {{64, 128}, {4096, 128}}; Shapes outputs_shape_7 = {{64, 4096}}; matmul7 = std::make_shared("matmul_info", inputs_shape_7, outputs_shape_7, attr_7); @@ -252,7 +252,7 @@ void TestDPAlgo::SetUp() { // matmul8 ValuePtr transpose_a_8 = MakeValue(false); ValuePtr transpose_b_8 = MakeValue(true); - std::unordered_map attr_8 = {{"transpose_a", transpose_a_8}, {"transpose_b", transpose_b_8}}; + mindspore::HashMap attr_8 = {{"transpose_a", transpose_a_8}, {"transpose_b", transpose_b_8}}; Shapes inputs_shape_8 = {{64, 4096}, {40960, 4096}}; Shapes outputs_shape_8 = {{64, 40960}}; matmul8 = std::make_shared("matmul_info", inputs_shape_8, outputs_shape_8, attr_8); @@ -281,7 +281,7 @@ void TestDPAlgo::ConstructBatmanGraph() { std::string edge_matmul_matmul_name = "MatMul-MatMul"; std::string edge_iden_matmul_name = "TmpIdentity-MatMul"; - std::unordered_map attr = {}; + mindspore::HashMap attr = {}; Shapes inputs_shape = {{64, 64}}; Shapes outputs_shape = {{64, 64}}; tmp_identity_ptr1 = std::make_shared(inputs_shape, outputs_shape, attr); @@ -299,7 +299,7 @@ void TestDPAlgo::ConstructBatmanGraph() { // mm1_ptr ValuePtr transpose_a_1 = MakeValue(false); ValuePtr transpose_b_1 = MakeValue(false); - std::unordered_map attr_1 = {{"transpose_a", transpose_a_1}, {"transpose_b", transpose_b_1}}; + mindspore::HashMap attr_1 = {{"transpose_a", transpose_a_1}, {"transpose_b", transpose_b_1}}; Shapes 
inputs_shape_1 = {{64, 64}, {64, 64}}; Shapes outputs_shape_1 = {{64, 64}}; mm1_ptr = std::make_shared("matmul_info1", inputs_shape_1, outputs_shape_1, attr_1); @@ -308,7 +308,7 @@ void TestDPAlgo::ConstructBatmanGraph() { // mm2_ptr ValuePtr transpose_a_2 = MakeValue(false); ValuePtr transpose_b_2 = MakeValue(false); - std::unordered_map attr_2 = {{"transpose_a", transpose_a_2}, {"transpose_b", transpose_b_2}}; + mindspore::HashMap attr_2 = {{"transpose_a", transpose_a_2}, {"transpose_b", transpose_b_2}}; Shapes inputs_shape_2 = {{64, 64}, {64, 64}}; Shapes outputs_shape_2 = {{64, 64}}; mm2_ptr = std::make_shared("matmul_info2", inputs_shape_2, outputs_shape_2, attr_2); @@ -317,7 +317,7 @@ void TestDPAlgo::ConstructBatmanGraph() { // mm3_ptr ValuePtr transpose_a_3 = MakeValue(false); ValuePtr transpose_b_3 = MakeValue(false); - std::unordered_map attr_3 = {{"transpose_a", transpose_a_3}, {"transpose_b", transpose_b_3}}; + mindspore::HashMap attr_3 = {{"transpose_a", transpose_a_3}, {"transpose_b", transpose_b_3}}; Shapes inputs_shape_3 = {{64, 64}, {64, 64}}; Shapes outputs_shape_3 = {{64, 64}}; mm3_ptr = std::make_shared("matmul_info3", inputs_shape_3, outputs_shape_3, attr_3); @@ -326,7 +326,7 @@ void TestDPAlgo::ConstructBatmanGraph() { // mm4_ptr ValuePtr transpose_a_4 = MakeValue(false); ValuePtr transpose_b_4 = MakeValue(false); - std::unordered_map attr_4 = {{"transpose_a", transpose_a_4}, {"transpose_b", transpose_b_4}}; + mindspore::HashMap attr_4 = {{"transpose_a", transpose_a_4}, {"transpose_b", transpose_b_4}}; Shapes inputs_shape_4 = {{64, 64}, {64, 64}}; Shapes outputs_shape_4 = {{64, 64}}; mm4_ptr = std::make_shared("matmul_info4", inputs_shape_4, outputs_shape_4, attr_4); @@ -335,7 +335,7 @@ void TestDPAlgo::ConstructBatmanGraph() { // mm5_ptr ValuePtr transpose_a_5 = MakeValue(false); ValuePtr transpose_b_5 = MakeValue(false); - std::unordered_map attr_5 = {{"transpose_a", transpose_a_3}, {"transpose_b", transpose_b_5}}; + mindspore::HashMap attr_5 = {{"transpose_a", transpose_a_3}, {"transpose_b", transpose_b_5}}; Shapes inputs_shape_5 = {{64, 64}, {64, 64}}; Shapes outputs_shape_5 = {{64, 64}}; mm5_ptr = std::make_shared("matmul_info5", inputs_shape_5, outputs_shape_5, attr_5); @@ -344,7 +344,7 @@ void TestDPAlgo::ConstructBatmanGraph() { // mm6_ptr ValuePtr transpose_a_6 = MakeValue(false); ValuePtr transpose_b_6 = MakeValue(false); - std::unordered_map attr_6 = {{"transpose_a", transpose_a_6}, {"transpose_b", transpose_b_6}}; + mindspore::HashMap attr_6 = {{"transpose_a", transpose_a_6}, {"transpose_b", transpose_b_6}}; Shapes inputs_shape_6 = {{64, 64}, {64, 64}}; Shapes outputs_shape_6 = {{64, 64}}; mm6_ptr = std::make_shared("matmul_info6", inputs_shape_6, outputs_shape_6, attr_6); @@ -353,7 +353,7 @@ void TestDPAlgo::ConstructBatmanGraph() { // mm7_ptr ValuePtr transpose_a_7 = MakeValue(false); ValuePtr transpose_b_7 = MakeValue(false); - std::unordered_map attr_7 = {{"transpose_a", transpose_a_7}, {"transpose_b", transpose_a_7}}; + mindspore::HashMap attr_7 = {{"transpose_a", transpose_a_7}, {"transpose_b", transpose_a_7}}; Shapes inputs_shape_7 = {{64, 64}, {64, 64}}; Shapes outputs_shape_7 = {{64, 64}}; mm7_ptr = std::make_shared("matmul_info7", inputs_shape_7, outputs_shape_7, attr_7); @@ -464,7 +464,7 @@ void TestDPAlgo::ConstructBatmanGraph() { } void TestDPAlgo::ConstructTriangleGraph() { - std::unordered_map attr = {}; + mindspore::HashMap attr = {}; Shapes inputs_shape = {{64, 64}}; Shapes outputs_shape = {{64, 64}}; tmp_identity_ptr1 = 
std::make_shared(inputs_shape, outputs_shape, attr); @@ -474,7 +474,7 @@ void TestDPAlgo::ConstructTriangleGraph() { // mm6_ptr ValuePtr transpose_a_6 = MakeValue(false); ValuePtr transpose_b_6 = MakeValue(false); - std::unordered_map attr_6 = {{"transpose_a", transpose_a_6}, {"transpose_b", transpose_b_6}}; + mindspore::HashMap attr_6 = {{"transpose_a", transpose_a_6}, {"transpose_b", transpose_b_6}}; Shapes inputs_shape_6 = {{64, 64}, {64, 64}}; Shapes outputs_shape_6 = {{64, 64}}; mm6_ptr = std::make_shared("matmul_info", inputs_shape_6, outputs_shape_6, attr_6); @@ -487,7 +487,7 @@ void TestDPAlgo::ConstructTriangleGraph() { // mm1_ptr ValuePtr transpose_a_1 = MakeValue(false); ValuePtr transpose_b_1 = MakeValue(false); - std::unordered_map attr_1 = {{"transpose_a", transpose_a_1}, {"transpose_b", transpose_b_1}}; + mindspore::HashMap attr_1 = {{"transpose_a", transpose_a_1}, {"transpose_b", transpose_b_1}}; Shapes inputs_shape_1 = {{64, 64}, {64, 64}}; Shapes outputs_shape_1 = {{64, 64}}; mm1_ptr = std::make_shared("matmul_info", inputs_shape_1, outputs_shape_1, attr_1); @@ -496,7 +496,7 @@ void TestDPAlgo::ConstructTriangleGraph() { // mm2_ptr ValuePtr transpose_a_2 = MakeValue(false); ValuePtr transpose_b_2 = MakeValue(false); - std::unordered_map attr_2 = {{"transpose_a", transpose_a_2}, {"transpose_b", transpose_b_2}}; + mindspore::HashMap attr_2 = {{"transpose_a", transpose_a_2}, {"transpose_b", transpose_b_2}}; Shapes inputs_shape_2 = {{64, 64}, {64, 64}}; Shapes outputs_shape_2 = {{64, 64}}; mm2_ptr = std::make_shared("matmul_info", inputs_shape_2, outputs_shape_2, attr_2); @@ -505,7 +505,7 @@ void TestDPAlgo::ConstructTriangleGraph() { // mm3_ptr ValuePtr transpose_a_3 = MakeValue(false); ValuePtr transpose_b_3 = MakeValue(false); - std::unordered_map attr_3 = {{"transpose_a", transpose_a_3}, {"transpose_b", transpose_b_3}}; + mindspore::HashMap attr_3 = {{"transpose_a", transpose_a_3}, {"transpose_b", transpose_b_3}}; Shapes inputs_shape_3 = {{64, 64}, {64, 64}}; Shapes outputs_shape_3 = {{64, 64}}; mm3_ptr = std::make_shared("matmul_info", inputs_shape_3, outputs_shape_3, attr_3); @@ -514,7 +514,7 @@ void TestDPAlgo::ConstructTriangleGraph() { // mm4_ptr ValuePtr transpose_a_4 = MakeValue(false); ValuePtr transpose_b_4 = MakeValue(false); - std::unordered_map attr_4 = {{"transpose_a", transpose_a_4}, {"transpose_b", transpose_b_4}}; + mindspore::HashMap attr_4 = {{"transpose_a", transpose_a_4}, {"transpose_b", transpose_b_4}}; Shapes inputs_shape_4 = {{64, 64}, {64, 64}}; Shapes outputs_shape_4 = {{64, 64}}; mm4_ptr = std::make_shared("matmul_info", inputs_shape_4, outputs_shape_4, attr_4); @@ -523,7 +523,7 @@ void TestDPAlgo::ConstructTriangleGraph() { // mm5_ptr ValuePtr transpose_a_5 = MakeValue(false); ValuePtr transpose_b_5 = MakeValue(false); - std::unordered_map attr_5 = {{"transpose_a", transpose_a_3}, {"transpose_b", transpose_b_5}}; + mindspore::HashMap attr_5 = {{"transpose_a", transpose_a_3}, {"transpose_b", transpose_b_5}}; Shapes inputs_shape_5 = {{64, 64}, {64, 64}}; Shapes outputs_shape_5 = {{64, 64}}; mm5_ptr = std::make_shared("matmul_info", inputs_shape_5, outputs_shape_5, attr_5); @@ -605,7 +605,7 @@ void TestDPAlgo::ConstructTriangleGraph() { } void TestDPAlgo::ConstructTriangleGraph2() { - std::unordered_map attr = {}; + mindspore::HashMap attr = {}; Shapes inputs_shape = {{64, 64}}; Shapes outputs_shape = {{64, 64}}; tmp_identity_ptr1 = std::make_shared(inputs_shape, outputs_shape, attr); @@ -615,7 +615,7 @@ void TestDPAlgo::ConstructTriangleGraph2() { 
// mm1_ptr ValuePtr transpose_a_1 = MakeValue(false); ValuePtr transpose_b_1 = MakeValue(false); - std::unordered_map attr_1 = {{"transpose_a", transpose_a_1}, {"transpose_b", transpose_b_1}}; + mindspore::HashMap attr_1 = {{"transpose_a", transpose_a_1}, {"transpose_b", transpose_b_1}}; Shapes inputs_shape_1 = {{64, 64}, {64, 64}}; Shapes outputs_shape_1 = {{64, 64}}; mm1_ptr = std::make_shared("matmul_info", inputs_shape_1, outputs_shape_1, attr_1); @@ -624,7 +624,7 @@ void TestDPAlgo::ConstructTriangleGraph2() { // mm2_ptr ValuePtr transpose_a_2 = MakeValue(false); ValuePtr transpose_b_2 = MakeValue(false); - std::unordered_map attr_2 = {{"transpose_a", transpose_a_2}, {"transpose_b", transpose_b_2}}; + mindspore::HashMap attr_2 = {{"transpose_a", transpose_a_2}, {"transpose_b", transpose_b_2}}; Shapes inputs_shape_2 = {{64, 64}, {64, 64}}; Shapes outputs_shape_2 = {{64, 64}}; mm2_ptr = std::make_shared("matmul_info", inputs_shape_2, outputs_shape_2, attr_2); @@ -633,7 +633,7 @@ void TestDPAlgo::ConstructTriangleGraph2() { // mm3_ptr ValuePtr transpose_a_3 = MakeValue(false); ValuePtr transpose_b_3 = MakeValue(false); - std::unordered_map attr_3 = {{"transpose_a", transpose_a_3}, {"transpose_b", transpose_b_3}}; + mindspore::HashMap attr_3 = {{"transpose_a", transpose_a_3}, {"transpose_b", transpose_b_3}}; Shapes inputs_shape_3 = {{64, 64}, {64, 64}}; Shapes outputs_shape_3 = {{64, 64}}; mm3_ptr = std::make_shared("matmul_info", inputs_shape_3, outputs_shape_3, attr_3); @@ -979,7 +979,7 @@ void TestDPAlgo::ConstructMMRGraph() { // mm1_ptr ValuePtr transpose_a_1 = MakeValue(false); ValuePtr transpose_b_1 = MakeValue(false); - std::unordered_map attr_1 = {{"transpose_a", transpose_a_1}, {"transpose_b", transpose_b_1}}; + mindspore::HashMap attr_1 = {{"transpose_a", transpose_a_1}, {"transpose_b", transpose_b_1}}; Shapes inputs_shape_1 = {{32, 16}, {16, 32}}; Shapes outputs_shape_1 = {{32, 32}}; mm1_ptr = std::make_shared("matmul_info", inputs_shape_1, outputs_shape_1, attr_1); @@ -988,7 +988,7 @@ void TestDPAlgo::ConstructMMRGraph() { // mm2_ptr ValuePtr transpose_a_2 = MakeValue(false); ValuePtr transpose_b_2 = MakeValue(false); - std::unordered_map attr_2 = {{"transpose_a", transpose_a_2}, {"transpose_b", transpose_b_2}}; + mindspore::HashMap attr_2 = {{"transpose_a", transpose_a_2}, {"transpose_b", transpose_b_2}}; Shapes inputs_shape_2 = {{8, 32}, {32, 32}}; Shapes outputs_shape_2 = {{8, 32}}; mm2_ptr = std::make_shared("matmul_info", inputs_shape_2, outputs_shape_2, attr_2); @@ -997,7 +997,7 @@ void TestDPAlgo::ConstructMMRGraph() { // mm3_ptr ValuePtr transpose_a_3 = MakeValue(false); ValuePtr transpose_b_3 = MakeValue(false); - std::unordered_map attr_3 = {{"transpose_a", transpose_a_3}, {"transpose_b", transpose_b_3}}; + mindspore::HashMap attr_3 = {{"transpose_a", transpose_a_3}, {"transpose_b", transpose_b_3}}; Shapes inputs_shape_3 = {{32, 32}, {32, 64}}; Shapes outputs_shape_3 = {{32, 64}}; mm3_ptr = std::make_shared("matmul_info", inputs_shape_3, outputs_shape_3, attr_3); @@ -1006,7 +1006,7 @@ void TestDPAlgo::ConstructMMRGraph() { // mm4_ptr ValuePtr transpose_a_4 = MakeValue(false); ValuePtr transpose_b_4 = MakeValue(false); - std::unordered_map attr_4 = {{"transpose_a", transpose_a_4}, {"transpose_b", transpose_b_4}}; + mindspore::HashMap attr_4 = {{"transpose_a", transpose_a_4}, {"transpose_b", transpose_b_4}}; Shapes inputs_shape_4 = {{64, 32}, {32, 32}}; Shapes outputs_shape_4 = {{64, 32}}; mm4_ptr = std::make_shared("matmul_info", inputs_shape_4, outputs_shape_4, 
attr_4); @@ -1015,7 +1015,7 @@ void TestDPAlgo::ConstructMMRGraph() { // mm5_ptr ValuePtr transpose_a_5 = MakeValue(false); ValuePtr transpose_b_5 = MakeValue(false); - std::unordered_map attr_5 = {{"transpose_a", transpose_a_5}, {"transpose_b", transpose_b_5}}; + mindspore::HashMap attr_5 = {{"transpose_a", transpose_a_5}, {"transpose_b", transpose_b_5}}; Shapes inputs_shape_5 = {{8, 32}, {32, 64}}; Shapes outputs_shape_5 = {{8, 64}}; mm5_ptr = std::make_shared("matmul_info", inputs_shape_5, outputs_shape_5, attr_5); @@ -1024,14 +1024,14 @@ void TestDPAlgo::ConstructMMRGraph() { // mm5_ptr ValuePtr transpose_a_6 = MakeValue(false); ValuePtr transpose_b_6 = MakeValue(false); - std::unordered_map attr_6 = {{"transpose_a", transpose_a_6}, {"transpose_b", transpose_b_6}}; + mindspore::HashMap attr_6 = {{"transpose_a", transpose_a_6}, {"transpose_b", transpose_b_6}}; Shapes inputs_shape_6 = {{8, 64}, {64, 32}}; Shapes outputs_shape_6 = {{8, 32}}; mm6_ptr = std::make_shared("matmul_info", inputs_shape_6, outputs_shape_6, attr_6); mm6_ptr->set_outputs_type({kFloat32}); ValuePtr relu = MakeValue(std::string("relu")); - std::unordered_map relu_attr = {{"activation_type", relu}}; + mindspore::HashMap relu_attr = {{"activation_type", relu}}; // relu1_ptr Shapes relu1_inputs_shape = {{8, 32}}; @@ -1171,7 +1171,7 @@ void TestDPAlgo::ConstructMMRGraph() { } void TestDPAlgo::ConstructIdentityDiamondGraph() { - std::unordered_map attr = {}; + mindspore::HashMap attr = {}; Shapes inputs_shape = {{32, 64}}; Shapes outputs_shape = {{32, 64}}; tmp_identity_ptr = std::make_shared(inputs_shape, outputs_shape, attr); @@ -1180,7 +1180,7 @@ void TestDPAlgo::ConstructIdentityDiamondGraph() { // mm1_ptr ValuePtr transpose_a_1 = MakeValue(false); ValuePtr transpose_b_1 = MakeValue(false); - std::unordered_map attr_1 = {{"transpose_a", transpose_a_1}, {"transpose_b", transpose_b_1}}; + mindspore::HashMap attr_1 = {{"transpose_a", transpose_a_1}, {"transpose_b", transpose_b_1}}; Shapes inputs_shape_1 = {{32, 64}, {64, 128}}; Shapes outputs_shape_1 = {{32, 128}}; mm1_ptr = std::make_shared("matmul_info", inputs_shape_1, outputs_shape_1, attr_1); @@ -1189,7 +1189,7 @@ void TestDPAlgo::ConstructIdentityDiamondGraph() { // mm2_ptr ValuePtr transpose_a_2 = MakeValue(false); ValuePtr transpose_b_2 = MakeValue(false); - std::unordered_map attr_2 = {{"transpose_a", transpose_a_2}, {"transpose_b", transpose_b_2}}; + mindspore::HashMap attr_2 = {{"transpose_a", transpose_a_2}, {"transpose_b", transpose_b_2}}; Shapes inputs_shape_2 = {{128, 32}, {32, 64}}; Shapes outputs_shape_2 = {{128, 64}}; mm2_ptr = std::make_shared("matmul_info", inputs_shape_2, outputs_shape_2, attr_2); @@ -1198,7 +1198,7 @@ void TestDPAlgo::ConstructIdentityDiamondGraph() { // mm3_ptr ValuePtr transpose_a_3 = MakeValue(false); ValuePtr transpose_b_3 = MakeValue(false); - std::unordered_map attr_3 = {{"transpose_a", transpose_a_3}, {"transpose_b", transpose_b_3}}; + mindspore::HashMap attr_3 = {{"transpose_a", transpose_a_3}, {"transpose_b", transpose_b_3}}; Shapes inputs_shape_3 = {{32, 128}, {128, 64}}; Shapes outputs_shape_3 = {{32, 64}}; mm3_ptr = std::make_shared("matmul_info", inputs_shape_3, outputs_shape_3, attr_3); diff --git a/tests/ut/cpp/parallel/auto_parallel/edge_costmodel_test.cc b/tests/ut/cpp/parallel/auto_parallel/edge_costmodel_test.cc index c5c13851b18..da4cbbc6af5 100644 --- a/tests/ut/cpp/parallel/auto_parallel/edge_costmodel_test.cc +++ b/tests/ut/cpp/parallel/auto_parallel/edge_costmodel_test.cc @@ -1,5 +1,5 @@ /** - * Copyright 
2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -62,7 +62,7 @@ void TestEdgeCostModel::SetUp() { // matmul1 ValuePtr transpose_a_1 = MakeValue(false); ValuePtr transpose_b_1 = MakeValue(false); - std::unordered_map attr_1 = {{"transpose_a", transpose_a_1}, {"transpose_b", transpose_b_1}}; + mindspore::HashMap attr_1 = {{"transpose_a", transpose_a_1}, {"transpose_b", transpose_b_1}}; Shapes inputs_shape_1 = {{8, 16}, {16, 32}}; Shapes outputs_shape_1 = {{8, 32}}; matmul1 = std::make_shared("matmul_info", inputs_shape_1, outputs_shape_1, attr_1); @@ -71,7 +71,7 @@ void TestEdgeCostModel::SetUp() { // matmul2 ValuePtr transpose_a_2 = MakeValue(false); ValuePtr transpose_b_2 = MakeValue(false); - std::unordered_map attr_2 = {{"transpose_a", transpose_a_2}, {"transpose_b", transpose_b_2}}; + mindspore::HashMap attr_2 = {{"transpose_a", transpose_a_2}, {"transpose_b", transpose_b_2}}; Shapes inputs_shape_2 = {{8, 32}, {32, 16}}; Shapes outputs_shape_2 = {{8, 16}}; matmul2 = std::make_shared("matmul_info", inputs_shape_2, outputs_shape_2, attr_2); @@ -80,7 +80,7 @@ void TestEdgeCostModel::SetUp() { // matmul3 ValuePtr transpose_a_3 = MakeValue(false); ValuePtr transpose_b_3 = MakeValue(false); - std::unordered_map attr_3 = {{"transpose_a", transpose_a_3}, {"transpose_b", transpose_b_3}}; + mindspore::HashMap attr_3 = {{"transpose_a", transpose_a_3}, {"transpose_b", transpose_b_3}}; Shapes inputs_shape_3 = {{16, 8}, {8, 32}}; Shapes outputs_shape_3 = {{16, 32}}; matmul3 = std::make_shared("matmul_info", inputs_shape_3, outputs_shape_3, attr_3); @@ -89,7 +89,7 @@ void TestEdgeCostModel::SetUp() { // matmul4 ValuePtr transpose_a_4 = MakeValue(false); ValuePtr transpose_b_4 = MakeValue(false); - std::unordered_map attr_4 = {{"transpose_a", transpose_a_4}, {"transpose_b", transpose_b_4}}; + mindspore::HashMap attr_4 = {{"transpose_a", transpose_a_4}, {"transpose_b", transpose_b_4}}; Shapes inputs_shape_4 = {{8, 16}, {16, 32}}; Shapes outputs_shape_4 = {{8, 32}}; matmul4 = std::make_shared("matmul_info", inputs_shape_4, outputs_shape_4, attr_4); @@ -98,7 +98,7 @@ void TestEdgeCostModel::SetUp() { // matmul5 ValuePtr transpose_a_5 = MakeValue(false); ValuePtr transpose_b_5 = MakeValue(true); - std::unordered_map attr_5 = {{"transpose_a", transpose_a_5}, {"transpose_b", transpose_b_5}}; + mindspore::HashMap attr_5 = {{"transpose_a", transpose_a_5}, {"transpose_b", transpose_b_5}}; Shapes inputs_shape_5 = {{8, 32}, {8, 32}}; Shapes outputs_shape_5 = {{8, 8}}; matmul5 = std::make_shared("matmul_info", inputs_shape_5, outputs_shape_5, attr_5); diff --git a/tests/ut/cpp/parallel/auto_parallel/graph_costmodel_test.cc b/tests/ut/cpp/parallel/auto_parallel/graph_costmodel_test.cc index 38946d80256..d0bf0019b16 100644 --- a/tests/ut/cpp/parallel/auto_parallel/graph_costmodel_test.cc +++ b/tests/ut/cpp/parallel/auto_parallel/graph_costmodel_test.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
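Every test hunk above is the same one-line type substitution, and it works as a drop-in because `mindspore::HashMap` is an alias template rather than a new container type. A minimal sketch of the alias header follows; the build-flag name `ENABLE_FAST_HASH_TABLE` and the `std::unordered_map` fallback are assumptions, since these hunks only show the alias being used:

```cpp
// Sketch of utils/hash_map.h -- HashMap keeps the std::unordered_map
// interface, so brace-initialization, find(), emplace() and range-for at
// call sites compile unchanged.
#include <functional>

#if defined(ENABLE_FAST_HASH_TABLE)  // assumed flag name
#include "robin_hood/robin_hood.h"

namespace mindspore {
template <typename K, typename V, typename Hash = robin_hood::hash<K>,
          typename Equal = std::equal_to<K>>
using HashMap = robin_hood::unordered_map<K, V, Hash, Equal>;
}  // namespace mindspore
#else
#include <unordered_map>

namespace mindspore {
template <typename K, typename V, typename Hash = std::hash<K>,
          typename Equal = std::equal_to<K>>
using HashMap = std::unordered_map<K, V, Hash, Equal>;
}  // namespace mindspore
#endif
```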
@@ -71,7 +71,7 @@ void TestCostGraph::SetUp() { // matmul0 ValuePtr transpose_a_0 = MakeValue(false); ValuePtr transpose_b_0 = MakeValue(false); - std::unordered_map attr_0 = {{"transpose_a", transpose_a_0}, {"transpose_b", transpose_b_0}}; + mindspore::HashMap attr_0 = {{"transpose_a", transpose_a_0}, {"transpose_b", transpose_b_0}}; Shapes inputs_shape_0 = {{32, 16}, {16, 16}}; Shapes outputs_shape_0 = {{32, 16}}; matmul0 = std::make_shared("matmul_info", inputs_shape_0, outputs_shape_0, attr_0); @@ -80,7 +80,7 @@ void TestCostGraph::SetUp() { // matmul1 ValuePtr transpose_a_1 = MakeValue(false); ValuePtr transpose_b_1 = MakeValue(false); - std::unordered_map attr_1 = {{"transpose_a", transpose_a_1}, {"transpose_b", transpose_b_1}}; + mindspore::HashMap attr_1 = {{"transpose_a", transpose_a_1}, {"transpose_b", transpose_b_1}}; Shapes inputs_shape_1 = {{8, 16}, {16, 32}}; Shapes outputs_shape_1 = {{8, 32}}; matmul1 = std::make_shared("matmul_info", inputs_shape_1, outputs_shape_1, attr_1); @@ -89,7 +89,7 @@ void TestCostGraph::SetUp() { // matmul2 ValuePtr transpose_a_2 = MakeValue(false); ValuePtr transpose_b_2 = MakeValue(false); - std::unordered_map attr_2 = {{"transpose_a", transpose_a_2}, {"transpose_b", transpose_b_2}}; + mindspore::HashMap attr_2 = {{"transpose_a", transpose_a_2}, {"transpose_b", transpose_b_2}}; Shapes inputs_shape_2 = {{8, 32}, {32, 16}}; Shapes outputs_shape_2 = {{8, 16}}; matmul2 = std::make_shared("matmul_info", inputs_shape_2, outputs_shape_2, attr_2); @@ -98,7 +98,7 @@ void TestCostGraph::SetUp() { // matmul3 ValuePtr transpose_a_3 = MakeValue(false); ValuePtr transpose_b_3 = MakeValue(false); - std::unordered_map attr_3 = {{"transpose_a", transpose_a_3}, {"transpose_b", transpose_b_3}}; + mindspore::HashMap attr_3 = {{"transpose_a", transpose_a_3}, {"transpose_b", transpose_b_3}}; Shapes inputs_shape_3 = {{16, 8}, {8, 32}}; Shapes outputs_shape_3 = {{16, 32}}; matmul3 = std::make_shared("matmul_info", inputs_shape_3, outputs_shape_3, attr_3); @@ -107,7 +107,7 @@ void TestCostGraph::SetUp() { // matmul4 ValuePtr transpose_a_4 = MakeValue(false); ValuePtr transpose_b_4 = MakeValue(false); - std::unordered_map attr_4 = {{"transpose_a", transpose_a_4}, {"transpose_b", transpose_b_4}}; + mindspore::HashMap attr_4 = {{"transpose_a", transpose_a_4}, {"transpose_b", transpose_b_4}}; Shapes inputs_shape_4 = {{8, 16}, {16, 32}}; Shapes outputs_shape_4 = {{8, 32}}; matmul4 = std::make_shared("matmul_info", inputs_shape_4, outputs_shape_4, attr_4); @@ -116,7 +116,7 @@ void TestCostGraph::SetUp() { // matmul5 ValuePtr transpose_a_5 = MakeValue(false); ValuePtr transpose_b_5 = MakeValue(true); - std::unordered_map attr_5 = {{"transpose_a", transpose_a_5}, {"transpose_b", transpose_b_5}}; + mindspore::HashMap attr_5 = {{"transpose_a", transpose_a_5}, {"transpose_b", transpose_b_5}}; Shapes inputs_shape_5 = {{8, 32}, {8, 32}}; Shapes outputs_shape_5 = {{8, 8}}; matmul5 = std::make_shared("matmul_info", inputs_shape_5, outputs_shape_5, attr_5); diff --git a/tests/ut/cpp/parallel/ops_info/activation_info_test.cc b/tests/ut/cpp/parallel/ops_info/activation_info_test.cc index 1e02f1952d6..f4b08e83e76 100644 --- a/tests/ut/cpp/parallel/ops_info/activation_info_test.cc +++ b/tests/ut/cpp/parallel/ops_info/activation_info_test.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the 
License. @@ -55,7 +55,7 @@ void TestActivationInfo::SetUp() { g_device_manager->Init(dev_list, local_dev, stage_map, "hccl"); ValuePtr relu = MakeValue(std::string("relu")); - std::unordered_map attr = {{"activation_type", relu}}; + mindspore::HashMap attr = {{"activation_type", relu}}; Shapes inputs_shape = {{2, 4, 8, 16}}; Shapes outputs_shape = {{2, 4, 8, 16}}; diff --git a/tests/ut/cpp/parallel/ops_info/activation_test.cc b/tests/ut/cpp/parallel/ops_info/activation_test.cc index d406b53c058..9f09ef35288 100644 --- a/tests/ut/cpp/parallel/ops_info/activation_test.cc +++ b/tests/ut/cpp/parallel/ops_info/activation_test.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -57,10 +57,10 @@ void TestActivation::SetUp() { g_device_manager->Init(dev_list, local_dev, stage_map, "hccl"); ValuePtr relu = MakeValue(std::string("relu")); - std::unordered_map relu_attr = {{"activation_type", relu}}; + mindspore::HashMap relu_attr = {{"activation_type", relu}}; ValuePtr sm = MakeValue(std::string("softmax")); ValuePtr axix = MakeValue(std::int64_t(2)); - std::unordered_map softmax_attr = {{"activation_type", sm}, {"axis", axix}}; + mindspore::HashMap softmax_attr = {{"activation_type", sm}, {"axis", axix}}; Shapes relu_inputs_shape = {{2, 4, 8, 16}}; Shapes relu_outputs_shape = {{2, 4, 8, 16}}; diff --git a/tests/ut/cpp/parallel/ops_info/gelu_info_test.cc b/tests/ut/cpp/parallel/ops_info/gelu_info_test.cc index 5d45dea9028..b4630f6c08e 100644 --- a/tests/ut/cpp/parallel/ops_info/gelu_info_test.cc +++ b/tests/ut/cpp/parallel/ops_info/gelu_info_test.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -54,7 +54,7 @@ void TestGeLUInfo::SetUp() { g_device_manager = std::make_shared(); g_device_manager->Init(dev_list, local_dev, stage_map, "hccl"); - std::unordered_map attr; + mindspore::HashMap attr; Shapes inputs_shape = {{2, 4, 8, 16}}; Shapes outputs_shape = {{2, 4, 8, 16}}; diff --git a/tests/ut/cpp/parallel/ops_info/l2_normalize_info_test.cc b/tests/ut/cpp/parallel/ops_info/l2_normalize_info_test.cc index 426f8041f45..7e5818d4f34 100644 --- a/tests/ut/cpp/parallel/ops_info/l2_normalize_info_test.cc +++ b/tests/ut/cpp/parallel/ops_info/l2_normalize_info_test.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
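The activation hunks above all build the same kind of attribute map. Spelled out with the template arguments that the flattened diff elides, the pattern is:

```cpp
// Sketch only: MakeValue, ValuePtr and MS_LOG come from the MindSpore test
// fixtures above; the template arguments are reconstructed from context.
ValuePtr sm = MakeValue(std::string("softmax"));
ValuePtr axis = MakeValue(std::int64_t(2));
mindspore::HashMap<std::string, ValuePtr> softmax_attr = {{"activation_type", sm}, {"axis", axis}};

// Lookup is identical to std::unordered_map, which is why only the
// declarations change in these hunks:
auto it = softmax_attr.find("activation_type");
if (it != softmax_attr.end()) {
  MS_LOG(INFO) << "activation: " << it->second->ToString();
}
```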
@@ -55,7 +55,7 @@ void TestL2NormalizeInfo::SetUp() { g_device_manager->Init(dev_list, local_dev, stage_map, "hccl"); ValuePtr axis = MakeValue(std::vector{1}); - std::unordered_map attr = {{AXIS, axis}}; + mindspore::HashMap attr = {{AXIS, axis}}; Shapes inputs_shape = {{32, 64, 96}}; Shapes outputs_shape = {{32, 64, 96}}; diff --git a/tests/ut/cpp/parallel/ops_info/log_softmax_info_test.cc b/tests/ut/cpp/parallel/ops_info/log_softmax_info_test.cc index deec9503784..a93e8746865 100644 --- a/tests/ut/cpp/parallel/ops_info/log_softmax_info_test.cc +++ b/tests/ut/cpp/parallel/ops_info/log_softmax_info_test.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -55,7 +55,7 @@ void TestLogSoftmaxInfo::SetUp() { g_device_manager->Init(dev_list, local_dev, stage_map, "hccl"); ValuePtr axis = MakeValue(static_cast(-2)); - std::unordered_map attr = {{"axis", axis}}; + mindspore::HashMap attr = {{"axis", axis}}; Shapes inputs_shape = {{2, 4, 8, 16}}; Shapes outputs_shape = {{2, 4, 8, 16}}; diff --git a/tests/ut/cpp/parallel/ops_info/matmul_info_test.cc b/tests/ut/cpp/parallel/ops_info/matmul_info_test.cc index 13dafa35038..6dca27682a0 100644 --- a/tests/ut/cpp/parallel/ops_info/matmul_info_test.cc +++ b/tests/ut/cpp/parallel/ops_info/matmul_info_test.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -61,7 +61,7 @@ void TestMatmulInfo::SetUp() { // matmul1 ValuePtr transpose_a_1 = MakeValue(false); ValuePtr transpoce_b_1 = MakeValue(false); - std::unordered_map attr_1 = {{"transpose_a", transpose_a_1}, {"transpose_b", transpoce_b_1}}; + mindspore::HashMap attr_1 = {{"transpose_a", transpose_a_1}, {"transpose_b", transpoce_b_1}}; Shapes inputs_shape_1 = {{2, 4, 8, 16}, {2, 4, 16, 32}}; Shapes outputs_shape_1 = {{2, 4, 8, 32}}; @@ -71,7 +71,7 @@ void TestMatmulInfo::SetUp() { // matmul2 ValuePtr transpose_a_2 = MakeValue(false); ValuePtr transpoce_b_2 = MakeValue(true); - std::unordered_map attr_2 = {{"transpose_a", transpose_a_2}, {"transpose_b", transpoce_b_2}}; + mindspore::HashMap attr_2 = {{"transpose_a", transpose_a_2}, {"transpose_b", transpoce_b_2}}; Shapes inputs_shape_2 = {{2, 4, 8, 16}, {32, 16}}; Shapes outputs_shape_2 = {{2, 4, 8, 32}}; @@ -81,7 +81,7 @@ void TestMatmulInfo::SetUp() { // matmul3 ValuePtr transpose_a_3 = MakeValue(false); ValuePtr transpoce_b_3 = MakeValue(true); - std::unordered_map attr_3 = {{"transpose_a", transpose_a_3}, {"transpose_b", transpoce_b_3}}; + mindspore::HashMap attr_3 = {{"transpose_a", transpose_a_3}, {"transpose_b", transpoce_b_3}}; Shapes inputs_shape_3 = {{8, 16}, {2, 4, 32, 16}}; Shapes outputs_shape_3 = {{2, 4, 8, 32}}; @@ -89,7 +89,7 @@ void TestMatmulInfo::SetUp() { matmul3 = std::make_shared("matmul_info", inputs_shape_3, outputs_shape_3, attr_3); // matmul4 - std::unordered_map attr_4 = {{"transpose_a", transpose_a_3}}; + mindspore::HashMap attr_4 = {{"transpose_a", transpose_a_3}}; matmul4 = std::make_shared("matmul_info", inputs_shape_3, outputs_shape_3, attr_4); } diff --git a/tests/ut/cpp/parallel/ops_info/onehot_info_test.cc b/tests/ut/cpp/parallel/ops_info/onehot_info_test.cc index 06867b39640..4698bcd4211 100644 --- 
a/tests/ut/cpp/parallel/ops_info/onehot_info_test.cc +++ b/tests/ut/cpp/parallel/ops_info/onehot_info_test.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -55,7 +55,7 @@ void TestOneHotInfo::SetUp() { g_device_manager->Init(dev_list, local_dev, stage_map, "hccl"); ValuePtr axis = MakeValue(std::int64_t(-1)); - std::unordered_map attr = {{"axis", axis}}; + mindspore::HashMap attr = {{"axis", axis}}; Shapes inputs_shape = {{64}, {}, {}}; Shapes outputs_shape = {{64, 10}}; diff --git a/tests/ut/cpp/parallel/ops_info/onehot_info_test_axis_0.cc b/tests/ut/cpp/parallel/ops_info/onehot_info_test_axis_0.cc index 00f0bfa4882..70ddda8d136 100644 --- a/tests/ut/cpp/parallel/ops_info/onehot_info_test_axis_0.cc +++ b/tests/ut/cpp/parallel/ops_info/onehot_info_test_axis_0.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -55,7 +55,7 @@ void TestOneHotInfo2::SetUp() { g_device_manager->Init(dev_list, local_dev, stage_map, "hccl"); ValuePtr axis = MakeValue(std::int64_t(0)); - std::unordered_map attr = {{"axis", axis}}; + mindspore::HashMap attr = {{"axis", axis}}; Shapes inputs_shape = {{64}, {}, {}}; Shapes outputs_shape = {{10, 64}}; diff --git a/tests/ut/cpp/parallel/ops_info/pow_info_test.cc b/tests/ut/cpp/parallel/ops_info/pow_info_test.cc index a9244e20c2b..59489ad3fcc 100644 --- a/tests/ut/cpp/parallel/ops_info/pow_info_test.cc +++ b/tests/ut/cpp/parallel/ops_info/pow_info_test.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -54,7 +54,7 @@ void TestPowInfo::SetUp() { g_device_manager = std::make_shared(); g_device_manager->Init(dev_list, local_dev, stage_map, "hccl"); - std::unordered_map attr; + mindspore::HashMap attr; Shapes inputs_shape = {{32, 64, 128}, {32, 64, 128}}; Shapes outputs_shape = {{32, 64, 128}}; diff --git a/tests/ut/cpp/parallel/ops_info/prelu_test.cc b/tests/ut/cpp/parallel/ops_info/prelu_test.cc index e51c37b6720..11d27ba7a49 100644 --- a/tests/ut/cpp/parallel/ops_info/prelu_test.cc +++ b/tests/ut/cpp/parallel/ops_info/prelu_test.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -54,12 +54,12 @@ void TestPReLUInfo::SetUp() { g_device_manager->Init(dev_list, local_dev, stage_map, "hccl"); Shapes inputs_shape = {{64, 4, 8, 16}, {4}}; Shapes outputs_shape = {{64, 4, 8, 16}}; - std::unordered_map attr; + mindspore::HashMap attr; prelu = std::make_shared("prelu_info", inputs_shape, outputs_shape, attr); Shapes inputs_shape_2d = {{1024, 4}, {4}}; Shapes outputs_shape_2d = {{1024, 4}}; - std::unordered_map attr_2d; + mindspore::HashMap attr_2d; prelu_2d = std::make_shared("prelu_info", inputs_shape_2d, outputs_shape_2d, attr_2d); } diff --git a/tests/ut/cpp/parallel/ops_info/reduce_method_test.cc b/tests/ut/cpp/parallel/ops_info/reduce_method_test.cc index 756d3fb6401..1b299c75399 100644 --- a/tests/ut/cpp/parallel/ops_info/reduce_method_test.cc +++ b/tests/ut/cpp/parallel/ops_info/reduce_method_test.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -61,7 +61,7 @@ void TestReduceSumInfo::SetUp() { ValuePtr value0; std::vector val = {value0, value}; ValuePtr keep_dims = MakeValue(false); - std::unordered_map attr = {{KEEP_DIMS, keep_dims}}; + mindspore::HashMap attr = {{KEEP_DIMS, keep_dims}}; reduce_sum = std::make_shared("sum_info", inputs_shape, outputs_shape, attr); reduce_sum->set_input_value(val); diff --git a/tests/ut/cpp/parallel/ops_info/reshape_test.cc b/tests/ut/cpp/parallel/ops_info/reshape_test.cc index a4dffe11a09..b030f88acf0 100644 --- a/tests/ut/cpp/parallel/ops_info/reshape_test.cc +++ b/tests/ut/cpp/parallel/ops_info/reshape_test.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -54,7 +54,7 @@ void TestReshapeInfo::SetUp() { g_device_manager = std::make_shared(); g_device_manager->Init(dev_list, local_dev, stage_map, "hccl"); - std::unordered_map attr; + mindspore::HashMap attr; Shapes inputs_shape = {{32, 512, 7, 7}}; Shapes outputs_shape = {{32, 25088}}; diff --git a/tests/ut/cpp/parallel/ops_info/softmax_entropy_loss_info_test.cc b/tests/ut/cpp/parallel/ops_info/softmax_entropy_loss_info_test.cc index a98d82682a0..2d46dc0239b 100644 --- a/tests/ut/cpp/parallel/ops_info/softmax_entropy_loss_info_test.cc +++ b/tests/ut/cpp/parallel/ops_info/softmax_entropy_loss_info_test.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -55,7 +55,7 @@ void TestSoftmaxLoss::SetUp() { g_device_manager->Init(dev_list, local_dev, stage_map, "hccl"); ValuePtr is_grad = MakeValue(true); - std::unordered_map attr = {{"is_grad", is_grad}}; + mindspore::HashMap attr = {{"is_grad", is_grad}}; Shapes inputs_shape = {{2, 4, 8, 16}, {2, 4, 8, 16}}; Shapes outputs_shape = {{2}, {2, 4, 8, 16}}; diff --git a/tests/ut/cpp/parallel/ops_info/softmax_info_test.cc b/tests/ut/cpp/parallel/ops_info/softmax_info_test.cc index 8c521e4204e..a717978882a 100644 --- a/tests/ut/cpp/parallel/ops_info/softmax_info_test.cc +++ b/tests/ut/cpp/parallel/ops_info/softmax_info_test.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -55,10 +55,10 @@ void TestSoftmaxInfo::SetUp() { g_device_manager->Init(dev_list, local_dev, stage_map, "hccl"); ValuePtr axis1 = MakeValue(static_cast(-2)); - std::unordered_map attr1 = {{"axis", axis1}}; + mindspore::HashMap attr1 = {{"axis", axis1}}; ValuePtr axis2 = MakeValue(static_cast(4)); - std::unordered_map attr2 = {{"axis", axis2}}; + mindspore::HashMap attr2 = {{"axis", axis2}}; Shapes inputs_shape = {{2, 4, 8, 16}}; Shapes outputs_shape = {{2, 4, 8, 16}}; diff --git a/tests/ut/cpp/parallel/ops_info/tanh_info_test.cc b/tests/ut/cpp/parallel/ops_info/tanh_info_test.cc index c7c05209e2d..d6e0cd98902 100644 --- a/tests/ut/cpp/parallel/ops_info/tanh_info_test.cc +++ b/tests/ut/cpp/parallel/ops_info/tanh_info_test.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -54,7 +54,7 @@ void TestTanhInfo::SetUp() { g_device_manager = std::make_shared(); g_device_manager->Init(dev_list, local_dev, stage_map, "hccl"); - std::unordered_map attr; + mindspore::HashMap attr; Shapes inputs_shape = {{2, 4, 8, 16}}; Shapes outputs_shape = {{2, 4, 8, 16}}; diff --git a/tests/ut/cpp/parallel/ops_info/tensor_add_info_test.cc b/tests/ut/cpp/parallel/ops_info/tensor_add_info_test.cc index 7d4d97669e3..50d8489ebb0 100644 --- a/tests/ut/cpp/parallel/ops_info/tensor_add_info_test.cc +++ b/tests/ut/cpp/parallel/ops_info/tensor_add_info_test.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -54,7 +54,7 @@ void TestTensorAddInfo::SetUp() { g_device_manager = std::make_shared(); g_device_manager->Init(dev_list, local_dev, stage_map, "hccl"); - std::unordered_map attr; + mindspore::HashMap attr; Shapes inputs_shape = {{32, 64, 96}, {32, 64, 96}}; Shapes outputs_shape = {{32, 64, 96}}; diff --git a/tests/ut/cpp/parallel/ops_info/tmpidentity_test.cc b/tests/ut/cpp/parallel/ops_info/tmpidentity_test.cc index 1f7c7118145..4619efce59c 100644 --- a/tests/ut/cpp/parallel/ops_info/tmpidentity_test.cc +++ b/tests/ut/cpp/parallel/ops_info/tmpidentity_test.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -54,7 +54,7 @@ void TestTmpIdentityInfo::SetUp() { g_device_manager = std::make_shared(); g_device_manager->Init(dev_list, local_dev, stage_map, "hccl"); - std::unordered_map attr = {}; + mindspore::HashMap attr = {}; Shapes inputs_shape = {{2, 4, 8, 16}}; Shapes outputs_shape = {{2, 4, 8, 16}}; identity_ptr = std::make_shared(inputs_shape, outputs_shape, attr); diff --git a/tests/ut/cpp/parallel/ops_info/transpose_test.cc b/tests/ut/cpp/parallel/ops_info/transpose_test.cc index 86abd9cf945..8faf1654c4d 100644 --- a/tests/ut/cpp/parallel/ops_info/transpose_test.cc +++ b/tests/ut/cpp/parallel/ops_info/transpose_test.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -54,7 +54,7 @@ void TestTransposeInfo::SetUp() { g_device_manager = std::make_shared(); g_device_manager->Init(dev_list, local_dev, stage_map, "hccl"); - std::unordered_map attr; + mindspore::HashMap attr; Shapes inputs_shape = {{128, 64}}; Shapes outputs_shape = {{64, 128}}; diff --git a/tests/ut/cpp/parallel/step_parallel_test.cc b/tests/ut/cpp/parallel/step_parallel_test.cc index 15b42fe301d..20d629be8e6 100644 --- a/tests/ut/cpp/parallel/step_parallel_test.cc +++ b/tests/ut/cpp/parallel/step_parallel_test.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -220,7 +220,7 @@ TEST_F(TestStepParallel, GetPythonPath2) { TEST_F(TestStepParallel, ExtractStrategy) { Dimensions v1 = {2, 2}; Dimensions v2 = {4, 4}; - std::unordered_map attrs; + mindspore::HashMap attrs; // stage ValuePtr val1 = MakeValue(v1); ValuePtr val2 = MakeValue(v2); @@ -295,7 +295,7 @@ TEST_F(TestStepParallel, CreatOpInstance) { py::object allreduce_pyobj = parse::python_adapter::CallPyFn( "mindspore.parallel._utils", "_get_python_op", "AllReduce", "mindspore.ops.operations", "test", arglist); py::dict opAttr = py::getattr(allreduce_pyobj, "attrs"); - std::unordered_map attributes{}; + mindspore::HashMap attributes{}; for (auto item : opAttr) { if (!py::isinstance(item.first)) { MS_LOG(EXCEPTION) << "type error in py dict convert"; diff --git a/tests/ut/cpp/parallel/tensor_layout/construct_operator_test.cc b/tests/ut/cpp/parallel/tensor_layout/construct_operator_test.cc index 49f20e8c036..dc7ca26fae8 100644 --- a/tests/ut/cpp/parallel/tensor_layout/construct_operator_test.cc +++ b/tests/ut/cpp/parallel/tensor_layout/construct_operator_test.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
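`TestStepParallel::CreatOpInstance` above is one of the few places where the map is filled from Python data rather than from literals: a pybind11 `py::dict` of operator attributes is walked and copied into a `mindspore::HashMap`. A rough sketch of that conversion loop, assuming a hypothetical helper name `ConvertAttrs` (the `py::str` check mirrors the test; the value-to-`ValuePtr` conversion is elided because the hunk does not show it):

```cpp
#include <string>
#include "pybind11/pybind11.h"

namespace py = pybind11;

// ConvertAttrs is a hypothetical name; ValuePtr and MS_LOG are the MindSpore
// types/macros used in the test above.
mindspore::HashMap<std::string, ValuePtr> ConvertAttrs(const py::dict &op_attr) {
  mindspore::HashMap<std::string, ValuePtr> attributes;
  for (auto item : op_attr) {
    if (!py::isinstance<py::str>(item.first)) {
      MS_LOG(EXCEPTION) << "type error in py dict convert";
    }
    auto name = py::cast<std::string>(item.first);
    // The test converts item.second to a ValuePtr via a MindSpore parser
    // helper; that step is elided in this sketch.
    attributes.emplace(name, ValuePtr{});
  }
  return attributes;
}
```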
@@ -55,7 +55,7 @@ void TestConstructOperator::SetUp() { ValuePtr transpose_a_1 = MakeValue(false); ValuePtr transpose_b_1 = MakeValue(false); - std::unordered_map attr_1 = {{"transpose_a", transpose_a_1}, {"transpose_b", transpose_b_1}}; + mindspore::HashMap attr_1 = {{"transpose_a", transpose_a_1}, {"transpose_b", transpose_b_1}}; Shapes inputs_shape_1 = {{2, 4, 8, 16}, {2, 4, 16, 32}}; Shapes outputs_shape_1 = {{2, 4, 8, 32}}; diff --git a/tests/ut/cpp/parallel/virtual_dataset_test.cc b/tests/ut/cpp/parallel/virtual_dataset_test.cc index 0aa38c7a578..47aabb0baa2 100644 --- a/tests/ut/cpp/parallel/virtual_dataset_test.cc +++ b/tests/ut/cpp/parallel/virtual_dataset_test.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -53,7 +53,7 @@ void TestVirtualDatasetInfo::SetUp() { g_device_manager = std::make_shared(); g_device_manager->Init(dev_list, local_dev, stage_map, "hccl"); - std::unordered_map attr; + mindspore::HashMap attr; Shapes inputs_shape = {{128, 32}, {1280, 320}, {12800, 3200}}; Shapes outputs_shape = {{128, 32}, {1280, 320}, {12800, 3200}}; diff --git a/tests/ut/cpp/pipeline/resource_test.cc b/tests/ut/cpp/pipeline/resource_test.cc index f2dfbb0023f..1a77125edb1 100644 --- a/tests/ut/cpp/pipeline/resource_test.cc +++ b/tests/ut/cpp/pipeline/resource_test.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -25,7 +25,7 @@ namespace mindspore { namespace pipeline { -using MethodMap = std::unordered_map>; +using MethodMap = mindspore::HashMap>; extern MethodMap& GetMethodMap(); diff --git a/tests/ut/cpp/pipeline/static_analysis/data_test.cc b/tests/ut/cpp/pipeline/static_analysis/data_test.cc index a163b9db147..cb3776250d0 100644 --- a/tests/ut/cpp/pipeline/static_analysis/data_test.cc +++ b/tests/ut/cpp/pipeline/static_analysis/data_test.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -158,7 +158,7 @@ TEST_F(TestData, test_clone) { std::vector attr = {{"x", std::make_shared(kAnyValue, kInt64)}, {"y", std::make_shared(kAnyValue, kInt64)}}; - std::unordered_map methods; + mindspore::HashMap methods; AbstractBasePtr c1 = std::make_shared(Named("Point"), attr, methods); AbstractBasePtr c2 = c1->Clone(); ASSERT_EQ(*c1, *c2); diff --git a/tests/ut/cpp/pre_activate/ascend/ir_fission/topk_split_test.cc b/tests/ut/cpp/pre_activate/ascend/ir_fission/topk_split_test.cc index 619ae00dee4..2db2e77f9ca 100644 --- a/tests/ut/cpp/pre_activate/ascend/ir_fission/topk_split_test.cc +++ b/tests/ut/cpp/pre_activate/ascend/ir_fission/topk_split_test.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
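`resource_test.cc` above swaps the alias itself instead of individual declarations, so every caller of `GetMethodMap()` picks up the new container with no further edits. The flattened diff elides the template arguments; the key and value types below are assumptions that only illustrate the nested-alias shape:

```cpp
// Key/value types are assumed for illustration; the diff elides them.
using MethodMap = mindspore::HashMap<int64_t, mindspore::HashMap<std::string, Any>>;

extern MethodMap &GetMethodMap();
```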
@@ -109,7 +109,7 @@ TEST_F(TestHWTopKSplit, test_topk_no_split) { EXPECT_EQ(topk_cnode->inputs().size(), 3); auto input_names_vec = AnfAlgo::GetNodeAttr>(topk_cnode, kAttrInputNames); EXPECT_EQ(input_names_vec.size(), 2); - std::unordered_set attr_index{1}; + mindspore::HashSet attr_index{1}; ConstInputToAttr(topk_cnode, attr_index); EXPECT_EQ(topk_cnode->inputs().size(), 2); input_names_vec = AnfAlgo::GetNodeAttr>(topk_cnode, kAttrInputNames); diff --git a/tests/ut/cpp/transform/op_adapter_test.cc b/tests/ut/cpp/transform/op_adapter_test.cc index bc79c5c79c1..045e30e592d 100644 --- a/tests/ut/cpp/transform/op_adapter_test.cc +++ b/tests/ut/cpp/transform/op_adapter_test.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,7 +14,6 @@ * limitations under the License. */ #include -#include #include #include "common/common_test.h" @@ -26,7 +25,6 @@ using std::cout; using std::endl; using std::string; -using std::unordered_map; namespace mindspore { namespace transform { @@ -73,7 +71,7 @@ TEST_F(TestOpAdapter, TestSetAttr_Conv2d_Primitive) { ASSERT_EQ(adpt->setAttr(conv, "pad_mode", string("same")), 0); ASSERT_EQ(adpt->setAttr(conv, "nothing", "test"), NOT_FOUND); - const unordered_map attrs = { + const mindspore::HashMap attrs = { {"padding", MakeValue(2)}, {"padding_mode", MakeValue(string("normal"))}, {"stride", MakeValue(8)} diff --git a/third_party/robin_hood/LICENSE b/third_party/robin_hood/LICENSE new file mode 100644 index 00000000000..2065a8e5207 --- /dev/null +++ b/third_party/robin_hood/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018-2021 Martin Ankerl + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
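The topk-split hunk above uses the set-side counterpart: `attr_index` becomes a `mindspore::HashSet` (its element type, presumably `size_t` for input indices, is elided in the flattened diff) and is passed to `ConstInputToAttr` unchanged. A sketch of `utils/hash_set.h` mirroring the hash_map sketch earlier, with the same assumed flag and fallback:

```cpp
// Sketch of utils/hash_set.h -- same caveats as the hash_map sketch above.
#include <functional>

#if defined(ENABLE_FAST_HASH_TABLE)  // assumed flag name
#include "robin_hood/robin_hood.h"

namespace mindspore {
template <typename T, typename Hash = robin_hood::hash<T>,
          typename Equal = std::equal_to<T>>
using HashSet = robin_hood::unordered_set<T, Hash, Equal>;
}  // namespace mindspore
#else
#include <unordered_set>

namespace mindspore {
template <typename T, typename Hash = std::hash<T>,
          typename Equal = std::equal_to<T>>
using HashSet = std::unordered_set<T, Hash, Equal>;
}  // namespace mindspore
#endif
```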
diff --git a/third_party/robin_hood/README.md b/third_party/robin_hood/README.md new file mode 100644 index 00000000000..7fae4ad6de5 --- /dev/null +++ b/third_party/robin_hood/README.md @@ -0,0 +1,72 @@ +➵ robin_hood unordered map & set [![Release](https://img.shields.io/github/release/martinus/robin-hood-hashing.svg)](https://github.com/martinus/robin-hood-hashing/releases) [![GitHub license](https://img.shields.io/github/license/martinus/robin-hood-hashing.svg)](https://raw.githubusercontent.com/martinus/robin-hood-hashing/master/LICENSE) +============ + + +[![Travis CI Build Status](https://travis-ci.com/martinus/robin-hood-hashing.svg?branch=master)](https://travis-ci.com/martinus/robin-hood-hashing) +[![Appveyor Build Status](https://ci.appveyor.com/api/projects/status/github/martinus/robin-hood-hashing?branch=master&svg=true)](https://ci.appveyor.com/project/martinus/robin-hood-hashing) +[![Codacy Badge](https://api.codacy.com/project/badge/Grade/9308495247b542c9802016caa6fd3461)](https://www.codacy.com/app/martinus/robin-hood-hashing?utm_source=github.com&utm_medium=referral&utm_content=martinus/robin-hood-hashing&utm_campaign=Badge_Grade) +[![Total alerts](https://img.shields.io/lgtm/alerts/g/martinus/robin-hood-hashing.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/martinus/robin-hood-hashing/alerts/) +[![Language grade: C/C++](https://img.shields.io/lgtm/grade/cpp/g/martinus/robin-hood-hashing.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/martinus/robin-hood-hashing/context:cpp) +[![Coverage Status](https://coveralls.io/repos/github/martinus/robin-hood-hashing/badge.svg)](https://coveralls.io/github/martinus/robin-hood-hashing) + +`robin_hood::unordered_map` and `robin_hood::unordered_set` is a platform independent replacement for `std::unordered_map` / `std::unordered_set` which is both faster and more memory efficient for real-world use cases. + +## Installation & Usage + +### Direct Inclusion + +1. Add [`robin_hood.h`](https://github.com/martinus/robin-hood-hashing/releases) to your C++ project. +1. Use `robin_hood::unordered_map` instead of `std::unordered_map` +1. Use `robin_hood::unordered_set` instead of `std::unordered_set` + +### [Conan](https://conan.io/), the C/C++ Package Manager + +1. Setup your `CMakeLists.txt` (see [Conan documentation](https://docs.conan.io/en/latest/integrations/build_system.html) on how to use MSBuild, Meson and others) like this: + ```CMake + project(myproject CXX) + + add_executable(${PROJECT_NAME} main.cpp) + + include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake) # Include Conan-generated file + conan_basic_setup(TARGETS) # Introduce Conan-generated targets + + target_link_libraries(${PROJECT_NAME} CONAN_PKG::robin-hood-hashing) + ``` +1. Create `conanfile.txt` in your source dir (don't forget to update the version) + ```ini + [requires] + robin-hood-hashing/3.10.0 + + [generators] + cmake + ``` +1. Install and run Conan, then build your project as always: + ```Bash + pip install conan + mkdir build + cd build + conan install ../ --build=missing + cmake ../ + cmake --build . + ``` + The `robin-hood-hashing` package in Conan is kept up to date by Conan contributors. If the version is out of date, please [create an issue or pull request](https://github.com/conan-io/conan-center-index) on the `conan-center-index` repository. + +## Benchmarks + +Please see extensive benchmarks at [Hashmaps Benchmarks](https://martin.ankerl.com/2019/04/01/hashmap-benchmarks-01-overview/). 
In short: `robin_hood` is always among the fastest maps and uses far less memory than `std::unordered_map`. + +## Design Choices + +- **Two memory layouts**. Data is either stored in a flat array, or with node indirection. Access for `unordered_flat_map` is extremely fast due to no indirection, but references to elements are not stable. It also causes allocation spikes when the map resizes, and will need plenty of memory for large objects. Node based map has stable references & pointers (NOT iterators! Similar to [std::unordered_map](https://en.cppreference.com/w/cpp/container/unordered_map)) and uses `const Key` in the pair. It is a bit slower due to indirection. The choice is yours; you can either use `robin_hood::unordered_flat_map` or `robin_hood::unordered_node_map` directly. If you use `robin_hood::unordered_map` It tries to choose the layout that seems appropriate for your data. + +- **Custom allocator**. Node based representation has a custom bulk allocator that tries to make few memory allocations. All allocated memory is reused, so there won't be any allocation spikes. It's very fast as well. + +- **Optimized hash**. `robin_hood::hash` has custom implementations for integer types and for `std::string` that are very fast and falls back to `std::hash` for everything else. + +- **Depends on good Hashing**. For a really bad hash the performance will not only degrade like in `std::unordered_map`, the map will simply fail with an `std::overflow_error`. In practice, when using the standard `robin_hood::hash`, I have never seen this happening. + +## License + +Licensed under the MIT License. See the [LICENSE](https://github.com/martinus/robin-hood-hashing/blob/master/LICENSE) file for details. + +by martinus diff --git a/third_party/robin_hood/include/robin_hood/robin_hood.h b/third_party/robin_hood/include/robin_hood/robin_hood.h new file mode 100644 index 00000000000..511a308d32b --- /dev/null +++ b/third_party/robin_hood/include/robin_hood/robin_hood.h @@ -0,0 +1,2529 @@ +// ______ _____ ______ _________ +// ______________ ___ /_ ___(_)_______ ___ /_ ______ ______ ______ / +// __ ___/_ __ \__ __ \__ / __ __ \ __ __ \_ __ \_ __ \_ __ / +// _ / / /_/ /_ /_/ /_ / _ / / / _ / / // /_/ // /_/ // /_/ / +// /_/ \____/ /_.___/ /_/ /_/ /_/ ________/_/ /_/ \____/ \____/ \__,_/ +// _/_____/ +// +// Fast & memory efficient hashtable based on robin hood hashing for C++11/14/17/20 +// https://github.com/martinus/robin-hood-hashing +// +// Licensed under the MIT License . +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2021 Martin Ankerl +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +#ifndef ROBIN_HOOD_H_INCLUDED +#define ROBIN_HOOD_H_INCLUDED + +// see https://semver.org/ +#define ROBIN_HOOD_VERSION_MAJOR 3 // for incompatible API changes +#define ROBIN_HOOD_VERSION_MINOR 11 // for adding functionality in a backwards-compatible manner +#define ROBIN_HOOD_VERSION_PATCH 3 // for backwards-compatible bug fixes + +#include +#include +#include +#include +#include +#include // only to support hash of smart pointers +#include +#include +#include +#include +#if __cplusplus >= 201703L +# include +#endif + +// #define ROBIN_HOOD_LOG_ENABLED +#ifdef ROBIN_HOOD_LOG_ENABLED +# include +# define ROBIN_HOOD_LOG(...) \ + std::cout << __FUNCTION__ << "@" << __LINE__ << ": " << __VA_ARGS__ << std::endl; +#else +# define ROBIN_HOOD_LOG(x) +#endif + +// #define ROBIN_HOOD_TRACE_ENABLED +#ifdef ROBIN_HOOD_TRACE_ENABLED +# include +# define ROBIN_HOOD_TRACE(...) \ + std::cout << __FUNCTION__ << "@" << __LINE__ << ": " << __VA_ARGS__ << std::endl; +#else +# define ROBIN_HOOD_TRACE(x) +#endif + +// #define ROBIN_HOOD_COUNT_ENABLED +#ifdef ROBIN_HOOD_COUNT_ENABLED +# include +# define ROBIN_HOOD_COUNT(x) ++counts().x; +namespace robin_hood { +struct Counts { + uint64_t shiftUp{}; + uint64_t shiftDown{}; +}; +inline std::ostream& operator<<(std::ostream& os, Counts const& c) { + return os << c.shiftUp << " shiftUp" << std::endl << c.shiftDown << " shiftDown" << std::endl; +} + +static Counts& counts() { + static Counts counts{}; + return counts; +} +} // namespace robin_hood +#else +# define ROBIN_HOOD_COUNT(x) +#endif + +// all non-argument macros should use this facility. 
See +// https://www.fluentcpp.com/2019/05/28/better-macros-better-flags/ +#define ROBIN_HOOD(x) ROBIN_HOOD_PRIVATE_DEFINITION_##x() + +// mark unused members with this macro +#define ROBIN_HOOD_UNUSED(identifier) + +// bitness +#if SIZE_MAX == UINT32_MAX +# define ROBIN_HOOD_PRIVATE_DEFINITION_BITNESS() 32 +#elif SIZE_MAX == UINT64_MAX +# define ROBIN_HOOD_PRIVATE_DEFINITION_BITNESS() 64 +#else +# error Unsupported bitness +#endif + +// endianess +#ifdef _MSC_VER +# define ROBIN_HOOD_PRIVATE_DEFINITION_LITTLE_ENDIAN() 1 +# define ROBIN_HOOD_PRIVATE_DEFINITION_BIG_ENDIAN() 0 +#else +# define ROBIN_HOOD_PRIVATE_DEFINITION_LITTLE_ENDIAN() \ + (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) +# define ROBIN_HOOD_PRIVATE_DEFINITION_BIG_ENDIAN() (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) +#endif + +// inline +#ifdef _MSC_VER +# define ROBIN_HOOD_PRIVATE_DEFINITION_NOINLINE() __declspec(noinline) +#else +# define ROBIN_HOOD_PRIVATE_DEFINITION_NOINLINE() __attribute__((noinline)) +#endif + +// exceptions +#if !defined(__cpp_exceptions) && !defined(__EXCEPTIONS) && !defined(_CPPUNWIND) +# define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_EXCEPTIONS() 0 +#else +# define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_EXCEPTIONS() 1 +#endif + +// count leading/trailing bits +#if !defined(ROBIN_HOOD_DISABLE_INTRINSICS) +# ifdef _MSC_VER +# if ROBIN_HOOD(BITNESS) == 32 +# define ROBIN_HOOD_PRIVATE_DEFINITION_BITSCANFORWARD() _BitScanForward +# else +# define ROBIN_HOOD_PRIVATE_DEFINITION_BITSCANFORWARD() _BitScanForward64 +# endif +# include +# pragma intrinsic(ROBIN_HOOD(BITSCANFORWARD)) +# define ROBIN_HOOD_COUNT_TRAILING_ZEROES(x) \ + [](size_t mask) noexcept -> int { \ + unsigned long index; \ + return ROBIN_HOOD(BITSCANFORWARD)(&index, mask) ? static_cast(index) \ + : ROBIN_HOOD(BITNESS); \ + }(x) +# else +# if ROBIN_HOOD(BITNESS) == 32 +# define ROBIN_HOOD_PRIVATE_DEFINITION_CTZ() __builtin_ctzl +# define ROBIN_HOOD_PRIVATE_DEFINITION_CLZ() __builtin_clzl +# else +# define ROBIN_HOOD_PRIVATE_DEFINITION_CTZ() __builtin_ctzll +# define ROBIN_HOOD_PRIVATE_DEFINITION_CLZ() __builtin_clzll +# endif +# define ROBIN_HOOD_COUNT_LEADING_ZEROES(x) ((x) ? ROBIN_HOOD(CLZ)(x) : ROBIN_HOOD(BITNESS)) +# define ROBIN_HOOD_COUNT_TRAILING_ZEROES(x) ((x) ? ROBIN_HOOD(CTZ)(x) : ROBIN_HOOD(BITNESS)) +# endif +#endif + +// fallthrough +#ifndef __has_cpp_attribute // For backwards compatibility +# define __has_cpp_attribute(x) 0 +#endif +#if __has_cpp_attribute(clang::fallthrough) +# define ROBIN_HOOD_PRIVATE_DEFINITION_FALLTHROUGH() [[clang::fallthrough]] +#elif __has_cpp_attribute(gnu::fallthrough) +# define ROBIN_HOOD_PRIVATE_DEFINITION_FALLTHROUGH() [[gnu::fallthrough]] +#else +# define ROBIN_HOOD_PRIVATE_DEFINITION_FALLTHROUGH() +#endif + +// likely/unlikely +#ifdef _MSC_VER +# define ROBIN_HOOD_LIKELY(condition) condition +# define ROBIN_HOOD_UNLIKELY(condition) condition +#else +# define ROBIN_HOOD_LIKELY(condition) __builtin_expect(condition, 1) +# define ROBIN_HOOD_UNLIKELY(condition) __builtin_expect(condition, 0) +#endif + +// detect if native wchar_t type is availiable in MSVC +#ifdef _MSC_VER +# ifdef _NATIVE_WCHAR_T_DEFINED +# define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_NATIVE_WCHART() 1 +# else +# define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_NATIVE_WCHART() 0 +# endif +#else +# define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_NATIVE_WCHART() 1 +#endif + +// detect if MSVC supports the pair(std::piecewise_construct_t,...) 
consructor being constexpr +#ifdef _MSC_VER +# if _MSC_VER <= 1900 +# define ROBIN_HOOD_PRIVATE_DEFINITION_BROKEN_CONSTEXPR() 1 +# else +# define ROBIN_HOOD_PRIVATE_DEFINITION_BROKEN_CONSTEXPR() 0 +# endif +#else +# define ROBIN_HOOD_PRIVATE_DEFINITION_BROKEN_CONSTEXPR() 0 +#endif + +// workaround missing "is_trivially_copyable" in g++ < 5.0 +// See https://stackoverflow.com/a/31798726/48181 +#if defined(__GNUC__) && __GNUC__ < 5 +# define ROBIN_HOOD_IS_TRIVIALLY_COPYABLE(...) __has_trivial_copy(__VA_ARGS__) +#else +# define ROBIN_HOOD_IS_TRIVIALLY_COPYABLE(...) std::is_trivially_copyable<__VA_ARGS__>::value +#endif + +// helpers for C++ versions, see https://gcc.gnu.org/onlinedocs/cpp/Standard-Predefined-Macros.html +#define ROBIN_HOOD_PRIVATE_DEFINITION_CXX() __cplusplus +#define ROBIN_HOOD_PRIVATE_DEFINITION_CXX98() 199711L +#define ROBIN_HOOD_PRIVATE_DEFINITION_CXX11() 201103L +#define ROBIN_HOOD_PRIVATE_DEFINITION_CXX14() 201402L +#define ROBIN_HOOD_PRIVATE_DEFINITION_CXX17() 201703L + +#if ROBIN_HOOD(CXX) >= ROBIN_HOOD(CXX17) +# define ROBIN_HOOD_PRIVATE_DEFINITION_NODISCARD() [[nodiscard]] +#else +# define ROBIN_HOOD_PRIVATE_DEFINITION_NODISCARD() +#endif + +namespace robin_hood { + +#if ROBIN_HOOD(CXX) >= ROBIN_HOOD(CXX14) +# define ROBIN_HOOD_STD std +#else + +// c++11 compatibility layer +namespace ROBIN_HOOD_STD { +template +struct alignment_of + : std::integral_constant::type)> {}; + +template +class integer_sequence { +public: + using value_type = T; + static_assert(std::is_integral::value, "not integral type"); + static constexpr std::size_t size() noexcept { + return sizeof...(Ints); + } +}; +template +using index_sequence = integer_sequence; + +namespace detail_ { +template +struct IntSeqImpl { + using TValue = T; + static_assert(std::is_integral::value, "not integral type"); + static_assert(Begin >= 0 && Begin < End, "unexpected argument (Begin<0 || Begin<=End)"); + + template + struct IntSeqCombiner; + + template + struct IntSeqCombiner, integer_sequence> { + using TResult = integer_sequence; + }; + + using TResult = + typename IntSeqCombiner::TResult, + typename IntSeqImpl::TResult>::TResult; +}; + +template +struct IntSeqImpl { + using TValue = T; + static_assert(std::is_integral::value, "not integral type"); + static_assert(Begin >= 0, "unexpected argument (Begin<0)"); + using TResult = integer_sequence; +}; + +template +struct IntSeqImpl { + using TValue = T; + static_assert(std::is_integral::value, "not integral type"); + static_assert(Begin >= 0, "unexpected argument (Begin<0)"); + using TResult = integer_sequence; +}; +} // namespace detail_ + +template +using make_integer_sequence = typename detail_::IntSeqImpl::TResult; + +template +using make_index_sequence = make_integer_sequence; + +template +using index_sequence_for = make_index_sequence; + +} // namespace ROBIN_HOOD_STD + +#endif + +namespace detail { + +// make sure we static_cast to the correct type for hash_int +#if ROBIN_HOOD(BITNESS) == 64 +using SizeT = uint64_t; +#else +using SizeT = uint32_t; +#endif + +template +T rotr(T x, unsigned k) { + return (x >> k) | (x << (8U * sizeof(T) - k)); +} + +// This cast gets rid of warnings like "cast from 'uint8_t*' {aka 'unsigned char*'} to +// 'uint64_t*' {aka 'long unsigned int*'} increases required alignment of target type". Use with +// care! 
+template +inline T reinterpret_cast_no_cast_align_warning(void* ptr) noexcept { + return reinterpret_cast(ptr); +} + +template +inline T reinterpret_cast_no_cast_align_warning(void const* ptr) noexcept { + return reinterpret_cast(ptr); +} + +// make sure this is not inlined as it is slow and dramatically enlarges code, thus making other +// inlinings more difficult. Throws are also generally the slow path. +template +[[noreturn]] ROBIN_HOOD(NOINLINE) +#if ROBIN_HOOD(HAS_EXCEPTIONS) + void doThrow(Args&&... args) { + // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay) + throw E(std::forward(args)...); +} +#else + void doThrow(Args&&... ROBIN_HOOD_UNUSED(args) /*unused*/) { + abort(); +} +#endif + +template +T* assertNotNull(T* t, Args&&... args) { + if (ROBIN_HOOD_UNLIKELY(nullptr == t)) { + doThrow(std::forward(args)...); + } + return t; +} + +template +inline T unaligned_load(void const* ptr) noexcept { + // using memcpy so we don't get into unaligned load problems. + // compiler should optimize this very well anyways. + T t; + std::memcpy(&t, ptr, sizeof(T)); + return t; +} + +// Allocates bulks of memory for objects of type T. This deallocates the memory in the destructor, +// and keeps a linked list of the allocated memory around. Overhead per allocation is the size of a +// pointer. +template +class BulkPoolAllocator { +public: + BulkPoolAllocator() noexcept = default; + + // does not copy anything, just creates a new allocator. + BulkPoolAllocator(const BulkPoolAllocator& ROBIN_HOOD_UNUSED(o) /*unused*/) noexcept + : mHead(nullptr) + , mListForFree(nullptr) {} + + BulkPoolAllocator(BulkPoolAllocator&& o) noexcept + : mHead(o.mHead) + , mListForFree(o.mListForFree) { + o.mListForFree = nullptr; + o.mHead = nullptr; + } + + BulkPoolAllocator& operator=(BulkPoolAllocator&& o) noexcept { + reset(); + mHead = o.mHead; + mListForFree = o.mListForFree; + o.mListForFree = nullptr; + o.mHead = nullptr; + return *this; + } + + BulkPoolAllocator& + // NOLINTNEXTLINE(bugprone-unhandled-self-assignment,cert-oop54-cpp) + operator=(const BulkPoolAllocator& ROBIN_HOOD_UNUSED(o) /*unused*/) noexcept { + // does not do anything + return *this; + } + + ~BulkPoolAllocator() noexcept { + reset(); + } + + // Deallocates all allocated memory. + void reset() noexcept { + while (mListForFree) { + T* tmp = *mListForFree; + ROBIN_HOOD_LOG("std::free") + std::free(mListForFree); + mListForFree = reinterpret_cast_no_cast_align_warning(tmp); + } + mHead = nullptr; + } + + // allocates, but does NOT initialize. Use in-place new constructor, e.g. + // T* obj = pool.allocate(); + // ::new (static_cast(obj)) T(); + T* allocate() { + T* tmp = mHead; + if (!tmp) { + tmp = performAllocation(); + } + + mHead = *reinterpret_cast_no_cast_align_warning(tmp); + return tmp; + } + + // does not actually deallocate but puts it in store. + // make sure you have already called the destructor! e.g. with + // obj->~T(); + // pool.deallocate(obj); + void deallocate(T* obj) noexcept { + *reinterpret_cast_no_cast_align_warning(obj) = mHead; + mHead = obj; + } + + // Adds an already allocated block of memory to the allocator. This allocator is from now on + // responsible for freeing the data (with free()). If the provided data is not large enough to + // make use of, it is immediately freed. Otherwise it is reused and freed in the destructor. 
+    void addOrFree(void* ptr, const size_t numBytes) noexcept {
+        // calculate number of available elements in ptr
+        if (numBytes < ALIGNMENT + ALIGNED_SIZE) {
+            // not enough data for at least one element. Free and return.
+            ROBIN_HOOD_LOG("std::free")
+            std::free(ptr);
+        } else {
+            ROBIN_HOOD_LOG("add to buffer")
+            add(ptr, numBytes);
+        }
+    }
+
+    void swap(BulkPoolAllocator<T, MinNumAllocs, MaxNumAllocs>& other) noexcept {
+        using std::swap;
+        swap(mHead, other.mHead);
+        swap(mListForFree, other.mListForFree);
+    }
+
+private:
+    // iterates the list of allocated memory to calculate how many to alloc next.
+    // Recalculating this each time saves us a size_t member.
+    // This ignores the fact that memory blocks might have been added manually with addOrFree. In
+    // practice, this should not matter much.
+    ROBIN_HOOD(NODISCARD) size_t calcNumElementsToAlloc() const noexcept {
+        auto tmp = mListForFree;
+        size_t numAllocs = MinNumAllocs;
+
+        while (numAllocs * 2 <= MaxNumAllocs && tmp) {
+            auto x = reinterpret_cast<T***>(tmp);
+            tmp = *x;
+            numAllocs *= 2;
+        }
+
+        return numAllocs;
+    }
+
+    // WARNING: Underflow if numBytes < ALIGNMENT! This is guarded in addOrFree().
+    void add(void* ptr, const size_t numBytes) noexcept {
+        const size_t numElements = (numBytes - ALIGNMENT) / ALIGNED_SIZE;
+
+        auto data = reinterpret_cast<T**>(ptr);
+
+        // link free list
+        auto x = reinterpret_cast<T***>(data);
+        *x = mListForFree;
+        mListForFree = data;
+
+        // create linked list for newly allocated data
+        auto* const headT =
+            reinterpret_cast_no_cast_align_warning<T*>(reinterpret_cast<char*>(ptr) + ALIGNMENT);
+
+        auto* const head = reinterpret_cast<char*>(headT);
+
+        // Visual Studio compiler automatically unrolls this loop, which is pretty cool
+        for (size_t i = 0; i < numElements; ++i) {
+            *reinterpret_cast_no_cast_align_warning<char**>(head + i * ALIGNED_SIZE) =
+                head + (i + 1) * ALIGNED_SIZE;
+        }
+
+        // last one points to 0
+        *reinterpret_cast_no_cast_align_warning<T**>(head + (numElements - 1) * ALIGNED_SIZE) =
+            mHead;
+        mHead = headT;
+    }
+
+    // Called when no memory is available (mHead == 0).
+    // Don't inline this slow path.
+    ROBIN_HOOD(NOINLINE) T* performAllocation() {
+        size_t const numElementsToAlloc = calcNumElementsToAlloc();
+
+        // alloc new memory: [prev |T, T, ... T]
+        size_t const bytes = ALIGNMENT + ALIGNED_SIZE * numElementsToAlloc;
+        ROBIN_HOOD_LOG("std::malloc " << bytes << " = " << ALIGNMENT << " + " << ALIGNED_SIZE
+                                      << " * " << numElementsToAlloc)
+        add(assertNotNull<std::bad_alloc>(std::malloc(bytes)), bytes);
+        return mHead;
+    }
+
+    // enforce byte alignment of the T's
+#if ROBIN_HOOD(CXX) >= ROBIN_HOOD(CXX14)
+    static constexpr size_t ALIGNMENT =
+        (std::max)(std::alignment_of<T>::value, std::alignment_of<T*>::value);
+#else
+    static const size_t ALIGNMENT =
+        (ROBIN_HOOD_STD::alignment_of<T>::value > ROBIN_HOOD_STD::alignment_of<T*>::value)
+            ? ROBIN_HOOD_STD::alignment_of<T>::value
+            : +ROBIN_HOOD_STD::alignment_of<T*>::value; // the + is a workaround
+#endif
+
+    static constexpr size_t ALIGNED_SIZE = ((sizeof(T) - 1) / ALIGNMENT + 1) * ALIGNMENT;
+
+    static_assert(MinNumAllocs >= 1, "MinNumAllocs");
+    static_assert(MaxNumAllocs >= MinNumAllocs, "MaxNumAllocs");
+    static_assert(ALIGNED_SIZE >= sizeof(T*), "ALIGNED_SIZE");
+    static_assert(0 == (ALIGNED_SIZE % sizeof(T*)), "ALIGNED_SIZE mod");
+    static_assert(ALIGNMENT >= sizeof(T*), "ALIGNMENT");
+
+    T* mHead{nullptr};
+    T** mListForFree{nullptr};
+};
+
+template <typename T, size_t MinSize, size_t MaxSize, bool IsFlat>
+struct NodeAllocator;
+
+// dummy allocator that does nothing
+template <typename T, size_t MinSize, size_t MaxSize>
+struct NodeAllocator<T, MinSize, MaxSize, true> {
+
+    // we are not using the data, so just free it.
+    void addOrFree(void* ptr, size_t ROBIN_HOOD_UNUSED(numBytes) /*unused*/) noexcept {
+        ROBIN_HOOD_LOG("std::free")
+        std::free(ptr);
+    }
+};
+
+template <typename T, size_t MinSize, size_t MaxSize>
+struct NodeAllocator<T, MinSize, MaxSize, false> : public BulkPoolAllocator<T, MinSize, MaxSize> {};
+
+// c++14 doesn't have is_nothrow_swappable, and clang++ 6.0.1 doesn't like it either, so I'm making
+// my own here.
+namespace swappable {
+#if ROBIN_HOOD(CXX) < ROBIN_HOOD(CXX17)
+using std::swap;
+template <typename T>
+struct nothrow {
+    static const bool value = noexcept(swap(std::declval<T&>(), std::declval<T&>()));
+};
+#else
+template <typename T>
+struct nothrow {
+    static const bool value = std::is_nothrow_swappable<T>::value;
+};
+#endif
+} // namespace swappable
+
+} // namespace detail
+
+struct is_transparent_tag {};
+
+// A custom pair implementation is used in the map because std::pair is not is_trivially_copyable,
+// which means it would not be allowed to be used in std::memcpy. This struct is copyable, which is
+// also tested.
+template <typename T1, typename T2>
+struct pair {
+    using first_type = T1;
+    using second_type = T2;
+
+    template <typename U1 = T1, typename U2 = T2,
+              typename = typename std::enable_if<std::is_default_constructible<U1>::value &&
+                                                 std::is_default_constructible<U2>::value>::type>
+    constexpr pair() noexcept(noexcept(U1()) && noexcept(U2()))
+        : first()
+        , second() {}
+
+    // pair constructors are explicit so we don't accidentally call this ctor when we don't have to.
+    explicit constexpr pair(std::pair<T1, T2> const& o) noexcept(
+        noexcept(T1(std::declval<T1 const&>())) && noexcept(T2(std::declval<T2 const&>())))
+        : first(o.first)
+        , second(o.second) {}
+
+    // pair constructors are explicit so we don't accidentally call this ctor when we don't have to.
+    explicit constexpr pair(std::pair<T1, T2>&& o) noexcept(noexcept(
+        T1(std::move(std::declval<T1&&>()))) && noexcept(T2(std::move(std::declval<T2&&>()))))
+        : first(std::move(o.first))
+        , second(std::move(o.second)) {}
+
+    constexpr pair(T1&& a, T2&& b) noexcept(noexcept(
+        T1(std::move(std::declval<T1&&>()))) && noexcept(T2(std::move(std::declval<T2&&>()))))
+        : first(std::move(a))
+        , second(std::move(b)) {}
+
+    template <typename U1, typename U2>
+    constexpr pair(U1&& a, U2&& b) noexcept(noexcept(T1(std::forward<U1>(
+        std::declval<U1&&>()))) && noexcept(T2(std::forward<U2>(std::declval<U2&&>()))))
+        : first(std::forward<U1>(a))
+        , second(std::forward<U2>(b)) {}
+
+    template <typename... U1, typename... U2>
+    // MSVC 2015 produces error "C2476: ‘constexpr’ constructor does not initialize all members"
+    // if this constructor is constexpr
+#if !ROBIN_HOOD(BROKEN_CONSTEXPR)
+    constexpr
+#endif
+        pair(std::piecewise_construct_t /*unused*/, std::tuple<U1...> a,
+             std::tuple<U2...> b) noexcept(noexcept(
+            pair(std::declval<std::tuple<U1...>&>(), std::declval<std::tuple<U2...>&>(),
+                 ROBIN_HOOD_STD::index_sequence_for<U1...>(),
+                 ROBIN_HOOD_STD::index_sequence_for<U2...>())))
+        : pair(a, b, ROBIN_HOOD_STD::index_sequence_for<U1...>(),
+               ROBIN_HOOD_STD::index_sequence_for<U2...>()) {
+    }
+
+    // constructor called from the std::piecewise_construct_t ctor
+    template <typename... U1, size_t... I1, typename... U2, size_t... I2>
+    pair(std::tuple<U1...>& a, std::tuple<U2...>& b,
+         ROBIN_HOOD_STD::index_sequence<I1...> /*unused*/,
+         ROBIN_HOOD_STD::index_sequence<I2...> /*unused*/) noexcept(
+        noexcept(T1(std::forward<U1>(std::get<I1>(std::declval<std::tuple<U1...>&>()))...)) &&
+        noexcept(T2(std::forward<U2>(std::get<I2>(std::declval<std::tuple<U2...>&>()))...)))
+        : first(std::forward<U1>(std::get<I1>(a))...)
+        , second(std::forward<U2>(std::get<I2>(b))...) {
+        // make visual studio compiler happy about warning about unused a & b.
+        // Visual studio's pair implementation disables warning 4100.
+        (void)a;
+        (void)b;
+    }
+
+    void swap(pair<T1, T2>& o) noexcept((detail::swappable::nothrow<T1>::value) &&
+                                        (detail::swappable::nothrow<T2>::value)) {
+        using std::swap;
+        swap(first, o.first);
+        swap(second, o.second);
+    }
+
+    T1 first;  // NOLINT(misc-non-private-member-variables-in-classes)
+    T2 second; // NOLINT(misc-non-private-member-variables-in-classes)
+};
+
+template <typename A, typename B>
+inline void swap(pair<A, B>& a, pair<A, B>& b) noexcept(
+    noexcept(std::declval<pair<A, B>&>().swap(std::declval<pair<A, B>&>()))) {
+    a.swap(b);
+}
+
+template <typename A, typename B>
+inline constexpr bool operator==(pair<A, B> const& x, pair<A, B> const& y) {
+    return (x.first == y.first) && (x.second == y.second);
+}
+template <typename A, typename B>
+inline constexpr bool operator!=(pair<A, B> const& x, pair<A, B> const& y) {
+    return !(x == y);
+}
+template <typename A, typename B>
+inline constexpr bool operator<(pair<A, B> const& x, pair<A, B> const& y) noexcept(noexcept(
+    std::declval<A const&>() < std::declval<A const&>()) && noexcept(std::declval<B const&>() <
+                                                                     std::declval<B const&>())) {
+    return x.first < y.first || (!(y.first < x.first) && x.second < y.second);
+}
+template <typename A, typename B>
+inline constexpr bool operator>(pair<A, B> const& x, pair<A, B> const& y) {
+    return y < x;
+}
+template <typename A, typename B>
+inline constexpr bool operator<=(pair<A, B> const& x, pair<A, B> const& y) {
+    return !(x > y);
+}
+template <typename A, typename B>
+inline constexpr bool operator>=(pair<A, B> const& x, pair<A, B> const& y) {
+    return !(x < y);
+}
+
+inline size_t hash_bytes(void const* ptr, size_t len) noexcept {
+    static constexpr uint64_t m = UINT64_C(0xc6a4a7935bd1e995);
+    static constexpr uint64_t seed = UINT64_C(0xe17a1465);
+    static constexpr unsigned int r = 47;
+
+    auto const* const data64 = static_cast<uint64_t const*>(ptr);
+    uint64_t h = seed ^ (len * m);
+
+    size_t const n_blocks = len / 8;
+    for (size_t i = 0; i < n_blocks; ++i) {
+        auto k = detail::unaligned_load<uint64_t>(data64 + i);
+
+        k *= m;
+        k ^= k >> r;
+        k *= m;
+
+        h ^= k;
+        h *= m;
+    }
+
+    auto const* const data8 = reinterpret_cast<uint8_t const*>(data64 + n_blocks);
+    switch (len & 7U) {
+    case 7:
+        h ^= static_cast<uint64_t>(data8[6]) << 48U;
+        ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
+    case 6:
+        h ^= static_cast<uint64_t>(data8[5]) << 40U;
+        ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
+    case 5:
+        h ^= static_cast<uint64_t>(data8[4]) << 32U;
+        ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
+    case 4:
+        h ^= static_cast<uint64_t>(data8[3]) << 24U;
+        ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
+    case 3:
+        h ^= static_cast<uint64_t>(data8[2]) << 16U;
+        ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
+    case 2:
+        h ^= static_cast<uint64_t>(data8[1]) << 8U;
+        ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
+    case 1:
+        h ^= static_cast<uint64_t>(data8[0]);
+        h *= m;
+        ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH
+    default:
+        break;
+    }
+
+    h ^= h >> r;
+
+    // not doing the final step here, because this will be done by keyToIdx anyways
+    // h *= m;
+    // h ^= h >> r;
+    return static_cast<size_t>(h);
+}
+
+inline size_t hash_int(uint64_t x) noexcept {
+    // tried lots of different hashes, let's stick with murmurhash3. It's simple, fast, well tested,
+    // and doesn't need any special 128bit operations.
+    x ^= x >> 33U;
+    x *= UINT64_C(0xff51afd7ed558ccd);
+    x ^= x >> 33U;
+
+    // not doing the final step here, because this will be done by keyToIdx anyways
+    // x *= UINT64_C(0xc4ceb9fe1a85ec53);
+    // x ^= x >> 33U;
+    return static_cast<size_t>(x);
+}
+
+// A thin wrapper around std::hash, performing an additional simple mixing step of the result,
+// to be safe against identity hashes.
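+//
+// Illustrative (reviewer note, not in upstream robin_hood): the extra mixing step matters
+// for patterned keys. On common standard libraries std::hash<int> is the identity function,
+// so sequential keys would otherwise land in neighboring buckets; hash_int() scrambles them:
+//
+//     robin_hood::hash<int> h;
+//     size_t a = h(1); // == hash_int(std::hash<int>{}(1)), well mixed
+//     size_t b = h(2); // expected to differ from a in roughly half of all bits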
+template +struct hash : public std::hash { + size_t operator()(T const& obj) const + noexcept(noexcept(std::declval>().operator()(std::declval()))) { + // call base hash + auto result = std::hash::operator()(obj); + // return mixed of that, to be save against identity has + return hash_int(static_cast(result)); + } +}; + +template +struct hash> { + size_t operator()(std::basic_string const& str) const noexcept { + return hash_bytes(str.data(), sizeof(CharT) * str.size()); + } +}; + +#if ROBIN_HOOD(CXX) >= ROBIN_HOOD(CXX17) +template +struct hash> { + size_t operator()(std::basic_string_view const& sv) const noexcept { + return hash_bytes(sv.data(), sizeof(CharT) * sv.size()); + } +}; +#endif + +template +struct hash { + size_t operator()(T* ptr) const noexcept { + return hash_int(reinterpret_cast(ptr)); + } +}; + +template +struct hash> { + size_t operator()(std::unique_ptr const& ptr) const noexcept { + return hash_int(reinterpret_cast(ptr.get())); + } +}; + +template +struct hash> { + size_t operator()(std::shared_ptr const& ptr) const noexcept { + return hash_int(reinterpret_cast(ptr.get())); + } +}; + +template +struct hash::value>::type> { + size_t operator()(Enum e) const noexcept { + using Underlying = typename std::underlying_type::type; + return hash{}(static_cast(e)); + } +}; + +#define ROBIN_HOOD_HASH_INT(T) \ + template <> \ + struct hash { \ + size_t operator()(T const& obj) const noexcept { \ + return hash_int(static_cast(obj)); \ + } \ + } + +#if defined(__GNUC__) && !defined(__clang__) +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wuseless-cast" +#endif +// see https://en.cppreference.com/w/cpp/utility/hash +ROBIN_HOOD_HASH_INT(bool); +ROBIN_HOOD_HASH_INT(char); +ROBIN_HOOD_HASH_INT(signed char); +ROBIN_HOOD_HASH_INT(unsigned char); +ROBIN_HOOD_HASH_INT(char16_t); +ROBIN_HOOD_HASH_INT(char32_t); +#if ROBIN_HOOD(HAS_NATIVE_WCHART) +ROBIN_HOOD_HASH_INT(wchar_t); +#endif +ROBIN_HOOD_HASH_INT(short); +ROBIN_HOOD_HASH_INT(unsigned short); +ROBIN_HOOD_HASH_INT(int); +ROBIN_HOOD_HASH_INT(unsigned int); +ROBIN_HOOD_HASH_INT(long); +ROBIN_HOOD_HASH_INT(long long); +ROBIN_HOOD_HASH_INT(unsigned long); +ROBIN_HOOD_HASH_INT(unsigned long long); +#if defined(__GNUC__) && !defined(__clang__) +# pragma GCC diagnostic pop +#endif +namespace detail { + +template +struct void_type { + using type = void; +}; + +template +struct has_is_transparent : public std::false_type {}; + +template +struct has_is_transparent::type> + : public std::true_type {}; + +// using wrapper classes for hash and key_equal prevents the diamond problem when the same type +// is used. see https://stackoverflow.com/a/28771920/48181 +template +struct WrapHash : public T { + WrapHash() = default; + explicit WrapHash(T const& o) noexcept(noexcept(T(std::declval()))) + : T(o) {} +}; + +template +struct WrapKeyEqual : public T { + WrapKeyEqual() = default; + explicit WrapKeyEqual(T const& o) noexcept(noexcept(T(std::declval()))) + : T(o) {} +}; + +// A highly optimized hashmap implementation, using the Robin Hood algorithm. +// +// In most cases, this map should be usable as a drop-in replacement for std::unordered_map, but +// be about 2x faster in most cases and require much less allocations. +// +// This implementation uses the following memory layout: +// +// [Node, Node, ... Node | info, info, ... infoSentinel ] +// +// * Node: either a DataNode that directly has the std::pair as member, +// or a DataNode with a pointer to std::pair. 
Which DataNode representation to use +// depends on how fast the swap() operation is. Heuristically, this is automatically choosen +// based on sizeof(). there are always 2^n Nodes. +// +// * info: Each Node in the map has a corresponding info byte, so there are 2^n info bytes. +// Each byte is initialized to 0, meaning the corresponding Node is empty. Set to 1 means the +// corresponding node contains data. Set to 2 means the corresponding Node is filled, but it +// actually belongs to the previous position and was pushed out because that place is already +// taken. +// +// * infoSentinel: Sentinel byte set to 1, so that iterator's ++ can stop at end() without the +// need for a idx variable. +// +// According to STL, order of templates has effect on throughput. That's why I've moved the +// boolean to the front. +// https://www.reddit.com/r/cpp/comments/ahp6iu/compile_time_binary_size_reductions_and_cs_future/eeguck4/ +template +class Table + : public WrapHash, + public WrapKeyEqual, + detail::NodeAllocator< + typename std::conditional< + std::is_void::value, Key, + robin_hood::pair::type, T>>::type, + 4, 16384, IsFlat> { +public: + static constexpr bool is_flat = IsFlat; + static constexpr bool is_map = !std::is_void::value; + static constexpr bool is_set = !is_map; + static constexpr bool is_transparent = + has_is_transparent::value && has_is_transparent::value; + + using key_type = Key; + using mapped_type = T; + using value_type = typename std::conditional< + is_set, Key, + robin_hood::pair::type, T>>::type; + using size_type = size_t; + using hasher = Hash; + using key_equal = KeyEqual; + using Self = Table; + +private: + static_assert(MaxLoadFactor100 > 10 && MaxLoadFactor100 < 100, + "MaxLoadFactor100 needs to be >10 && < 100"); + + using WHash = WrapHash; + using WKeyEqual = WrapKeyEqual; + + // configuration defaults + + // make sure we have 8 elements, needed to quickly rehash mInfo + static constexpr size_t InitialNumElements = sizeof(uint64_t); + static constexpr uint32_t InitialInfoNumBits = 5; + static constexpr uint8_t InitialInfoInc = 1U << InitialInfoNumBits; + static constexpr size_t InfoMask = InitialInfoInc - 1U; + static constexpr uint8_t InitialInfoHashShift = 0; + using DataPool = detail::NodeAllocator; + + // type needs to be wider than uint8_t. + using InfoType = uint32_t; + + // DataNode //////////////////////////////////////////////////////// + + // Primary template for the data node. We have special implementations for small and big + // objects. For large objects it is assumed that swap() is fairly slow, so we allocate these + // on the heap so swap merely swaps a pointer. + template + class DataNode {}; + + // Small: just allocate on the stack. + template + class DataNode final { + public: + template + explicit DataNode(M& ROBIN_HOOD_UNUSED(map) /*unused*/, Args&&... args) noexcept( + noexcept(value_type(std::forward(args)...))) + : mData(std::forward(args)...) 
{} + + DataNode(M& ROBIN_HOOD_UNUSED(map) /*unused*/, DataNode&& n) noexcept( + std::is_nothrow_move_constructible::value) + : mData(std::move(n.mData)) {} + + // doesn't do anything + void destroy(M& ROBIN_HOOD_UNUSED(map) /*unused*/) noexcept {} + void destroyDoNotDeallocate() noexcept {} + + value_type const* operator->() const noexcept { + return &mData; + } + value_type* operator->() noexcept { + return &mData; + } + + const value_type& operator*() const noexcept { + return mData; + } + + value_type& operator*() noexcept { + return mData; + } + + template + ROBIN_HOOD(NODISCARD) + typename std::enable_if::type getFirst() noexcept { + return mData.first; + } + template + ROBIN_HOOD(NODISCARD) + typename std::enable_if::type getFirst() noexcept { + return mData; + } + + template + ROBIN_HOOD(NODISCARD) + typename std::enable_if::type + getFirst() const noexcept { + return mData.first; + } + template + ROBIN_HOOD(NODISCARD) + typename std::enable_if::type getFirst() const noexcept { + return mData; + } + + template + ROBIN_HOOD(NODISCARD) + typename std::enable_if::type getSecond() noexcept { + return mData.second; + } + + template + ROBIN_HOOD(NODISCARD) + typename std::enable_if::type getSecond() const noexcept { + return mData.second; + } + + void swap(DataNode& o) noexcept( + noexcept(std::declval().swap(std::declval()))) { + mData.swap(o.mData); + } + + private: + value_type mData; + }; + + // big object: allocate on heap. + template + class DataNode { + public: + template + explicit DataNode(M& map, Args&&... args) + : mData(map.allocate()) { + ::new (static_cast(mData)) value_type(std::forward(args)...); + } + + DataNode(M& ROBIN_HOOD_UNUSED(map) /*unused*/, DataNode&& n) noexcept + : mData(std::move(n.mData)) {} + + void destroy(M& map) noexcept { + // don't deallocate, just put it into list of datapool. + mData->~value_type(); + map.deallocate(mData); + } + + void destroyDoNotDeallocate() noexcept { + mData->~value_type(); + } + + value_type const* operator->() const noexcept { + return mData; + } + + value_type* operator->() noexcept { + return mData; + } + + const value_type& operator*() const { + return *mData; + } + + value_type& operator*() { + return *mData; + } + + template + ROBIN_HOOD(NODISCARD) + typename std::enable_if::type getFirst() noexcept { + return mData->first; + } + template + ROBIN_HOOD(NODISCARD) + typename std::enable_if::type getFirst() noexcept { + return *mData; + } + + template + ROBIN_HOOD(NODISCARD) + typename std::enable_if::type + getFirst() const noexcept { + return mData->first; + } + template + ROBIN_HOOD(NODISCARD) + typename std::enable_if::type getFirst() const noexcept { + return *mData; + } + + template + ROBIN_HOOD(NODISCARD) + typename std::enable_if::type getSecond() noexcept { + return mData->second; + } + + template + ROBIN_HOOD(NODISCARD) + typename std::enable_if::type getSecond() const noexcept { + return mData->second; + } + + void swap(DataNode& o) noexcept { + using std::swap; + swap(mData, o.mData); + } + + private: + value_type* mData; + }; + + using Node = DataNode; + + // helpers for insertKeyPrepareEmptySpot: extract first entry (only const required) + ROBIN_HOOD(NODISCARD) key_type const& getFirstConst(Node const& n) const noexcept { + return n.getFirst(); + } + + // in case we have void mapped_type, we are not using a pair, thus we just route k through. + // No need to disable this because it's just not used if not applicable. 
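+    // Illustrative (reviewer note, not in upstream robin_hood): for unordered_set<K> the
+    // value_type is K itself, so the overload below routes the key straight through, while
+    // for unordered_map<K, V> the pair overload further down returns vt.first.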
+ ROBIN_HOOD(NODISCARD) key_type const& getFirstConst(key_type const& k) const noexcept { + return k; + } + + // in case we have non-void mapped_type, we have a standard robin_hood::pair + template + ROBIN_HOOD(NODISCARD) + typename std::enable_if::value, key_type const&>::type + getFirstConst(value_type const& vt) const noexcept { + return vt.first; + } + + // Cloner ////////////////////////////////////////////////////////// + + template + struct Cloner; + + // fast path: Just copy data, without allocating anything. + template + struct Cloner { + void operator()(M const& source, M& target) const { + auto const* const src = reinterpret_cast(source.mKeyVals); + auto* tgt = reinterpret_cast(target.mKeyVals); + auto const numElementsWithBuffer = target.calcNumElementsWithBuffer(target.mMask + 1); + std::copy(src, src + target.calcNumBytesTotal(numElementsWithBuffer), tgt); + } + }; + + template + struct Cloner { + void operator()(M const& s, M& t) const { + auto const numElementsWithBuffer = t.calcNumElementsWithBuffer(t.mMask + 1); + std::copy(s.mInfo, s.mInfo + t.calcNumBytesInfo(numElementsWithBuffer), t.mInfo); + + for (size_t i = 0; i < numElementsWithBuffer; ++i) { + if (t.mInfo[i]) { + ::new (static_cast(t.mKeyVals + i)) Node(t, *s.mKeyVals[i]); + } + } + } + }; + + // Destroyer /////////////////////////////////////////////////////// + + template + struct Destroyer {}; + + template + struct Destroyer { + void nodes(M& m) const noexcept { + m.mNumElements = 0; + } + + void nodesDoNotDeallocate(M& m) const noexcept { + m.mNumElements = 0; + } + }; + + template + struct Destroyer { + void nodes(M& m) const noexcept { + m.mNumElements = 0; + // clear also resets mInfo to 0, that's sometimes not necessary. + auto const numElementsWithBuffer = m.calcNumElementsWithBuffer(m.mMask + 1); + + for (size_t idx = 0; idx < numElementsWithBuffer; ++idx) { + if (0 != m.mInfo[idx]) { + Node& n = m.mKeyVals[idx]; + n.destroy(m); + n.~Node(); + } + } + } + + void nodesDoNotDeallocate(M& m) const noexcept { + m.mNumElements = 0; + // clear also resets mInfo to 0, that's sometimes not necessary. + auto const numElementsWithBuffer = m.calcNumElementsWithBuffer(m.mMask + 1); + for (size_t idx = 0; idx < numElementsWithBuffer; ++idx) { + if (0 != m.mInfo[idx]) { + Node& n = m.mKeyVals[idx]; + n.destroyDoNotDeallocate(); + n.~Node(); + } + } + } + }; + + // Iter //////////////////////////////////////////////////////////// + + struct fast_forward_tag {}; + + // generic iterator for both const_iterator and iterator. + template + // NOLINTNEXTLINE(hicpp-special-member-functions,cppcoreguidelines-special-member-functions) + class Iter { + private: + using NodePtr = typename std::conditional::type; + + public: + using difference_type = std::ptrdiff_t; + using value_type = typename Self::value_type; + using reference = typename std::conditional::type; + using pointer = typename std::conditional::type; + using iterator_category = std::forward_iterator_tag; + + // default constructed iterator can be compared to itself, but WON'T return true when + // compared to end(). + Iter() = default; + + // Rule of zero: nothing specified. The conversion constructor is only enabled for + // iterator to const_iterator, so it doesn't accidentally work as a copy ctor. + + // Conversion constructor from iterator to const_iterator. 
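+        // Illustrative (reviewer note, not in upstream robin_hood): this enables
+        //     robin_hood::unordered_map<int, int>::const_iterator cit = m.begin();
+        // while the reverse conversion (const_iterator to iterator) stays ill-formed.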
+ template ::type> + // NOLINTNEXTLINE(hicpp-explicit-conversions) + Iter(Iter const& other) noexcept + : mKeyVals(other.mKeyVals) + , mInfo(other.mInfo) {} + + Iter(NodePtr valPtr, uint8_t const* infoPtr) noexcept + : mKeyVals(valPtr) + , mInfo(infoPtr) {} + + Iter(NodePtr valPtr, uint8_t const* infoPtr, + fast_forward_tag ROBIN_HOOD_UNUSED(tag) /*unused*/) noexcept + : mKeyVals(valPtr) + , mInfo(infoPtr) { + fastForward(); + } + + template ::type> + Iter& operator=(Iter const& other) noexcept { + mKeyVals = other.mKeyVals; + mInfo = other.mInfo; + return *this; + } + + // prefix increment. Undefined behavior if we are at end()! + Iter& operator++() noexcept { + mInfo++; + mKeyVals++; + fastForward(); + return *this; + } + + Iter operator++(int) noexcept { + Iter tmp = *this; + ++(*this); + return tmp; + } + + reference operator*() const { + return **mKeyVals; + } + + pointer operator->() const { + return &**mKeyVals; + } + + template + bool operator==(Iter const& o) const noexcept { + return mKeyVals == o.mKeyVals; + } + + template + bool operator!=(Iter const& o) const noexcept { + return mKeyVals != o.mKeyVals; + } + + private: + // fast forward to the next non-free info byte + // I've tried a few variants that don't depend on intrinsics, but unfortunately they are + // quite a bit slower than this one. So I've reverted that change again. See map_benchmark. + void fastForward() noexcept { + size_t n = 0; + while (0U == (n = detail::unaligned_load(mInfo))) { + mInfo += sizeof(size_t); + mKeyVals += sizeof(size_t); + } +#if defined(ROBIN_HOOD_DISABLE_INTRINSICS) + // we know for certain that within the next 8 bytes we'll find a non-zero one. + if (ROBIN_HOOD_UNLIKELY(0U == detail::unaligned_load(mInfo))) { + mInfo += 4; + mKeyVals += 4; + } + if (ROBIN_HOOD_UNLIKELY(0U == detail::unaligned_load(mInfo))) { + mInfo += 2; + mKeyVals += 2; + } + if (ROBIN_HOOD_UNLIKELY(0U == *mInfo)) { + mInfo += 1; + mKeyVals += 1; + } +#else +# if ROBIN_HOOD(LITTLE_ENDIAN) + auto inc = ROBIN_HOOD_COUNT_TRAILING_ZEROES(n) / 8; +# else + auto inc = ROBIN_HOOD_COUNT_LEADING_ZEROES(n) / 8; +# endif + mInfo += inc; + mKeyVals += inc; +#endif + } + + friend class Table; + NodePtr mKeyVals{nullptr}; + uint8_t const* mInfo{nullptr}; + }; + + //////////////////////////////////////////////////////////////////// + + // highly performance relevant code. + // Lower bits are used for indexing into the array (2^n size) + // The upper 1-5 bits need to be a reasonable good hash, to save comparisons. + template + void keyToIdx(HashKey&& key, size_t* idx, InfoType* info) const { + // In addition to whatever hash is used, add another mul & shift so we get better hashing. + // This serves as a bad hash prevention, if the given data is + // badly mixed. + auto h = static_cast(WHash::operator()(key)); + + h *= mHashMultiplier; + h ^= h >> 33U; + + // the lower InitialInfoNumBits are reserved for info. + *info = mInfoInc + static_cast((h & InfoMask) >> mInfoHashShift); + *idx = (static_cast(h) >> InitialInfoNumBits) & mMask; + } + + // forwards the index by one, wrapping around at the end + void next(InfoType* info, size_t* idx) const noexcept { + *idx = *idx + 1; + *info += mInfoInc; + } + + void nextWhileLess(InfoType* info, size_t* idx) const noexcept { + // unrolling this by hand did not bring any speedups. + while (*info < mInfo[*idx]) { + next(info, idx); + } + } + + // Shift everything up by one element. Tries to move stuff around. 
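+    // Illustrative (reviewer note, not in upstream robin_hood): shiftUp moves the displaced
+    // run [insertion_idx, startIdx) one slot to the right and bumps each moved entry's info
+    // distance by mInfoInc; the caller then drops the new node into insertion_idx.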
+ void + shiftUp(size_t startIdx, + size_t const insertion_idx) noexcept(std::is_nothrow_move_assignable::value) { + auto idx = startIdx; + ::new (static_cast(mKeyVals + idx)) Node(std::move(mKeyVals[idx - 1])); + while (--idx != insertion_idx) { + mKeyVals[idx] = std::move(mKeyVals[idx - 1]); + } + + idx = startIdx; + while (idx != insertion_idx) { + ROBIN_HOOD_COUNT(shiftUp) + mInfo[idx] = static_cast(mInfo[idx - 1] + mInfoInc); + if (ROBIN_HOOD_UNLIKELY(mInfo[idx] + mInfoInc > 0xFF)) { + mMaxNumElementsAllowed = 0; + } + --idx; + } + } + + void shiftDown(size_t idx) noexcept(std::is_nothrow_move_assignable::value) { + // until we find one that is either empty or has zero offset. + // TODO(martinus) we don't need to move everything, just the last one for the same + // bucket. + mKeyVals[idx].destroy(*this); + + // until we find one that is either empty or has zero offset. + while (mInfo[idx + 1] >= 2 * mInfoInc) { + ROBIN_HOOD_COUNT(shiftDown) + mInfo[idx] = static_cast(mInfo[idx + 1] - mInfoInc); + mKeyVals[idx] = std::move(mKeyVals[idx + 1]); + ++idx; + } + + mInfo[idx] = 0; + // don't destroy, we've moved it + // mKeyVals[idx].destroy(*this); + mKeyVals[idx].~Node(); + } + + // copy of find(), except that it returns iterator instead of const_iterator. + template + ROBIN_HOOD(NODISCARD) + size_t findIdx(Other const& key) const { + size_t idx{}; + InfoType info{}; + keyToIdx(key, &idx, &info); + + do { + // unrolling this twice gives a bit of a speedup. More unrolling did not help. + if (info == mInfo[idx] && + ROBIN_HOOD_LIKELY(WKeyEqual::operator()(key, mKeyVals[idx].getFirst()))) { + return idx; + } + next(&info, &idx); + if (info == mInfo[idx] && + ROBIN_HOOD_LIKELY(WKeyEqual::operator()(key, mKeyVals[idx].getFirst()))) { + return idx; + } + next(&info, &idx); + } while (info <= mInfo[idx]); + + // nothing found! + return mMask == 0 ? 0 + : static_cast(std::distance( + mKeyVals, reinterpret_cast_no_cast_align_warning(mInfo))); + } + + void cloneData(const Table& o) { + Cloner()(o, *this); + } + + // inserts a keyval that is guaranteed to be new, e.g. when the hashmap is resized. + // @return True on success, false if something went wrong + void insert_move(Node&& keyval) { + // we don't retry, fail if overflowing + // don't need to check max num elements + if (0 == mMaxNumElementsAllowed && !try_increase_info()) { + throwOverflowError(); + } + + size_t idx{}; + InfoType info{}; + keyToIdx(keyval.getFirst(), &idx, &info); + + // skip forward. Use <= because we are certain that the element is not there. + while (info <= mInfo[idx]) { + idx = idx + 1; + info += mInfoInc; + } + + // key not found, so we are now exactly where we want to insert it. + auto const insertion_idx = idx; + auto const insertion_info = static_cast(info); + if (ROBIN_HOOD_UNLIKELY(insertion_info + mInfoInc > 0xFF)) { + mMaxNumElementsAllowed = 0; + } + + // find an empty spot + while (0 != mInfo[idx]) { + next(&info, &idx); + } + + auto& l = mKeyVals[insertion_idx]; + if (idx == insertion_idx) { + ::new (static_cast(&l)) Node(std::move(keyval)); + } else { + shiftUp(idx, insertion_idx); + l = std::move(keyval); + } + + // put at empty spot + mInfo[insertion_idx] = insertion_info; + + ++mNumElements; + } + +public: + using iterator = Iter; + using const_iterator = Iter; + + Table() noexcept(noexcept(Hash()) && noexcept(KeyEqual())) + : WHash() + , WKeyEqual() { + ROBIN_HOOD_TRACE(this) + } + + // Creates an empty hash map. Nothing is allocated yet, this happens at the first insert. 
+ // This tremendously speeds up ctor & dtor of a map that never receives an element. The + // penalty is payed at the first insert, and not before. Lookup of this empty map works + // because everybody points to DummyInfoByte::b. parameter bucket_count is dictated by the + // standard, but we can ignore it. + explicit Table( + size_t ROBIN_HOOD_UNUSED(bucket_count) /*unused*/, const Hash& h = Hash{}, + const KeyEqual& equal = KeyEqual{}) noexcept(noexcept(Hash(h)) && noexcept(KeyEqual(equal))) + : WHash(h) + , WKeyEqual(equal) { + ROBIN_HOOD_TRACE(this) + } + + template + Table(Iter first, Iter last, size_t ROBIN_HOOD_UNUSED(bucket_count) /*unused*/ = 0, + const Hash& h = Hash{}, const KeyEqual& equal = KeyEqual{}) + : WHash(h) + , WKeyEqual(equal) { + ROBIN_HOOD_TRACE(this) + insert(first, last); + } + + Table(std::initializer_list initlist, + size_t ROBIN_HOOD_UNUSED(bucket_count) /*unused*/ = 0, const Hash& h = Hash{}, + const KeyEqual& equal = KeyEqual{}) + : WHash(h) + , WKeyEqual(equal) { + ROBIN_HOOD_TRACE(this) + insert(initlist.begin(), initlist.end()); + } + + Table(Table&& o) noexcept + : WHash(std::move(static_cast(o))) + , WKeyEqual(std::move(static_cast(o))) + , DataPool(std::move(static_cast(o))) { + ROBIN_HOOD_TRACE(this) + if (o.mMask) { + mHashMultiplier = std::move(o.mHashMultiplier); + mKeyVals = std::move(o.mKeyVals); + mInfo = std::move(o.mInfo); + mNumElements = std::move(o.mNumElements); + mMask = std::move(o.mMask); + mMaxNumElementsAllowed = std::move(o.mMaxNumElementsAllowed); + mInfoInc = std::move(o.mInfoInc); + mInfoHashShift = std::move(o.mInfoHashShift); + // set other's mask to 0 so its destructor won't do anything + o.init(); + } + } + + Table& operator=(Table&& o) noexcept { + ROBIN_HOOD_TRACE(this) + if (&o != this) { + if (o.mMask) { + // only move stuff if the other map actually has some data + destroy(); + mHashMultiplier = std::move(o.mHashMultiplier); + mKeyVals = std::move(o.mKeyVals); + mInfo = std::move(o.mInfo); + mNumElements = std::move(o.mNumElements); + mMask = std::move(o.mMask); + mMaxNumElementsAllowed = std::move(o.mMaxNumElementsAllowed); + mInfoInc = std::move(o.mInfoInc); + mInfoHashShift = std::move(o.mInfoHashShift); + WHash::operator=(std::move(static_cast(o))); + WKeyEqual::operator=(std::move(static_cast(o))); + DataPool::operator=(std::move(static_cast(o))); + + o.init(); + + } else { + // nothing in the other map => just clear us. + clear(); + } + } + return *this; + } + + Table(const Table& o) + : WHash(static_cast(o)) + , WKeyEqual(static_cast(o)) + , DataPool(static_cast(o)) { + ROBIN_HOOD_TRACE(this) + if (!o.empty()) { + // not empty: create an exact copy. it is also possible to just iterate through all + // elements and insert them, but copying is probably faster. + + auto const numElementsWithBuffer = calcNumElementsWithBuffer(o.mMask + 1); + auto const numBytesTotal = calcNumBytesTotal(numElementsWithBuffer); + + ROBIN_HOOD_LOG("std::malloc " << numBytesTotal << " = calcNumBytesTotal(" + << numElementsWithBuffer << ")") + mHashMultiplier = o.mHashMultiplier; + mKeyVals = static_cast( + detail::assertNotNull(std::malloc(numBytesTotal))); + // no need for calloc because clonData does memcpy + mInfo = reinterpret_cast(mKeyVals + numElementsWithBuffer); + mNumElements = o.mNumElements; + mMask = o.mMask; + mMaxNumElementsAllowed = o.mMaxNumElementsAllowed; + mInfoInc = o.mInfoInc; + mInfoHashShift = o.mInfoHashShift; + cloneData(o); + } + } + + // Creates a copy of the given map. Copy constructor of each entry is used. 
+ // Not sure why clang-tidy thinks this doesn't handle self assignment, it does + // NOLINTNEXTLINE(bugprone-unhandled-self-assignment,cert-oop54-cpp) + Table& operator=(Table const& o) { + ROBIN_HOOD_TRACE(this) + if (&o == this) { + // prevent assigning of itself + return *this; + } + + // we keep using the old allocator and not assign the new one, because we want to keep + // the memory available. when it is the same size. + if (o.empty()) { + if (0 == mMask) { + // nothing to do, we are empty too + return *this; + } + + // not empty: destroy what we have there + // clear also resets mInfo to 0, that's sometimes not necessary. + destroy(); + init(); + WHash::operator=(static_cast(o)); + WKeyEqual::operator=(static_cast(o)); + DataPool::operator=(static_cast(o)); + + return *this; + } + + // clean up old stuff + Destroyer::value>{}.nodes(*this); + + if (mMask != o.mMask) { + // no luck: we don't have the same array size allocated, so we need to realloc. + if (0 != mMask) { + // only deallocate if we actually have data! + ROBIN_HOOD_LOG("std::free") + std::free(mKeyVals); + } + + auto const numElementsWithBuffer = calcNumElementsWithBuffer(o.mMask + 1); + auto const numBytesTotal = calcNumBytesTotal(numElementsWithBuffer); + ROBIN_HOOD_LOG("std::malloc " << numBytesTotal << " = calcNumBytesTotal(" + << numElementsWithBuffer << ")") + mKeyVals = static_cast( + detail::assertNotNull(std::malloc(numBytesTotal))); + + // no need for calloc here because cloneData performs a memcpy. + mInfo = reinterpret_cast(mKeyVals + numElementsWithBuffer); + // sentinel is set in cloneData + } + WHash::operator=(static_cast(o)); + WKeyEqual::operator=(static_cast(o)); + DataPool::operator=(static_cast(o)); + mHashMultiplier = o.mHashMultiplier; + mNumElements = o.mNumElements; + mMask = o.mMask; + mMaxNumElementsAllowed = o.mMaxNumElementsAllowed; + mInfoInc = o.mInfoInc; + mInfoHashShift = o.mInfoHashShift; + cloneData(o); + + return *this; + } + + // Swaps everything between the two maps. + void swap(Table& o) { + ROBIN_HOOD_TRACE(this) + using std::swap; + swap(o, *this); + } + + // Clears all data, without resizing. + void clear() { + ROBIN_HOOD_TRACE(this) + if (empty()) { + // don't do anything! also important because we don't want to write to + // DummyInfoByte::b, even though we would just write 0 to it. + return; + } + + Destroyer::value>{}.nodes(*this); + + auto const numElementsWithBuffer = calcNumElementsWithBuffer(mMask + 1); + // clear everything, then set the sentinel again + uint8_t const z = 0; + std::fill(mInfo, mInfo + calcNumBytesInfo(numElementsWithBuffer), z); + mInfo[numElementsWithBuffer] = 1; + + mInfoInc = InitialInfoInc; + mInfoHashShift = InitialInfoHashShift; + } + + // Destroys the map and all it's contents. + ~Table() { + ROBIN_HOOD_TRACE(this) + destroy(); + } + + // Checks if both tables contain the same entries. Order is irrelevant. 
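+    // Illustrative (reviewer note, not in upstream robin_hood): two maps built by inserting
+    // {1, "a"} and {2, "b"} in opposite orders compare equal here; the cost is one lookup
+    // per entry of the other table.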
+ bool operator==(const Table& other) const { + ROBIN_HOOD_TRACE(this) + if (other.size() != size()) { + return false; + } + for (auto const& otherEntry : other) { + if (!has(otherEntry)) { + return false; + } + } + + return true; + } + + bool operator!=(const Table& other) const { + ROBIN_HOOD_TRACE(this) + return !operator==(other); + } + + template + typename std::enable_if::value, Q&>::type operator[](const key_type& key) { + ROBIN_HOOD_TRACE(this) + auto idxAndState = insertKeyPrepareEmptySpot(key); + switch (idxAndState.second) { + case InsertionState::key_found: + break; + + case InsertionState::new_node: + ::new (static_cast(&mKeyVals[idxAndState.first])) + Node(*this, std::piecewise_construct, std::forward_as_tuple(key), + std::forward_as_tuple()); + break; + + case InsertionState::overwrite_node: + mKeyVals[idxAndState.first] = Node(*this, std::piecewise_construct, + std::forward_as_tuple(key), std::forward_as_tuple()); + break; + + case InsertionState::overflow_error: + throwOverflowError(); + } + + return mKeyVals[idxAndState.first].getSecond(); + } + + template + typename std::enable_if::value, Q&>::type operator[](key_type&& key) { + ROBIN_HOOD_TRACE(this) + auto idxAndState = insertKeyPrepareEmptySpot(key); + switch (idxAndState.second) { + case InsertionState::key_found: + break; + + case InsertionState::new_node: + ::new (static_cast(&mKeyVals[idxAndState.first])) + Node(*this, std::piecewise_construct, std::forward_as_tuple(std::move(key)), + std::forward_as_tuple()); + break; + + case InsertionState::overwrite_node: + mKeyVals[idxAndState.first] = + Node(*this, std::piecewise_construct, std::forward_as_tuple(std::move(key)), + std::forward_as_tuple()); + break; + + case InsertionState::overflow_error: + throwOverflowError(); + } + + return mKeyVals[idxAndState.first].getSecond(); + } + + template + void insert(Iter first, Iter last) { + for (; first != last; ++first) { + // value_type ctor needed because this might be called with std::pair's + insert(value_type(*first)); + } + } + + void insert(std::initializer_list ilist) { + for (auto&& vt : ilist) { + insert(std::move(vt)); + } + } + + template + std::pair emplace(Args&&... args) { + ROBIN_HOOD_TRACE(this) + Node n{*this, std::forward(args)...}; + auto idxAndState = insertKeyPrepareEmptySpot(getFirstConst(n)); + switch (idxAndState.second) { + case InsertionState::key_found: + n.destroy(*this); + break; + + case InsertionState::new_node: + ::new (static_cast(&mKeyVals[idxAndState.first])) Node(*this, std::move(n)); + break; + + case InsertionState::overwrite_node: + mKeyVals[idxAndState.first] = std::move(n); + break; + + case InsertionState::overflow_error: + n.destroy(*this); + throwOverflowError(); + break; + } + + return std::make_pair(iterator(mKeyVals + idxAndState.first, mInfo + idxAndState.first), + InsertionState::key_found != idxAndState.second); + } + + template + std::pair try_emplace(const key_type& key, Args&&... args) { + return try_emplace_impl(key, std::forward(args)...); + } + + template + std::pair try_emplace(key_type&& key, Args&&... args) { + return try_emplace_impl(std::move(key), std::forward(args)...); + } + + template + std::pair try_emplace(const_iterator hint, const key_type& key, + Args&&... args) { + (void)hint; + return try_emplace_impl(key, std::forward(args)...); + } + + template + std::pair try_emplace(const_iterator hint, key_type&& key, Args&&... 
args) { + (void)hint; + return try_emplace_impl(std::move(key), std::forward(args)...); + } + + template + std::pair insert_or_assign(const key_type& key, Mapped&& obj) { + return insertOrAssignImpl(key, std::forward(obj)); + } + + template + std::pair insert_or_assign(key_type&& key, Mapped&& obj) { + return insertOrAssignImpl(std::move(key), std::forward(obj)); + } + + template + std::pair insert_or_assign(const_iterator hint, const key_type& key, + Mapped&& obj) { + (void)hint; + return insertOrAssignImpl(key, std::forward(obj)); + } + + template + std::pair insert_or_assign(const_iterator hint, key_type&& key, Mapped&& obj) { + (void)hint; + return insertOrAssignImpl(std::move(key), std::forward(obj)); + } + + std::pair insert(const value_type& keyval) { + ROBIN_HOOD_TRACE(this) + return emplace(keyval); + } + + std::pair insert(value_type&& keyval) { + return emplace(std::move(keyval)); + } + + // Returns 1 if key is found, 0 otherwise. + size_t count(const key_type& key) const { // NOLINT(modernize-use-nodiscard) + ROBIN_HOOD_TRACE(this) + auto kv = mKeyVals + findIdx(key); + if (kv != reinterpret_cast_no_cast_align_warning(mInfo)) { + return 1; + } + return 0; + } + + template + // NOLINTNEXTLINE(modernize-use-nodiscard) + typename std::enable_if::type count(const OtherKey& key) const { + ROBIN_HOOD_TRACE(this) + auto kv = mKeyVals + findIdx(key); + if (kv != reinterpret_cast_no_cast_align_warning(mInfo)) { + return 1; + } + return 0; + } + + bool contains(const key_type& key) const { // NOLINT(modernize-use-nodiscard) + return 1U == count(key); + } + + template + // NOLINTNEXTLINE(modernize-use-nodiscard) + typename std::enable_if::type contains(const OtherKey& key) const { + return 1U == count(key); + } + + // Returns a reference to the value found for key. + // Throws std::out_of_range if element cannot be found + template + // NOLINTNEXTLINE(modernize-use-nodiscard) + typename std::enable_if::value, Q&>::type at(key_type const& key) { + ROBIN_HOOD_TRACE(this) + auto kv = mKeyVals + findIdx(key); + if (kv == reinterpret_cast_no_cast_align_warning(mInfo)) { + doThrow("key not found"); + } + return kv->getSecond(); + } + + // Returns a reference to the value found for key. 
+ // Throws std::out_of_range if element cannot be found + template + // NOLINTNEXTLINE(modernize-use-nodiscard) + typename std::enable_if::value, Q const&>::type at(key_type const& key) const { + ROBIN_HOOD_TRACE(this) + auto kv = mKeyVals + findIdx(key); + if (kv == reinterpret_cast_no_cast_align_warning(mInfo)) { + doThrow("key not found"); + } + return kv->getSecond(); + } + + const_iterator find(const key_type& key) const { // NOLINT(modernize-use-nodiscard) + ROBIN_HOOD_TRACE(this) + const size_t idx = findIdx(key); + return const_iterator{mKeyVals + idx, mInfo + idx}; + } + + template + const_iterator find(const OtherKey& key, is_transparent_tag /*unused*/) const { + ROBIN_HOOD_TRACE(this) + const size_t idx = findIdx(key); + return const_iterator{mKeyVals + idx, mInfo + idx}; + } + + template + typename std::enable_if::type // NOLINT(modernize-use-nodiscard) + find(const OtherKey& key) const { // NOLINT(modernize-use-nodiscard) + ROBIN_HOOD_TRACE(this) + const size_t idx = findIdx(key); + return const_iterator{mKeyVals + idx, mInfo + idx}; + } + + iterator find(const key_type& key) { + ROBIN_HOOD_TRACE(this) + const size_t idx = findIdx(key); + return iterator{mKeyVals + idx, mInfo + idx}; + } + + template + iterator find(const OtherKey& key, is_transparent_tag /*unused*/) { + ROBIN_HOOD_TRACE(this) + const size_t idx = findIdx(key); + return iterator{mKeyVals + idx, mInfo + idx}; + } + + template + typename std::enable_if::type find(const OtherKey& key) { + ROBIN_HOOD_TRACE(this) + const size_t idx = findIdx(key); + return iterator{mKeyVals + idx, mInfo + idx}; + } + + iterator begin() { + ROBIN_HOOD_TRACE(this) + if (empty()) { + return end(); + } + return iterator(mKeyVals, mInfo, fast_forward_tag{}); + } + const_iterator begin() const { // NOLINT(modernize-use-nodiscard) + ROBIN_HOOD_TRACE(this) + return cbegin(); + } + const_iterator cbegin() const { // NOLINT(modernize-use-nodiscard) + ROBIN_HOOD_TRACE(this) + if (empty()) { + return cend(); + } + return const_iterator(mKeyVals, mInfo, fast_forward_tag{}); + } + + iterator end() { + ROBIN_HOOD_TRACE(this) + // no need to supply valid info pointer: end() must not be dereferenced, and only node + // pointer is compared. + return iterator{reinterpret_cast_no_cast_align_warning(mInfo), nullptr}; + } + const_iterator end() const { // NOLINT(modernize-use-nodiscard) + ROBIN_HOOD_TRACE(this) + return cend(); + } + const_iterator cend() const { // NOLINT(modernize-use-nodiscard) + ROBIN_HOOD_TRACE(this) + return const_iterator{reinterpret_cast_no_cast_align_warning(mInfo), nullptr}; + } + + iterator erase(const_iterator pos) { + ROBIN_HOOD_TRACE(this) + // its safe to perform const cast here + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast) + return erase(iterator{const_cast(pos.mKeyVals), const_cast(pos.mInfo)}); + } + + // Erases element at pos, returns iterator to the next element. + iterator erase(iterator pos) { + ROBIN_HOOD_TRACE(this) + // we assume that pos always points to a valid entry, and not end(). 
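+        // Illustrative (reviewer note, not in upstream robin_hood): since erasing
+        // backward-shifts the following run, erase-while-iterating must use the returned
+        // iterator; should_erase() here is a hypothetical predicate:
+        //     for (auto it = m.begin(); it != m.end();) {
+        //         if (should_erase(*it)) { it = m.erase(it); } else { ++it; }
+        //     }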
+ auto const idx = static_cast(pos.mKeyVals - mKeyVals); + + shiftDown(idx); + --mNumElements; + + if (*pos.mInfo) { + // we've backward shifted, return this again + return pos; + } + + // no backward shift, return next element + return ++pos; + } + + size_t erase(const key_type& key) { + ROBIN_HOOD_TRACE(this) + size_t idx{}; + InfoType info{}; + keyToIdx(key, &idx, &info); + + // check while info matches with the source idx + do { + if (info == mInfo[idx] && WKeyEqual::operator()(key, mKeyVals[idx].getFirst())) { + shiftDown(idx); + --mNumElements; + return 1; + } + next(&info, &idx); + } while (info <= mInfo[idx]); + + // nothing found to delete + return 0; + } + + // reserves space for the specified number of elements. Makes sure the old data fits. + // exactly the same as reserve(c). + void rehash(size_t c) { + // forces a reserve + reserve(c, true); + } + + // reserves space for the specified number of elements. Makes sure the old data fits. + // Exactly the same as rehash(c). Use rehash(0) to shrink to fit. + void reserve(size_t c) { + // reserve, but don't force rehash + reserve(c, false); + } + + // If possible reallocates the map to a smaller one. This frees the underlying table. + // Does not do anything if load_factor is too large for decreasing the table's size. + void compact() { + ROBIN_HOOD_TRACE(this) + auto newSize = InitialNumElements; + while (calcMaxNumElementsAllowed(newSize) < mNumElements && newSize != 0) { + newSize *= 2; + } + if (ROBIN_HOOD_UNLIKELY(newSize == 0)) { + throwOverflowError(); + } + + ROBIN_HOOD_LOG("newSize > mMask + 1: " << newSize << " > " << mMask << " + 1") + + // only actually do anything when the new size is bigger than the old one. This prevents to + // continuously allocate for each reserve() call. + if (newSize < mMask + 1) { + rehashPowerOfTwo(newSize, true); + } + } + + size_type size() const noexcept { // NOLINT(modernize-use-nodiscard) + ROBIN_HOOD_TRACE(this) + return mNumElements; + } + + size_type max_size() const noexcept { // NOLINT(modernize-use-nodiscard) + ROBIN_HOOD_TRACE(this) + return static_cast(-1); + } + + ROBIN_HOOD(NODISCARD) bool empty() const noexcept { + ROBIN_HOOD_TRACE(this) + return 0 == mNumElements; + } + + float max_load_factor() const noexcept { // NOLINT(modernize-use-nodiscard) + ROBIN_HOOD_TRACE(this) + return MaxLoadFactor100 / 100.0F; + } + + // Average number of elements per bucket. Since we allow only 1 per bucket + float load_factor() const noexcept { // NOLINT(modernize-use-nodiscard) + ROBIN_HOOD_TRACE(this) + return static_cast(size()) / static_cast(mMask + 1); + } + + ROBIN_HOOD(NODISCARD) size_t mask() const noexcept { + ROBIN_HOOD_TRACE(this) + return mMask; + } + + ROBIN_HOOD(NODISCARD) size_t calcMaxNumElementsAllowed(size_t maxElements) const noexcept { + if (ROBIN_HOOD_LIKELY(maxElements <= (std::numeric_limits::max)() / 100)) { + return maxElements * MaxLoadFactor100 / 100; + } + + // we might be a bit inprecise, but since maxElements is quite large that doesn't matter + return (maxElements / 100) * MaxLoadFactor100; + } + + ROBIN_HOOD(NODISCARD) size_t calcNumBytesInfo(size_t numElements) const noexcept { + // we add a uint64_t, which houses the sentinel (first byte) and padding so we can load + // 64bit types. 
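+        // Worked example (reviewer note, not in upstream robin_hood): numElements == 8 yields
+        // 8 + 8 = 16 bytes, i.e. 8 info bytes plus one uint64_t holding the sentinel and
+        // padding, so the unaligned 64-bit loads in fastForward() never read past the
+        // allocation.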
+ return numElements + sizeof(uint64_t); + } + + ROBIN_HOOD(NODISCARD) + size_t calcNumElementsWithBuffer(size_t numElements) const noexcept { + auto maxNumElementsAllowed = calcMaxNumElementsAllowed(numElements); + return numElements + (std::min)(maxNumElementsAllowed, (static_cast(0xFF))); + } + + // calculation only allowed for 2^n values + ROBIN_HOOD(NODISCARD) size_t calcNumBytesTotal(size_t numElements) const { +#if ROBIN_HOOD(BITNESS) == 64 + return numElements * sizeof(Node) + calcNumBytesInfo(numElements); +#else + // make sure we're doing 64bit operations, so we are at least safe against 32bit overflows. + auto const ne = static_cast(numElements); + auto const s = static_cast(sizeof(Node)); + auto const infos = static_cast(calcNumBytesInfo(numElements)); + + auto const total64 = ne * s + infos; + auto const total = static_cast(total64); + + if (ROBIN_HOOD_UNLIKELY(static_cast(total) != total64)) { + throwOverflowError(); + } + return total; +#endif + } + +private: + template + ROBIN_HOOD(NODISCARD) + typename std::enable_if::value, bool>::type has(const value_type& e) const { + ROBIN_HOOD_TRACE(this) + auto it = find(e.first); + return it != end() && it->second == e.second; + } + + template + ROBIN_HOOD(NODISCARD) + typename std::enable_if::value, bool>::type has(const value_type& e) const { + ROBIN_HOOD_TRACE(this) + return find(e) != end(); + } + + void reserve(size_t c, bool forceRehash) { + ROBIN_HOOD_TRACE(this) + auto const minElementsAllowed = (std::max)(c, mNumElements); + auto newSize = InitialNumElements; + while (calcMaxNumElementsAllowed(newSize) < minElementsAllowed && newSize != 0) { + newSize *= 2; + } + if (ROBIN_HOOD_UNLIKELY(newSize == 0)) { + throwOverflowError(); + } + + ROBIN_HOOD_LOG("newSize > mMask + 1: " << newSize << " > " << mMask << " + 1") + + // only actually do anything when the new size is bigger than the old one. This prevents to + // continuously allocate for each reserve() call. + if (forceRehash || newSize > mMask + 1) { + rehashPowerOfTwo(newSize, false); + } + } + + // reserves space for at least the specified number of elements. + // only works if numBuckets if power of two + // True on success, false otherwise + void rehashPowerOfTwo(size_t numBuckets, bool forceFree) { + ROBIN_HOOD_TRACE(this) + + Node* const oldKeyVals = mKeyVals; + uint8_t const* const oldInfo = mInfo; + + const size_t oldMaxElementsWithBuffer = calcNumElementsWithBuffer(mMask + 1); + + // resize operation: move stuff + initData(numBuckets); + if (oldMaxElementsWithBuffer > 1) { + for (size_t i = 0; i < oldMaxElementsWithBuffer; ++i) { + if (oldInfo[i] != 0) { + // might throw an exception, which is really bad since we are in the middle of + // moving stuff. + insert_move(std::move(oldKeyVals[i])); + // destroy the node but DON'T destroy the data. + oldKeyVals[i].~Node(); + } + } + + // this check is not necessary as it's guarded by the previous if, but it helps + // silence g++'s overeager "attempt to free a non-heap object 'map' + // [-Werror=free-nonheap-object]" warning. 
+ if (oldKeyVals != reinterpret_cast_no_cast_align_warning(&mMask)) { + // don't destroy old data: put it into the pool instead + if (forceFree) { + std::free(oldKeyVals); + } else { + DataPool::addOrFree(oldKeyVals, calcNumBytesTotal(oldMaxElementsWithBuffer)); + } + } + } + } + + ROBIN_HOOD(NOINLINE) void throwOverflowError() const { +#if ROBIN_HOOD(HAS_EXCEPTIONS) + throw std::overflow_error("robin_hood::map overflow"); +#else + abort(); +#endif + } + + template + std::pair try_emplace_impl(OtherKey&& key, Args&&... args) { + ROBIN_HOOD_TRACE(this) + auto idxAndState = insertKeyPrepareEmptySpot(key); + switch (idxAndState.second) { + case InsertionState::key_found: + break; + + case InsertionState::new_node: + ::new (static_cast(&mKeyVals[idxAndState.first])) Node( + *this, std::piecewise_construct, std::forward_as_tuple(std::forward(key)), + std::forward_as_tuple(std::forward(args)...)); + break; + + case InsertionState::overwrite_node: + mKeyVals[idxAndState.first] = Node(*this, std::piecewise_construct, + std::forward_as_tuple(std::forward(key)), + std::forward_as_tuple(std::forward(args)...)); + break; + + case InsertionState::overflow_error: + throwOverflowError(); + break; + } + + return std::make_pair(iterator(mKeyVals + idxAndState.first, mInfo + idxAndState.first), + InsertionState::key_found != idxAndState.second); + } + + template + std::pair insertOrAssignImpl(OtherKey&& key, Mapped&& obj) { + ROBIN_HOOD_TRACE(this) + auto idxAndState = insertKeyPrepareEmptySpot(key); + switch (idxAndState.second) { + case InsertionState::key_found: + mKeyVals[idxAndState.first].getSecond() = std::forward(obj); + break; + + case InsertionState::new_node: + ::new (static_cast(&mKeyVals[idxAndState.first])) Node( + *this, std::piecewise_construct, std::forward_as_tuple(std::forward(key)), + std::forward_as_tuple(std::forward(obj))); + break; + + case InsertionState::overwrite_node: + mKeyVals[idxAndState.first] = Node(*this, std::piecewise_construct, + std::forward_as_tuple(std::forward(key)), + std::forward_as_tuple(std::forward(obj))); + break; + + case InsertionState::overflow_error: + throwOverflowError(); + break; + } + + return std::make_pair(iterator(mKeyVals + idxAndState.first, mInfo + idxAndState.first), + InsertionState::key_found != idxAndState.second); + } + + void initData(size_t max_elements) { + mNumElements = 0; + mMask = max_elements - 1; + mMaxNumElementsAllowed = calcMaxNumElementsAllowed(max_elements); + + auto const numElementsWithBuffer = calcNumElementsWithBuffer(max_elements); + + // calloc also zeroes everything + auto const numBytesTotal = calcNumBytesTotal(numElementsWithBuffer); + ROBIN_HOOD_LOG("std::calloc " << numBytesTotal << " = calcNumBytesTotal(" + << numElementsWithBuffer << ")") + mKeyVals = reinterpret_cast( + detail::assertNotNull(std::calloc(1, numBytesTotal))); + mInfo = reinterpret_cast(mKeyVals + numElementsWithBuffer); + + // set sentinel + mInfo[numElementsWithBuffer] = 1; + + mInfoInc = InitialInfoInc; + mInfoHashShift = InitialInfoHashShift; + } + + enum class InsertionState { overflow_error, key_found, new_node, overwrite_node }; + + // Finds key, and if not already present prepares a spot where to pot the key & value. + // This potentially shifts nodes out of the way, updates mInfo and number of inserted + // elements, so the only operation left to do is create/assign a new node at that spot. 
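+    // Illustrative (reviewer note, not in upstream robin_hood): emplace() of an already
+    // present key receives {idx, InsertionState::key_found} and destroys its temporary node;
+    // a new key yields new_node (the slot was empty) or overwrite_node (the slot was vacated
+    // by shiftUp and is assigned over).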
+ template + std::pair insertKeyPrepareEmptySpot(OtherKey&& key) { + for (int i = 0; i < 256; ++i) { + size_t idx{}; + InfoType info{}; + keyToIdx(key, &idx, &info); + nextWhileLess(&info, &idx); + + // while we potentially have a match + while (info == mInfo[idx]) { + if (WKeyEqual::operator()(key, mKeyVals[idx].getFirst())) { + // key already exists, do NOT insert. + // see http://en.cppreference.com/w/cpp/container/unordered_map/insert + return std::make_pair(idx, InsertionState::key_found); + } + next(&info, &idx); + } + + // unlikely that this evaluates to true + if (ROBIN_HOOD_UNLIKELY(mNumElements >= mMaxNumElementsAllowed)) { + if (!increase_size()) { + return std::make_pair(size_t(0), InsertionState::overflow_error); + } + continue; + } + + // key not found, so we are now exactly where we want to insert it. + auto const insertion_idx = idx; + auto const insertion_info = info; + if (ROBIN_HOOD_UNLIKELY(insertion_info + mInfoInc > 0xFF)) { + mMaxNumElementsAllowed = 0; + } + + // find an empty spot + while (0 != mInfo[idx]) { + next(&info, &idx); + } + + if (idx != insertion_idx) { + shiftUp(idx, insertion_idx); + } + // put at empty spot + mInfo[insertion_idx] = static_cast(insertion_info); + ++mNumElements; + return std::make_pair(insertion_idx, idx == insertion_idx + ? InsertionState::new_node + : InsertionState::overwrite_node); + } + + // enough attempts failed, so finally give up. + return std::make_pair(size_t(0), InsertionState::overflow_error); + } + + bool try_increase_info() { + ROBIN_HOOD_LOG("mInfoInc=" << mInfoInc << ", numElements=" << mNumElements + << ", maxNumElementsAllowed=" + << calcMaxNumElementsAllowed(mMask + 1)) + if (mInfoInc <= 2) { + // need to be > 2 so that shift works (otherwise undefined behavior!) + return false; + } + // we got space left, try to make info smaller + mInfoInc = static_cast(mInfoInc >> 1U); + + // remove one bit of the hash, leaving more space for the distance info. + // This is extremely fast because we can operate on 8 bytes at once. + ++mInfoHashShift; + auto const numElementsWithBuffer = calcNumElementsWithBuffer(mMask + 1); + + for (size_t i = 0; i < numElementsWithBuffer; i += 8) { + auto val = unaligned_load(mInfo + i); + val = (val >> 1U) & UINT64_C(0x7f7f7f7f7f7f7f7f); + std::memcpy(mInfo + i, &val, sizeof(val)); + } + // update sentinel, which might have been cleared out! + mInfo[numElementsWithBuffer] = 1; + + mMaxNumElementsAllowed = calcMaxNumElementsAllowed(mMask + 1); + return true; + } + + // True if resize was possible, false otherwise + bool increase_size() { + // nothing allocated yet? just allocate InitialNumElements + if (0 == mMask) { + initData(InitialNumElements); + return true; + } + + auto const maxNumElementsAllowed = calcMaxNumElementsAllowed(mMask + 1); + if (mNumElements < maxNumElementsAllowed && try_increase_info()) { + return true; + } + + ROBIN_HOOD_LOG("mNumElements=" << mNumElements << ", maxNumElementsAllowed=" + << maxNumElementsAllowed << ", load=" + << (static_cast(mNumElements) * 100.0 / + (static_cast(mMask) + 1))) + + if (mNumElements * 2 < calcMaxNumElementsAllowed(mMask + 1)) { + // we have to resize, even though there would still be plenty of space left! + // Try to rehash instead. Delete freed memory so we don't steadyily increase mem in case + // we have to rehash a few times + nextHashMultiplier(); + rehashPowerOfTwo(mMask + 1, true); + } else { + // we've reached the capacity of the map, so the hash seems to work nice. Keep using it. 
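+            // Worked example (reviewer note, not in upstream robin_hood): with the default
+            // MaxLoadFactor100 == 80, a 1024-slot table reaches this branch at 819 elements
+            // and doubles to 2048 slots, keeping inserts amortized O(1).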
+            rehashPowerOfTwo((mMask + 1) * 2, false);
+        }
+        return true;
+    }
+
+    void nextHashMultiplier() {
+        // adding an *even* number, so that the multiplier will always stay odd. This is necessary
+        // so that the hash stays a mixing function (and thus doesn't have any information loss).
+        mHashMultiplier += UINT64_C(0xc4ceb9fe1a85ec54);
+    }
+
+    void destroy() {
+        if (0 == mMask) {
+            // don't deallocate!
+            return;
+        }
+
+        Destroyer<Self, IsFlat && std::is_trivially_destructible<Node>::value>{}
+            .nodesDoNotDeallocate(*this);
+
+        // This protection against not deleting mMask shouldn't be needed as it's sufficiently
+        // protected with the 0==mMask check, but I have this anyways because g++ 7 otherwise
+        // reports a compile error: attempt to free a non-heap object 'fm'
+        // [-Werror=free-nonheap-object]
+        if (mKeyVals != reinterpret_cast_no_cast_align_warning<Node*>(&mMask)) {
+            ROBIN_HOOD_LOG("std::free")
+            std::free(mKeyVals);
+        }
+    }
+
+    void init() noexcept {
+        mKeyVals = reinterpret_cast_no_cast_align_warning<Node*>(&mMask);
+        mInfo = reinterpret_cast<uint8_t*>(&mMask);
+        mNumElements = 0;
+        mMask = 0;
+        mMaxNumElementsAllowed = 0;
+        mInfoInc = InitialInfoInc;
+        mInfoHashShift = InitialInfoHashShift;
+    }
+
+    // members are sorted so no padding occurs
+    uint64_t mHashMultiplier = UINT64_C(0xc4ceb9fe1a85ec53);                // 8 byte  8
+    Node* mKeyVals = reinterpret_cast_no_cast_align_warning<Node*>(&mMask); // 8 byte 16
+    uint8_t* mInfo = reinterpret_cast<uint8_t*>(&mMask);                    // 8 byte 24
+    size_t mNumElements = 0;                                                // 8 byte 32
+    size_t mMask = 0;                                                       // 8 byte 40
+    size_t mMaxNumElementsAllowed = 0;                                      // 8 byte 48
+    InfoType mInfoInc = InitialInfoInc;                                     // 4 byte 52
+    InfoType mInfoHashShift = InitialInfoHashShift;                         // 4 byte 56
+                                                                   // 16 byte 56 if NodeAllocator
+};
+
+} // namespace detail
+
+// map
+
+template <typename Key, typename T, typename Hash = robin_hood::hash<Key>,
+          typename KeyEqual = std::equal_to<Key>, size_t MaxLoadFactor100 = 80>
+using unordered_flat_map = detail::Table<true, MaxLoadFactor100, Key, T, Hash, KeyEqual>;
+
+template <typename Key, typename T, typename Hash = robin_hood::hash<Key>,
+          typename KeyEqual = std::equal_to<Key>, size_t MaxLoadFactor100 = 80>
+using unordered_node_map = detail::Table<false, MaxLoadFactor100, Key, T, Hash, KeyEqual>;
+
+template <typename Key, typename T, typename Hash = robin_hood::hash<Key>,
+          typename KeyEqual = std::equal_to<Key>, size_t MaxLoadFactor100 = 80>
+using unordered_map =
+    detail::Table<sizeof(robin_hood::pair<Key, T>) <= sizeof(size_t) * 6 &&
+                      std::is_nothrow_move_constructible<robin_hood::pair<Key, T>>::value &&
+                      std::is_nothrow_move_assignable<robin_hood::pair<Key, T>>::value,
+                  MaxLoadFactor100, Key, T, Hash, KeyEqual>;
+
+// set
+
+template <typename Key, typename Hash = robin_hood::hash<Key>,
+          typename KeyEqual = std::equal_to<Key>, size_t MaxLoadFactor100 = 80>
+using unordered_flat_set = detail::Table<true, MaxLoadFactor100, Key, void, Hash, KeyEqual>;
+
+template <typename Key, typename Hash = robin_hood::hash<Key>,
+          typename KeyEqual = std::equal_to<Key>, size_t MaxLoadFactor100 = 80>
+using unordered_node_set = detail::Table<false, MaxLoadFactor100, Key, void, Hash, KeyEqual>;
+
+template <typename Key, typename Hash = robin_hood::hash<Key>,
+          typename KeyEqual = std::equal_to<Key>, size_t MaxLoadFactor100 = 80>
+using unordered_set = detail::Table<sizeof(Key) <= sizeof(size_t) * 6 &&
+                                        std::is_nothrow_move_constructible<Key>::value &&
+                                        std::is_nothrow_move_assignable<Key>::value,
+                                    MaxLoadFactor100, Key, void, Hash, KeyEqual>;
+
+} // namespace robin_hood
+
+#endif
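+
+// Illustrative usage (reviewer note, not part of upstream robin_hood):
+//
+//     #include "robin_hood.h"
+//     #include <cstdio>
+//     #include <string>
+//
+//     int main() {
+//         robin_hood::unordered_map<std::string, int> counts;
+//         counts.emplace("apple", 1);  // constructs the pair in place
+//         counts["banana"] += 2;       // operator[] default-constructs missing values
+//         for (auto const& kv : counts) {
+//             std::printf("%s=%d\n", kv.first.c_str(), kv.second);
+//         }
+//         return 0;
+//     }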