Replace std::unordered_map/set with robin-hood-hashing

Robin-hood-hashing (https://github.com/martinus/robin-hood-hashing)
is considered faster than std::unordered_map/set,
so we use it to improve mindspore performance.

1. The robin_hood header files are in `third_party/robin_hood/include`;
2. In `utils/hash_map.h` and `utils/hash_set.h`, we define:
 - mindspore::HashMap as an alias of robin_hood::unordered_map;
 - mindspore::HashSet as an alias of robin_hood::unordered_set;
3. Replace:
 - `#include <unordered_map>` --> `#include "utils/hash_map.h"`;
 - `#include <unordered_set>` --> `#include "utils/hash_set.h"`;
 - `std::unordered_map` --> `mindspore::HashMap`;
 - `std::unordered_set` --> `mindspore::HashSet`;
 - `map.insert(std::pair(key, value))` --> `map.emplace(key, value)`;
 - `[] (const std::pair<K, V> &p) {..} ` --> `[] (const auto &p) {..} `;
4. Fix issues found by switch to robin_hood:
 - AnfNodeConfig hash and equal;
 - Fix a bug in `Slice::operator==()`;
 - Fix a bug in `CNode::HasPrimalAttr()`;
 - Fix map.erase() usage bugs: `map.erase(iter++)` --> `iter = map.erase(iter)`;
 - Fix some iterator invalidation problems;
5. Some std::unordered_map/set usages cannot be replaced by robin_hood:
 - As parameters of functions exposed to Python via pybind11;
 - Uses of bad hash functions that cause robin_hood::map overflow, such as AbstractBasePtrListHasher;
6. Update cpp unit tests;
7. Add build option '-F' to enable robin_hood, default on.
This commit is contained in:
He Wei 2021-11-24 08:51:27 +08:00
parent 354b7d44df
commit 41dcac9c49
452 changed files with 4687 additions and 1965 deletions

View File

@ -57,6 +57,11 @@ include_directories(${CMAKE_CURRENT_SOURCE_DIR}/third_party/securec/include)
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/third_party/flatbuffers/include)
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/third_party/flatbuffers/include/flatbuffers)
if(ENABLE_FAST_HASH_TABLE)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DENABLE_FAST_HASH_TABLE=1")
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/third_party/robin_hood/include)
endif()
include(${CMAKE_SOURCE_DIR}/cmake/dependency_utils.cmake)
find_package(Python3 COMPONENTS Interpreter Development)
if(Python3_FOUND)

View File

@ -27,6 +27,7 @@ option(MODE_ASCEND_ALL "supports all ascend platform" OFF)
option(MODE_ASCEND_ACL "supports ascend acl mode only" OFF)
option(ENABLE_SYM_FILE "enable sym file" OFF)
option(BUILD_DEV_MODE "MindSpore build nightly dev mode" OFF)
option(ENABLE_FAST_HASH_TABLE "Enable use fast hash table instead of std ones" ON)
if(CMAKE_SYSTEM_NAME MATCHES "Darwin")

View File

@ -30,7 +30,7 @@ void BatchMatmulFusedMulAddFusionPass::MatchBatchMatmulFusedMulAdd(const CNodePt
auto batch_matmul = cnode->input(kIndex2);
MS_EXCEPTION_IF_NULL(batch_matmul);
if (batch_matmul->isa<CNode>() && AnfAlgo::CheckPrimitiveType(batch_matmul, prim::kPrimBatchMatMul)) {
std::unordered_set<AnfNodePtr> record{cnode, batch_matmul};
mindspore::HashSet<AnfNodePtr> record{cnode, batch_matmul};
candidate_fusion->push_back(record);
SetRecordFusionId(record);
}

View File

@ -16,9 +16,9 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_BATCHMATMUL_FUSEDMULADD_PASS_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_BATCHMATMUL_FUSEDMULADD_PASS_H_
#include <unordered_set>
#include <vector>
#include "utils/hash_set.h"
#include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h"
#include "ir/anf.h"
#include "backend/optimizer/common/pass.h"
@ -29,7 +29,7 @@
namespace mindspore {
namespace opt {
using FusedNodeRecord = std::vector<std::unordered_set<AnfNodePtr>>;
using FusedNodeRecord = std::vector<mindspore::HashSet<AnfNodePtr>>;
class BatchMatmulFusedMulAddFusionPass : public FusionBasePass {
public:

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -15,9 +15,9 @@
*/
#include "backend/optimizer/ascend/buffer_fusion/bnupdate_eltwise_eltwise_fusion_pass.h"
#include <vector>
#include <unordered_set>
#include <memory>
#include <string>
#include "utils/hash_set.h"
#include "backend/kernel_compiler/kernel_fusion.h"
#include "debug/anf_ir_dump.h"
#include "backend/session/anf_runtime_algorithm.h"
@ -45,7 +45,7 @@ void BnupdateEltwiseEltwiseFusionPass::MatchBnupdateAddRelu(const CNodePtr &cnod
MS_EXCEPTION_IF_NULL(bnupdate);
if (bnupdate->isa<CNode>() && AnfAlgo::GetCNodeName(bnupdate) == kBNTrainingUpdateOpName &&
GetNodeOutputTotalUsedNum(kernel_graph, bnupdate) == kBNTrainingUpdateOutputUsedTotalNum) {
std::unordered_set<AnfNodePtr> record{cnode, relu_input, bnupdate};
mindspore::HashSet<AnfNodePtr> record{cnode, relu_input, bnupdate};
candidate_fusion->push_back(record);
SetRecordFusionId(record);
}

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,9 +16,9 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_BNUPDATE_ELTWISE_ELTWISE_FUSION_PASS_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_BNUPDATE_ELTWISE_ELTWISE_FUSION_PASS_H_
#include <unordered_set>
#include <vector>
#include "utils/hash_set.h"
#include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h"
#include "ir/anf.h"
#include "backend/optimizer/common/pass.h"
@ -29,7 +29,7 @@
namespace mindspore {
namespace opt {
using FusedNodeRecord = std::vector<std::unordered_set<AnfNodePtr>>;
using FusedNodeRecord = std::vector<mindspore::HashSet<AnfNodePtr>>;
class BnupdateEltwiseEltwiseFusionPass : public FusionBasePass {
public:

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -35,7 +35,7 @@ void BnupdateEltwiseFusionPass::MatchBnupdateDoubleOutputEltwise(const CNodePtr
MS_EXCEPTION_IF_NULL(bnupdate);
if (bnupdate->isa<CNode>() && AnfAlgo::GetCNodeName(bnupdate) == kBNTrainingUpdateOpName &&
GetNodeOutputTotalUsedNum(kernel_graph, bnupdate) == kBNTrainingUpdateOutputUsedTotalNum) {
std::unordered_set<AnfNodePtr> record{cnode, bnupdate};
mindspore::HashSet<AnfNodePtr> record{cnode, bnupdate};
candidate_fusion->push_back(record);
SetRecordFusionId(record);
}

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,9 +16,9 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_BNUPDATE_ELTWISE_FUSION_PASS_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_BNUPDATE_ELTWISE_FUSION_PASS_H_
#include <unordered_set>
#include <vector>
#include "utils/hash_set.h"
#include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h"
#include "ir/anf.h"
#include "backend/optimizer/common/pass.h"
@ -29,7 +29,7 @@
namespace mindspore {
namespace opt {
using FusedNodeRecord = std::vector<std::unordered_set<AnfNodePtr>>;
using FusedNodeRecord = std::vector<mindspore::HashSet<AnfNodePtr>>;
class BnupdateEltwiseFusionPass : public FusionBasePass {
public:

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -26,7 +26,7 @@ void Conv2DBackpropEltwiseEltwiseFusionPass::MatchConv2DBackpropInputEltwiseEltw
const CNodePtr &cnode, const session::KernelGraph &kernel_graph, FusedNodeRecord *candidate_fusion) {
MS_EXCEPTION_IF_NULL(cnode);
MS_EXCEPTION_IF_NULL(candidate_fusion);
std::unordered_set<AnfNodePtr> record{cnode};
mindspore::HashSet<AnfNodePtr> record{cnode};
auto eltwise_input = cnode->input(kIndex1);
MS_EXCEPTION_IF_NULL(eltwise_input);
if (CheckDoubleInEltWiseNode(kernel_graph, eltwise_input)) {

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,9 +16,9 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_CONV2DBACKPROP_ELTWISE_ELTWISE_FUSION_PASS_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_CONV2DBACKPROP_ELTWISE_ELTWISE_FUSION_PASS_H_
#include <unordered_set>
#include <vector>
#include "utils/hash_set.h"
#include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h"
#include "ir/anf.h"
#include "backend/optimizer/common/pass.h"
@ -29,7 +29,7 @@
namespace mindspore {
namespace opt {
using FusedNodeRecord = std::vector<std::unordered_set<AnfNodePtr>>;
using FusedNodeRecord = std::vector<mindspore::HashSet<AnfNodePtr>>;
class Conv2DBackpropEltwiseEltwiseFusionPass : public FusionBasePass {
public:

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -27,7 +27,7 @@ void Conv2DBackpropEltwiseFusionPass::MatchConv2DBackpropInputEltwise(const CNod
FusedNodeRecord *candidate_fusion) {
MS_EXCEPTION_IF_NULL(cnode);
MS_EXCEPTION_IF_NULL(candidate_fusion);
std::unordered_set<AnfNodePtr> record{cnode};
mindspore::HashSet<AnfNodePtr> record{cnode};
auto eltwise_input = cnode->input(kIndex1);
MS_EXCEPTION_IF_NULL(eltwise_input);
if (!eltwise_input->isa<CNode>() || !AnfUtils::IsRealCNodeKernel(eltwise_input) ||

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,9 +16,9 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_CONV2DBACKPROP_ELTWISE_FUSION_PASS_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_CONV2DBACKPROP_ELTWISE_FUSION_PASS_H_
#include <unordered_set>
#include <vector>
#include "utils/hash_set.h"
#include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h"
#include "ir/anf.h"
#include "backend/optimizer/common/pass.h"
@ -29,7 +29,7 @@
namespace mindspore {
namespace opt {
using FusedNodeRecord = std::vector<std::unordered_set<AnfNodePtr>>;
using FusedNodeRecord = std::vector<mindspore::HashSet<AnfNodePtr>>;
class Conv2DBackpropEltwiseFusionPass : public FusionBasePass {
public:

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -32,7 +32,7 @@ void ConvBnReduceFusionPass::MatchConvBnreduce(const CNodePtr &cnode, const sess
MS_EXCEPTION_IF_NULL(conv);
if (conv->isa<CNode>() && AnfAlgo::GetCNodeName(conv) == prim::kPrimConv2D->name() &&
GetNodeOutputTotalUsedNum(kernel_graph, conv) == kConvOutputUsedTotalNum) {
std::unordered_set<AnfNodePtr> record{cnode, conv};
mindspore::HashSet<AnfNodePtr> record{cnode, conv};
candidate_fusion->push_back(record);
SetRecordFusionId(record);
}

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,9 +16,9 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_CONV_BNREDUCE_FUSION_PASS_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_CONV_BNREDUCE_FUSION_PASS_H_
#include <unordered_set>
#include <vector>
#include "utils/hash_set.h"
#include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h"
#include "ir/anf.h"
#include "backend/optimizer/common/pass.h"
@ -29,7 +29,7 @@
namespace mindspore {
namespace opt {
using FusedNodeRecord = std::vector<std::unordered_set<AnfNodePtr>>;
using FusedNodeRecord = std::vector<mindspore::HashSet<AnfNodePtr>>;
class ConvBnReduceFusionPass : public FusionBasePass {
public:

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -26,7 +26,7 @@ void ConvDoubleInFusionPass::MatchConvDoubleInEltwise(const CNodePtr &cnode, con
FusedNodeRecord *candidate_fusion) {
MS_EXCEPTION_IF_NULL(cnode);
MS_EXCEPTION_IF_NULL(candidate_fusion);
std::unordered_set<AnfNodePtr> record{cnode};
mindspore::HashSet<AnfNodePtr> record{cnode};
auto eltwise_input = cnode->input(kIndex1);
MS_EXCEPTION_IF_NULL(eltwise_input);
if (CheckDoubleInEltWiseNode(kernel_graph, eltwise_input)) {

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,9 +16,9 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_CONV_DOUBLE_IN_FUSION_PASS_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_CONV_DOUBLE_IN_FUSION_PASS_H_
#include <unordered_set>
#include <vector>
#include "utils/hash_set.h"
#include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h"
#include "ir/anf.h"
#include "backend/optimizer/common/pass.h"
@ -29,7 +29,7 @@
namespace mindspore {
namespace opt {
using FusedNodeRecord = std::vector<std::unordered_set<AnfNodePtr>>;
using FusedNodeRecord = std::vector<mindspore::HashSet<AnfNodePtr>>;
class ConvDoubleInFusionPass : public FusionBasePass {
public:

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -15,8 +15,8 @@
*/
#include "backend/optimizer/ascend/buffer_fusion/conv_single_in_fusion_pass.h"
#include <vector>
#include <unordered_set>
#include <memory>
#include "utils/hash_set.h"
#include "backend/kernel_compiler/kernel_fusion.h"
#include "debug/anf_ir_dump.h"
#include "backend/session/anf_runtime_algorithm.h"
@ -30,7 +30,7 @@ void ConvSingleInFusionPass::MatchConvSingleInEltwise(const CNodePtr &cnode, con
FusedNodeRecord *candidate_fusion) {
MS_EXCEPTION_IF_NULL(cnode);
MS_EXCEPTION_IF_NULL(candidate_fusion);
std::unordered_set<AnfNodePtr> record{cnode};
mindspore::HashSet<AnfNodePtr> record{cnode};
auto eltwise_input = cnode->input(kIndex1);
while (CheckEltWiseNode(kernel_graph, eltwise_input)) {
(void)record.insert(eltwise_input);

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,9 +16,9 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_CONV_SINGLE_IN_FUSION_PASS_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_CONV_SINGLE_IN_FUSION_PASS_H_
#include <unordered_set>
#include <vector>
#include "utils/hash_set.h"
#include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h"
#include "ir/anf.h"
#include "backend/optimizer/common/pass.h"
@ -29,7 +29,7 @@
namespace mindspore {
namespace opt {
using FusedNodeRecord = std::vector<std::unordered_set<AnfNodePtr>>;
using FusedNodeRecord = std::vector<mindspore::HashSet<AnfNodePtr>>;
class ConvSingleInFusionPass : public FusionBasePass {
public:

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -33,7 +33,7 @@ void DepthwiseConvEltwiseFusionPass::MatchDepthwiseConvRelu(const CNodePtr &cnod
auto depthwise_conv = cnode->input(kIndex1);
MS_EXCEPTION_IF_NULL(depthwise_conv);
if (cnode->isa<CNode>() && IsPrimitiveCNode(depthwise_conv, prim::kPrimDepthwiseConv2dNative)) {
std::unordered_set<AnfNodePtr> record{cnode, depthwise_conv};
mindspore::HashSet<AnfNodePtr> record{cnode, depthwise_conv};
candidate_fusion->push_back(record);
SetRecordFusionId(record);
}
@ -42,7 +42,7 @@ void DepthwiseConvEltwiseFusionPass::MatchDepthwiseConvRelu(const CNodePtr &cnod
auto relu = cnode->input(kIndex1);
MS_EXCEPTION_IF_NULL(relu);
if (cnode->isa<CNode>() && (IsPrimitiveCNode(relu, prim::kPrimRelu) || IsPrimitiveCNode(relu, prim::kPrimReluV2))) {
std::unordered_set<AnfNodePtr> record{cnode, relu};
mindspore::HashSet<AnfNodePtr> record{cnode, relu};
candidate_fusion->push_back(record);
SetRecordFusionId(record);
}

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,9 +16,9 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_DEPTHWISECONV_ELTWISE_FUSION_PASS_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_DEPTHWISECONV_ELTWISE_FUSION_PASS_H_
#include <unordered_set>
#include <vector>
#include "utils/hash_set.h"
#include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h"
#include "ir/anf.h"
#include "backend/optimizer/common/pass.h"
@ -29,7 +29,7 @@
namespace mindspore {
namespace opt {
using FusedNodeRecord = std::vector<std::unordered_set<AnfNodePtr>>;
using FusedNodeRecord = std::vector<mindspore::HashSet<AnfNodePtr>>;
class DepthwiseConvEltwiseFusionPass : public FusionBasePass {
public:

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -26,7 +26,7 @@ void EltwiseFusionPass::MatchEltwise(const CNodePtr &cnode, const session::Kerne
FusedNodeRecord *candidate_fusion) {
MS_EXCEPTION_IF_NULL(cnode);
MS_EXCEPTION_IF_NULL(candidate_fusion);
std::unordered_set<AnfNodePtr> record{cnode};
mindspore::HashSet<AnfNodePtr> record{cnode};
auto eltwise_input = cnode->input(kIndex1);
MS_EXCEPTION_IF_NULL(eltwise_input);
while (CheckEltWiseNode(kernel_graph, eltwise_input)) {

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,9 +16,9 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_ELTWISE_FUSION_PASS_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_ELTWISE_FUSION_PASS_H_
#include <unordered_set>
#include <vector>
#include "utils/hash_set.h"
#include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h"
#include "ir/anf.h"
#include "backend/optimizer/common/pass.h"
@ -29,7 +29,7 @@
namespace mindspore {
namespace opt {
using FusedNodeRecord = std::vector<std::unordered_set<AnfNodePtr>>;
using FusedNodeRecord = std::vector<mindspore::HashSet<AnfNodePtr>>;
class EltwiseFusionPass : public FusionBasePass {
public:

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -81,7 +81,7 @@ size_t FusionBasePass::GetNotUpdateStateUserNums(const session::KernelGraph &ker
return not_updatestate_users;
}
void FusionBasePass::SetRecordFusionId(const std::unordered_set<AnfNodePtr> &record) {
void FusionBasePass::SetRecordFusionId(const mindspore::HashSet<AnfNodePtr> &record) {
auto id = fusion_id_allocator->AllocateFusionId();
for (auto node : record) {
fusion_id_allocator->SetFusionId(node, id);

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -15,11 +15,11 @@
*/
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_FUSION_BASE_PASS_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_FUSION_BASE_PASS_H_
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include <string>
#include <utility>
#include "utils/hash_map.h"
#include "utils/hash_set.h"
#include "ir/anf.h"
#include "backend/optimizer/common/pass.h"
#include "backend/optimizer/common/fusion_id_allocator.h"
@ -45,7 +45,7 @@ const int8_t MULTI_ELTWISE_SIZE = 4;
constexpr int64_t kBNTrainingUpdateOutputUsedTotalNum = 5;
constexpr int64_t kConvOutputUsedTotalNum = 4;
using FusedNodeRecord = std::vector<std::unordered_set<AnfNodePtr>>;
using FusedNodeRecord = std::vector<mindspore::HashSet<AnfNodePtr>>;
struct BufferFusionInfo_t {
std::string full_name;
@ -66,7 +66,7 @@ class FusionBasePass : public PassWithSwitch {
bool RunPass(const FuncGraphPtr &graph) override;
virtual void MatchSingleFusionPattern(const session::KernelGraph &kernel_graph,
FusedNodeRecord *candidate_fusion) = 0;
void SetRecordFusionId(const std::unordered_set<AnfNodePtr> &record);
void SetRecordFusionId(const mindspore::HashSet<AnfNodePtr> &record);
bool CheckEltWiseNode(const session::KernelGraph &kernel_graph, const AnfNodePtr &node);
bool CheckDoubleInEltWiseNode(const session::KernelGraph &kernel_graph, const AnfNodePtr &node);
bool CheckMultiOutputEltWiseNode(const session::KernelGraph &kernel_graph, const AnfNodePtr &node);

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -31,7 +31,7 @@ void MatmulConfusionTranposeFusionPass::MatchMatmulConfusionTranpose(const CNode
MS_EXCEPTION_IF_NULL(matmul);
if (matmul->isa<CNode>() && (AnfAlgo::CheckPrimitiveType(matmul, prim::kPrimMatMul) ||
AnfAlgo::CheckPrimitiveType(matmul, prim::kPrimBatchMatMul))) {
std::unordered_set<AnfNodePtr> record{cnode, matmul};
mindspore::HashSet<AnfNodePtr> record{cnode, matmul};
candidate_fusion->push_back(record);
SetRecordFusionId(record);
}

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,9 +16,9 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_MATMUL_CONFUSIONTRANSPOSE_FUSION_PASS_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_MATMUL_CONFUSIONTRANSPOSE_FUSION_PASS_H_
#include <unordered_set>
#include <vector>
#include "utils/hash_set.h"
#include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h"
#include "ir/anf.h"
#include "backend/optimizer/common/pass.h"
@ -29,7 +29,7 @@
namespace mindspore {
namespace opt {
using FusedNodeRecord = std::vector<std::unordered_set<AnfNodePtr>>;
using FusedNodeRecord = std::vector<mindspore::HashSet<AnfNodePtr>>;
class MatmulConfusionTranposeFusionPass : public FusionBasePass {
public:

View File

@ -30,7 +30,7 @@ void MatmulEltwiseFusionPass::MatchMatmulEltwise(const CNodePtr &cnode, const An
if (fusion_id_allocator->HasFusionIdAttr(relu_input)) {
return;
}
std::unordered_set<AnfNodePtr> record{cnode, relu_input};
mindspore::HashSet<AnfNodePtr> record{cnode, relu_input};
candidate_fusion->push_back(record);
SetRecordFusionId(record);
}

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,9 +16,9 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_MATMUL_ELTWISE_FUSION_PASS_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_MATMUL_ELTWISE_FUSION_PASS_H_
#include <unordered_set>
#include <vector>
#include "utils/hash_set.h"
#include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h"
#include "ir/anf.h"
#include "backend/optimizer/common/pass.h"
@ -29,7 +29,7 @@
namespace mindspore {
namespace opt {
using FusedNodeRecord = std::vector<std::unordered_set<AnfNodePtr>>;
using FusedNodeRecord = std::vector<mindspore::HashSet<AnfNodePtr>>;
class MatmulEltwiseFusionPass : public FusionBasePass {
public:

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -26,7 +26,7 @@ void MultiOutputFusionPass::MatchMultiOutputEltwise(const CNodePtr &cnode, const
FusedNodeRecord *candidate_fusion) {
MS_EXCEPTION_IF_NULL(cnode);
MS_EXCEPTION_IF_NULL(candidate_fusion);
std::unordered_set<AnfNodePtr> record{cnode};
mindspore::HashSet<AnfNodePtr> record{cnode};
auto eltwise_input = cnode->input(kIndex1);
MS_EXCEPTION_IF_NULL(eltwise_input);
if (CheckMultiOutputEltWiseNode(kernel_graph, eltwise_input)) {

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,9 +16,9 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_MULTI_OUTPUT_FUSION_PASS_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_MULTI_OUTPUT_FUSION_PASS_H_
#include <unordered_set>
#include <vector>
#include "utils/hash_set.h"
#include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h"
#include "ir/anf.h"
#include "backend/optimizer/common/pass.h"
@ -29,7 +29,7 @@
namespace mindspore {
namespace opt {
using FusedNodeRecord = std::vector<std::unordered_set<AnfNodePtr>>;
using FusedNodeRecord = std::vector<mindspore::HashSet<AnfNodePtr>>;
class MultiOutputFusionPass : public FusionBasePass {
public:

View File

@ -28,7 +28,7 @@ void ReduceEltwiseFusionPass::MatchReduceEltwise(const CNodePtr &cnode, const se
FusedNodeRecord *candidate_fusion) {
MS_EXCEPTION_IF_NULL(cnode);
MS_EXCEPTION_IF_NULL(candidate_fusion);
std::unordered_set<AnfNodePtr> record{cnode};
mindspore::HashSet<AnfNodePtr> record{cnode};
auto eltwise_input = cnode->input(kIndex1);
while (CheckEltWiseNode(kernel_graph, eltwise_input)) {
(void)record.insert(eltwise_input);

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,9 +16,9 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_REDUCE_ELTWISE_FUSION_PASS_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_REDUCE_ELTWISE_FUSION_PASS_H_
#include <unordered_set>
#include <vector>
#include "utils/hash_set.h"
#include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h"
#include "ir/anf.h"
#include "backend/optimizer/common/pass.h"
@ -29,7 +29,7 @@
namespace mindspore {
namespace opt {
using FusedNodeRecord = std::vector<std::unordered_set<AnfNodePtr>>;
using FusedNodeRecord = std::vector<mindspore::HashSet<AnfNodePtr>>;
class ReduceEltwiseFusionPass : public FusionBasePass {
public:

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -26,7 +26,7 @@ void SegmentEltwiseFusionPass::MatchSegmentEltwise(const CNodePtr &cnode, const
FusedNodeRecord *candidate_fusion) {
MS_EXCEPTION_IF_NULL(cnode);
MS_EXCEPTION_IF_NULL(candidate_fusion);
std::unordered_set<AnfNodePtr> record{cnode};
mindspore::HashSet<AnfNodePtr> record{cnode};
auto eltwise_input = cnode->input(kIndex1);
while (CheckEltWiseNode(kernel_graph, eltwise_input)) {
(void)record.insert(eltwise_input);

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,9 +16,9 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_SEGMENT_ELTWISE_FUSION_PASS_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_SEGMENT_ELTWISE_FUSION_PASS_H_
#include <unordered_set>
#include <vector>
#include "utils/hash_set.h"
#include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h"
#include "ir/anf.h"
#include "backend/optimizer/common/pass.h"
@ -29,7 +29,7 @@
namespace mindspore {
namespace opt {
using FusedNodeRecord = std::vector<std::unordered_set<AnfNodePtr>>;
using FusedNodeRecord = std::vector<mindspore::HashSet<AnfNodePtr>>;
class SegmentEltwiseFusionPass : public FusionBasePass {
public:

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -27,7 +27,7 @@ void StridedReadConvStridedWriteFusionPass::MatchStridedReadConvStridedWrite(con
FusedNodeRecord *candidate_fusion) {
MS_EXCEPTION_IF_NULL(cnode);
MS_EXCEPTION_IF_NULL(candidate_fusion);
std::unordered_set<AnfNodePtr> record{cnode};
mindspore::HashSet<AnfNodePtr> record{cnode};
auto write_input = cnode->input(kIndex1);
if (CheckEltWiseNode(kernel_graph, write_input)) {
(void)record.insert(write_input);

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,9 +16,9 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_STRIDEDREAD_CONV_STRIDEDWRITE_FUSION_PASS_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_STRIDEDREAD_CONV_STRIDEDWRITE_FUSION_PASS_H_
#include <unordered_set>
#include <vector>
#include "utils/hash_set.h"
#include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h"
#include "ir/anf.h"
#include "backend/optimizer/common/pass.h"
@ -29,7 +29,7 @@
namespace mindspore {
namespace opt {
using FusedNodeRecord = std::vector<std::unordered_set<AnfNodePtr>>;
using FusedNodeRecord = std::vector<mindspore::HashSet<AnfNodePtr>>;
class StridedReadConvStridedWriteFusionPass : public FusionBasePass {
public:

View File

@ -16,10 +16,10 @@
#include "backend/optimizer/ascend/buffer_fusion/ub_pattern_fusion.h"
#include <vector>
#include <utility>
#include <unordered_map>
#include <map>
#include <memory>
#include <string>
#include "utils/hash_map.h"
#include "backend/kernel_compiler/tbe/tbe_kernel_compile.h"
#include "backend/kernel_compiler/tbe/tbe_utils.h"
#include "debug/anf_ir_dump.h"
@ -163,7 +163,7 @@ AnfNodePtr CreateTupleGetItem(const AnfNodePtr &buffer_fusion_kernel, session::K
return tuple_item;
}
void ReplaceInputNodeInOtherFusionScope(std::unordered_map<int64_t, BufferFusionInfo_t> *buffer_fusion_infos,
void ReplaceInputNodeInOtherFusionScope(mindspore::HashMap<int64_t, BufferFusionInfo_t> *buffer_fusion_infos,
int64_t fusion_id, const AnfNodePtr &output_item,
const AnfNodePtr &replace_item) {
for (int64_t id = fusion_id + 1; id <= SizeToLong(buffer_fusion_infos->size()); ++id) {
@ -176,7 +176,7 @@ void ReplaceInputNodeInOtherFusionScope(std::unordered_map<int64_t, BufferFusion
}
}
void ReplaceOldNode(std::unordered_map<int64_t, BufferFusionInfo_t> *buffer_fusion_infos, int64_t fusion_id,
void ReplaceOldNode(mindspore::HashMap<int64_t, BufferFusionInfo_t> *buffer_fusion_infos, int64_t fusion_id,
const AnfNodePtr &buffer_fusion_kernel, session::KernelGraph *kernel_graph) {
MS_EXCEPTION_IF_NULL(kernel_graph);
MS_EXCEPTION_IF_NULL(buffer_fusion_infos);
@ -204,7 +204,7 @@ void ReplaceOldNode(std::unordered_map<int64_t, BufferFusionInfo_t> *buffer_fusi
}
void GetFusionScopeComputeNodeList(session::KernelGraph *kernel_graph,
std::unordered_map<int64_t, BufferFusionInfo_t> *buffer_fusion_infos) {
mindspore::HashMap<int64_t, BufferFusionInfo_t> *buffer_fusion_infos) {
MS_EXCEPTION_IF_NULL(buffer_fusion_infos);
MS_EXCEPTION_IF_NULL(kernel_graph);
auto nodes = TopoSort(kernel_graph->get_return());
@ -222,7 +222,7 @@ void GetFusionScopeComputeNodeList(session::KernelGraph *kernel_graph,
}
void GetFusionScopeInputNodeList(const session::KernelGraph &kernel_graph,
std::unordered_map<int64_t, BufferFusionInfo_t> *buffer_fusion_infos) {
mindspore::HashMap<int64_t, BufferFusionInfo_t> *buffer_fusion_infos) {
MS_EXCEPTION_IF_NULL(buffer_fusion_infos);
auto manager = kernel_graph.manager();
MS_EXCEPTION_IF_NULL(manager);
@ -283,7 +283,7 @@ AnfNodePtr RemoveNodeFromUpdateState(session::KernelGraph *kernel_graph, const A
}
void GetFusionScopeOutputNodeList(session::KernelGraph *kernel_graph,
std::unordered_map<int64_t, BufferFusionInfo_t> *buffer_fusion_infos) {
mindspore::HashMap<int64_t, BufferFusionInfo_t> *buffer_fusion_infos) {
MS_EXCEPTION_IF_NULL(kernel_graph);
MS_EXCEPTION_IF_NULL(buffer_fusion_infos);
auto manager = kernel_graph->manager();
@ -349,7 +349,7 @@ void GetFusionScopeOutputNodeList(session::KernelGraph *kernel_graph,
}
void SetOutputUsedNumAttr(const session::KernelGraph &kernel_graph,
const std::unordered_map<int64_t, BufferFusionInfo_t> &buffer_fusion_infos) {
const mindspore::HashMap<int64_t, BufferFusionInfo_t> &buffer_fusion_infos) {
for (auto &fusion_info : buffer_fusion_infos) {
auto &fusion_nodes = fusion_info.second.anf_nodes;
for (auto iter = fusion_nodes.begin(); iter != fusion_nodes.end() - 1; ++iter) {
@ -408,7 +408,7 @@ bool CheckCircle(const session::KernelGraph &kernel_graph, const BufferFusionInf
}
void RemoveCircle(const session::KernelGraph &kernel_graph,
std::unordered_map<int64_t, BufferFusionInfo_t> *buffer_fusion_infos) {
mindspore::HashMap<int64_t, BufferFusionInfo_t> *buffer_fusion_infos) {
MS_EXCEPTION_IF_NULL(buffer_fusion_infos);
std::vector<int64_t> fusion_ids;
for (auto &[fusion_id, fusion_info] : *buffer_fusion_infos) {
@ -425,7 +425,7 @@ void RemoveCircle(const session::KernelGraph &kernel_graph,
} // namespace
void UbPatternFusion::GetBufferFusionInfo(session::KernelGraph *kernel_graph,
std::unordered_map<int64_t, BufferFusionInfo_t> *buffer_fusion_infos) const {
mindspore::HashMap<int64_t, BufferFusionInfo_t> *buffer_fusion_infos) const {
MS_EXCEPTION_IF_NULL(buffer_fusion_infos);
MS_EXCEPTION_IF_NULL(kernel_graph);
GetFusionScopeComputeNodeList(kernel_graph, buffer_fusion_infos);
@ -449,17 +449,17 @@ void UbPatternFusion::GetBufferFusionInfo(session::KernelGraph *kernel_graph,
bool UbPatternFusion::FuseBufferFusionPattern(session::KernelGraph *kernel_graph) const {
MS_EXCEPTION_IF_NULL(kernel_graph);
bool change = false;
std::unordered_map<int64_t, BufferFusionInfo_t> buffer_fusion_infos;
mindspore::HashMap<int64_t, BufferFusionInfo_t> buffer_fusion_infos;
GetBufferFusionInfo(kernel_graph, &buffer_fusion_infos);
std::vector<mindspore::kernel::FusionScopeInfo> fusion_scope_infos;
std::transform(
buffer_fusion_infos.begin(), buffer_fusion_infos.end(), std::back_inserter(fusion_scope_infos),
[](const std::pair<int64_t, BufferFusionInfo_t> &buffer_fusion_info) -> mindspore::kernel::FusionScopeInfo {
return mindspore::kernel::FusionScopeInfo(
buffer_fusion_info.first, buffer_fusion_info.second.full_name, buffer_fusion_info.second.inputs_list,
buffer_fusion_info.second.anf_nodes, buffer_fusion_info.second.outputs_list);
});
std::transform(buffer_fusion_infos.begin(), buffer_fusion_infos.end(), std::back_inserter(fusion_scope_infos),
[](const auto &buffer_fusion_info) -> mindspore::kernel::FusionScopeInfo {
return mindspore::kernel::FusionScopeInfo(
buffer_fusion_info.first, buffer_fusion_info.second.full_name,
buffer_fusion_info.second.inputs_list, buffer_fusion_info.second.anf_nodes,
buffer_fusion_info.second.outputs_list);
});
auto &build_manager = kernel::ascend::TbeKernelCompileManager::GetInstance();
auto id_names = build_manager.TbeFusionOpCompile(fusion_scope_infos);
std::set<int64_t> fusion_ids;
@ -486,7 +486,7 @@ bool UbPatternFusion::FuseBufferFusionPattern(session::KernelGraph *kernel_graph
return change;
}
bool UbPatternFusion::ReplaceFusionOp(std::unordered_map<int64_t, BufferFusionInfo_t> *buffer_fusion_infos,
bool UbPatternFusion::ReplaceFusionOp(mindspore::HashMap<int64_t, BufferFusionInfo_t> *buffer_fusion_infos,
int64_t fusion_id, session::KernelGraph *kernel_graph) const {
MS_EXCEPTION_IF_NULL(buffer_fusion_infos);
auto buffer_fusion_info = (*buffer_fusion_infos)[fusion_id];

View File

@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -15,11 +15,11 @@
*/
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_UB_PATTERN_FUSION_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_UB_PATTERN_FUSION_H_
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include <string>
#include <map>
#include "utils/hash_map.h"
#include "utils/hash_set.h"
#include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h"
#include "ir/anf.h"
#include "backend/optimizer/common/pass.h"
@ -30,7 +30,7 @@
namespace mindspore {
namespace opt {
using FusedNodeRecord = std::vector<std::unordered_set<AnfNodePtr>>;
using FusedNodeRecord = std::vector<mindspore::HashSet<AnfNodePtr>>;
class UbPatternFusion : public PassWithSwitch {
public:
@ -41,8 +41,8 @@ class UbPatternFusion : public PassWithSwitch {
bool RunPass(const FuncGraphPtr &graph) override;
void GetBufferFusionInfo(session::KernelGraph *kernel_graph,
std::unordered_map<int64_t, BufferFusionInfo_t> *buffer_fusion_infos) const;
bool ReplaceFusionOp(std::unordered_map<int64_t, BufferFusionInfo_t> *buffer_fusion_infos, int64_t fusion_id,
mindspore::HashMap<int64_t, BufferFusionInfo_t> *buffer_fusion_infos) const;
bool ReplaceFusionOp(mindspore::HashMap<int64_t, BufferFusionInfo_t> *buffer_fusion_infos, int64_t fusion_id,
session::KernelGraph *kernel_graph) const;
bool FuseBufferFusionPattern(session::KernelGraph *kernel_graph) const;
};

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -17,10 +17,10 @@
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "utils/hash_map.h"
#include "backend/session/anf_runtime_algorithm.h"
#include "backend/optimizer/common/helper.h"
namespace mindspore {
@ -79,7 +79,7 @@ void ConvertCastFormat::ChangeCastFormat(const CNodePtr &cast_node, const FuncGr
AnfAlgo::SetNodeAttr(kAttrVisited, MakeValue(true), cast_node);
auto used_cast_node_list = GetRealNodeUsedList(func_graph, cast_node);
MS_EXCEPTION_IF_NULL(used_cast_node_list);
std::unordered_map<string, size_t> format_counter = CalculateFormat(used_cast_node_list, cast_node);
mindspore::HashMap<string, size_t> format_counter = CalculateFormat(used_cast_node_list, cast_node);
auto cast_input_format = AnfAlgo::GetPrevNodeOutputFormat(cast_node, 0);
string convert_format = kOpFormat_DEFAULT;
if (cast_input_format == kOpFormat_DEFAULT) {
@ -109,12 +109,12 @@ void ConvertCastFormat::ChangeCastFormat(const CNodePtr &cast_node, const FuncGr
}
}
std::unordered_map<string, size_t> ConvertCastFormat::CalculateFormat(
mindspore::HashMap<string, size_t> ConvertCastFormat::CalculateFormat(
const std::shared_ptr<std::vector<std::pair<AnfNodePtr, int>>> &used_cast_node_list,
const CNodePtr &cast_node) const {
MS_EXCEPTION_IF_NULL(used_cast_node_list);
MS_EXCEPTION_IF_NULL(cast_node);
std::unordered_map<string, size_t> format_counter;
mindspore::HashMap<string, size_t> format_counter;
for (const auto &node_info : *used_cast_node_list) {
MS_EXCEPTION_IF_NULL(node_info.first);
auto cast_out_node = node_info.first->cast<CNodePtr>();

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -17,11 +17,11 @@
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_FORMAT_TYPE_CONVERT_CAST_FORMAT_H_
#include <string>
#include <unordered_map>
#include <utility>
#include <memory>
#include <vector>
#include "utils/hash_map.h"
#include "backend/optimizer/common/optimizer.h"
namespace mindspore {
@ -34,7 +34,7 @@ class ConvertCastFormat : public PatternProcessPass {
const AnfNodePtr Process(const FuncGraphPtr &func_graph, const AnfNodePtr &, const EquivPtr &) const override;
private:
std::unordered_map<string, size_t> CalculateFormat(
mindspore::HashMap<string, size_t> CalculateFormat(
const std::shared_ptr<std::vector<std::pair<AnfNodePtr, int>>> &used_cast_node_list,
const CNodePtr &cast_node) const;
void ChangeCastFormat(const CNodePtr &cast_node, const FuncGraphPtr &func_graph) const;

View File

@ -17,7 +17,7 @@
#include <string>
#include <vector>
#include <memory>
#include <unordered_set>
#include "utils/hash_set.h"
#include "backend/optimizer/common/helper.h"
#include "backend/kernel_compiler/kernel_build_info.h"
#include "utils/utils.h"
@ -164,7 +164,7 @@ const AnfNodePtr TopKSplit::Process(const FuncGraphPtr &func_graph, const AnfNod
auto new_value_node = std::make_shared<ValueNode>(MakeValue(*data));
new_cnode->set_input(kTopkIndexK + 1, new_value_node);
std::unordered_set<size_t> attr_index{kTopkIndexK};
mindspore::HashSet<size_t> attr_index{kTopkIndexK};
ConstInputToAttr(new_cnode, attr_index);
auto indices_const = CreateValueNode();
new_cnode->add_input(indices_const);

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -95,7 +95,7 @@ InputToOutputRegistry &InputToOutputRegistry::Instance() {
void InputToOutputRegistry::Register(const InputToOutputRegister &reg) {
auto op_name = reg.op_name();
if (op_input_to_output_map_.find(op_name) == op_input_to_output_map_.end()) {
(void)op_input_to_output_map_.insert(make_pair(op_name, reg));
(void)op_input_to_output_map_.emplace(op_name, reg);
MS_LOG(DEBUG) << op_name << " input2output register successfully!";
}
}
@ -105,7 +105,7 @@ void InputToOutputRegistry::Register(const std::string &op_name, const std::vect
if (op_input_to_output_map_.find(op_name) == op_input_to_output_map_.end()) {
InputToOutputRegister reg(op_name, pre_check_func);
reg.set_input_indices(input_indices);
(void)op_input_to_output_map_.insert(make_pair(op_name, reg));
(void)op_input_to_output_map_.emplace(op_name, reg);
MS_LOG(DEBUG) << op_name << " input2output register successfully!";
}
}

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,9 +16,9 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_IR_FUSION_INPUT_TO_OUTPUT_REGISTRY_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_IR_FUSION_INPUT_TO_OUTPUT_REGISTRY_H_
#include <string>
#include <unordered_map>
#include <vector>
#include <utility>
#include "utils/hash_map.h"
#include "ir/anf.h"
#include "utils/ms_utils.h"
@ -56,7 +56,7 @@ class InputToOutputRegistry {
InputToOutputRegistry();
~InputToOutputRegistry() = default;
DISABLE_COPY_AND_ASSIGN(InputToOutputRegistry)
std::unordered_map<std::string, InputToOutputRegister> op_input_to_output_map_;
mindspore::HashMap<std::string, InputToOutputRegister> op_input_to_output_map_;
};
} // namespace opt
} // namespace mindspore

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -20,8 +20,8 @@
#include <vector>
#include <string>
#include <utility>
#include <unordered_map>
#include <memory>
#include "utils/hash_map.h"
#include "ir/anf.h"
#include "backend/optimizer/common/pattern_engine.h"
#include "backend/optimizer/common/helper.h"

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -20,8 +20,8 @@
#include <vector>
#include <string>
#include <utility>
#include <unordered_map>
#include <memory>
#include "utils/hash_map.h"
#include "ir/anf.h"
#include "backend/optimizer/common/pattern_engine.h"
#include "backend/optimizer/common/helper.h"

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -94,17 +94,17 @@ ConstInputToAttrInfoRegistry &ConstInputToAttrInfoRegistry::Instance() {
void ConstInputToAttrInfoRegistry::Register(const ConstInputToAttrInfoRegister &reg) {
auto op_name = reg.GetOpName();
if (op_input_to_attr_map_.find(op_name) == op_input_to_attr_map_.end()) {
(void)op_input_to_attr_map_.insert(make_pair(op_name, reg));
(void)op_input_to_attr_map_.emplace(op_name, reg);
MS_LOG(DEBUG) << op_name << " const2attr register successfully!";
}
}
void ConstInputToAttrInfoRegistry::Register(const std::string &op_name,
const std::unordered_set<size_t> &input_attr_set) {
const mindspore::HashSet<size_t> &input_attr_set) {
if (op_input_to_attr_map_.find(op_name) == op_input_to_attr_map_.end()) {
ConstInputToAttrInfoRegister reg(op_name);
(void)reg.SetConstInputToAttr(input_attr_set);
(void)op_input_to_attr_map_.insert(make_pair(op_name, reg));
(void)op_input_to_attr_map_.emplace(op_name, reg);
MS_LOG(DEBUG) << op_name << " const2attr register successfully!";
}
}

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,9 +16,9 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_COMMON_CONST_INPUT_TO_ATTR_REGISTRY_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_COMMON_CONST_INPUT_TO_ATTR_REGISTRY_H_
#include <string>
#include <unordered_map>
#include <unordered_set>
#include "utils/hash_map.h"
#include "utils/hash_set.h"
#include "utils/ms_utils.h"
namespace mindspore {
@ -33,31 +33,31 @@ class ConstInputToAttrInfoRegister {
return *this;
}
ConstInputToAttrInfoRegister &SetConstInputToAttr(const std::unordered_set<size_t> &input_attr_set) {
ConstInputToAttrInfoRegister &SetConstInputToAttr(const mindspore::HashSet<size_t> &input_attr_set) {
(void)input_attr_set_.insert(input_attr_set.begin(), input_attr_set.end());
return *this;
}
const std::unordered_set<size_t> &GetConstInputAttrInfo() const { return input_attr_set_; }
const mindspore::HashSet<size_t> &GetConstInputAttrInfo() const { return input_attr_set_; }
const std::string &GetOpName() const { return op_name_; }
private:
std::string op_name_;
std::unordered_set<size_t> input_attr_set_;
mindspore::HashSet<size_t> input_attr_set_;
};
class ConstInputToAttrInfoRegistry {
public:
static ConstInputToAttrInfoRegistry &Instance();
void Register(const ConstInputToAttrInfoRegister &reg);
void Register(const std::string &op_name, const std::unordered_set<size_t> &input_attr_set);
void Register(const std::string &op_name, const mindspore::HashSet<size_t> &input_attr_set);
bool GetRegisterByOpName(const std::string &op_name, ConstInputToAttrInfoRegister *reg) const;
private:
ConstInputToAttrInfoRegistry();
~ConstInputToAttrInfoRegistry() = default;
DISABLE_COPY_AND_ASSIGN(ConstInputToAttrInfoRegistry)
std::unordered_map<std::string, ConstInputToAttrInfoRegister> op_input_to_attr_map_;
mindspore::HashMap<std::string, ConstInputToAttrInfoRegister> op_input_to_attr_map_;
};
struct ConstInputToAttrInfoReceiver {

View File

@ -17,11 +17,11 @@
#include "backend/optimizer/common/helper.h"
#include <string>
#include <utility>
#include <unordered_set>
#include <algorithm>
#include <map>
#include <set>
#include <deque>
#include "utils/hash_set.h"
#include "utils/utils.h"
#include "base/base_ref.h"
#include "backend/session/anf_runtime_algorithm.h"
@ -74,7 +74,7 @@ bool IsDepend(const FuncGraph &graph, const AnfNodePtr &node, const std::vector<
FuncGraphManagerPtr manager = graph.manager();
MS_EXCEPTION_IF_NULL(manager);
std::unordered_set<AnfNodePtr> seen_node;
mindspore::HashSet<AnfNodePtr> seen_node;
std::deque<AnfNodePtr> todo{node};
while (!todo.empty()) {
AnfNodePtr nd = todo.front();
@ -299,7 +299,7 @@ bool IsNopNode(const AnfNodePtr &node) {
return false;
}
static std::unordered_set<std::string> nop_nodes = {prim::kPrimReshape->name(), kExpandDimsOpName,
static mindspore::HashSet<std::string> nop_nodes = {prim::kPrimReshape->name(), kExpandDimsOpName,
prim::kPrimSqueeze->name(), prim::kPrimFlatten->name(),
kFlattenGradOpName, prim::kPrimReformat->name()};
if (node == nullptr || !node->isa<CNode>()) {
@ -580,7 +580,7 @@ ValueNodePtr CreateShapeValueNode(const FuncGraphPtr &func_graph, const std::vec
return shape_value_node;
}
void ConstInputToAttr(const CNodePtr &cnode, const std::unordered_set<size_t> &input_attrs) {
void ConstInputToAttr(const CNodePtr &cnode, const mindspore::HashSet<size_t> &input_attrs) {
MS_EXCEPTION_IF_NULL(cnode);
std::vector<AnfNodePtr> new_inputs;
auto primitive = AnfAlgo::GetCNodePrimitive(cnode);
@ -1074,7 +1074,7 @@ int64_t GetNodeOutputTotalUsedNum(const session::KernelGraph &kernel_graph, cons
return std::accumulate(output_used_num.begin(), output_used_num.end(), int64_t(0));
}
void GetCustomOpAttrIndex(const PrimitivePtr &primitive, std::unordered_set<size_t> *indexes) {
void GetCustomOpAttrIndex(const PrimitivePtr &primitive, mindspore::HashSet<size_t> *indexes) {
if (primitive == nullptr || primitive->name() != prim::kPrimCustom->name()) {
return;
}

View File

@ -21,7 +21,7 @@
#include <utility>
#include <string>
#include <set>
#include <unordered_set>
#include "utils/hash_set.h"
#include "ir/func_graph.h"
#include "backend/session/kernel_graph.h"
#include "utils/ms_utils.h"
@ -194,7 +194,7 @@ std::shared_ptr<std::vector<std::pair<AnfNodePtr, int>>> GetRealNodeUsedListByOu
size_t output_index);
bool IsNotRealUsedByOthers(const FuncGraphPtr &graph, const AnfNodePtr &node);
void ConstInputToAttr(const CNodePtr &cnode, const std::unordered_set<size_t> &input_attrs);
void ConstInputToAttr(const CNodePtr &cnode, const mindspore::HashSet<size_t> &input_attrs);
bool AnfEqual(const BaseRef &a, const BaseRef &b);
@ -236,7 +236,7 @@ std::vector<int64_t> GetNodeOutputUsedNum(const session::KernelGraph &kernel_gra
int64_t GetNodeOutputTotalUsedNum(const session::KernelGraph &kernel_graph, const AnfNodePtr &node);
// Get custom operator attr input indexes
void GetCustomOpAttrIndex(const PrimitivePtr &primitive, std::unordered_set<size_t> *indexes);
void GetCustomOpAttrIndex(const PrimitivePtr &primitive, mindspore::HashSet<size_t> *indexes);
} // namespace opt
} // namespace mindspore
#endif // MINDSPORE_CCSRC_BACKEND_OPTIMIZER_COMMON_HELPER_H_

View File

@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -15,12 +15,12 @@
*/
#include "backend/optimizer/common/node_pass.h"
#include <unordered_set>
#include <unordered_map>
#include <deque>
#include "ir/anf.h"
#include "ir/func_graph.h"
#include "ir/manager.h"
#include "utils/hash_map.h"
#include "utils/hash_set.h"
#include "backend/session/anf_runtime_algorithm.h"
namespace mindspore {
@ -29,7 +29,7 @@ const size_t kSwitchBranchIndex = 2;
const size_t kCallArgsIndex = 1;
const size_t kPartialArgsIndex = 1;
void AddOutputAndCallerToMap(const CNodePtr &cnode, std::unordered_map<AnfNodePtr, AnfNodePtr> *out_caller_map) {
void AddOutputAndCallerToMap(const CNodePtr &cnode, mindspore::HashMap<AnfNodePtr, AnfNodePtr> *out_caller_map) {
MS_EXCEPTION_IF_NULL(cnode);
MS_EXCEPTION_IF_NULL(out_caller_map);
auto inputs = cnode->inputs();
@ -56,8 +56,8 @@ bool NodePass::Run(const FuncGraphPtr &func_graph) {
MS_EXCEPTION_IF_NULL(manager);
manager->AddFuncGraph(func_graph);
std::unordered_map<AnfNodePtr, AnfNodePtr> subgraph_out_caller_map = {};
std::unordered_set<AnfNodePtr> seen_node;
mindspore::HashMap<AnfNodePtr, AnfNodePtr> subgraph_out_caller_map = {};
mindspore::HashSet<AnfNodePtr> seen_node;
std::deque<std::pair<AnfNodePtr, FuncGraphPtr>> todo{{func_graph->output(), func_graph}};
bool changes = false;
while (!todo.empty()) {

View File

@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -19,8 +19,8 @@
#include <memory>
#include <string>
#include <vector>
#include <unordered_map>
#include "utils/hash_map.h"
#include "ir/anf.h"
#include "ir/func_graph.h"
#include "ir/primitive.h"

View File

@ -1,7 +1,7 @@
/**
* This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
*
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -23,8 +23,6 @@
#include <sstream>
#include <memory>
#include <vector>
#include <unordered_set>
#include <unordered_map>
#include <initializer_list>
#include <iostream>
#include <algorithm>
@ -33,6 +31,8 @@
#include <list>
#include <utility>
#include "utils/hash_map.h"
#include "utils/hash_set.h"
#include "backend/optimizer/common/visit.h"
#include "base/base.h"
#include "utils/log_adapter.h"
@ -158,7 +158,7 @@ std::ostream &operator<<(std::ostream &os, const VarPtr &var);
using Equiv = std::map<VarPtr, BaseRef>;
using EquivPtr = std::shared_ptr<Equiv>;
using PrimitiveVarMap = std::unordered_map<PrimitivePtr, VarPtr>;
using PrimitiveVarMap = mindspore::HashMap<PrimitivePtr, VarPtr>;
using PrimitiveVarMapPtr = std::shared_ptr<PrimitiveVarMap>;
inline bool DefaultTypeEq(const BaseRef &x, const BaseRef &y) { return x.type() == y.type(); }

View File

@ -1,7 +1,7 @@
/**
* This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
*
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -19,13 +19,13 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_COMMON_VISIT_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_COMMON_VISIT_H_
#include <unordered_map>
#include <stdexcept>
#include <list>
#include <vector>
#include <string>
#include <memory>
#include "utils/hash_map.h"
#include "base/base.h"
#include "base/base_ref.h"

View File

@ -16,12 +16,12 @@
#include "backend/optimizer/cpu/insert_format_transform_op.h"
#include <unordered_set>
#include <numeric>
#include <memory>
#include <string>
#include <vector>
#include <utility>
#include "utils/hash_set.h"
#include "backend/kernel_compiler/kernel_build_info.h"
#include "backend/session/anf_runtime_algorithm.h"
#include "backend/session/kernel_graph.h"
@ -172,7 +172,7 @@ void InsertTransformOpForOutput(const FuncGraphPtr &graph, const AnfNodePtr &nod
}
} // namespace
const std::unordered_set<std::string> kChannelLastKernel = {prim::kPrimBiasAdd->name()};
const mindspore::HashSet<std::string> kChannelLastKernel = {prim::kPrimBiasAdd->name()};
bool InsertFormatTransformOpCPU::Run(const FuncGraphPtr &graph) {
MS_EXCEPTION_IF_NULL(graph);

View File

@ -16,9 +16,9 @@
#include "backend/optimizer/gpu/adjust_depend_for_parallel_optimizer_recompute_all_gather_fusion.h"
#include <unordered_map>
#include <algorithm>
#include "utils/hash_map.h"
#include "backend/session/anf_runtime_algorithm.h"
#include "utils/utils.h"
@ -26,7 +26,7 @@ namespace mindspore {
namespace opt {
bool AdjustDependForParallelOptimizerRecomputeAllGatherFusion::Run(const FuncGraphPtr &graph) {
MS_EXCEPTION_IF_NULL(graph);
std::unordered_map<int64_t, bool> forward_allgather_recompute_value_in_fusion_group;
mindspore::HashMap<int64_t, bool> forward_allgather_recompute_value_in_fusion_group;
std::vector<AnfNodePtr> node_list = TopoSort(graph->get_return());
std::vector<int64_t> parallel_optimizer_recompute_allgather_fusion_ids;
std::vector<AnfNodePtr> parallel_optimizer_recompute_allgathers;

View File

@ -18,8 +18,8 @@
#include <memory>
#include <string>
#include <vector>
#include <unordered_set>
#include "utils/hash_set.h"
#include "backend/optimizer/common/helper.h"
#include "backend/session/anf_runtime_algorithm.h"
#include "ir/primitive.h"
@ -27,7 +27,7 @@
namespace mindspore {
namespace opt {
const std::unordered_set<std::string> kConv3DKernel = {prim::kPrimConv3DBackpropInput->name(),
const mindspore::HashSet<std::string> kConv3DKernel = {prim::kPrimConv3DBackpropInput->name(),
prim::kPrimConv3DBackpropFilter->name(),
prim::kPrimConv3D->name(), prim::kPrimConv3DTranspose->name()};

View File

@ -18,12 +18,12 @@
#include <algorithm>
#include <list>
#include <string>
#include <unordered_set>
#include <functional>
#include <set>
#include <vector>
#include <unordered_map>
#include "utils/hash_map.h"
#include "utils/hash_set.h"
#include "backend/optimizer/graph_kernel/graph_kernel_helper.h"
#include "backend/optimizer/graph_kernel/core/graph_builder.h"
#include "backend/session/anf_runtime_algorithm.h"
@ -32,7 +32,7 @@
namespace mindspore::graphkernel {
// operator which follows commutative rules
static std::unordered_set<std::string> commutative_ops{"Add", "Mul"};
static mindspore::HashSet<std::string> commutative_ops{"Add", "Mul"};
class PatternNode;
using PatternNodePtr = std::shared_ptr<PatternNode>;
@ -51,8 +51,8 @@ class PatternNode {
std::vector<PatternNodePtr> inputs_;
};
using ParaMap = std::unordered_map<char, inner::NodePtr>;
using ConstMap = std::unordered_map<std::string, inner::NodePtr>;
using ParaMap = mindspore::HashMap<char, inner::NodePtr>;
using ConstMap = mindspore::HashMap<std::string, inner::NodePtr>;
/* This class works to store a kind of pattern tree; it needs a string expression to construct;
Ex."Pow(Exp(A),B)=Exp(Mul(A,B))"
@ -86,10 +86,10 @@ class PatternTree {
protected:
// set attributes for certain pattern node if needed;
virtual std::unordered_map<PatternNodePtr, inner::DAttrs> SetAttributes(const inner::NodePtr &) {
virtual mindspore::HashMap<PatternNodePtr, inner::DAttrs> SetAttributes(const inner::NodePtr &) {
auto right_pattern = std::make_shared<PatternNodePtrList>();
DfsTraverse(right_pattern, rhs_root_);
std::unordered_map<PatternNodePtr, inner::DAttrs> attrs_map;
mindspore::HashMap<PatternNodePtr, inner::DAttrs> attrs_map;
for (auto &i : (*right_pattern)) {
attrs_map[i] = {};
}
@ -319,7 +319,7 @@ inner::NodePtr PatternTree::AlterGraph(const std::shared_ptr<ParaMap> &para_to_r
DfsTraverse(res, rhs_root_);
auto all_attrs = SetAttributes(origin_root);
inner::LiteGraph::GraphBuilder gb("");
std::unordered_map<PatternNodePtr, inner::NodePtr> pattern_to_ref;
mindspore::HashMap<PatternNodePtr, inner::NodePtr> pattern_to_ref;
for (auto &n : (*res)) {
if (PatternNodeType(n->op()) != inner::NType::Primitive) continue;
inner::NodePtrList inputs;
@ -367,7 +367,7 @@ class ExtraReduce1PatternTree : public PatternTree {
return (GetValue<bool>((origin_root->inputs()[0])->attrs().find("keep_dims")->second) ==
GetValue<bool>(origin_root->attrs().find("keep_dims")->second));
}
std::unordered_map<PatternNodePtr, inner::DAttrs> SetAttributes(const inner::NodePtr &origin_root) override {
mindspore::HashMap<PatternNodePtr, inner::DAttrs> SetAttributes(const inner::NodePtr &origin_root) override {
auto attrs_map = PatternTree::SetAttributes(origin_root);
std::vector<int64_t> axis;
std::set<int64_t> axis_set;
@ -384,7 +384,7 @@ class ExtraReduce1PatternTree : public PatternTree {
auto first_axis = GetValue<std::vector<int64_t>>(first_reduce->attrs().find("axis")->second);
auto second_axis = GetValue<std::vector<int64_t>>(origin_root->attrs().find("axis")->second);
std::set<int64_t> st(first_axis.begin(), first_axis.end());
std::unordered_map<int64_t, int64_t> mp;
mindspore::HashMap<int64_t, int64_t> mp;
int64_t shift = 0;
for (int64_t n = 0; n < SizeToLong(first_reduce->inputs()[0]->shape.size()); n++) {
if (st.find(n) != st.end()) {
@ -409,7 +409,7 @@ class ExtraReduce2PatternTree : public PatternTree {
~ExtraReduce2PatternTree() = default;
protected:
std::unordered_map<PatternNodePtr, inner::DAttrs> SetAttributes(const inner::NodePtr &origin_root) override {
mindspore::HashMap<PatternNodePtr, inner::DAttrs> SetAttributes(const inner::NodePtr &origin_root) override {
auto attrs_map = PatternTree::SetAttributes(origin_root);
bool keep_dims = GetValue<bool>(origin_root->attrs().find("keep_dims")->second);
auto axis = GetValue<std::vector<int64_t>>(origin_root->attrs().find("axis")->second);
@ -427,7 +427,7 @@ class ExtraReduce2PatternTree : public PatternTree {
this case.
*/
bool OutsideRely(const inner::NodePtrList &nodes, const inner::NodePtr &root) {
std::unordered_set<inner::Node *> nodes_can_simplify;
mindspore::HashSet<inner::Node *> nodes_can_simplify;
std::for_each(nodes.begin(), nodes.end(), [&nodes_can_simplify](auto n) { nodes_can_simplify.insert(n.get()); });
for (auto &n : nodes) {
if (n == root) {
@ -526,12 +526,12 @@ static std::vector<Expression> expressions = {
{62, "CImag(Complex(A,B))=B", EXPR_PATTERN(PatternTree)},
};
std::unordered_map<std::string, std::vector<PatternTreePtr>> GetExpressions() {
mindspore::HashMap<std::string, std::vector<PatternTreePtr>> GetExpressions() {
const auto &flags = GraphKernelFlags::GetInstance();
std::unordered_map<std::string, std::vector<PatternTreePtr>> expression_map;
std::unordered_set<std::string> enable_ids{flags.enable_simplify_exprs_only.begin(),
mindspore::HashMap<std::string, std::vector<PatternTreePtr>> expression_map;
mindspore::HashSet<std::string> enable_ids{flags.enable_simplify_exprs_only.begin(),
flags.enable_simplify_exprs_only.end()};
std::unordered_set<std::string> disable_ids{flags.disable_simplify_exprs.begin(), flags.disable_simplify_exprs.end()};
mindspore::HashSet<std::string> disable_ids{flags.disable_simplify_exprs.begin(), flags.disable_simplify_exprs.end()};
for (auto &e : expressions) {
if (!enable_ids.empty()) {
if (enable_ids.count(std::to_string(e.id)) == 0) continue;

View File

@ -18,9 +18,9 @@
#include <memory>
#include <vector>
#include <unordered_map>
#include <string>
#include "utils/hash_map.h"
#include "backend/optimizer/common/optimizer.h"
#include "ir/func_graph.h"
#include "backend/optimizer/graph_kernel/model/lite_graph.h"
@ -37,7 +37,7 @@ class ArithmeticSimplify : public opt::Pass {
private:
bool DoArithmeticTrans(const inner::LiteGraphPtr &litegraph);
bool DoConstantFold(const inner::LiteGraphPtr &litegraph);
std::unordered_map<std::string, std::vector<PatternTreePtr>> expressions_map_;
mindspore::HashMap<std::string, std::vector<PatternTreePtr>> expressions_map_;
};
using ArithmeticSimplifyPtr = std::shared_ptr<ArithmeticSimplify>;
} // namespace mindspore::graphkernel

View File

@ -16,11 +16,11 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_EXPANDERS_EXPANDER_FACTORY_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_EXPANDERS_EXPANDER_FACTORY_H_
#include <unordered_map>
#include <functional>
#include <string>
#include <memory>
#include "utils/hash_map.h"
#include "backend/optimizer/graph_kernel/expanders/utils.h"
namespace mindspore::graphkernel::expanders {
@ -44,7 +44,7 @@ class OpExpanderFactory {
void Register(const std::string &op, const RegFunc &func) { creators[op] = func; }
private:
std::unordered_map<std::string, RegFunc> creators;
mindspore::HashMap<std::string, RegFunc> creators;
};
class OpExpanderRegister {

View File

@ -21,6 +21,7 @@
#include <fstream>
#include <string>
#include "utils/hash_map.h"
#include "base/core_ops.h"
#include "ir/graph_utils.h"
#include "utils/anf_utils.h"
@ -137,7 +138,7 @@ class Graph {
size_t seed_{0}; // visited flag of dfs.
size_t max_node_id_; // largest node id of a cluster
Cluster(size_t node_id, const AnfNodePtr &node, const std::unordered_map<AnfNodePtr, size_t> &node_idx_map)
Cluster(size_t node_id, const AnfNodePtr &node, const mindspore::HashMap<AnfNodePtr, size_t> &node_idx_map)
: cluster_id_(node_id), max_node_id_(node_id) {
auto cnode = node->cast<CNodePtr>();
MS_EXCEPTION_IF_NULL(cnode);
@ -170,7 +171,7 @@ class Graph {
public:
// Init and build graph
Graph(const AnfNodePtrList &nodes, const std::unordered_map<AnfNodePtr, size_t> &node_idx_map) {
Graph(const AnfNodePtrList &nodes, const mindspore::HashMap<AnfNodePtr, size_t> &node_idx_map) {
clusters_.reserve(nodes.size());
for (size_t i = 0; i < nodes.size(); i++) {
(void)clusters_.emplace_back(i, nodes[i], node_idx_map);

View File

@ -17,9 +17,10 @@
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_GRAPH_KERNEL_CLUSTER_H_
#include <vector>
#include <unordered_map>
#include <memory>
#include <sstream>
#include "utils/hash_map.h"
#include "ir/anf.h"
#include "backend/optimizer/common/optimizer.h"
@ -51,7 +52,7 @@ class GraphKernelCluster : public opt::Pass {
GraphPtr graph_{nullptr};
std::vector<AnfNodePtr> nodes_;
std::unordered_map<AnfNodePtr, size_t> node_idx_map_;
mindspore::HashMap<AnfNodePtr, size_t> node_idx_map_;
std::stringstream dump_buf_;
std::vector<PrimitivePtr> op_list_;
};

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -51,17 +51,16 @@ bool IsCNodePrimitveEqual(const CNodePtr &main, const CNodePtr &node, const std:
return false;
}
auto all = std::all_of(main_attrs.begin(), main_attrs.end(),
[&node_attrs](const std::pair<std::string, ValuePtr> &item) -> bool {
if (item.second == nullptr) {
return false;
}
auto iter = node_attrs.find(item.first);
if (iter == node_attrs.end()) {
return false;
}
return *item.second == *iter->second;
});
auto all = std::all_of(main_attrs.begin(), main_attrs.end(), [&node_attrs](const auto &item) -> bool {
if (item.second == nullptr) {
return false;
}
auto iter = node_attrs.find(item.first);
if (iter == node_attrs.end()) {
return false;
}
return *item.second == *iter->second;
});
return all;
}

View File

@ -19,9 +19,9 @@
#include <map>
#include <set>
#include <tuple>
#include <unordered_set>
#include <utility>
#include "utils/hash_set.h"
#include "backend/kernel_compiler/common_utils.h"
#include "backend/kernel_compiler/akg/akg_kernel_json_generator.h"
#include "backend/kernel_compiler/akg/akg_kernel_json_decoder.h"

View File

@ -21,9 +21,9 @@
#include <set>
#include <string>
#include <tuple>
#include <unordered_set>
#include <utility>
#include <vector>
#include "utils/hash_set.h"
#include "ir/anf.h"
#include "ir/func_graph.h"
#include "ir/primitive.h"

View File

@ -17,11 +17,11 @@
#include <algorithm>
#include <vector>
#include <string>
#include <unordered_set>
#include <utility>
#include <queue>
#include <map>
#include <unordered_map>
#include "utils/hash_map.h"
#include "utils/hash_set.h"
#include "frontend/optimizer/irpass.h"
#include "pipeline/jit/parse/python_adapter.h"
#include "backend/session/anf_runtime_algorithm.h"
@ -178,7 +178,7 @@ bool SplitNodesDecoder::DecodeSplitNodes(const nlohmann::json &kernel_json,
namespace {
void TraverseFuncGraphFromCNode(const CNodePtr &cnode, const std::function<void(AnfNodePtr &)> &callback) {
std::unordered_set<AnfNodePtr> visited;
mindspore::HashSet<AnfNodePtr> visited;
std::queue<AnfNodePtr> que;
que.push(cnode);
visited.insert(cnode);
@ -219,8 +219,8 @@ class Area {
~Area() = default;
// Set the external inputs of spy as a Parameter.
void CreateParameters(const FuncGraphPtr &func_graph, std::unordered_map<ParameterPtr, AnfNodePtr> *param_node_map) {
std::unordered_map<AnfNodePtr, ParameterPtr> node_param_map;
void CreateParameters(const FuncGraphPtr &func_graph, mindspore::HashMap<ParameterPtr, AnfNodePtr> *param_node_map) {
mindspore::HashMap<AnfNodePtr, ParameterPtr> node_param_map;
for (auto node : this->spy_cnodes_) {
auto cnode = node->cast<CNodePtr>();
MS_EXCEPTION_IF_NULL(cnode);
@ -232,7 +232,7 @@ class Area {
auto new_param = std::make_shared<Parameter>(func_graph);
new_param->set_abstract(in_node->abstract());
func_graph->add_parameter(new_param);
node_param_map.insert(std::make_pair(in_node, new_param));
(void)node_param_map.emplace(in_node, new_param);
cnode->set_input(i, new_param);
} else {
cnode->set_input(i, it->second);
@ -241,13 +241,13 @@ class Area {
}
this->spy_cnodes_.clear(); // spy list is not useful anymore
for (auto &&elem : node_param_map) {
param_node_map->insert(std::make_pair(elem.second, elem.first));
(void)param_node_map->emplace(elem.second, elem.first);
}
return;
}
// Make a return node for traitor nodes.
void CreateReturnNode(const FuncGraphPtr &func_graph, std::unordered_map<AnfNodePtr, size_t> *tuple_node_index) {
void CreateReturnNode(const FuncGraphPtr &func_graph, mindspore::HashMap<AnfNodePtr, size_t> *tuple_node_index) {
// If there's no traitor in the area, it means that this area is the last part
// of the original FuncGraph, it already contains the original Return node.
if (traitor_nodes_.empty()) {
@ -268,7 +268,7 @@ class Area {
AbstractBasePtrList abstracts;
size_t i = 0;
for (auto &traitor : traitor_nodes_) {
tuple_node_index->insert(std::make_pair(traitor, i++));
(void)tuple_node_index->emplace(traitor, i++);
maketuple_inputs.emplace_back(traitor);
abstracts.emplace_back(traitor->abstract());
}
@ -293,7 +293,7 @@ class Area {
}
}
const std::unordered_set<AnfNodePtr> &nodes() const { return nodes_; }
const mindspore::HashSet<AnfNodePtr> &nodes() const { return nodes_; }
const std::vector<AnfNodePtr> &spy_cnodes() const { return spy_cnodes_; }
private:
@ -301,7 +301,7 @@ class Area {
bool IsExternalCNode(const AnfNodePtr &node) const { return node->isa<CNode>() && this->nodes_.count(node) == 0; }
// nodes in this area
std::unordered_set<AnfNodePtr> nodes_;
mindspore::HashSet<AnfNodePtr> nodes_;
// if a node's output is used by other Area, it's a traitor
std::vector<AnfNodePtr> traitor_nodes_;
// if a node use other Area's output, it's a spy
@ -339,7 +339,7 @@ class AreaGraph {
for (auto index : topo_order_) {
auto &current_area = areas_[index];
auto sub_func_graph = std::make_shared<FuncGraph>();
std::unordered_map<ParameterPtr, AnfNodePtr> param_node_map;
mindspore::HashMap<ParameterPtr, AnfNodePtr> param_node_map;
current_area.CreateParameters(sub_func_graph, &param_node_map);
current_area.CreateReturnNode(sub_func_graph, &node_index_in_returned_tuple_);
@ -407,7 +407,7 @@ class AreaGraph {
// Make a CNode in main graph to hold the sub_func_graph.
CNodePtr CreateMainCNode(const FuncGraphPtr &main_func_graph, const FuncGraphPtr &sub_func_graph,
const std::vector<CNodePtr> &main_cnodes,
const std::unordered_map<ParameterPtr, AnfNodePtr> &param_node_map) {
const mindspore::HashMap<ParameterPtr, AnfNodePtr> &param_node_map) {
TraceGuard guard(std::make_shared<TraceOpt>(sub_func_graph->debug_info()));
AnfNodePtrList main_cnode_inputs = {NewValueNode(sub_func_graph)};
for (const auto &param : sub_func_graph->parameters()) {
@ -452,9 +452,9 @@ class AreaGraph {
// Topological order of areas
std::vector<size_t> topo_order_;
// Map AnfNode to Area id
std::unordered_map<AnfNodePtr, size_t> node_area_map_;
mindspore::HashMap<AnfNodePtr, size_t> node_area_map_;
// Map the nodes to their index if there are multiple value in an area
std::unordered_map<AnfNodePtr, size_t> node_index_in_returned_tuple_;
mindspore::HashMap<AnfNodePtr, size_t> node_index_in_returned_tuple_;
};
class SplitSchemer {
@ -545,7 +545,8 @@ class Splitter {
if (param == nullptr) return;
auto it = this->param_to_main_graph_node_map_.find(param);
if (it != this->param_to_main_graph_node_map_.end()) {
cnode->add_input(it->second);
auto input = it->second;
cnode->add_input(input);
sub_func_graph->add_parameter(param);
// Avoid repeating parameters.
this->param_to_main_graph_node_map_.erase(it);
@ -561,7 +562,7 @@ class Splitter {
auto output = func_graph->output()->cast<CNodePtr>();
MS_EXCEPTION_IF_NULL(output);
const auto &parameters = func_graph->parameters();
std::unordered_map<AnfNodePtr, AnfNodePtr> param_input;
mindspore::HashMap<AnfNodePtr, AnfNodePtr> param_input;
for (size_t i = 0; i < parameters.size(); ++i) {
param_input[parameters[i]] = inputs[i + 1];
}
@ -590,7 +591,7 @@ class Splitter {
// For multiple output kernel, to avoid returning Parameter, the last MakeTuple was distribute to
// a new FuncGraph, just inline the last MakeTuple node.
std::vector<CNodePtr> tmp_subgraph_cnodes;
std::unordered_map<AnfNodePtr, AnfNodePtr> replace_map;
mindspore::HashMap<AnfNodePtr, AnfNodePtr> replace_map;
for (size_t i = 0; i < new_subgraph_cnodes_.size(); ++i) {
if (split_schemer_->NeedInline(cnodes_group_id[i])) {
@ -646,7 +647,7 @@ class Splitter {
// Copy all Parameter and ValueNode that the area used.
void AreaExpand(const Area &area) {
std::unordered_map<AnfNodePtr, AnfNodePtr> old_valuenode_and_param_map;
mindspore::HashMap<AnfNodePtr, AnfNodePtr> old_valuenode_and_param_map;
for (auto sub_node : area.nodes()) {
auto sub_cnode = sub_node->cast<CNodePtr>();
if (sub_cnode == nullptr) continue;
@ -682,7 +683,8 @@ class Splitter {
ParameterPtr param_c = std::make_shared<Parameter>(func);
param_c->set_name(param->name());
param_c->set_abstract(param->abstract());
param_to_main_graph_node_map_[param_c] = param_to_main_graph_node_map_[param];
auto node = param_to_main_graph_node_map_[param];
param_to_main_graph_node_map_[param_c] = node;
return param_c;
}
@ -691,7 +693,7 @@ class Splitter {
std::vector<CNodePtr> new_subgraph_cnodes_; // The cnode list that hold the new sub_func_graph
std::vector<AnfNodePtr> inlined_nodes_;
SplitSchemerPtr split_schemer_;
std::unordered_map<ParameterPtr, AnfNodePtr> param_to_main_graph_node_map_;
mindspore::HashMap<ParameterPtr, AnfNodePtr> param_to_main_graph_node_map_;
};
class CostModelSplitSchemer : public SplitSchemer {
@ -837,7 +839,8 @@ class CostModelSplitSchemer : public SplitSchemer {
MS_EXCEPTION_IF_NULL(output);
if (IsValidKernelNode(output)) {
auto group_id = node_group_[ret_node] = node_group_[output];
auto group_id = node_group_[output];
node_group_[ret_node] = group_id;
split_plan_[group_id].emplace_back(ret_node);
return;
}
@ -846,7 +849,8 @@ class CostModelSplitSchemer : public SplitSchemer {
auto group_id = split_plan_.size();
split_plan_.emplace_back(AnfNodePtrList{output, ret_node});
need_inline_.emplace_back(1);
node_group_[ret_node] = node_group_[output] = group_id;
node_group_[output] = group_id;
node_group_[ret_node] = group_id;
return;
}
}
@ -861,8 +865,9 @@ class CostModelSplitSchemer : public SplitSchemer {
for (const auto &input : cnode->inputs()) {
auto iter = node_group_.find(input);
if (iter != node_group_.end()) {
node_group_[node] = iter->second;
split_plan_[iter->second].emplace_back(node);
auto group_id = iter->second;
node_group_[node] = group_id;
split_plan_[group_id].emplace_back(node);
found = true;
break;
}
@ -885,7 +890,7 @@ class CostModelSplitSchemer : public SplitSchemer {
std::shared_ptr<FuncGraph> func_graph_;
AnfNodePtrList topo_all_nodes_;
AnfNodePtrList topo_valid_nodes_;
std::unordered_map<AnfNodePtr, size_t> node_group_;
mindspore::HashMap<AnfNodePtr, size_t> node_group_;
std::vector<int> need_inline_;
};

View File

@ -19,12 +19,12 @@
#include <algorithm>
#include <functional>
#include <map>
#include <unordered_map>
#include <set>
#include <utility>
#include <string>
#include <iostream>
#include "utils/hash_map.h"
#include "backend/optimizer/graph_kernel/model/node.h"
#include "backend/optimizer/graph_kernel/model/op_node.h"
#include "backend/optimizer/graph_kernel/model/op_register.h"
@ -52,7 +52,7 @@ std::string LiteGraph::Dump() const {
}
const NodePtrList &LiteGraph::GetOrderedNodes() {
std::unordered_map<NodePtr, size_t> outdegrees;
mindspore::HashMap<NodePtr, size_t> outdegrees;
std::function<void(NodePtr)> dfs;
std::set<NodePtr> visited;
dfs = [&dfs, &outdegrees, &visited](const NodePtr &node) {

View File

@ -19,10 +19,10 @@
#include <memory>
#include <vector>
#include <list>
#include <unordered_map>
#include <unordered_set>
#include <stack>
#include <string>
#include "utils/hash_map.h"
#include "utils/hash_set.h"
#include "backend/optimizer/graph_kernel/model/node.h"
#include "backend/optimizer/graph_kernel/model/op_node.h"

View File

@ -20,14 +20,14 @@
#include <functional>
#include <sstream>
#include <vector>
#include <unordered_map>
#include <iostream>
#include <string>
#include "mindspore/core/ir/dtype/type_id.h"
#include "mindspore/core/ir/value.h"
#include "mindspore/core/ir/tensor.h"
#include "mindspore/core/utils/shape_utils.h"
#include "utils/hash_map.h"
#include "ir/dtype/type_id.h"
#include "ir/value.h"
#include "ir/tensor.h"
#include "utils/shape_utils.h"
#include "utils/utils.h"
namespace mindspore::graphkernel::inner {

View File

@ -21,13 +21,13 @@
#include <functional>
#include <sstream>
#include <vector>
#include <unordered_map>
#include <set>
#include <iostream>
#include <utility>
#include <string>
#include <stdexcept>
#include "utils/hash_map.h"
#include "mindspore/core/ir/dtype/type_id.h"
#include "mindspore/core/ir/value.h"
#include "mindspore/core/ir/tensor.h"
@ -45,7 +45,7 @@ enum class NType {
using DFormat = std::string;
using DShape = ShapeVector;
using DAttrs = std::unordered_map<std::string, ValuePtr>;
using DAttrs = mindspore::HashMap<std::string, ValuePtr>;
struct NodeBase {
DShape shape;
@ -95,13 +95,13 @@ class Node : public NodeBase, public std::enable_shared_from_this<Node> {
const DAttrs &attrs() const { return attrs_; }
const NodePtr &input(size_t i) const { return inputs_[i]; }
const NodePtrList &inputs() const { return inputs_; }
const std::unordered_map<Node *, std::set<size_t>> &users() const { return users_; }
const mindspore::HashMap<Node *, std::set<size_t>> &users() const { return users_; }
protected:
std::string name_;
DAttrs attrs_;
NodePtrList inputs_;
std::unordered_map<Node *, std::set<size_t>> users_;
mindspore::HashMap<Node *, std::set<size_t>> users_;
private:
// the nodes' users are only maintained by AddInput/SetInput.

View File

@ -22,10 +22,10 @@
#include <string>
#include <vector>
#include <functional>
#include <unordered_map>
#include <unordered_set>
#include <numeric>
#include "utils/hash_map.h"
#include "utils/hash_set.h"
#include "backend/optimizer/graph_kernel/model/node.h"
namespace mindspore::graphkernel::inner {
@ -122,7 +122,7 @@ tensor::TensorPtr CalcByOperator(const NodePtrList &inputs, const std::string &o
return *static_cast<TM *>(std::static_pointer_cast<inner::ConstTensorNode>(i)->data()->data_c());
});
std::unordered_map<std::string, std::function<TM(const std::vector<TM> &)>> func_map = {
mindspore::HashMap<std::string, std::function<TM(const std::vector<TM> &)>> func_map = {
{"Add", [](const std::vector<TM> &n) { return n[0] + n[1]; }},
{"Sub", [](const std::vector<TM> &n) { return n[0] - n[1]; }},
{"Mul", [](const std::vector<TM> &n) { return n[0] * n[1]; }},

View File

@ -20,9 +20,9 @@
#include <algorithm>
#include <sstream>
#include <string>
#include <unordered_map>
#include <functional>
#include "utils/hash_map.h"
#include "backend/optimizer/graph_kernel/model/node.h"
#include "ir/dtype/type.h"

View File

@ -16,11 +16,11 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_MODEL_OP_REGISTER_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_MODEL_OP_REGISTER_H_
#include <unordered_map>
#include <functional>
#include <string>
#include <memory>
#include "utils/hash_map.h"
#include "backend/optimizer/graph_kernel/model/node.h"
namespace mindspore::graphkernel::inner {
@ -80,7 +80,7 @@ class OpRegistry {
Register("StandardNormal", OP_CREATOR(StandardNormalOp));
}
~OpRegistry() = default;
std::unordered_map<std::string, std::function<PrimOpPtr(const std::string &, const std::string &)>> creators;
mindspore::HashMap<std::string, std::function<PrimOpPtr(const std::string &, const std::string &)>> creators;
};
} // namespace mindspore::graphkernel::inner
#endif

View File

@ -18,7 +18,7 @@
#include <memory>
#include <vector>
#include <string>
#include <unordered_set>
#include "utils/hash_set.h"
#include "base/core_ops.h"
#include "utils/utils.h"
#include "utils/log_adapter.h"
@ -30,7 +30,7 @@ namespace mindspore::graphkernel {
namespace {
bool IsTypeInsensitive(const CNodePtr &node) {
// Nodes that will change the input data type will not seen as type insensitive nodes.
static std::unordered_set<PrimitivePtr> type_insensitive_op_list{
static mindspore::HashSet<PrimitivePtr> type_insensitive_op_list{
prim::kPrimTransData, prim::kPrimTranspose, prim::kPrimExpandDims, prim::kPrimReshape,
prim::kPrimSqueeze, prim::kPrimTile, prim::kPrimNeg, prim::kPrimRelu,
prim::kPrimMaximum, prim::kPrimMinimum, prim::kPrimSelect};
@ -150,7 +150,7 @@ void ReorderOps::SetTypeInsensitiveNodeInputs(const CNodePtr &node, const std::v
new_inputs->resize(0);
}
new_inputs->push_back(node->input(0));
std::unordered_set<size_t> indexes_set(indexes.begin(), indexes.end());
mindspore::HashSet<size_t> indexes_set(indexes.begin(), indexes.end());
size_t idx = 0;
for (size_t i = 1; i < node_inputs_num; ++i) {
size_t data_idx = i - 1;
@ -180,7 +180,7 @@ void ReorderOps::SetTypeInsensitiveNodeInputsInfo(const CNodePtr &node, const st
// node's inputs info at indexes change to input_at_indexes's input or output info
new_inputs_info->inputs_format.resize(0);
new_inputs_info->inputs_type.resize(0);
std::unordered_set<size_t> indexes_set(indexes.begin(), indexes.end());
mindspore::HashSet<size_t> indexes_set(indexes.begin(), indexes.end());
size_t idx = 0;
for (size_t data_idx = 0; data_idx < node_inputs_num - 1; ++data_idx) {
if (indexes_set.find(data_idx) == indexes_set.end()) {

View File

@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -508,7 +508,7 @@ session::KernelWithIndex MemReuseUtil::VisitKernelWithReturnType(const AnfNodePt
auto &cache =
skip_nop_node ? visit_kernel_with_return_type_in0pos_cache_ : visit_kernel_with_return_type_in0pos_skip_nop_cache_;
std::unordered_map<AnfNodePtr, session::KernelWithIndex>::iterator tag_iter;
mindspore::HashMap<AnfNodePtr, session::KernelWithIndex>::iterator tag_iter;
if (auto iter = cache.find(node); iter == cache.end()) {
auto tmp_item =
std::pair<AnfNodePtr, session::KernelWithIndex>{node, AnfAlgo::VisitKernelWithReturnType(node, i, skip_nop_node)};

View File

@ -1,5 +1,5 @@
/**
* Copyright 2019 Huawei Technologies Co., Ltd
* Copyright 2019-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -18,8 +18,8 @@
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_MEM_REUSE_MEM_REUSE_H_
#include <map>
#include <memory>
#include <unordered_map>
#include <vector>
#include "utils/hash_map.h"
#include "backend/optimizer/mem_reuse/kernel_refcount.h"
#include "backend/session/anf_runtime_algorithm.h"
#include "backend/session/kernel_graph.h"
@ -109,8 +109,8 @@ class MemReuseUtil {
bool enable_visit_kernel_cache_{false};
std::unordered_map<AnfNodePtr, session::KernelWithIndex> visit_kernel_with_return_type_in0pos_cache_;
std::unordered_map<AnfNodePtr, session::KernelWithIndex> visit_kernel_with_return_type_in0pos_skip_nop_cache_;
mindspore::HashMap<AnfNodePtr, session::KernelWithIndex> visit_kernel_with_return_type_in0pos_cache_;
mindspore::HashMap<AnfNodePtr, session::KernelWithIndex> visit_kernel_with_return_type_in0pos_skip_nop_cache_;
};
using MemReuseUtilPtr = std::shared_ptr<MemReuseUtil>;

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -370,7 +370,7 @@ void MemReuseChecker::CheckNormalIR(const session::KernelGraph *graph) {
nor_tensor_sizes_.push_back(it->GetSize());
curr_ous.push_back(it->GetPtr());
}
(void)node_ous_.insert(std::make_pair(node.get(), curr_ous));
(void)node_ous_.emplace(node.get(), curr_ous);
std::vector<const void *> curr_ins;
size_t input_num = AnfAlgo::GetInputTensorNum(node);
for (size_t i = 0; i < input_num; ++i) {
@ -390,12 +390,12 @@ void MemReuseChecker::CheckNormalIR(const session::KernelGraph *graph) {
nor_input_tensors_.push_back(device_address->GetPtr());
curr_ins.push_back(device_address->GetPtr());
}
(void)node_ins_.insert(std::make_pair(node.get(), curr_ins));
(void)node_ins_.emplace(node.get(), curr_ins);
}
size_t ou_idx = 0;
for (const auto &ou : nor_output_tensors_) {
(void)ptr_idx_.insert(std::make_pair(ou, ou_idx));
(void)ptr_refs_.insert(std::make_pair(ou, 0));
(void)ptr_idx_.emplace(ou, ou_idx);
(void)ptr_refs_.emplace(ou, 0);
ou_idx++;
}
for (const auto &in : nor_input_tensors_) {

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -17,12 +17,12 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_MEM_REUSE_MEM_SWAP_MANAGER_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_MEM_REUSE_MEM_SWAP_MANAGER_H_
#include <unordered_map>
#include <unordered_set>
#include <map>
#include <memory>
#include <vector>
#include <utility>
#include "utils/hash_map.h"
#include "utils/hash_set.h"
#include "backend/optimizer/mem_reuse/mem_copy_manager.h"
using PerformPair = std::pair<float, float>;
@ -141,10 +141,10 @@ class MemSwapManager {
std::vector<CNodePtr> execution_order_;
std::vector<TensorInfo> ordered_tensors_;
std::unordered_map<void *, KernelExecutionInfo> kernel_execution_info_;
std::unordered_map<void *, std::map<size_t, PerformPair>> kernel_swap_perform_;
mindspore::HashMap<void *, KernelExecutionInfo> kernel_execution_info_;
mindspore::HashMap<void *, std::map<size_t, PerformPair>> kernel_swap_perform_;
// Key: trigger swap kernel, value: MemSwapInfoSet of kernel need to be swapped
std::unordered_map<void *, MemSwapInfoSet> mem_swap_info_map_;
mindspore::HashMap<void *, MemSwapInfoSet> mem_swap_info_map_;
std::vector<HostAddress> host_addrs_list_;
// Key: cache kernel address, value: lists of first time move pos or not

View File

@ -19,9 +19,9 @@
#include <vector>
#include <memory>
#include <utility>
#include <unordered_map>
#include <unordered_set>
#include "utils/hash_map.h"
#include "utils/hash_set.h"
#include "ir/graph_utils.h"
#include "backend/optimizer/common/helper.h"
#include "backend/session/anf_runtime_algorithm.h"
@ -31,10 +31,10 @@
namespace mindspore {
namespace opt {
namespace {
std::unordered_map<std::string, std::unordered_set<std::string>> MarkOp{
mindspore::HashMap<std::string, mindspore::HashSet<std::string>> MarkOp{
{"LSTM", {"LSTMGradWeight", "LSTMGrad", "LSTMGradData"}}};
bool CheckOP(const FuncGraphManagerPtr &manager, const AnfNodePtr &cnode, const std::unordered_set<std::string> &set) {
bool CheckOP(const FuncGraphManagerPtr &manager, const AnfNodePtr &cnode, const mindspore::HashSet<std::string> &set) {
for (const auto &node_index : manager->node_users()[cnode]) {
auto output = node_index.first;
MS_EXCEPTION_IF_NULL(output);

View File

@ -22,7 +22,7 @@ namespace mindspore {
namespace opt {
bool AdjustDependForParallelOptimizerRecomputeAllGather::Run(const FuncGraphPtr &graph) {
MS_EXCEPTION_IF_NULL(graph);
std::unordered_map<int64_t, bool> forward_allgather_recompute_value_in_fusion_group;
mindspore::HashMap<int64_t, bool> forward_allgather_recompute_value_in_fusion_group;
std::vector<AnfNodePtr> node_list = TopoSort(graph->get_return());
std::vector<int64_t> parallel_optimizer_recompute_allgather_fusion_ids;
std::vector<AnfNodePtr> parallel_optimizer_recompute_allgathers;

View File

@ -18,8 +18,8 @@
#include <vector>
#include <set>
#include <memory>
#include <unordered_map>
#include "utils/hash_map.h"
#include "ir/graph_utils.h"
#include "base/core_ops.h"
#include "runtime/device/kernel_info.h"
@ -454,7 +454,7 @@ bool CommunicationOpFusion::Run(const FuncGraphPtr &func_graph) {
const float input_grad_size_num = 0.0;
const float input_grad_time_num = 0.0;
// divide candidate fusion groups with same (group,op,fusion,dtype) attrs, fusion==0 means not fusion
std::unordered_map<std::string, CommunicationOpInfo> candidate_groups;
mindspore::HashMap<std::string, CommunicationOpInfo> candidate_groups;
std::vector<AnfNodePtr> node_list = TopoSort(func_graph->get_return());
for (auto &node : node_list) {
if (node != nullptr && node->isa<CNode>() && AnfAlgo::GetCNodeName(node) == op_name_) {

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,9 +16,9 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_PASS_CONVERT_CONST_INPUT_TO_ATTR_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_PASS_CONVERT_CONST_INPUT_TO_ATTR_H_
#include <string>
#include <unordered_map>
#include <unordered_set>
#include "utils/hash_map.h"
#include "utils/hash_set.h"
#include "ir/anf.h"
#include "backend/optimizer/common/optimizer.h"
@ -32,7 +32,7 @@ class ConvertConstInputToAttr : public PatternProcessPass {
const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override;
private:
std::unordered_map<std::string, std::unordered_set<size_t>> op_input_attr_map_;
mindspore::HashMap<std::string, mindspore::HashSet<size_t>> op_input_attr_map_;
};
} // namespace opt
} // namespace mindspore

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -17,8 +17,8 @@
#include <algorithm>
#include <memory>
#include <unordered_map>
#include "utils/hash_map.h"
#include "backend/session/anf_runtime_algorithm.h"
#include "backend/optimizer/common/helper.h"
#include "backend/session/kernel_graph.h"

View File

@ -16,8 +16,8 @@
#include "backend/optimizer/pass/custom_op_const_input_to_attr.h"
#include <memory>
#include <unordered_set>
#include "utils/hash_set.h"
#include "backend/optimizer/common/helper.h"
#include "backend/session/anf_runtime_algorithm.h"
@ -38,7 +38,7 @@ const AnfNodePtr CustomOpConstInputToAttr::Process(const FuncGraphPtr &, const A
}
auto primitive = AnfAlgo::GetCNodePrimitive(cnode);
MS_EXCEPTION_IF_NULL(primitive);
std::unordered_set<size_t> attr_indices;
mindspore::HashSet<size_t> attr_indices;
GetCustomOpAttrIndex(primitive, &attr_indices);
if (attr_indices.empty()) {
return nullptr;

View File

@ -17,7 +17,7 @@
#include "backend/optimizer/pass/eliminate_redundant_op.h"
#include <memory>
#include <utility>
#include <unordered_map>
#include "utils/hash_map.h"
#include "backend/session/anf_runtime_algorithm.h"
#include "utils/utils.h"
#include "backend/optimizer/common/helper.h"

View File

@ -20,7 +20,7 @@
#include <vector>
#include <string>
#include <utility>
#include <unordered_map>
#include "utils/hash_map.h"
#include "ir/anf.h"
#include "backend/optimizer/common/pattern_engine.h"
#include "backend/optimizer/common/optimizer.h"
@ -44,7 +44,7 @@ class EliminateRedundantOp : public PatternProcessPass {
const AnfNodePtr DoEliminate(const FuncGraphPtr &func_graph, const CNodePtr &cnode) const;
const AnfNodePtr ProcessMatchedNodes(const FuncGraphPtr &func_graph, const CNodePtr &cnode,
const CNodePtr &prev_cnode, std::vector<KernelWithIndex> *pass_vector) const;
std::unordered_map<std::string, RedundantOpPair> redundant_process_map_;
mindspore::HashMap<std::string, RedundantOpPair> redundant_process_map_;
};
} // namespace opt
} // namespace mindspore

View File

@ -1212,7 +1212,7 @@ bool Somas::Assign(const session::KernelGraph *graph) {
MS_EXCEPTION_IF_NULL(tensor);
if (tensor->GetSolverTensorDesc() != nullptr) {
SomasSolverTensorDescPtr pSolverTensor = tensor->GetSolverTensorDesc();
solver_tensor_desc_map_.insert(std::pair<size_t, SomasSolverTensorDescPtr>(pSolverTensor->index_, pSolverTensor));
(void)solver_tensor_desc_map_.emplace(pSolverTensor->index_, pSolverTensor);
}
}
MS_LOG(INFO) << "End Loop to create solver info";
@ -1566,7 +1566,7 @@ std::string Somas::Offline() const {
} else {
std::map<size_t, size_t> dest_infos;
for (SomasNodePtr dest_node : tensor->destinations_) {
dest_infos.insert(std::make_pair(dest_node->GetId(), dest_node->GetStream()->GetId()));
(void)dest_infos.emplace(dest_node->GetId(), dest_node->GetStream()->GetId());
}
for (auto dest_info : dest_infos) {

View File

@ -20,11 +20,11 @@
#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include "utils/hash_map.h"
#include "utils/hash_set.h"
#include "backend/kernel_compiler/tbe/tbe_utils.h"
#include "backend/optimizer/somas/somas_node.h"
#include "backend/optimizer/somas/somas_solver_pre.h"
@ -64,7 +64,7 @@ class Somas {
// hash id
std::string hash_id_;
// Maps
std::unordered_map<size_t, SomasTensorPtr> tensors_map_;
mindspore::HashMap<size_t, SomasTensorPtr> tensors_map_;
std::map<void *, std::vector<SomasNodePtr>> nodes_map_;
std::map<void *, vector<SomasParameterPtr>> parameters_map_;

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -17,17 +17,17 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_SOMAS_SOMAS_NODE_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_SOMAS_SOMAS_NODE_H_
#include "backend/optimizer/somas/somas_stream.h"
#include "backend/optimizer/somas/somas_tensor.h"
#include "backend/optimizer/somas/somas_parameter.h"
#include <memory>
#include <map>
#include <set>
#include <string>
#include <unordered_map>
#include <vector>
#include "utils/hash_map.h"
#include "backend/optimizer/somas/somas_stream.h"
#include "backend/optimizer/somas/somas_tensor.h"
#include "backend/optimizer/somas/somas_parameter.h"
namespace mindspore {
namespace somas {
class SomasStream;
@ -53,7 +53,7 @@ class SomasNode {
std::vector<SomasTensorPtr> workspace_tensors_;
std::map<size_t, SomasParameterPtr> input_parameters_map_;
std::unordered_map<int64_t, size_t> anc_stream_max_order_;
mindspore::HashMap<int64_t, size_t> anc_stream_max_order_;
// Constructors/Destructors
SomasNode(size_t id, NodeType type, SomasStreamPtr stream) : id_(id), stream_(stream), type_(type) {}

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -201,13 +201,12 @@ void FootPrint::addElem(BlockTensor *block, const size_t &offset) {
size_t offset1 = offset;
SomasSolverTensorDescPtr tensor = block->m_start_tensor_;
MS_LOG(DEBUG) << "Allocating block: " << tensor->index_ << " in offset: " << offset;
pair<uint32_t, size_t> sol_offset;
sol_offset.first = block->m_current_sol_;
sol_offset.second = offset;
if (block->offsets_.count(sol_offset.first))
MS_LOG(WARNING) << "Warning addElem: Offset overwritten at solution " << block->m_current_sol_ << " for block "
auto sol_id = block->m_current_sol_;
if (block->offsets_.find(sol_id) != block->offsets_.end()) {
MS_LOG(WARNING) << "Warning addElem: Offset overwritten at solution " << sol_id << " for block "
<< block->m_start_tensor_->index_;
block->offsets_.insert(sol_offset);
}
(void)block->offsets_.emplace(sol_id, offset);
while (tensor) {
tensor->offset_ = offset1;
offset1 += tensor->size_;
@ -234,14 +233,13 @@ bool FastHeuristic::Eval(vector<BlockTensor> *block_tensors_v, const std::shared
for (auto &block : *block_tensors_v) {
if (!block.m_bre_allocate_) {
offset = block.m_start_tensor_->offset_;
pair<uint32_t, size_t> aux;
aux.first = foot_print->m_solId_;
aux.second = block.m_start_tensor_->offset_;
if (block.offsets_.count(aux.first)) {
MS_LOG(WARNING) << "Warning: Offset overwritten at solution " << aux.first << " for block "
auto aux_id = foot_print->m_solId_;
auto aux_offset = block.m_start_tensor_->offset_;
if (block.offsets_.find(aux_id) != block.offsets_.end()) {
MS_LOG(WARNING) << "Warning: Offset overwritten at solution " << aux_id << " for block "
<< block.m_start_tensor_->index_;
}
block.offsets_.insert(aux);
(void)block.offsets_.emplace(aux_id, aux_offset);
continue;
}
bpushed = false;

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -27,17 +27,16 @@
#include <numeric>
#include <set>
#include <stack>
#include <unordered_map>
#include <utility>
#include <vector>
#include "utils/hash_map.h"
#include "backend/optimizer/somas/somas_solver_pre.h"
#include "utils/ms_context.h"
using std::pair;
using std::set;
using std::stack;
using std::unordered_map;
using std::vector;
namespace mindspore {
@ -85,12 +84,12 @@ class Interval {
class BlockTensor {
public:
SomasSolverTensorDescPtr m_start_tensor_;
unordered_map<uint32_t,
std::set<pair<size_t, size_t>, bool (*)(const pair<size_t, size_t> &, const pair<size_t, size_t> &)>>
mindspore::HashMap<
uint32_t, std::set<pair<size_t, size_t>, bool (*)(const pair<size_t, size_t> &, const pair<size_t, size_t> &)>>
offsets_candidates_;
uint32_t m_current_sol_;
bool m_bre_allocate_;
unordered_map<uint32_t, size_t> offsets_;
mindspore::HashMap<uint32_t, size_t> offsets_;
size_t m_size_;
BlockTensor()
: m_start_tensor_(nullptr),

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -20,15 +20,15 @@
#include <ctime>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include <map>
#include "utils/hash_map.h"
#include "backend/optimizer/somas/somas_solver_alg.h"
#include "backend/optimizer/somas/somas_solver_core.h"
#include "backend/optimizer/somas/somas_solver_pre.h"
using mindspore::HashMap;
using std::sort;
using std::unordered_map;
using std::vector;
namespace mindspore {
@ -266,7 +266,7 @@ static bool GreaterSizeGreaterConstraintsGreaterIndex(const BlockTensor &t1, con
void SomasSolverCore::SortTensors() { // need to sort the tensors for Fast Heuristic
MS_LOG(DEBUG) << "Sorting Blocks of tensor, strategy: " << sortingNames[sort_strategy_];
typedef bool (*SortingFunction)(const BlockTensor &, const BlockTensor &);
std::unordered_map<SortingType, SortingFunction> sort_map;
mindspore::HashMap<SortingType, SortingFunction> sort_map;
sort_map[kGreaterSizeSmallerIndex] = &GreaterSizeSmallerIndex;
#ifdef SOMAS_DEBUG
sort_map[kGreaterSizeGreaterIndex] = &GreaterSizeGreaterIndex;
@ -327,13 +327,13 @@ void SomasSolverCore::AppendLifelongTensors() {
MS_LOG(DEBUG) << "Appending lifelong tensors to solution";
size_t offset = upperbound_;
std::map<size_t, SomasSolverTensorDescPtr> lifelongTensors;
for (auto t_ : tensors_) {
if (t_.second->lifelong_) {
lifelongTensors.insert(t_);
for (auto &t : tensors_) {
if (t.second->lifelong_) {
(void)lifelongTensors.emplace(t.first, t.second);
}
}
for (auto t_ : lifelongTensors) {
SomasSolverTensorDescPtr pTensor = t_.second;
for (auto &t : lifelongTensors) {
auto &pTensor = t.second;
pTensor->offset_ = offset;
offset += pTensor->size_;
}

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -21,9 +21,9 @@
#include <chrono>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "utils/hash_map.h"
#include "backend/optimizer/somas/somas_solver_alg.h"
#include "backend/optimizer/somas/somas_solver_pre.h"

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -90,7 +90,7 @@ vector<TensorsDescMap> SomasSolverPre::CreateTensorsMaps(const TensorsDescMap &t
for (size_t sol = 1; sol < total_sol; sol++) {
SomasSolverTensorDesc newDesc = *(pairT.second.get());
SomasSolverTensorDescPtr newDescPtr = std::make_shared<SomasSolverTensorDesc>(newDesc);
vecTensorsMap[sol].insert(std::make_pair(pairT.first, newDescPtr));
(void)vecTensorsMap[sol].emplace(pairT.first, newDescPtr);
}
}
return vecTensorsMap;

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -25,11 +25,11 @@
#include <map>
#include <memory>
#include <stack>
#include <unordered_map>
#include <vector>
#include "utils/hash_map.h"
#include "backend/session/kernel_graph.h"
using std::unordered_map;
using mindspore::HashMap;
using std::vector;
namespace mindspore {
@ -175,7 +175,7 @@ struct SomasSolverTensorDesc {
}
};
using SomasSolverTensorDescPtr = std::shared_ptr<SomasSolverTensorDesc>;
typedef std::unordered_map<size_t, SomasSolverTensorDescPtr> TensorsDescMap;
typedef mindspore::HashMap<size_t, SomasSolverTensorDescPtr> TensorsDescMap;
class SomasSolverPre {
public:
SomasSolverPre() = default;

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2020-2021 Huawei Technologies Co., Ltd
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -19,9 +19,9 @@
#include <memory>
#include <set>
#include <unordered_map>
#include <vector>
#include "utils/hash_map.h"
#include "backend/optimizer/somas/somas_node.h"
#include "backend/optimizer/somas/somas_solver_pre.h"
#include "backend/optimizer/somas/somas_stream.h"
@ -79,7 +79,7 @@ class SomasTensor {
std::set<SomasNodePtr> destinations_;
std::set<SomasStreamPtr> destinationStreams_;
unordered_map<SomasStreamPtr, SomasNodePtr> max_destinations_;
mindspore::HashMap<SomasStreamPtr, SomasNodePtr> max_destinations_;
// Constructors/Destructors
explicit SomasTensor(size_t id, SomasNodePtr source_node, SomasStreamPtr source_stream, size_t real_size,
@ -116,7 +116,7 @@ class SomasTensor {
private:
bool ref_overlap_;
size_t num_constraints_{0};
unordered_map<SomasStreamPtr, size_t> max_destination_id_;
mindspore::HashMap<SomasStreamPtr, size_t> max_destination_id_;
const size_t id_{0};
const SomasNodePtr source_node_;
SomasStreamPtr const source_stream_;

View File

@ -20,11 +20,11 @@
#include <vector>
#include <set>
#include <map>
#include <unordered_map>
#include <tuple>
#include <algorithm>
#include <utility>
#include <string>
#include "utils/hash_map.h"
#include "utils/ms_context.h"
#include "backend/optimizer/trt_pass/trt_converter_context.h"
#include "utils/singleton.h"
@ -105,9 +105,9 @@ CNodePtr BuildMakeTupleNode(const FuncGraphPtr root, const std::map<size_t, size
AnfNodePtrList GraphConverter::GetUsefulArguments(const AnfNodePtrList &arguments, const AnfNodePtrList &parameters,
const AnfNodePtrList &useful_parameters) {
// Present map between formal parameter and actual argument.
std::unordered_map<AnfNodePtr, AnfNodePtr> args_map;
mindspore::HashMap<AnfNodePtr, AnfNodePtr> args_map;
for (size_t i = 0; i < parameters.size(); i++) {
args_map.insert(std::make_pair(parameters[i], arguments[i]));
(void)args_map.emplace(parameters[i], arguments[i]);
}
AnfNodePtrList useful_arguments;

View File

@ -20,10 +20,10 @@
#include <vector>
#include <set>
#include <map>
#include <unordered_map>
#include <algorithm>
#include <utility>
#include <string>
#include "utils/hash_map.h"
#include "utils/ms_context.h"
#include "backend/session/anf_runtime_algorithm.h"
#include "backend/optimizer/trt_pass/trt_op_factory.h"
@ -61,8 +61,8 @@ bool WeightCheck(const AnfNodePtr &node) {
return true;
}
std::unordered_map<AnfNodePtr, NodeInfo> CollectNodeInfo(const FuncGraphPtr &func_graph) {
std::unordered_map<AnfNodePtr, NodeInfo> res;
mindspore::HashMap<AnfNodePtr, NodeInfo> CollectNodeInfo(const FuncGraphPtr &func_graph) {
mindspore::HashMap<AnfNodePtr, NodeInfo> res;
const std::vector<AnfNodePtr> &node_list = TopoSort(func_graph->get_return());
for (size_t i = 0; i < node_list.size(); i++) {

View File

@ -19,9 +19,9 @@
#include <memory>
#include <set>
#include <map>
#include <unordered_map>
#include <tuple>
#include <string>
#include "utils/hash_map.h"
#include "backend/optimizer/common/optimizer.h"
namespace mindspore {
@ -78,7 +78,7 @@ class GraphDependency {
std::string ToString() const;
private:
std::unordered_map<std::string, std::set<std::string>> dependencies_;
mindspore::HashMap<std::string, std::set<std::string>> dependencies_;
};
using Subgraph = std::tuple<FuncGraphPtr, AnfNodePtrList, AnfNodePtrList>;
@ -114,7 +114,7 @@ class GraphPartitioner {
bool NodeGrouping(const FuncGraphPtr &func_graph);
std::map<std::string, AnfNodePtrList> CollectSegments();
std::unordered_map<AnfNodePtr, NodeInfo> node_info_;
mindspore::HashMap<AnfNodePtr, NodeInfo> node_info_;
GraphDependency dependency_;
};
} // namespace opt

View File

@ -16,6 +16,8 @@
#include "backend/optimizer/trt_pass/trt_converter_context.h"
#include <utility>
#include <algorithm>
#include "runtime/device/gpu/trt_loader.h"
#include "backend/optimizer/trt_pass/trt_op_factory.h"
#include "backend/kernel_compiler/gpu/trt/trt_utils.h"
@ -219,7 +221,7 @@ bool TrtConverterContext::LoadLayerInput(const AnfNodePtr &node, std::vector<Lay
std::vector<AnfNodePtr> TrtConverterContext::GetGraphInputs() const {
// Get Anf-graph inputs without weights. All weights were binded to Trt-graph.
std::unordered_map<std::string, AnfNodePtr> graph_inputs;
mindspore::HashMap<std::string, AnfNodePtr> graph_inputs;
for (const auto &input_node : func_graph_->parameters()) {
if (!input_node->isa<Parameter>()) {
continue;
@ -227,7 +229,7 @@ std::vector<AnfNodePtr> TrtConverterContext::GetGraphInputs() const {
auto input = input_node->cast<ParameterPtr>();
if (!AnfAlgo::IsParameterWeight(input)) {
graph_inputs.insert(std::make_pair(input->name(), input_node));
(void)graph_inputs.emplace(input->name(), input_node);
}
}
@ -260,7 +262,7 @@ std::tuple<std::map<size_t, size_t>, std::vector<session::KernelWithIndex>> TrtC
size_t pos = name.find_first_not_of("return_output_");
size_t anf_index = atoi(name.substr(pos).c_str());
anf_trt_index_map.insert(std::make_pair(anf_index, trt_index));
(void)anf_trt_index_map.emplace(anf_index, trt_index);
trt_output_list[trt_index] = anf_output_list[anf_index];
trt_index++;
}

View File

@ -17,13 +17,13 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTITIMIZER_TRT_CONVERTER_CONTEXT_H_
#define MINDSPORE_CCSRC_BACKEND_OPTITIMIZER_TRT_CONVERTER_CONTEXT_H_
#include <unordered_map>
#include <vector>
#include <string>
#include <memory>
#include <tuple>
#include <map>
#include <NvInfer.h>
#include "utils/hash_map.h"
#include "base/base.h"
#include "ir/anf.h"
#include "backend/session/anf_runtime_algorithm.h"
@ -90,7 +90,7 @@ class TrtConverterContext : public std::enable_shared_from_this<TrtConverterCont
std::shared_ptr<nvinfer1::ICudaEngine> engine_;
// Cache (AnfNode + output_index : ILayer output).
std::unordered_map<AnfNodePtr, std::unordered_map<size_t, LayerInput>> output_map_;
mindspore::HashMap<AnfNodePtr, mindspore::HashMap<size_t, LayerInput>> output_map_;
std::vector<std::shared_ptr<tensor::Tensor>> temp_weights_;
};
} // namespace opt

Some files were not shown because too many files have changed in this diff Show More