!18008 [Auto parallel] [Clean code] Fix some codestyle warnings on master

Merge pull request !18008 from Xiaoda/71-fix-some-codestyle-warnings-on-master
i-robot 2021-06-09 11:52:46 +08:00 committed by Gitee
commit 63e5edee6f
10 changed files with 131 additions and 133 deletions

File 1 of 10

@@ -111,8 +111,8 @@ CNodeCostMap AllreduceFusion::FindCNode(const AnfNodePtr &from, uint64_t recursi
       return cnode_dist;
     } else {
       auto cnode_dist_next = FindNextCNodes(cnode, recursive_times + 1);
-      for (auto &ele : cnode_dist_next) {
-        cnode_dist[ele.first] = cost + ele.second;
+      for (auto &ele_next : cnode_dist_next) {
+        cnode_dist[ele_next.first] = cost + ele_next.second;
       }
     }
   } else {
@@ -235,9 +235,9 @@ Status FindMirrorAndSetFusion(const AnfNodePtr &para, int64_t fusion) {
     return SUCCESS;
   }
   if (mirror_cnodes.size() > 2) {
-    for (auto &mirror_cnode : mirror_cnodes) {
-      MS_EXCEPTION_IF_NULL(mirror_cnode);
-      MS_LOG(INFO) << mirror_cnode->DebugString();
+    for (auto &mirror_cnode_1 : mirror_cnodes) {
+      MS_EXCEPTION_IF_NULL(mirror_cnode_1);
+      MS_LOG(INFO) << mirror_cnode_1->DebugString();
     }
     MS_EXCEPTION_IF_NULL(para);
     MS_LOG(ERROR) << para->ToString() << " FindMirror is more than 2. " << mirror_cnodes.size()
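The two renames above (`ele` to `ele_next`, `mirror_cnode` to `mirror_cnode_1`) address the variable-name-reuse warnings this PR targets: the same identifier was declared again in a nearby or enclosing scope. A minimal sketch of the shadowing case such checkers flag (illustrative names, assuming GCC/Clang with `-Wshadow` or an equivalent static-analysis rule):

```cpp
#include <map>

int main() {
  std::map<int, double> outer{{1, 1.0}}, inner{{2, 2.0}};
  double sum = 0.0;
  for (auto &ele : outer) {
    for (auto &ele : inner) {  // warning: shadows the outer 'ele'
      sum += ele.second;
    }
    sum += ele.second;  // refers to the outer 'ele' again
  }
  return sum > 0.0 ? 0 : 1;
}
```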

File 2 of 10

@@ -19,6 +19,7 @@
 #include <functional>
 #include "ir/anf.h"
 #include "frontend/parallel/allreduce_fusion/allreduce_node.h"
+#include "frontend/parallel/ops_info/ops_utils.h"
 #include "utils/log_adapter.h"
 namespace mindspore {
@@ -118,7 +119,7 @@ std::pair<std::vector<AnfNodePtr>, double> AllreduceGraph::GetParaByParaSize(dou
   double cur_para_size = 0;
   double from = to;
   for (auto &arnode : arnode_vec_) {
-    if (arnode.depend_feat_size() != max_ && arnode.depend_feat_size() >= to) {
+    if ((arnode.depend_feat_size() - max_ <= EPS) && arnode.depend_feat_size() >= to) {
       continue;
     }
     if (para_size > 0 && cur_para_size >= para_size && arnode.depend_feat_size() < from) {
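The old condition compared two `double`s with `!=`, which exact-floating-point-comparison checks reject; the rewrite uses the `EPS` tolerance that the newly included `ops_utils.h` provides. A sketch of the usual tolerance idiom (the `EPS` value here is illustrative; note the committed code uses the one-sided form `a - b <= EPS` rather than the symmetric one):

```cpp
#include <cmath>

constexpr double EPS = 1e-6;  // illustrative; the real constant lives in ops_utils.h

// Symmetric tolerance test: treats a and b as equal when they differ by at most EPS.
bool AlmostEqual(double a, double b) { return std::fabs(a - b) <= EPS; }

int main() {
  double depend_feat_size = 0.1 + 0.2;  // 0.30000000000000004 in IEEE-754
  return AlmostEqual(depend_feat_size, 0.3) ? 0 : 1;  // equal under tolerance
}
```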

File 3 of 10

@@ -42,7 +42,9 @@ bool StepAllreduceFusion(const FuncGraphPtr &root, const opt::OptimizerPtr &opti
 #if defined(_WIN32) || defined(_WIN64)
   auto start_time = std::chrono::steady_clock::now();
 #else
-  struct timeval start_time, end_time;
+  struct timeval start_time {
+    0
+  }, end_time{0};
   (void)gettimeofday(&start_time, nullptr);
 #endif
   MS_LOG(INFO) << "Now entering allreduce fusion";

File 4 of 10

@@ -40,7 +40,7 @@ Status GetStrategy(const CostGraphPtr &graph) {
       auto r_edge = node->GetAliveSuccEdges()[0];
       auto n_edge = graph->EliminationOp(node);
       auto elimi_op = std::make_shared<OpElimination>(n_edge, l_edge, node, r_edge);
-      eliminations.emplace_back(std::move(elimi_op));
+      (void)eliminations.emplace_back(std::move(elimi_op));
     }
     if (!flag) {
       auto edges = graph->CheckEdgeElimination();
@@ -49,7 +49,7 @@ Status GetStrategy(const CostGraphPtr &graph) {
         flag = true;
         auto new_edge = graph->EliminationEdges(edges);
         auto elimi_edge = std::make_shared<EdgeElimination>(new_edge, edges);
-        eliminations.emplace_back(std::move(elimi_edge));
+        (void)eliminations.emplace_back(std::move(elimi_edge));
       }
     }
     if (!flag) {
@@ -60,7 +60,7 @@ Status GetStrategy(const CostGraphPtr &graph) {
         auto succ_edge = merge_node->GetAliveSuccEdges()[0];
         auto target_node = graph->EliminationMerge(merge_node);
         auto elimi_merge = std::make_shared<MergeElimination>(merge_node, succ_edge, target_node);
-        eliminations.emplace_back(std::move(elimi_merge));
+        (void)eliminations.emplace_back(std::move(elimi_merge));
       }
     }
     if (!flag) {
@@ -71,7 +71,7 @@ Status GetStrategy(const CostGraphPtr &graph) {
         auto prev_edge = contracted_node->GetAlivePrevEdges()[0];
         auto target_node = graph->EliminationContract(contracted_node);
         auto elimi_contract = std::make_shared<ContractElimination>(target_node, prev_edge, contracted_node);
-        eliminations.emplace_back(std::move(elimi_contract));
+        (void)eliminations.emplace_back(std::move(elimi_contract));
       }
     }
     if (!flag) {
@@ -95,7 +95,7 @@ Status GetStrategy(const CostGraphPtr &graph) {
         auto right_node = l_r_edge->next_operator();
         auto elimi_tri =
           std::make_shared<TriangleElimination>(eliminated_node, left_edge, left_node_cpy, right_edge, right_node);
-        eliminations.emplace_back(std::move(elimi_tri));
+        (void)eliminations.emplace_back(std::move(elimi_tri));
       }
     }
     if (!flag) {
@@ -110,7 +110,7 @@ Status GetStrategy(const CostGraphPtr &graph) {
           succ_nodes.push_back(succ_edges[i]->next_operator());
         }
         auto elimi_star = std::make_shared<StarElimination>(star_center, succ_edges, succ_nodes);
-        eliminations.emplace_back(std::move(elimi_star));
+        (void)eliminations.emplace_back(std::move(elimi_star));
       }
     }
   }
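Every `eliminations.emplace_back(...)` gains a `(void)` cast because, since C++17, `emplace_back` returns a reference to the inserted element, and the discarded-return-value checks this PR satisfies flag silently ignored returns. A minimal sketch:

```cpp
#include <memory>
#include <vector>

int main() {
  std::vector<std::shared_ptr<int>> eliminations;
  auto elimi_op = std::make_shared<int>(42);
  // C++17 emplace_back returns a reference; the void cast discards it explicitly.
  (void)eliminations.emplace_back(std::move(elimi_op));
  return eliminations.size() == 1 ? 0 : 1;
}
```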
@@ -135,101 +135,99 @@ Status RecoverStrategy(std::vector<EliminationPtr> eliminations) {
   const auto triangle_star_overwrite = CostModelContext::GetInstance()->triangle_star_strategy_overwrite();
   for (rit = eliminations.rbegin(); rit != eliminations.rend(); ++rit) {
     if ((*rit)->isa<OpElimination>()) {
-      auto elimination = (*rit)->cast<OpEliminationPtr>();
-      auto e = elimination->new_edge_;
-      auto w = elimination->op_;
-      auto left_edge = elimination->left_edge_;
-      auto right_edge = elimination->right_edge_;
-      auto decision = e->selected_cost()->decision_ptr_->cast<OpEliminationDecisionPtr>();
-      w->SetSelectedStrategyAndCost(decision->op_strategy_, decision->middle_cost_);
-      left_edge->set_selected_cost(decision->left_cost_);
-      right_edge->set_selected_cost(decision->right_cost_);
+      auto elimination_op = (*rit)->cast<OpEliminationPtr>();
+      auto e = elimination_op->new_edge_;
+      auto w = elimination_op->op_;
+      auto left_edge_op = elimination_op->left_edge_;
+      auto right_edge_op = elimination_op->right_edge_;
+      auto decision_op = e->selected_cost()->decision_ptr_->cast<OpEliminationDecisionPtr>();
+      w->SetSelectedStrategyAndCost(decision_op->op_strategy_, decision_op->middle_cost_);
+      left_edge_op->set_selected_cost(decision_op->left_cost_);
+      right_edge_op->set_selected_cost(decision_op->right_cost_);
       MS_LOG(INFO) << "Recover opElimination succeeded.";
     } else if ((*rit)->isa<EdgeElimination>()) {
-      auto elimination = (*rit)->cast<EdgeEliminationPtr>();
-      auto new_edge = elimination->new_edge_;
-      MS_EXCEPTION_IF_NULL(new_edge);
-      auto &edges = elimination->edges_;
-      auto decision = new_edge->selected_cost()->decision_ptr_->cast<EdgeEliminationDecisionPtr>();
+      auto elimination_edge = (*rit)->cast<EdgeEliminationPtr>();
+      auto new_edge = elimination_edge->new_edge_;
+      auto &edges = elimination_edge->edges_;
+      auto decision_edge = new_edge->selected_cost()->decision_ptr_->cast<EdgeEliminationDecisionPtr>();
       for (size_t j = 0; j < edges.size(); ++j) {
         MS_EXCEPTION_IF_NULL(edges[j]);
-        edges[j]->set_selected_cost(decision->edges_cost_list_[j]);
+        edges[j]->set_selected_cost(decision_edge->edges_cost_list_[j]);
       }
       MS_LOG(INFO) << "Recover edgeElimination succeeded.";
     } else if ((*rit)->isa<MergeElimination>()) {
-      auto elimination = (*rit)->cast<MergeEliminationPtr>();
-      auto target_node = elimination->target_node_;
-      MS_EXCEPTION_IF_NULL(target_node);
-      auto merged_node = elimination->merged_node_;
-      MS_EXCEPTION_IF_NULL(merged_node);
-      auto merged_edge = elimination->dir_edge_;
-      MS_EXCEPTION_IF_NULL(merged_edge);
-      MS_EXCEPTION_IF_NULL(target_node->selected_cost());
-      MS_EXCEPTION_IF_NULL(target_node->selected_cost()->decision_ptr_);
-      auto decision = target_node->selected_cost()->decision_ptr_->cast<MergeEliminationDecisionPtr>();
+      auto elimination_merge = (*rit)->cast<MergeEliminationPtr>();
+      auto target_node_m = elimination_merge->target_node_;
+      auto merged_node = elimination_merge->merged_node_;
+      auto merged_edge = elimination_merge->dir_edge_;
+      MS_EXCEPTION_IF_NULL(target_node_m->selected_cost());
+      MS_EXCEPTION_IF_NULL(target_node_m->selected_cost()->decision_ptr_);
+      auto decision = target_node_m->selected_cost()->decision_ptr_->cast<MergeEliminationDecisionPtr>();
       merged_node->SetSelectedStrategyAndCost(decision->merged_op_strategy_, decision->merged_op_cost_);
       merged_edge->set_selected_cost(decision->edge_cost_);
-      target_node->SetSelectedStrategyAndCost(decision->target_op_strategy_, decision->target_op_cost_);
+      target_node_m->SetSelectedStrategyAndCost(decision->target_op_strategy_, decision->target_op_cost_);
       MS_LOG(INFO) << "Recover mergeElimination succeeded.";
     } else if ((*rit)->isa<ContractElimination>()) {
-      auto elimination = (*rit)->cast<ContractEliminationPtr>();
-      auto target_node = elimination->target_node_;
-      auto contracted_node = elimination->contracted_node_;
-      auto contracted_edge = elimination->dir_edge_;
-      auto decision = target_node->selected_cost()->decision_ptr_->cast<ContractEliminationDecisionPtr>();
-      contracted_node->SetSelectedStrategyAndCost(decision->contracted_op_strategy_, decision->contracted_op_cost_);
-      contracted_edge->set_selected_cost(decision->edge_cost_);
-      target_node->SetSelectedStrategyAndCost(decision->target_op_strategy_, decision->target_cost_);
+      auto elimination_cont = (*rit)->cast<ContractEliminationPtr>();
+      auto target_node = elimination_cont->target_node_;
+      auto contracted_node = elimination_cont->contracted_node_;
+      auto contracted_edge = elimination_cont->dir_edge_;
+      auto decision_cont = target_node->selected_cost()->decision_ptr_->cast<ContractEliminationDecisionPtr>();
+      contracted_node->SetSelectedStrategyAndCost(decision_cont->contracted_op_strategy_,
+                                                  decision_cont->contracted_op_cost_);
+      contracted_edge->set_selected_cost(decision_cont->edge_cost_);
+      target_node->SetSelectedStrategyAndCost(decision_cont->target_op_strategy_, decision_cont->target_cost_);
       MS_LOG(INFO) << "Recover contractElimination succeeded.";
     } else if ((*rit)->isa<TriangleElimination>()) {
-      auto elimination = (*rit)->cast<TriangleEliminationPtr>();
-      auto left_node = elimination->left_node_;
-      auto left_edge = elimination->left_edge_;
-      auto eliminated_node = elimination->eliminated_node_;
-      auto right_edge = elimination->right_edge_;
-      auto right_node = elimination->right_node_;
-      auto decision = left_node->selected_cost()->decision_ptr_->cast<TriangleEliminationDecisionPtr>();
+      auto elimination_tri = (*rit)->cast<TriangleEliminationPtr>();
+      auto left_node = elimination_tri->left_node_;
+      auto left_edge = elimination_tri->left_edge_;
+      auto eliminated_node = elimination_tri->eliminated_node_;
+      auto right_edge_tri = elimination_tri->right_edge_;
+      auto right_node = elimination_tri->right_node_;
+      auto decision_tri = left_node->selected_cost()->decision_ptr_->cast<TriangleEliminationDecisionPtr>();
 
-      eliminated_node->SetSelectedStrategyAndCost(decision->eliminated_op_strategy_, decision->eliminated_op_cost_);
-      left_edge->set_selected_cost(decision->left_edge_cost_);
-      right_edge->set_selected_cost(decision->right_edge_cost_);
+      eliminated_node->SetSelectedStrategyAndCost(decision_tri->eliminated_op_strategy_,
+                                                  decision_tri->eliminated_op_cost_);
+      left_edge->set_selected_cost(decision_tri->left_edge_cost_);
+      right_edge_tri->set_selected_cost(decision_tri->right_edge_cost_);
       // 'left_node' recovers the strategy.
-      left_node->SetSelectedStrategyAndCost(decision->left_node_strategy_, decision->left_node_cost_);
+      left_node->SetSelectedStrategyAndCost(decision_tri->left_node_strategy_, decision_tri->left_node_cost_);
       if (triangle_star_overwrite) {
         // 'right_node' recovers the strategy.
         MS_LOG(INFO) << "Overwrite the right-node: " << right_node->name() << " in recovering triangle elimination.";
-        right_node->SetSelectedStrategyAndCost(decision->right_node_strategy_, decision->right_node_cost_);
+        right_node->SetSelectedStrategyAndCost(decision_tri->right_node_strategy_, decision_tri->right_node_cost_);
       } else {
         // In this case, 'right_node' is not overwritten strategy, and it checks strategy consistency.
-        right_node->CheckSelectedStrategy(decision->right_node_strategy_);
+        right_node->CheckSelectedStrategy(decision_tri->right_node_strategy_);
       }
       MS_LOG(INFO) << "Recover triangleElimination succeeded.";
     } else if ((*rit)->isa<StarElimination>()) {
-      auto elimination = (*rit)->cast<StarEliminationPtr>();
-      auto merged_node = elimination->eliminated_node_;
-      auto succ_edges = elimination->succ_edges_;
-      auto succ_nodes = elimination->succ_ops_;
+      auto elimination_star = (*rit)->cast<StarEliminationPtr>();
+      auto merged_node_star = elimination_star->eliminated_node_;
+      auto succ_edges = elimination_star->succ_edges_;
+      auto succ_nodes = elimination_star->succ_ops_;
       // decision is hidden in succ_nodes[0]
-      auto decision = succ_nodes[0]->selected_cost()->decision_ptr_->cast<StarEliminationDecisionPtr>();
-      merged_node->SetSelectedStrategyAndCost(decision->eliminated_op_strategy_, decision->eliminated_op_cost_);
+      auto decision_star = succ_nodes[0]->selected_cost()->decision_ptr_->cast<StarEliminationDecisionPtr>();
+      merged_node_star->SetSelectedStrategyAndCost(decision_star->eliminated_op_strategy_,
+                                                   decision_star->eliminated_op_cost_);
       for (size_t i = 0; i < succ_edges.size(); ++i) {
-        succ_edges[i]->set_selected_cost(decision->succ_edges_cost_list_[i]);
+        succ_edges[i]->set_selected_cost(decision_star->succ_edges_cost_list_[i]);
       }
       MS_EXCEPTION_IF_NULL(succ_nodes[0]);
-      MS_EXCEPTION_IF_NULL(decision->succ_ops_stra_list_[0]);
-      MS_EXCEPTION_IF_NULL(decision->succ_ops_cost_list_[0]);
+      MS_EXCEPTION_IF_NULL(decision_star->succ_ops_stra_list_[0]);
+      MS_EXCEPTION_IF_NULL(decision_star->succ_ops_cost_list_[0]);
       // Star is eliminated into 'succ_nodes[0]'
-      succ_nodes[0]->SetSelectedStrategyAndCost(decision->succ_ops_stra_list_[0], decision->succ_ops_cost_list_[0]);
+      succ_nodes[0]->SetSelectedStrategyAndCost(decision_star->succ_ops_stra_list_[0],
+                                                decision_star->succ_ops_cost_list_[0]);
       for (size_t k = 1; k < succ_nodes.size(); ++k) {
         if (triangle_star_overwrite) {
           // 'succ_nodes[k]' is overwritten strategy and cost.
-          succ_nodes[k]->SetSelectedStrategyAndCost(decision->succ_ops_stra_list_[k], decision->succ_ops_cost_list_[k]);
+          succ_nodes[k]->SetSelectedStrategyAndCost(decision_star->succ_ops_stra_list_[k],
+                                                    decision_star->succ_ops_cost_list_[k]);
         } else {
          // In this case, 'succ_nodes[k]' is NOT overwritten strategy and cost, however, it checks the strategy.
-          succ_nodes[k]->CheckSelectedStrategy(decision->succ_ops_stra_list_[k]);
+          succ_nodes[k]->CheckSelectedStrategy(decision_star->succ_ops_stra_list_[k]);
        }
      }
      MS_LOG(INFO) << "Recover starElimination succeeded.";

File 5 of 10

@@ -112,7 +112,7 @@ Status Edge::InitEdgeCost() {
 }
 Status Edge::GetRedistributionCost(const TensorLayout &prev_op_output_layout, const TensorLayout &next_op_input_layout,
-                                   size_t type_length, TypePtr type, CostPtr *cost) {
+                                   size_t type_length, const TypePtr &type, CostPtr *cost) {
   MS_EXCEPTION_IF_NULL(prev_op_);
   MS_EXCEPTION_IF_NULL(cost);
   RankList dev_list = prev_op_->stage_device_list();
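`TypePtr` is a `std::shared_ptr` alias, so taking it by value copied the pointer and bumped its atomic reference count on every call; the signature now takes a `const` reference, and the header declaration in the next file is updated to match. A sketch with stand-in types:

```cpp
#include <memory>

struct Type {};
using TypePtr = std::shared_ptr<Type>;  // stand-in for MindSpore's TypePtr alias

// By value: copies the shared_ptr, with atomic refcount increments/decrements.
double CostByValue(TypePtr type) { return type ? 1.0 : 0.0; }

// By const reference: no copy, no refcount traffic; the callee can still read it.
double CostByConstRef(const TypePtr &type) { return type ? 1.0 : 0.0; }

int main() {
  TypePtr t = std::make_shared<Type>();
  return CostByValue(t) == CostByConstRef(t) ? 0 : 1;
}
```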

File 6 of 10

@@ -86,7 +86,7 @@ class Edge {
   // and the input tensor layout of v, return the redistribution cost,
   // and the op_list to carry out the redistribution.
   Status GetRedistributionCost(const TensorLayout &prev_op_output_layout, const TensorLayout &next_op_input_layout,
-                               size_t, TypePtr type, CostPtr *cost);
+                               size_t, const TypePtr &type, CostPtr *cost);
   void set_pre_op_output(const std::vector<std::pair<std::shared_ptr<Strategy>, std::vector<TensorInfo>>> &output_set) {
     pre_op_output_ = output_set;

File 7 of 10

@@ -322,7 +322,7 @@ CostPtr CostGraph::SelectCostWithMinTrainingTime(const CostPtrList &cost_list, d
 }
 CostPtrList CostGraph::SelectCostListWithMinTrainingTimeMultiple(const std::vector<CostPtrList> &all_cost_list,
-                                                                 double available_memory) {
+                                                                 double available_memory) const {
   CostPtrList selected_cost_list(all_cost_list.size(), nullptr);
   double minimum = DBL_MAX, total_memory = 0.0;
   CostPtrList ret(all_cost_list.size(), nullptr);
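The method only reads `all_cost_list` and the graph's state, so it can be declared `const`; the matching declaration in `graph_costmodel.h` changes further below. A minimal sketch of the const-correctness rule:

```cpp
#include <vector>

class CostGraph {
 public:
  // const: pure selection over the inputs, no mutation of the graph
  double SelectMinimum(const std::vector<double> &cost_list) const {
    double minimum = cost_list.empty() ? 0.0 : cost_list[0];
    for (double cost : cost_list) {
      if (cost < minimum) minimum = cost;
    }
    return minimum;
  }
};

int main() {
  const CostGraph graph;  // const instances can now call the method
  return graph.SelectMinimum({3.0, 1.0, 2.0}) == 1.0 ? 0 : 1;
}
```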
@@ -389,8 +389,8 @@ Status CostGraph::SearchStrategyForMultiNodeFinalGraph(const std::vector<Operato
     MS_EXCEPTION_IF_NULL(one_component);
     if (one_component->GetOperators().size() == 1) {
       MS_LOG(INFO) << "There are 1 operator in a component in the final graph.";
-      auto cost_list = one_component->CreateFinalSingleCostList(one_component->GetOperators()[0]);
-      all_list.push_back(cost_list);
+      auto cost_list_1 = one_component->CreateFinalSingleCostList(one_component->GetOperators()[0]);
+      all_list.push_back(cost_list_1);
     } else if (one_component->GetOperators().size() == 2) {
       MS_LOG(INFO) << "There are 2 operators in a component in the final graph.";
       OperatorInfoPtr u, v;
@@ -430,8 +430,8 @@ Status CostGraph::SearchStrategyForMultiNodeFinalGraph(const std::vector<Operato
     MS_EXCEPTION_IF_NULL(connected_components[k]);
     if (connected_components[k]->GetOperators().size() == 1) {
       auto u = connected_components[k]->GetOperators()[0];
-      auto decision = selected_cost->decision_ptr_->cast<FinalSingleDecisionPtr>();
-      u->SetSelectedStrategyAndCost(decision->u_strategy_, decision->u_cost_);
+      auto decision_f = selected_cost->decision_ptr_->cast<FinalSingleDecisionPtr>();
+      u->SetSelectedStrategyAndCost(decision_f->u_strategy_, decision_f->u_cost_);
       MS_LOG(INFO) << "Searching the strategy for the component " << k << " final graph ended.";
     } else if (connected_components[k]->GetOperators().size() == 2) {
       OperatorInfoPtr u = nullptr, v = nullptr;
@@ -627,9 +627,9 @@ std::vector<std::shared_ptr<Edge>> CostGraph::CheckEdgeElimination() const {
     MS_EXCEPTION_IF_NULL(op);
     if (!op->is_alive()) continue;
     std::map<void *, int64_t> count;
-    for (auto &edge : op->GetAliveSuccEdges()) {
-      MS_EXCEPTION_IF_NULL(edge);
-      auto v = edge->next_operator();
+    for (auto &edge_su : op->GetAliveSuccEdges()) {
+      MS_EXCEPTION_IF_NULL(edge_su);
+      auto v = edge_su->next_operator();
       count[v.get()]++;
     }
     for (auto &pair : count) {
@@ -754,7 +754,7 @@ std::pair<std::vector<EdgePtr>, std::vector<EdgePtr>> UpdateEdgesIncidentToNodes
     // replace the old successive edges with the new ones.
     op1->ReplaceSuccEdge(ith_edge->next_operator(), new_edge);
     ith_edge->next_operator()->ReplacePreEdge(op1, new_edge);
-    (void)op1_new_succ_edges->erase(op1_new_succ_edges->begin() + i);
+    (void)op1_new_succ_edges->erase(op1_new_succ_edges->begin() + SizeToLong(i));
     (void)op1_new_succ_edges->emplace(op1_new_succ_edges->begin() + i, new_edge);
   }
   for (size_t i = 0; i < op2_old_succ_edges->size(); ++i) {
@@ -779,8 +779,8 @@ std::pair<std::vector<EdgePtr>, std::vector<EdgePtr>> UpdateEdgesIncidentToNodes
     // replace the old successive edges with the new ones.
     destination->ReplacePreEdge(op2, new_edge);
     op1->AddSuccEdge(new_edge);
-    (void)op2_new_succ_edges->erase(op2_new_succ_edges->begin() + i);
-    (void)op2_new_succ_edges->emplace(op2_new_succ_edges->begin() + i, new_edge);
+    (void)op2_new_succ_edges->erase(op2_new_succ_edges->begin() + SizeToLong(i));
+    (void)op2_new_succ_edges->emplace(op2_new_succ_edges->begin() + SizeToLong(i), new_edge);
   }
   return std::make_pair(*op1_new_succ_edges, *op2_new_succ_edges);
 }
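`begin() + i` adds an unsigned `size_t` to an iterator whose difference type is signed, which sign-conversion checkers flag; `SizeToLong` makes the conversion explicit and checked (the `LongToSize` calls further down guard `operator[]` the same way in the other direction). A sketch with a stand-in for MindSpore's helper:

```cpp
#include <cstdint>
#include <limits>
#include <stdexcept>
#include <vector>

// Stand-in for MindSpore's checked size_t -> int64_t conversion helper.
int64_t SizeToLong(size_t v) {
  if (v > static_cast<size_t>(std::numeric_limits<int64_t>::max())) {
    throw std::runtime_error("size_t value does not fit in int64_t");
  }
  return static_cast<int64_t>(v);
}

int main() {
  std::vector<int> edges{1, 2, 3};
  size_t i = 1;
  (void)edges.erase(edges.begin() + SizeToLong(i));  // explicit signed offset
  return edges.size() == 2 ? 0 : 1;
}
```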
@@ -1477,11 +1477,11 @@ Status CostGraph::InitReshapeStrategy() {
     if (ops_[i]->name().find(RESHAPEINFO) != std::string::npos) {
       auto reshape_info = std::dynamic_pointer_cast<ReshapeInfo>(ops_[i]);
       auto in_edges = GetOriginalPrevEdges(ops_[i]);
-      auto pre_iter = std::find_if(in_edges.begin(), in_edges.end(), [&](std::shared_ptr<Edge> edge) {
+      auto pre_iter = std::find_if(in_edges.begin(), in_edges.end(), [&](const std::shared_ptr<Edge> &edge) {
         return edge->prev_operator()->name() == reshape_info->pre_operator_name();
       });
       auto out_edges = GetOriginalNextEdges(ops_[i]);
-      auto next_iter = std::find_if(out_edges.begin(), out_edges.end(), [&](std::shared_ptr<Edge> edge) {
+      auto next_iter = std::find_if(out_edges.begin(), out_edges.end(), [&](const std::shared_ptr<Edge> &edge) {
         return edge->next_operator()->name() == reshape_info->next_operator_name();
       });
       bool reshape_is_first_op = reshape_info->pre_operator_name() == reshape_info->name();
@@ -1495,10 +1495,10 @@ Status CostGraph::InitReshapeStrategy() {
       std::shared_ptr<OperatorInfo> pre_op_info;
       if (reshape_is_first_op) {
         pre_op_info = reshape_info;
-        pre_info = pre_op_info->inputs_tensor_info()[pre_index];
+        pre_info = pre_op_info->inputs_tensor_info()[LongToSize(pre_index)];
       } else {
         pre_op_info = (*pre_iter)->prev_operator();
-        pre_info = pre_op_info->outputs_tensor_info()[pre_index];
+        pre_info = pre_op_info->outputs_tensor_info()[LongToSize(pre_index)];
       }
       reshape_info->SetInputLayout(pre_info.tensor_layout());
       if (pre_iter != in_edges.end()) {
@@ -1515,7 +1515,8 @@ Status CostGraph::InitReshapeStrategy() {
       if (next_iter != out_edges.end()) {
         MS_LOG(DEBUG) << "Set reshape output layout by " << reshape_info->next_operator_name();
         int64_t next_index = reshape_info->next_operator_index();
-        reshape_info->SetOutputLayout((*next_iter)->next_operator()->inputs_tensor_info()[next_index].tensor_layout());
+        reshape_info->SetOutputLayout(
+          (*next_iter)->next_operator()->inputs_tensor_info()[LongToSize(next_index)].tensor_layout());
       }
       if (reshape_info->Init(nullptr) != SUCCESS) {
         return FAILED;
@@ -1531,9 +1532,9 @@ Status CostGraph::InitSelectedStrategy() {
     if (op->name().find(RESHAPEINFO) != std::string::npos) {
       continue;
     }
-    auto result = op->InitSelectedStrategy(op->selected_strategy());
-    if (result != SUCCESS) {
-      return result;
+    auto result_op = op->InitSelectedStrategy(op->selected_strategy());
+    if (result_op != SUCCESS) {
+      return result_op;
     }
   }
   auto result = InitReshapeStrategy();

File 8 of 10

@@ -67,7 +67,8 @@ class CostGraph {
   CostPtrList CreateFinalSingleCostList(const OperatorInfoPtr &u);
   CostPtr SelectCostWithMinInferenceTime(const CostPtrList &cost_list, double memory);
   CostPtr SelectCostWithMinTrainingTime(const CostPtrList &cost_list, double memory);
-  CostPtrList SelectCostListWithMinTrainingTimeMultiple(const std::vector<CostPtrList> &all_costlist, double memory);
+  CostPtrList SelectCostListWithMinTrainingTimeMultiple(const std::vector<CostPtrList> &all_costlist,
+                                                        double memory) const;
   Status SearchStrategyForMultiNodeFinalGraph(const std::vector<OperatorInfoPtr> &);
   Status SearchStrategyForTwoNodeFinalGraph(const std::vector<OperatorInfoPtr> &);
   std::vector<std::shared_ptr<Edge>> GetOriginalEdgeBetweenOperators(OperatorInfoPtr u_node, OperatorInfoPtr v_node) {

File 9 of 10

@@ -733,9 +733,8 @@ void ReshapeCost::CalculateInputsInMemory(const std::map<size_t, bool> &) {
 double SubCost::GetForwardComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &,
                                           int64_t) const {
-  double result;
-  result = ListProduct(inputs[0].slice_shape()) * static_cast<double>(inputs_type_lengths_[0]) +
-           ListProduct(inputs[1].slice_shape()) * static_cast<double>(inputs_type_lengths_[1]);
+  double result = ListProduct(inputs[0].slice_shape()) * static_cast<double>(inputs_type_lengths_[0]) +
+                  ListProduct(inputs[1].slice_shape()) * static_cast<double>(inputs_type_lengths_[1]);
   return result;
 }
@@ -1103,8 +1102,7 @@ double ReduceSumCost::GetForwardCommCost(const std::vector<TensorInfo> &inputs,
     return result;
   }
   std::vector<int64_t> dim_list = input0.reduce_dim();
-  std::vector<int64_t>::iterator pos;
-  pos = std::find_if(dim_list.begin(), dim_list.end(), [input0_shape, input0_slice_shape](int64_t index) {
+  auto pos = std::find_if(dim_list.begin(), dim_list.end(), [input0_shape, input0_slice_shape](int64_t index) {
     return input0_shape[LongToSize(index)] != input0_slice_shape[LongToSize(index)];
   });
   if (pos != dim_list.end()) {
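The two-step "declare, then assign" for the iterator collapses into a single initialized declaration, and `auto` deduces the verbose iterator type; the same cleanup turns `double result; result = ...;` into one statement in the `SubCost` hunk above. A sketch:

```cpp
#include <algorithm>
#include <cstdint>
#include <vector>

int main() {
  std::vector<int64_t> dim_list{0, 2, 3};
  // before: std::vector<int64_t>::iterator pos; pos = std::find_if(...);
  auto pos = std::find_if(dim_list.begin(), dim_list.end(),
                          [](int64_t index) { return index > 1; });
  return pos != dim_list.end() ? 0 : 1;
}
```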
@@ -1146,8 +1144,7 @@ double ReduceSumCost::GetForwardComputationCost(const std::vector<TensorInfo> &i
   Shape input0_slice_shape = input0.slice_shape();
   Shape input0_shape = input0.shape();
   if (!cross_batch_ || !IsDataParallel(input0_shape, input0_slice_shape, stage_id)) {
-    std::vector<int64_t>::iterator pos;
-    pos = std::find_if(dim_list.begin(), dim_list.end(), [input0_shape, input0_slice_shape](int64_t index) {
+    auto pos = std::find_if(dim_list.begin(), dim_list.end(), [input0_shape, input0_slice_shape](int64_t index) {
      return input0_shape[LongToSize(index)] != input0_slice_shape[LongToSize(index)];
    });
    if (pos != dim_list.end()) {
@@ -1192,8 +1189,7 @@ double ReduceMeanCost::GetForwardComputationCost(const std::vector<TensorInfo> &
   Shape input0_slice_shape = input0.slice_shape();
   Shape input0_shape = input0.shape();
   if (!cross_batch_ || !IsDataParallel(input0_shape, input0_slice_shape, stage_id)) {
-    std::vector<int64_t>::iterator pos;
-    pos = std::find_if(dim_list.begin(), dim_list.end(), [input0_shape, input0_slice_shape](int64_t index) {
+    auto pos = std::find_if(dim_list.begin(), dim_list.end(), [input0_shape, input0_slice_shape](int64_t index) {
      return input0_shape[LongToSize(index)] != input0_slice_shape[LongToSize(index)];
    });
    if (pos != dim_list.end()) {
@@ -1415,11 +1411,10 @@ void LayerNormCost::CalculateInputsInMemory(const std::map<size_t, bool> &prev_o
   is_inputs_should_in_memory_[LAYERNORM_INPUTS_SIZE - 1] = is_parameter_[LAYERNORM_INPUTS_SIZE - 1];
 }
-double UniqueCost::GetForwardCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
-                                      int64_t stage_id) const {
+double UniqueCost::GetForwardCommCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &, int64_t) const {
   return 0.0;
 }
-double UniqueCost::GetBackwardCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
+double UniqueCost::GetBackwardCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &,
                                        int64_t stage_id) const {
   double result = 0.0;
   if (is_parameter_[0]) {
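Many overrides in this file keep a parameter only to satisfy the base-class signature; leaving it unnamed (`const std::vector<TensorInfo> &, int64_t`) silences unused-parameter warnings without changing the interface. A minimal sketch:

```cpp
#include <cstdint>
#include <vector>

struct Cost {
  // 'outputs' and 'stage_id' exist in the interface but are not read here,
  // so only their types remain in the definition.
  double GetForwardCommCost(const std::vector<double> &inputs,
                            const std::vector<double> & /*outputs*/, int64_t /*stage_id*/) const {
    return inputs.empty() ? 0.0 : inputs[0];
  }
};

int main() {
  Cost cost;
  return cost.GetForwardCommCost({2.5}, {}, 0) == 2.5 ? 0 : 1;
}
```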
@@ -1439,15 +1434,15 @@ double UniqueCost::GetBackwardCommCost(const std::vector<TensorInfo> &inputs, co
   }
   return result;
 }
-double UniqueCost::GetForwardComputationCost(const std::vector<TensorInfo> &inputs,
-                                             const std::vector<TensorInfo> &outputs, int64_t stage_id) const {
+double UniqueCost::GetForwardComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &,
+                                             int64_t) const {
   // In forward phase, the computation cost = slice(A) + slice(B)
   Shape input_slice_shape = inputs[0].slice_shape();
   double result = ListProduct(input_slice_shape) * static_cast<double>(inputs_type_lengths_[0]);
   return result;
 }
-double UniqueCost::GetBackwardComputationCost(const std::vector<TensorInfo> &inputs,
-                                              const std::vector<TensorInfo> &outputs, int64_t stage_id) const {
+double UniqueCost::GetBackwardComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &,
+                                              int64_t stage_id) const {
   // In backward phase, the computation cost = (0 or 1) allreduce(slice(B))
   double result = 0.0;
   if (is_parameter_[0]) {
@@ -1471,7 +1466,7 @@ double UniqueCost::GetBackwardComputationCost(const std::vector<TensorInfo> &inp
 }
 double GatherV2PCost::GetForwardCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
-                                         int64_t stage_id) const {
+                                         int64_t) const {
   double result = 0.0;
   if (outputs_type_lengths_.size() != outputs.size()) {
     MS_LOG(EXCEPTION) << "Invalid inputs type size " << inputs_type_lengths_.size() << " for gatherv2 cost";
@@ -1486,13 +1481,13 @@ double GatherV2PCost::GetForwardCommCost(const std::vector<TensorInfo> &inputs,
   auto index_shape = inputs[1].slice_shape();
   Shape reducescatter_shape = index_shape;
   if (param_shape.size() == 2) {
-    reducescatter_shape.push_back(param_shape.at(1 - axis_));
+    reducescatter_shape.push_back(param_shape.at(LongToSize(1 - axis_)));
   }
   result += ListProduct(reducescatter_shape) * static_cast<double>(outputs_type_lengths_[0]);
   return result;
 }
-double GatherV2PCost::GetBackwardCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
+double GatherV2PCost::GetBackwardCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &,
                                           int64_t stage_id) const {
   double result = 0.0;
   CheckGlobalDeviceManager();
}
double UniformCandidateSamplerCost::GetForwardComputationCost(const std::vector<TensorInfo> &inputs,
const std::vector<TensorInfo> &outputs,
int64_t stage_id) const {
const std::vector<TensorInfo> &, int64_t) const {
Shape input0_slice_shape = inputs[0].slice_shape();
if (inputs_type_lengths_.size() != inputs.size()) {
MS_LOG(EXCEPTION) << "Invalid inputs type size " << inputs_type_lengths_.size()
@ -1537,8 +1531,8 @@ void UniformCandidateSamplerCost::CalculateInputsInMemory(const std::map<size_t,
is_inputs_should_in_memory_[0] = is_parameter_[0];
}
double GatherV2PCost::GetForwardComputationCost(const std::vector<TensorInfo> &inputs,
const std::vector<TensorInfo> &outputs, int64_t stage_id) const {
double GatherV2PCost::GetForwardComputationCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &,
int64_t) const {
double result = 0.0;
Shape input0_slice_shape = inputs[0].slice_shape();
Shape input1_slice_shape = inputs[1].slice_shape();
@@ -1578,7 +1572,7 @@ double GatherV2PCost::GetBackwardComputationCost(const std::vector<TensorInfo> &
 // The forward communication is determined by whether the slice is column split or row split
 // The number of segments is actually the shape[0] of the output, which is the cost of the AllReduce
 double UnsortedSegmentSumCost::GetForwardCommCost(const std::vector<TensorInfo> &inputs,
-                                                  const std::vector<TensorInfo> &outputs, int64_t stage_id) const {
+                                                  const std::vector<TensorInfo> &outputs, int64_t) const {
   TensorInfo input0 = inputs[0];
   TensorInfo input1 = inputs[1];
   TensorInfo output0 = outputs[0];
@@ -1599,7 +1593,7 @@ double UnsortedSegmentSumCost::GetForwardCommCost(const std::vector<TensorInfo>
 }
 double UnsortedSegmentSumCost::GetBackwardCommCost(const std::vector<TensorInfo> &inputs,
-                                                   const std::vector<TensorInfo> &outputs, int64_t stage_id) const {
+                                                   const std::vector<TensorInfo> &outputs, int64_t) const {
   TensorInfo input0 = inputs[0];
   TensorInfo input1 = inputs[1];
   TensorInfo output0 = outputs[0];
@@ -1656,7 +1650,7 @@ void UnsortedSegmentSumCost::CalculateInputsInMemory(const std::map<size_t, bool
 }
 double UnsortedSegmentMinCost::GetForwardCommCost(const std::vector<TensorInfo> &inputs,
-                                                  const std::vector<TensorInfo> &outputs, int64_t stage_id) const {
+                                                  const std::vector<TensorInfo> &outputs, int64_t) const {
   TensorInfo input0 = inputs[0];
   TensorInfo input1 = inputs[1];
   TensorInfo output0 = outputs[0];
@@ -1679,7 +1673,7 @@ double UnsortedSegmentMinCost::GetForwardCommCost(const std::vector<TensorInfo>
 }
 double UnsortedSegmentMinCost::GetBackwardCommCost(const std::vector<TensorInfo> &inputs,
-                                                   const std::vector<TensorInfo> &outputs, int64_t stage_id) const {
+                                                   const std::vector<TensorInfo> &outputs, int64_t) const {
   TensorInfo input0 = inputs[0];
   TensorInfo input1 = inputs[1];
   TensorInfo output0 = outputs[0];

File 10 of 10

@@ -16,8 +16,8 @@
 #include "frontend/parallel/step_auto_parallel.h"
-#include <inttypes.h>
-#include <sys/time.h>
+#include <cinttypes>
+#include <ctime>
 #include <algorithm>
 #include <map>
 #include <memory>
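The C-era headers are swapped for their C++ counterparts: `<inttypes.h>` becomes `<cinttypes>` and the time facilities come from `<ctime>`, which place their declarations in namespace `std`. A sketch:

```cpp
#include <cinttypes>  // C++ wrapper for <inttypes.h>: PRId64, std::int64_t
#include <cstdio>
#include <ctime>      // C++ wrapper for <time.h>

int main() {
  std::int64_t stamp = static_cast<std::int64_t>(std::time(nullptr));
  std::printf("timestamp: %" PRId64 "\n", stamp);
  return 0;
}
```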
@@ -78,7 +78,9 @@ bool StepAutoParallel(const FuncGraphPtr &root, const opt::OptimizerPtr &) {
     MS_LOG(INFO) << "Non-idicated strategy searching mode, using DP searching mode as default";
   }
-  struct timeval start_time, end_time;
+  struct timeval start_time {
+    0
+  }, end_time{0};
   (void)gettimeofday(&start_time, nullptr);
   if (MsContext::GetInstance()->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG)) {
@@ -492,7 +494,7 @@ Status ConstructCostGraphNodesByUniqueIdTC(const std::vector<AnfNodePtr> &all_no
   StrategyMap stra_map;
   if (StrategyCheckpoint::GetInstance().LoadCheckPointOn() &&
       StrategyCheckpoint::GetInstance().Load(&stra_map) != SUCCESS) {
-    MS_LOG(EXCEPTION) << "Load strategy checkpoint failed";
+    MS_LOG(WARNING) << "Load strategy checkpoint failed";
     return FAILED;
   }
   for (auto &node : all_nodes) {
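`MS_LOG(EXCEPTION)` throws, so the `return FAILED;` that followed it was unreachable; demoting the log to `WARNING` makes the function actually report the failure through its `Status` return value. A sketch of the warn-and-return shape:

```cpp
#include <iostream>

enum Status { SUCCESS, FAILED };

Status LoadStrategyCheckpoint(bool load_ok) {
  if (!load_ok) {
    // Log and return a status the caller can act on; throwing here would have
    // made the return statement below dead code.
    std::cerr << "Load strategy checkpoint failed\n";
    return FAILED;
  }
  return SUCCESS;
}

int main() { return LoadStrategyCheckpoint(true) == SUCCESS ? 0 : 1; }
```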
@@ -778,7 +780,6 @@ void AugmentCostGraph(const std::vector<AnfNodePtr> &all_nodes) {
   // Create edges between this TmpIdentityInfo instance and subsequent Operator instances
   for (auto &target : target_set) {
     auto target_cnode = target.first->cast<CNodePtr>();
-    auto prim = GetValueNode<PrimitivePtr>(target_cnode->input(0));
     auto input_index = target.second;
     auto target_op_info = target_cnode->user_data<OperatorInfo>();
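The deleted `prim` local was computed and never read, the classic dead-store warning; assuming the `GetValueNode` call has no needed side effects, removing the whole line is the fix. A sketch of the pattern:

```cpp
int SumFirstN(const int *data, int n) {
  // removed: int first = data[0];  // dead store: assigned but never read
  int total = 0;
  for (int i = 0; i < n; ++i) {
    total += data[i];
  }
  return total;
}

int main() {
  const int values[] = {1, 2, 3};
  return SumFirstN(values, 3) == 6 ? 0 : 1;
}
```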
@@ -830,10 +831,10 @@ void ReshapeCostCompute(const std::vector<AnfNodePtr> &all_nodes) {
     std::vector<std::shared_ptr<StrategyWithCost>> pre_stra_costs;
     auto operator_info = cnode->user_data<OperatorInfo>();
     if (pre_node->isa<Parameter>()) {
-      auto reshape_info = std::dynamic_pointer_cast<ReshapeInfo>(operator_info);
-      reshape_info->SetCostForReshapeWithParameter();
-      pre_operator_info = reshape_info;
-      pre_stra_costs = reshape_info->strategy_cost();
+      auto reshape_info1 = std::dynamic_pointer_cast<ReshapeInfo>(operator_info);
+      reshape_info1->SetCostForReshapeWithParameter();
+      pre_operator_info = reshape_info1;
+      pre_stra_costs = reshape_info1->strategy_cost();
     } else {
       if (!FindReshapePreNodeStraCosts(pre_node, &pre_operator_info, &out_index, 0)) {
         MS_LOG(EXCEPTION) << "FindReshapePreNodeStraCosts for reshape failed";