!17870 fix pclint and codedex for parallel ops info

Merge pull request !17870 from yangzhenzhang/0604-fix-pclint-and-codedex-for-master-ops-info
i-robot 2021-06-07 14:10:32 +08:00 committed by Gitee
commit 878cb6ac3b
5 changed files with 17 additions and 17 deletions


@@ -150,12 +150,12 @@ Status GroupManager::CreateGroup(const std::string &group_name, const std::vecto
   (void)CommManager::GetInstance().GetRankSize(world_group_, &world_size);
   if (devices.size() == world_size) {
-    auto it = groups_.find(world_group_);
-    if (it == groups_.end()) {
+    auto iter = groups_.find(world_group_);
+    if (iter == groups_.end()) {
       (void)group->Init(world_group_, devices);
       groups_[world_group_] = *group;
     } else {
-      *group = it->second;
+      *group = iter->second;
     }
     MS_LOG(INFO) << "It is world group " << world_group_ << ", no need to create it.";
     return Status::SUCCESS;
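
This hunk changes no behavior; the linter fix only renames the map iterator from it to iter. The surrounding logic is a plain lookup-or-create cache over groups_. A minimal standalone sketch of that idiom, with all names reduced to hypothetical stand-ins for the real MindSpore types:

#include <map>
#include <string>

// Hypothetical stand-ins; the real Group carries device lists and
// communication state initialized via Init().
struct Group {
  std::string name;
};

std::map<std::string, Group> groups_;

// Same lookup-or-create shape as the world-group branch above: reuse the
// cached entry if present, otherwise build it and remember it.
Group GetOrCreateGroup(const std::string &name) {
  auto iter = groups_.find(name);
  if (iter == groups_.end()) {
    Group group{name};
    groups_[name] = group;
    return group;
  }
  return iter->second;
}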


@@ -243,7 +243,7 @@ Status GatherInfo::InferTensorSubOps() {
   if ((axis_ >= SizeToLong(inputs_shape_.at(0).size())) || axis_ < 0) {
     MS_LOG(ERROR) << "Axis is " << axis_ << ", not in [0, " << inputs_shape_.at(0).size() << ").";
   }
-  int64_t sub_value = static_cast<int64_t>(inputs_shape_[0][LongToSize(axis_)] / dev_matrix_shape_[axis_]) * mod_rank;
+  int64_t sub_value = inputs_shape_[0][LongToSize(axis_)] / dev_matrix_shape_[axis_] * mod_rank;
   OperatorVector sub_op;
   sub_ops_.emplace_back(std::move(sub_op));
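
The codedex fix here drops a cast that pclint reports as redundant: the shape and device-matrix entries are already int64_t, so the quotient is int64_t before the cast is applied. A compilable sketch of the reasoning (the Shape alias is an assumption based on the surrounding code):

#include <cstdint>
#include <vector>

using Shape = std::vector<int64_t>;  // assumed alias matching the parallel code

int64_t SubValue(const Shape &input_shape, const Shape &dev_matrix_shape,
                 size_t axis, int64_t mod_rank) {
  // Every operand is int64_t already, so
  //   static_cast<int64_t>(input_shape[axis] / dev_matrix_shape[axis]) * mod_rank
  // and the cast-free form below compute exactly the same value.
  return input_shape[axis] / dev_matrix_shape[axis] * mod_rank;
}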


@@ -889,14 +889,14 @@ void OperatorInfo::ReplacePreEdges(const std::shared_ptr<OperatorInfo> &op, cons
     MS_LOG(ERROR) << name_ << ": ReplacePreEdges: the op is null.";
     return;
   }
-  std::vector<std::shared_ptr<Edge>> new_pre_edges;
+  std::vector<std::shared_ptr<Edge>> update_pre_edges;
   for (auto &edge : prev_edges_) {
     if (edge->prev_operator() != op) {
-      new_pre_edges.push_back(edge);
+      update_pre_edges.push_back(edge);
     }
   }
-  new_pre_edges.push_back(replace_edge);
-  prev_edges_ = new_pre_edges;
+  update_pre_edges.push_back(replace_edge);
+  prev_edges_ = update_pre_edges;
 }
 
 void OperatorInfo::ReplaceSuccEdges(const std::shared_ptr<OperatorInfo> &op,
@@ -905,14 +905,14 @@ void OperatorInfo::ReplaceSuccEdges(const std::shared_ptr<OperatorInfo> &op,
     MS_LOG(ERROR) << name_ << ": ReplaceSuccEdges: the op is null";
     return;
   }
-  std::vector<std::shared_ptr<Edge>> new_succ_edges;
+  std::vector<std::shared_ptr<Edge>> update_pre_edges;
   for (auto &edge : succ_edges_) {
     if (edge->next_operator() != op) {
-      new_succ_edges.push_back(edge);
+      update_pre_edges.push_back(edge);
     }
   }
-  new_succ_edges.push_back(replace_edge);
-  succ_edges_ = new_succ_edges;
+  update_pre_edges.push_back(replace_edge);
+  succ_edges_ = update_pre_edges;
 }
 
 std::shared_ptr<Strategys> GenerateBatchStrategiesBySplitFlag(const Shapes &shapes,
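
Both functions use the same rebuild-and-swap idiom: copy every edge that does not touch op into a temporary, append the replacement edge, then assign the temporary back. The commit only renames the temporary (note the second hunk reuses update_pre_edges even though it rebuilds successor edges). A standalone sketch of the idiom, with Edge and Operator reduced to hypothetical stubs:

#include <memory>
#include <vector>

struct Operator {};  // stub

struct Edge {
  std::shared_ptr<Operator> prev_operator;
  std::shared_ptr<Operator> next_operator;
};

// Replace every edge arriving from `op` with the single `replace_edge`,
// mirroring OperatorInfo::ReplacePreEdges above.
void ReplacePreEdges(std::vector<std::shared_ptr<Edge>> *prev_edges,
                     const std::shared_ptr<Operator> &op,
                     const std::shared_ptr<Edge> &replace_edge) {
  std::vector<std::shared_ptr<Edge>> update_edges;
  for (const auto &edge : *prev_edges) {
    if (edge->prev_operator != op) {
      update_edges.push_back(edge);
    }
  }
  update_edges.push_back(replace_edge);
  *prev_edges = update_edges;
}

An erase-remove over the member followed by a push_back would do the same work in place; building a fresh vector leaves the member untouched until the final assignment.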


@@ -114,7 +114,7 @@ Status StackInfo::InferTensorMap() {
     inputs_tensor_map_.push_back(in_tensor_map);
   }
-  out_tensor_map.insert(out_tensor_map.begin() + SizeToLong(axis_), MAP_NONE);
+  (void)out_tensor_map.insert(out_tensor_map.begin() + SizeToLong(axis_), MAP_NONE);
   outputs_tensor_map_.push_back(out_tensor_map);
   return SUCCESS;
 }
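
This is the classic pclint "return value discarded" fix: std::vector::insert returns an iterator to the inserted element, and casting the call to void records that dropping it is deliberate. A minimal sketch (MAP_NONE here is a stand-in value for the real constant):

#include <cstddef>
#include <cstdint>
#include <vector>

constexpr int64_t MAP_NONE = -1;  // stand-in value, assumption

void InsertNoneAtAxis(std::vector<int64_t> *out_tensor_map, std::size_t axis) {
  // insert() returns an iterator to the new element; the (void) cast tells
  // the linter the result is discarded on purpose, not by accident.
  (void)out_tensor_map->insert(out_tensor_map->begin() + static_cast<std::ptrdiff_t>(axis),
                               MAP_NONE);
}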


@@ -136,16 +136,16 @@ Status SplitInfo::InferTensorMap() {
 Status SplitInfo::SetCostUnderStrategy(const StrategyPtr &strategy) { return SetCostUnderStrategyBase(strategy); }
 
 std::vector<StrategyPtr> SplitInfo::GenerateOpStrategies(int64_t stage_id) {
-  Shape input_split;
+  Shape split_flag;
   for (size_t i = 0; i < inputs_shape_[0].size(); ++i) {
     if (i == axis_) {
-      input_split.push_back(0);
+      split_flag.push_back(0);
     } else {
-      input_split.push_back(1);
+      split_flag.push_back(1);
     }
   }
-  Shapes splittable_input = {input_split};
+  Shapes splittable_input = {split_flag};
   Shapes tmp_inputs_shape = {inputs_shape_[0]};
   std::vector<StrategyPtr> sp_vector;
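
Renaming input_split to split_flag makes the vector's meaning explicit: one entry per input dimension, 1 where the strategy generator may shard the dimension across devices and 0 at axis_, which Split leaves whole on each device. A sketch of building such a flag vector (Shape alias assumed as above, function name hypothetical):

#include <cstdint>
#include <vector>

using Shape = std::vector<int64_t>;  // assumed alias

// 1 = dimension may be sharded across devices, 0 = keep it intact.
Shape BuildSplitFlag(size_t input_rank, size_t split_axis) {
  Shape split_flag;
  for (size_t i = 0; i < input_rank; ++i) {
    split_flag.push_back(i == split_axis ? 0 : 1);
  }
  return split_flag;
}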