From b968b409ec4530363d2f01807c6f65c4f405a9a8 Mon Sep 17 00:00:00 2001
From: chendongsheng
Date: Wed, 8 Sep 2021 11:09:22 +0800
Subject: [PATCH] clean pclint warnings

---
 mindspore/ccsrc/ps/core/abstract_node.cc    |  6 +++---
 mindspore/ccsrc/ps/core/instance_manager.cc |  8 ++++----
 mindspore/ccsrc/ps/core/node.h              |  2 ++
 mindspore/ccsrc/ps/core/node_manager.cc     |  6 +++---
 mindspore/ccsrc/ps/worker.cc                | 16 ++++++++--------
 5 files changed, 20 insertions(+), 18 deletions(-)

diff --git a/mindspore/ccsrc/ps/core/abstract_node.cc b/mindspore/ccsrc/ps/core/abstract_node.cc
index f53891ab796..19ea97ab6fd 100644
--- a/mindspore/ccsrc/ps/core/abstract_node.cc
+++ b/mindspore/ccsrc/ps/core/abstract_node.cc
@@ -578,7 +578,7 @@ void AbstractNode::ProcessHeartbeatResp(const std::shared_ptr<MessageMeta> &meta
     NodeInfo info;
     info.ip_ = it.ip();
     info.node_id_ = it.node_id();
-    info.port_ = it.port();
+    info.port_ = static_cast<uint16_t>(it.port());
     info.node_role_ = it.role();
     info.rank_id_ = it.rank_id();
     info.is_alive = it.is_alive();
@@ -1072,8 +1072,8 @@ void AbstractNode::InitNodeInfo(const NodeRole &role) {
 }
 
 void AbstractNode::InitNodeNum() {
-  worker_num_ = PSContext::instance()->cluster_config().initial_worker_num;
-  server_num_ = PSContext::instance()->cluster_config().initial_server_num;
+  worker_num_ = SizeToInt(PSContext::instance()->cluster_config().initial_worker_num);
+  server_num_ = SizeToInt(PSContext::instance()->cluster_config().initial_server_num);
   scheduler_ip_ = PSContext::instance()->cluster_config().scheduler_host;
   scheduler_port_ = PSContext::instance()->cluster_config().scheduler_port;
   MS_LOG(INFO) << "The worker num:" << worker_num_ << ", the server num:" << server_num_
diff --git a/mindspore/ccsrc/ps/core/instance_manager.cc b/mindspore/ccsrc/ps/core/instance_manager.cc
index e7cc482a60c..57c9271cd04 100644
--- a/mindspore/ccsrc/ps/core/instance_manager.cc
+++ b/mindspore/ccsrc/ps/core/instance_manager.cc
@@ -19,7 +19,7 @@
 namespace mindspore {
 namespace ps {
 namespace core {
-void InstanceManager::NewInstanceAsync(const std::shared_ptr<TcpClient> &client, const NodeManager &manager,
+void InstanceManager::NewInstanceAsync(const std::shared_ptr<TcpClient> &client, const NodeManager &,
                                        const std::string &body, const uint64_t &request_id, const NodeInfo &node_info) {
   MS_EXCEPTION_IF_NULL(client);
   MS_EXCEPTION_IF_NULL(node_);
@@ -38,7 +38,7 @@
   MS_LOG(INFO) << "The scheduler is sending new instance to workers and servers!";
 }
 
-void InstanceManager::QueryInstanceAsync(const std::shared_ptr<TcpClient> &client, const NodeManager &manager,
+void InstanceManager::QueryInstanceAsync(const std::shared_ptr<TcpClient> &client, const NodeManager &,
                                          const uint64_t &request_id, const NodeInfo &node_info) {
   MS_EXCEPTION_IF_NULL(client);
   MS_EXCEPTION_IF_NULL(node_);
@@ -58,7 +58,7 @@
   MS_LOG(INFO) << "The scheduler is sending query instance to workers and servers!";
 }
 
-void InstanceManager::EnableFLSAsync(const std::shared_ptr<TcpClient> &client, const NodeManager &manager,
+void InstanceManager::EnableFLSAsync(const std::shared_ptr<TcpClient> &client, const NodeManager &,
                                      const uint64_t &request_id, const NodeInfo &node_info) {
   MS_EXCEPTION_IF_NULL(client);
   MS_EXCEPTION_IF_NULL(node_);
@@ -78,7 +78,7 @@
   MS_LOG(INFO) << "The scheduler is sending query instance to workers and servers!";
 }
 
-void InstanceManager::DisableFLSAsync(const std::shared_ptr<TcpClient> &client, const NodeManager &manager,
+void InstanceManager::DisableFLSAsync(const std::shared_ptr<TcpClient> &client, const NodeManager &,
                                       const uint64_t &request_id, const NodeInfo &node_info) {
   MS_EXCEPTION_IF_NULL(client);
   MS_EXCEPTION_IF_NULL(node_);
diff --git a/mindspore/ccsrc/ps/core/node.h b/mindspore/ccsrc/ps/core/node.h
index 052445feeb2..7a3a2ad536c 100644
--- a/mindspore/ccsrc/ps/core/node.h
+++ b/mindspore/ccsrc/ps/core/node.h
@@ -92,7 +92,9 @@ class Node {
   void RunMessageCallback(const uint64_t &request_id);
 
   NodeInfo node_info_;
+  // Whether the cluster is ready
   std::atomic<bool> is_ready_;
+  // Whether the cluster is finished.
   std::atomic<bool> is_finish_;
 
   std::atomic<bool> is_already_stopped_;
diff --git a/mindspore/ccsrc/ps/core/node_manager.cc b/mindspore/ccsrc/ps/core/node_manager.cc
index bd22d8c0ef0..7620e8797c9 100644
--- a/mindspore/ccsrc/ps/core/node_manager.cc
+++ b/mindspore/ccsrc/ps/core/node_manager.cc
@@ -25,7 +25,7 @@ void NodeManager::InitNode() {
   meta_data_ = std::make_unique<ClusterMetadata>(PSContext::instance()->cluster_config().initial_worker_num,
                                                  PSContext::instance()->cluster_config().initial_server_num);
   MS_EXCEPTION_IF_NULL(meta_data_);
-  total_node_num_ = initial_total_node_num_;
+  total_node_num_ = UintToInt(initial_total_node_num_);
 }
 
 uint32_t NodeManager::NextRankId(const RegisterMessage &register_message, const std::shared_ptr<MessageMeta> &meta) {
@@ -79,7 +79,7 @@ uint32_t NodeManager::NextRankId(const RegisterMessage &register_message, const
     node_info.node_id_ = node_id;
     node_info.rank_id_ = rank_id;
     node_info.ip_ = ip;
-    node_info.port_ = port;
+    node_info.port_ = static_cast<uint16_t>(port);
     node_info.is_alive = true;
     registered_nodes_info_[node_id] = node_info;
     MS_LOG(INFO) << "The server node id:" << node_id << ",node ip: " << node_info.ip_ << ",node port:" << port
@@ -118,7 +118,7 @@ uint32_t NodeManager::NextRankId(const RegisterMessage &register_message, const
     node_info.node_id_ = node_id;
     node_info.rank_id_ = rank_id;
     node_info.ip_ = ip;
-    node_info.port_ = port;
+    node_info.port_ = static_cast<uint16_t>(port);
     node_info.is_alive = true;
     registered_nodes_info_[node_id] = node_info;
     MS_LOG(INFO) << "The worker node id:" << node_id << " assign rank id:" << rank_id;
diff --git a/mindspore/ccsrc/ps/worker.cc b/mindspore/ccsrc/ps/worker.cc
index 2550a5aa2ce..8386eee81a0 100644
--- a/mindspore/ccsrc/ps/worker.cc
+++ b/mindspore/ccsrc/ps/worker.cc
@@ -307,7 +307,7 @@ void Worker::DoPSEmbeddingLookup(const Key &key, const std::vector<int> &lookup_
   }
 
   std::vector<VectorPtr> resp;
-  if (!worker_node_.Send(core::NodeRole::SERVER, rank_ids, data, sizes, cmd, &resp)) {
+  if (!worker_node_.Send(core::NodeRole::SERVER, rank_ids, data, sizes, LongToInt(cmd), &resp)) {
     MS_LOG(ERROR) << "Worker send failed!";
   }
   int64_t single_id_len = SizeToLong(lookup_result->size() / lookup_ids.size());
@@ -391,7 +391,7 @@ void Worker::UpdateEmbeddingTable(const std::vector<Key> &keys, const std::vecto
       sizes.push_back(kv_data.length());
     }
   }
-  worker_node_.Send(core::NodeRole::SERVER, rank_ids, data, sizes, kUpdateEmbeddingsCmd);
+  worker_node_.Send(core::NodeRole::SERVER, rank_ids, data, sizes, LongToInt(kUpdateEmbeddingsCmd));
 }
 
 void Worker::Finalize() {
@@ -844,11 +844,11 @@ void Worker::RoundRobinPartitioner(const KVMessage &send, PartitionKVMessages *p
   for (int i = 0; i < send.keys_size(); i++) {
     param_key = keys[i];
     int64_t server_id = key_to_server_id_[param_key];
-    if (!partition->at(server_id).first) {
-      partition->at(server_id).first = true;
+    if (!partition->at(LongToUlong(server_id)).first) {
+      partition->at(LongToUlong(server_id)).first = true;
     }
-    KVMessage &server_kv_pairs = partition->at(server_id).second;
+    KVMessage &server_kv_pairs = partition->at(LongToUlong(server_id)).second;
     server_kv_pairs.add_keys(param_key);
     if (values.empty()) {
       continue;
     }
@@ -865,7 +865,7 @@
 }
 
 void Worker::WorkerInitEmbeddingPartitioner(const KVMessage &send, std::vector<std::pair<bool, KVMessage>> *partition,
                                             const std::map<int64_t, int64_t> &attrs) {
-                                            const std::map<int64_t, int64_t> &attrs) {
+                                            const std::map<int64_t, int64_t> &) {
   MS_EXCEPTION_IF_NULL(partition);
   partition->resize(LongToSize(server_num_));
   auto keys = send.keys();
@@ -890,8 +890,8 @@ void Worker::UpdateEmbeddingPartitioner(const KVMessage &send, PartitionKVMessag
   MS_EXCEPTION_IF_NULL(partition);
   const float *embedding_vals = send.values().data();
   const uint64_t *lookup_ids = send.len().data();
-  size_t val_size = send.values_size();
-  size_t id_size = send.len_size();
+  size_t val_size = IntToSize(send.values_size());
+  size_t id_size = IntToSize(send.len_size());
   size_t embedding_dim = val_size / id_size;
   const Key &key = send.keys()[0];
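
Note on the conversion helpers used throughout this patch: SizeToInt, LongToInt, IntToSize, UintToInt and LongToUlong replace implicit narrowing conversions flagged by PC-lint with explicit casts. Their actual MindSpore implementations are not shown here; the following is only a minimal stand-alone sketch, with hypothetical names SizeToIntChecked and LongToIntChecked, of how such checked narrowing conversions typically behave.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <limits>
#include <stdexcept>

// Hypothetical stand-ins for SizeToInt / LongToInt style helpers: the narrowing
// is made explicit and range-checked, which is what silences the lint warning.
inline int SizeToIntChecked(std::size_t u) {
  if (u > static_cast<std::size_t>(std::numeric_limits<int>::max())) {
    throw std::out_of_range("size_t value does not fit into int");
  }
  return static_cast<int>(u);
}

inline int LongToIntChecked(std::int64_t v) {
  if (v < std::numeric_limits<int>::min() || v > std::numeric_limits<int>::max()) {
    throw std::out_of_range("int64_t value does not fit into int");
  }
  return static_cast<int>(v);
}

int main() {
  std::size_t initial_worker_num = 8;  // e.g. cluster_config().initial_worker_num
  std::int64_t cmd = 51;               // e.g. an embedding-lookup command id
  // Same pattern as the patch: explicit, checked narrowing instead of implicit casts.
  std::cout << SizeToIntChecked(initial_worker_num) << " " << LongToIntChecked(cmd) << "\n";
  return 0;
}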