forked from mindspore-Ecosystem/mindspore
clean pclint warnings
parent 8065a3014e
commit b968b409ec
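The diff below applies two recurring pclint fixes: implicit integer narrowing and sign conversions are made explicit (via `static_cast` or MindSpore's conversion helpers `SizeToInt`, `UintToInt`, `LongToInt`, `LongToUlong`, `IntToSize`), and parameters that are never referenced lose their names. As a minimal sketch of what a checked helper in this family looks like — hypothetical name and error handling, the actual MindSpore utilities may differ:

```cpp
#include <cstddef>
#include <limits>
#include <stdexcept>
#include <string>

// Hypothetical illustration, not the MindSpore implementation: convert
// size_t to int, failing loudly instead of truncating silently. Silent
// truncation is exactly what pclint flags on an implicit size_t -> int
// conversion.
inline int SizeToIntSketch(std::size_t u) {
  if (u > static_cast<std::size_t>(std::numeric_limits<int>::max())) {
    throw std::out_of_range("value " + std::to_string(u) + " does not fit in int");
  }
  return static_cast<int>(u);
}
```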
@@ -578,7 +578,7 @@ void AbstractNode::ProcessHeartbeatResp(const std::shared_ptr<MessageMeta> &meta
     NodeInfo info;
     info.ip_ = it.ip();
     info.node_id_ = it.node_id();
-    info.port_ = it.port();
+    info.port_ = static_cast<uint16_t>(it.port());
     info.node_role_ = it.role();
     info.rank_id_ = it.rank_id();
     info.is_alive = it.is_alive();

@@ -1072,8 +1072,8 @@ void AbstractNode::InitNodeInfo(const NodeRole &role) {
 }
 
 void AbstractNode::InitNodeNum() {
-  worker_num_ = PSContext::instance()->cluster_config().initial_worker_num;
-  server_num_ = PSContext::instance()->cluster_config().initial_server_num;
+  worker_num_ = SizeToInt(PSContext::instance()->cluster_config().initial_worker_num);
+  server_num_ = SizeToInt(PSContext::instance()->cluster_config().initial_server_num);
   scheduler_ip_ = PSContext::instance()->cluster_config().scheduler_host;
   scheduler_port_ = PSContext::instance()->cluster_config().scheduler_port;
   MS_LOG(INFO) << "The worker num:" << worker_num_ << ", the server num:" << server_num_

@@ -19,7 +19,7 @@
 namespace mindspore {
 namespace ps {
 namespace core {
-void InstanceManager::NewInstanceAsync(const std::shared_ptr<TcpClient> &client, const NodeManager &manager,
+void InstanceManager::NewInstanceAsync(const std::shared_ptr<TcpClient> &client, const NodeManager &,
                                        const std::string &body, const uint64_t &request_id, const NodeInfo &node_info) {
   MS_EXCEPTION_IF_NULL(client);
   MS_EXCEPTION_IF_NULL(node_);

@@ -38,7 +38,7 @@ void InstanceManager::NewInstanceAsync(const std::shared_ptr<TcpClient> &client,
   MS_LOG(INFO) << "The scheduler is sending new instance to workers and servers!";
 }
 
-void InstanceManager::QueryInstanceAsync(const std::shared_ptr<TcpClient> &client, const NodeManager &manager,
+void InstanceManager::QueryInstanceAsync(const std::shared_ptr<TcpClient> &client, const NodeManager &,
                                          const uint64_t &request_id, const NodeInfo &node_info) {
   MS_EXCEPTION_IF_NULL(client);
   MS_EXCEPTION_IF_NULL(node_);

@@ -58,7 +58,7 @@ void InstanceManager::QueryInstanceAsync(const std::shared_ptr<TcpClient> &clien
   MS_LOG(INFO) << "The scheduler is sending query instance to workers and servers!";
 }
 
-void InstanceManager::EnableFLSAsync(const std::shared_ptr<TcpClient> &client, const NodeManager &manager,
+void InstanceManager::EnableFLSAsync(const std::shared_ptr<TcpClient> &client, const NodeManager &,
                                      const uint64_t &request_id, const NodeInfo &node_info) {
   MS_EXCEPTION_IF_NULL(client);
   MS_EXCEPTION_IF_NULL(node_);

@@ -78,7 +78,7 @@ void InstanceManager::EnableFLSAsync(const std::shared_ptr<TcpClient> &client, c
   MS_LOG(INFO) << "The scheduler is sending query instance to workers and servers!";
 }
 
-void InstanceManager::DisableFLSAsync(const std::shared_ptr<TcpClient> &client, const NodeManager &manager,
+void InstanceManager::DisableFLSAsync(const std::shared_ptr<TcpClient> &client, const NodeManager &,
                                       const uint64_t &request_id, const NodeInfo &node_info) {
   MS_EXCEPTION_IF_NULL(client);
   MS_EXCEPTION_IF_NULL(node_);

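The four InstanceManager hunks silence the same diagnostic: a parameter that must stay in the signature but is never referenced. Dropping the name while keeping the type leaves call sites and the ABI untouched, and an unnamed parameter cannot be referenced, so the "unreferenced formal parameter" warning disappears. A self-contained sketch of the idiom, using a hypothetical function rather than MindSpore code:

```cpp
#include <iostream>
#include <string>

// The second parameter is kept for interface compatibility but is
// intentionally unused, so it is unnamed (a comment preserves intent).
void SendAsync(const std::string &body, const std::string & /* manager */) {
  std::cout << "sending: " << body << '\n';
}

int main() {
  SendAsync("payload", "still passed by callers, simply ignored");
  return 0;
}
```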
@@ -92,7 +92,9 @@ class Node {
   void RunMessageCallback(const uint64_t &request_id);
 
   NodeInfo node_info_;
+  // Whether the cluster is ready
   std::atomic<bool> is_ready_;
+  // Whether the cluster is finished.
   std::atomic<bool> is_finish_;
 
   std::atomic<bool> is_already_stopped_;

@@ -25,7 +25,7 @@ void NodeManager::InitNode() {
   meta_data_ = std::make_unique<ClusterMetadata>(PSContext::instance()->cluster_config().initial_worker_num,
                                                  PSContext::instance()->cluster_config().initial_server_num);
   MS_EXCEPTION_IF_NULL(meta_data_);
-  total_node_num_ = initial_total_node_num_;
+  total_node_num_ = UintToInt(initial_total_node_num_);
 }
 
 uint32_t NodeManager::NextRankId(const RegisterMessage &register_message, const std::shared_ptr<MessageMeta> &meta) {

@@ -79,7 +79,7 @@ uint32_t NodeManager::NextRankId(const RegisterMessage &register_message, const
     node_info.node_id_ = node_id;
     node_info.rank_id_ = rank_id;
     node_info.ip_ = ip;
-    node_info.port_ = port;
+    node_info.port_ = static_cast<uint16_t>(port);
     node_info.is_alive = true;
     registered_nodes_info_[node_id] = node_info;
     MS_LOG(INFO) << "The server node id:" << node_id << ",node ip: " << node_info.ip_ << ",node port:" << port

@@ -118,7 +118,7 @@ uint32_t NodeManager::NextRankId(const RegisterMessage &register_message, const
     node_info.node_id_ = node_id;
     node_info.rank_id_ = rank_id;
     node_info.ip_ = ip;
-    node_info.port_ = port;
+    node_info.port_ = static_cast<uint16_t>(port);
     node_info.is_alive = true;
     registered_nodes_info_[node_id] = node_info;
     MS_LOG(INFO) << "The worker node id:" << node_id << " assign rank id:" << rank_id;

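The port assignments above (here and in AbstractNode::ProcessHeartbeatResp) change from implicit to explicit narrowing. Assuming the accessor hands back a 32-bit value, as protobuf `uint32` fields do, while the port field is `uint16_t`, the explicit cast documents the narrowing pclint would otherwise flag. A small sketch under those assumptions:

```cpp
#include <cstdint>

// Hypothetical mirror of the pattern in NextRankId: a 32-bit wire value is
// narrowed to a uint16_t port with an explicit, greppable cast.
uint16_t ToPort(uint32_t wire_value) {
  return static_cast<uint16_t>(wire_value);  // intentional, visible narrowing
}
```

A checked variant could reject values above 65535; the plain cast presumably suffices here because valid port numbers already fit in 16 bits.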
@@ -307,7 +307,7 @@ void Worker::DoPSEmbeddingLookup(const Key &key, const std::vector<int> &lookup_
   }
 
   std::vector<VectorPtr> resp;
-  if (!worker_node_.Send(core::NodeRole::SERVER, rank_ids, data, sizes, cmd, &resp)) {
+  if (!worker_node_.Send(core::NodeRole::SERVER, rank_ids, data, sizes, LongToInt(cmd), &resp)) {
     MS_LOG(ERROR) << "Worker send failed!";
   }
   int64_t single_id_len = SizeToLong(lookup_result->size() / lookup_ids.size());

@@ -391,7 +391,7 @@ void Worker::UpdateEmbeddingTable(const std::vector<Key> &keys, const std::vecto
       sizes.push_back(kv_data.length());
     }
   }
-  worker_node_.Send(core::NodeRole::SERVER, rank_ids, data, sizes, kUpdateEmbeddingsCmd);
+  worker_node_.Send(core::NodeRole::SERVER, rank_ids, data, sizes, LongToInt(kUpdateEmbeddingsCmd));
 }
 
 void Worker::Finalize() {

@@ -844,11 +844,11 @@ void Worker::RoundRobinPartitioner(const KVMessage &send, PartitionKVMessages *p
   for (int i = 0; i < send.keys_size(); i++) {
     param_key = keys[i];
     int64_t server_id = key_to_server_id_[param_key];
-    if (!partition->at(server_id).first) {
-      partition->at(server_id).first = true;
+    if (!partition->at(LongToUlong(server_id)).first) {
+      partition->at(LongToUlong(server_id)).first = true;
     }
 
-    KVMessage &server_kv_pairs = partition->at(server_id).second;
+    KVMessage &server_kv_pairs = partition->at(LongToUlong(server_id)).second;
     server_kv_pairs.add_keys(param_key);
     if (values.empty()) {
       continue;

@@ -865,7 +865,7 @@ void Worker::RoundRobinPartitioner(const KVMessage &send, PartitionKVMessages *p
 }
 
 void Worker::WorkerInitEmbeddingPartitioner(const KVMessage &send, std::vector<std::pair<bool, KVMessage>> *partition,
-                                            const std::map<int64_t, int64_t> &attrs) {
+                                            const std::map<int64_t, int64_t> &) {
   MS_EXCEPTION_IF_NULL(partition);
   partition->resize(LongToSize(server_num_));
   auto keys = send.keys();

@@ -890,8 +890,8 @@ void Worker::UpdateEmbeddingPartitioner(const KVMessage &send, PartitionKVMessag
   MS_EXCEPTION_IF_NULL(partition);
   const float *embedding_vals = send.values().data();
   const uint64_t *lookup_ids = send.len().data();
-  size_t val_size = send.values_size();
-  size_t id_size = send.len_size();
+  size_t val_size = IntToSize(send.values_size());
+  size_t id_size = IntToSize(send.len_size());
   size_t embedding_dim = val_size / id_size;
 
   const Key &key = send.keys()[0];
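The RoundRobinPartitioner change addresses a sign-conversion warning: `std::vector::at` takes an unsigned `size_type`, while `server_id` is `int64_t`, so indexing with the signed value triggers the diagnostic. A hedged stand-in for `LongToUlong` (the real helper may differ in name and error handling):

```cpp
#include <cstdint>
#include <stdexcept>
#include <string>
#include <vector>

// Hypothetical stand-in for LongToUlong: convert a signed index to the
// unsigned type std::vector::at expects, rejecting negative values instead
// of letting an implicit conversion wrap them around to huge indices.
inline uint64_t LongToUlongSketch(int64_t v) {
  if (v < 0) {
    throw std::out_of_range("negative index " + std::to_string(v));
  }
  return static_cast<uint64_t>(v);
}

int main() {
  std::vector<int> servers(4);
  int64_t server_id = 2;
  servers.at(LongToUlongSketch(server_id)) = 1;  // lint-clean unsigned index
  return 0;
}
```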