!39730 Fix static check warnings

Merge pull request !39730 from larryzhangyong/fix-code-check
i-robot 2022-08-04 23:33:44 +00:00 committed by Gitee
commit 65a3346daf
11 changed files with 24 additions and 21 deletions


@@ -60,7 +60,7 @@ bool CPUKernelRuntime::Init() {
 }
 const size_t INIT_NODE_REF = 1;
-void CPUKernelRuntime::AssignKernelGraphAddress(session::KernelGraph *kernel_graph) {
+void CPUKernelRuntime::AssignKernelGraphAddress(const session::KernelGraph *kernel_graph) {
   AssignValueNodeAddress(kernel_graph);
   AssignInputNodeAddress(kernel_graph);
   auto context_ptr = MsContext::GetInstance();


@@ -38,7 +38,7 @@ class CPUKernelRuntime : public KernelRuntime {
   bool Init();
   bool Run(const session::KernelGraph &graph, bool is_task_sink) override;
-  void AssignKernelGraphAddress(session::KernelGraph *kernel_graph);
+  void AssignKernelGraphAddress(const session::KernelGraph *kernel_graph);
   void CreateOutputTensors(session::KernelGraph *kernel_graph, const std::vector<tensor::TensorPtr> &inputs,
                            VectorRef *outputs, std::map<tensor::TensorPtr, session::KernelWithIndex> *tensor_to_node);
   void BindInputOutput(session::KernelGraph *kernel_graph, const std::vector<tensor::TensorPtr> &inputs,
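The two hunks above apply one fix to the definition and the declaration of AssignKernelGraphAddress: the function only reads through kernel_graph, so the const-correctness rule asks for a pointer-to-const. A minimal sketch of the rule, with a hypothetical stand-in for session::KernelGraph:

```cpp
#include <iostream>

// Hypothetical stand-in for session::KernelGraph.
struct KernelGraph {
  int node_count = 0;
};

// Pointer-to-const documents and enforces read-only access; the old
// signature (KernelGraph *graph) would be flagged because the body
// never writes through the pointer.
void PrintGraph(const KernelGraph *graph) {
  std::cout << "nodes: " << graph->node_count << '\n';
  // graph->node_count = 1;  // would no longer compile
}

int main() {
  KernelGraph g{42};
  PrintGraph(&g);  // KernelGraph * converts to const KernelGraph * implicitly
  return 0;
}
```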


@@ -41,7 +41,7 @@ bool AllReduceLauncher::Initialize() {
   const auto &cluster_ctx = distributed::cluster::ClusterContext::instance();
   MS_EXCEPTION_IF_NULL(cluster_ctx);
   node_role_ = cluster_ctx->node_role();
-  rank_size_ = IntToSize(cluster_ctx->node_num(cluster_ctx->node_role()));
+  rank_size_ = IntToSize(static_cast<int>(cluster_ctx->node_num(cluster_ctx->node_role())));
   return true;
 }
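The fix spells out the signedness conversion with static_cast<int> before IntToSize sees the value. A sketch of the pattern; this IntToSize is a hypothetical checked helper written for illustration, not MindSpore's actual utility:

```cpp
#include <cstddef>
#include <cstdint>
#include <stdexcept>

// Hypothetical checked int -> size_t helper.
std::size_t IntToSize(int v) {
  if (v < 0) {
    throw std::out_of_range("negative value cannot become size_t");
  }
  return static_cast<std::size_t>(v);
}

uint32_t node_num() { return 8; }  // assumed unsigned return type

int main() {
  // Flagged: implicit uint32_t -> int conversion at the call site.
  // std::size_t rank_size = IntToSize(node_num());

  // Fixed: the conversion is spelled out, so the checker (and the
  // reader) can see it is intentional.
  std::size_t rank_size = IntToSize(static_cast<int>(node_num()));
  return rank_size == 8 ? 0 : 1;
}
```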


@@ -43,7 +43,6 @@ class CPUDeviceResManager : public DeviceResManager {
   bool LoadCollectiveCommLib() override;
  protected:
   // Relevant function to allocate and free device memory of raw ptr.
   void *AllocateMemory(size_t size) const override;
   void FreeMemory(void *ptr) const override;
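The hunk header records one deleted line that this rendering does not show; the visible context is CPUDeviceResManager's const-qualified allocation interface. A small sketch of that override pattern, with simplified stand-in names for DeviceResManager:

```cpp
#include <cstddef>
#include <cstdlib>

// Simplified stand-in for the DeviceResManager interface.
class ResManagerBase {
 public:
  virtual ~ResManagerBase() = default;
  virtual void *AllocateMemory(std::size_t size) const = 0;
  virtual void FreeMemory(void *ptr) const = 0;
};

class CPUResManager : public ResManagerBase {
 public:
  // 'override' makes the compiler verify the whole signature,
  // including the trailing const, matches a base-class virtual.
  void *AllocateMemory(std::size_t size) const override { return std::malloc(size); }
  void FreeMemory(void *ptr) const override { std::free(ptr); }
};

int main() {
  CPUResManager mgr;
  void *p = mgr.AllocateMemory(64);
  mgr.FreeMemory(p);
  return 0;
}
```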


@@ -250,14 +250,18 @@ bool MsCollectiveCommLib::Broadcast(const void *send_buff, void *recv_buff, size
       return CollectiveOpsImpl::GetInstance().Broadcast<char>(send_buff, recv_buff, send_count, root_rank, node_,
                                                               group_info);
     case TypeId::kNumberTypeInt32:
-      [[fallthrough]] case TypeId::kNumberTypeInt : return CollectiveOpsImpl::GetInstance().Broadcast<int32_t>(
-        send_buff, recv_buff, send_count, root_rank, node_, group_info);
+      [[fallthrough]];
+    case TypeId::kNumberTypeInt:
+      return CollectiveOpsImpl::GetInstance().Broadcast<int32_t>(send_buff, recv_buff, send_count, root_rank, node_,
+                                                                 group_info);
     case TypeId::kNumberTypeUInt64:
       return CollectiveOpsImpl::GetInstance().Broadcast<uint64_t>(send_buff, recv_buff, send_count, root_rank, node_,
                                                                   group_info);
     case TypeId::kNumberTypeFloat32:
-      [[fallthrough]] case TypeId::kNumberTypeFloat : return CollectiveOpsImpl::GetInstance().Broadcast<float>(
-        send_buff, recv_buff, send_count, root_rank, node_, group_info);
+      [[fallthrough]];
+    case TypeId::kNumberTypeFloat:
+      return CollectiveOpsImpl::GetInstance().Broadcast<float>(send_buff, recv_buff, send_count, root_rank, node_,
+                                                               group_info);
     default:
       return false;
   }
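In the old code the attribute sat on the same line as the next case label, but the standard only permits [[fallthrough]] as a null statement (`[[fallthrough]];`) whose next statement is a case label; the fix restores that form. A compilable sketch of the corrected shape, with simplified stand-ins for TypeId and Broadcast<T>:

```cpp
#include <cstdint>
#include <iostream>

enum class TypeId { kInt32, kInt, kFloat32, kFloat };

// Hypothetical stand-in for CollectiveOpsImpl::Broadcast<T>.
template <typename T>
bool BroadcastAs(int value) {
  std::cout << "broadcast " << sizeof(T) << "-byte elements\n";
  return value >= 0;
}

bool Dispatch(TypeId type, int value) {
  switch (type) {
    case TypeId::kInt32:
      [[fallthrough]];  // explicit: kInt32 shares kInt's handler
    case TypeId::kInt:
      return BroadcastAs<int32_t>(value);
    case TypeId::kFloat32:
      [[fallthrough]];
    case TypeId::kFloat:
      return BroadcastAs<float>(value);
    default:
      return false;
  }
}

int main() { return Dispatch(TypeId::kInt32, 1) ? 0 : 1; }
```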


@@ -86,7 +86,7 @@ bool CollectiveNode::SynchronizeAddresses() {
     success = cgn_->PutMetadata(rank_id, address);
     if (!success) {
       MS_LOG(WARNING) << "Retry to register the address of rank " << rank_id << "...";
-      sleep(interval);
+      (void)sleep(interval);
     } else {
       MS_LOG(INFO) << "The address of rank " << rank_id << " has been registered successfully.";
       break;
@@ -116,7 +116,7 @@ bool CollectiveNode::SynchronizeAddresses() {
     } else {
       MS_LOG(INFO) << "Waiting for the address of rank " << other_rank_id << " to be registered, retry " << retry
                    << " times.";
-      sleep(interval);
+      (void)sleep(interval);
     }
     MsException::Instance().CheckException();
   }
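Both changes discard sleep()'s return value explicitly; sleep() reports the unslept seconds when interrupted by a signal, and the checker wants intentionally ignored results marked. A sketch of the idiom; TryConnect is a hypothetical [[nodiscard]] function added for illustration:

```cpp
#include <unistd.h>

#include <cstdio>

// Hypothetical [[nodiscard]] helper standing in for any call whose
// result must be consumed or explicitly discarded.
[[nodiscard]] int TryConnect() { return -1; }

int main() {
  if (TryConnect() != 0) {
    std::puts("connect failed, backing off before the retry");
    // The (void) cast tells the reader and the checker that dropping
    // the unslept-seconds result is deliberate.
    (void)sleep(0);
  }
  return 0;
}
```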


@@ -26,7 +26,7 @@ namespace ps {
 namespace core {
 class BACKEND_EXPORT CollectiveNode : public PSWorkerNode {
  public:
-  explicit CollectiveNode(std::shared_ptr<distributed::cluster::topology::ComputeGraphNode> cgn) : cgn_(cgn) {}
+  explicit CollectiveNode(const std::shared_ptr<distributed::cluster::topology::ComputeGraphNode> cgn) : cgn_(cgn) {}
   ~CollectiveNode() = default;
   bool Start(const uint32_t &timeout = PSContext::instance()->cluster_config().cluster_available_timeout) override;
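Here the rule is that a parameter the constructor never reassigns should be const, even when passed by value; the same one-word fix recurs below in MSCollectiveOpsImpl and TopologyNode. A sketch under that assumption, with ComputeGraphNode reduced to a stub:

```cpp
#include <memory>

struct ComputeGraphNode {  // simplified stub
  int rank = 0;
};

class CollectiveNodeSketch {
 public:
  // Top-level const on the by-value shared_ptr parameter: it does not
  // change the signature, only forbids reassignment inside the body.
  explicit CollectiveNodeSketch(const std::shared_ptr<ComputeGraphNode> cgn) : cgn_(cgn) {}

 private:
  std::shared_ptr<ComputeGraphNode> cgn_;
};

int main() {
  auto cgn = std::make_shared<ComputeGraphNode>();
  CollectiveNodeSketch node(cgn);  // copies the shared_ptr (refcount +1)
  return cgn.use_count() == 2 ? 0 : 1;
}
```

Passing `const std::shared_ptr<...> &` instead would satisfy the same rule without the extra reference-count traffic; the patch keeps by-value passing and adds only the const.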


@@ -31,7 +31,7 @@ const char kCollectivePhaseBroadcast[] = "broadcast";
 bool MSCollectiveOpsImpl::Initialize() {
   MS_EXCEPTION_IF_NULL(topo_node_);
-  rank_id_ = topo_node_->rank_id();
+  rank_id_ = static_cast<uint32_t>(topo_node_->rank_id());
   return true;
 }
@@ -83,7 +83,7 @@ bool MSCollectiveOpsImpl::RingAllGatherImpl(uint32_t send_to_rank, uint32_t recv
     size_t send_chunk_index = (rank_id_ - i + rank_size_) % rank_size_;
     T *send_chunk = output_buff + chunk_offset[send_chunk_index];
-    topo_node_->SendAsync(send_to_rank, send_chunk, chunk_sizes[send_chunk_index] * sizeof(T));
+    (void)topo_node_->SendAsync(send_to_rank, send_chunk, chunk_sizes[send_chunk_index] * sizeof(T));
     size_t recv_chunk_index = (rank_id_ - i - 1 + rank_size_) % rank_size_;
     T *recv_chunk = output_buff + chunk_offset[recv_chunk_index];
@@ -125,7 +125,7 @@ bool MSCollectiveOpsImpl::Broadcast(const void *sendbuff, void *recvbuff, size_t
   MS_ERROR_IF_NULL_W_RET_VAL(sendbuff, false);
   // Initialize collective communication parameters.
-  rank_id_ = topo_node_->rank_id();
+  rank_id_ = static_cast<uint32_t>(topo_node_->rank_id());
   rank_size_ = group_info.size;
   if (rank_size_ == 0) {
     MS_LOG(ERROR) << "Rank size should not be 0.";
@@ -151,7 +151,7 @@ bool MSCollectiveOpsImpl::Broadcast(const void *sendbuff, void *recvbuff, size_t
     uint32_t dst_rank = group_to_global_ranks[i];
     MS_LOG(DEBUG) << "Broadcast data to process " << dst_rank;
-    topo_node_->SendAsync(dst_rank, const_cast<void *>(sendbuff), count * sizeof(T));
+    (void)topo_node_->SendAsync(dst_rank, const_cast<void *>(sendbuff), count * sizeof(T));
     if (!topo_node_->WaitForSend(dst_rank)) {
       MS_LOG(ERROR) << "Failed to send data to rank: " << dst_rank;
       return false;
@@ -185,8 +185,8 @@ bool MSCollectiveOpsImpl::AllGather(const void *sendbuff, void *recvbuff, size_t
   MS_ERROR_IF_NULL_W_RET_VAL(sendbuff, false);
   // Initialize collective communication parameters.
-  rank_id_ = topo_node_->rank_id();
-  rank_size_ = topo_node_->rank_size();
+  rank_id_ = static_cast<uint32_t>(topo_node_->rank_id());
+  rank_size_ = static_cast<uint32_t>(topo_node_->rank_size());
   if (rank_size_ == 0) {
     MS_LOG(ERROR) << "Rank size should not be 0.";
     return false;
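This file repeats two earlier patterns: explicit static_cast for the rank id and rank size assignments, and (void) on the SendAsync results. One piece of untouched context deserves a note: the ring all-gather derives chunk indices as (rank_id_ - i + rank_size_) % rank_size_, where adding rank_size_ before the modulo keeps the unsigned subtraction mathematically non-negative. A self-contained sketch of that arithmetic:

```cpp
#include <cstdint>
#include <iostream>

int main() {
  const uint32_t rank_size = 3;  // deliberately not a power of two
  const uint32_t rank_id = 0;

  // Without "+ rank_size", rank_id - i would wrap for i > rank_id and
  // (for rank sizes that do not divide 2^32) the modulo would pick the
  // wrong chunk. Adding rank_size first keeps the true value >= 0.
  for (uint32_t i = 1; i < rank_size; ++i) {
    uint32_t send_chunk = (rank_id - i + rank_size) % rank_size;
    uint32_t recv_chunk = (rank_id - i - 1 + rank_size) % rank_size;
    std::cout << "step " << i << ": send chunk " << send_chunk
              << ", recv chunk " << recv_chunk << '\n';
  }
  return 0;
}
```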


@@ -53,7 +53,7 @@ struct CommunicationGroupInfo {
 // supported for the elastic scaling feature of the server.
 class MSCollectiveOpsImpl {
  public:
-  explicit MSCollectiveOpsImpl(std::shared_ptr<TopologyNode> topo_node)
+  explicit MSCollectiveOpsImpl(const std::shared_ptr<TopologyNode> topo_node)
       : rank_id_(0), rank_size_(0), topo_node_(topo_node) {}
   ~MSCollectiveOpsImpl() = default;


@@ -34,7 +34,7 @@ namespace device {
 namespace cpu {
 class TopologyNode {
  public:
-  TopologyNode(size_t total_node_num, std::shared_ptr<distributed::cluster::topology::ComputeGraphNode> cgn)
+  TopologyNode(size_t total_node_num, const std::shared_ptr<distributed::cluster::topology::ComputeGraphNode> cgn)
       : rank_id_(-1), total_node_num_(total_node_num), cgn_(cgn), initialized_(false) {}
   ~TopologyNode() = default;


@@ -182,7 +182,7 @@ class GraphExecutor {
   }
  protected:
-  DeviceContext *device_context_;
+  DeviceContext *device_context_{nullptr};
  private:
   template <class... Args>
@@ -212,7 +212,7 @@ class KernelExecutor {
   }
  protected:
-  DeviceContext *device_context_;
+  DeviceContext *device_context_{nullptr};
  private:
   template <class... Args>
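The last two hunks give device_context_ a default member initializer, so a GraphExecutor or KernelExecutor whose constructor never assigns it holds a well-defined null pointer instead of an indeterminate one. A minimal sketch of the effect, with names simplified:

```cpp
#include <iostream>

struct DeviceContext {
  const char *name = "cpu";
};

class ExecutorSketch {  // simplified stand-in for GraphExecutor
 public:
  bool Bound() const { return device_context_ != nullptr; }

 protected:
  // Without {nullptr} this pointer would be indeterminate after default
  // construction, and reading it (even to compare) would be undefined.
  DeviceContext *device_context_{nullptr};
};

int main() {
  ExecutorSketch executor;
  std::cout << (executor.Bound() ? "bound" : "unbound") << '\n';  // prints "unbound"
  return 0;
}
```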