diff --git a/mindspore/lite/src/common/ops/populate/all_gather.cc b/mindspore/lite/src/common/ops/populate/all_gather.cc
index cbbb37f570c..ea8c1f1e8b1 100644
--- a/mindspore/lite/src/common/ops/populate/all_gather.cc
+++ b/mindspore/lite/src/common/ops/populate/all_gather.cc
@@ -45,8 +45,8 @@ OpParameter *PopulateAllGatherParameter(const void *prim) {
     MS_LOG(ERROR) << "Malloc AllGatherParameter failed.";
     return nullptr;
   }
-  memset(param, 0, sizeof(AllGatherParameter));
-  memcpy(param->group_, value->group()->c_str(), value->group()->size());
+  (void)memset(param, 0, sizeof(AllGatherParameter));
+  (void)memcpy(param->group_, value->group()->c_str(), value->group()->size());
   param->rank_size_ = value->rank_size();
   param->op_parameter_.type_ = primitive->value_type();
diff --git a/mindspore/lite/src/common/ops/populate/random_standard_normal_populate.cc b/mindspore/lite/src/common/ops/populate/random_standard_normal_populate.cc
index 8d5b8fef699..f432ae45af3 100644
--- a/mindspore/lite/src/common/ops/populate/random_standard_normal_populate.cc
+++ b/mindspore/lite/src/common/ops/populate/random_standard_normal_populate.cc
@@ -35,7 +35,7 @@ OpParameter *PopulateRandomStandardNormalParameter(const void *prim) {
     MS_LOG(ERROR) << "malloc RandomParam failed.";
     return nullptr;
   }
-  memset(param, 0, sizeof(RandomNormalParam));
+  (void)memset(param, 0, sizeof(RandomNormalParam));
   param->op_parameter_.type_ = primitive->value_type();
 
   if (value->seed2() != 0) {
diff --git a/mindspore/lite/src/common/ops/populate/reduce_scatter.cc b/mindspore/lite/src/common/ops/populate/reduce_scatter.cc
index 5e666f1c302..1e02e6e8c0d 100644
--- a/mindspore/lite/src/common/ops/populate/reduce_scatter.cc
+++ b/mindspore/lite/src/common/ops/populate/reduce_scatter.cc
@@ -46,8 +46,8 @@ OpParameter *PopulateReduceScatterParameter(const void *prim) {
     MS_LOG(ERROR) << "Malloc ReduceScatterParameter failed.";
     return nullptr;
   }
-  memset(param, 0, sizeof(ReduceScatterParameter));
-  memcpy(param->group_, value->group()->c_str(), value->group()->size());
+  (void)memset(param, 0, sizeof(ReduceScatterParameter));
+  (void)memcpy(param->group_, value->group()->c_str(), value->group()->size());
   param->rank_size_ = value->rank_size();
   param->mode_ = value->mode();
   param->op_parameter_.type_ = primitive->value_type();
diff --git a/mindspore/lite/src/control_flow/actor/entrance_actor.cc b/mindspore/lite/src/control_flow/actor/entrance_actor.cc
index 6af934a6191..2370266ea91 100644
--- a/mindspore/lite/src/control_flow/actor/entrance_actor.cc
+++ b/mindspore/lite/src/control_flow/actor/entrance_actor.cc
@@ -40,7 +40,7 @@ void LiteEntranceOpActor::RunOpData(OpData *inputs, OpContext *c
     inputs_data_[item->index_] = item->data_;
   }
 
-  InitInputData();
+  (void)InitInputData();
   input_actor_id_data_[inputs->op_id_].clear();
   AsyncOutput(context);
   SetOutputData(context);
diff --git a/mindspore/lite/src/control_flow/actor/exit_actor.cc b/mindspore/lite/src/control_flow/actor/exit_actor.cc
index 816dbceacd3..9d2a37579b3 100644
--- a/mindspore/lite/src/control_flow/actor/exit_actor.cc
+++ b/mindspore/lite/src/control_flow/actor/exit_actor.cc
@@ -34,7 +34,7 @@ void LiteExitOpActor::RunOpData(OpData *inputs, OpContext *conte
   }
 
   auto ret = InitInputData();
-  input_op_datas_.erase(op_uuid);
+  (void)input_op_datas_.erase(op_uuid);
   if (ret != RET_OK) {
     context->SetFailed(ret);
     return;
@@ -72,7 +72,7 @@ int LiteExitOpActor::SetInputShape() {
   return RET_OK;
 }
 
-void LiteExitOpActor::SetEntranceInputAID(OpData *inputs) {
+void LiteExitOpActor::SetEntranceInputAID(const OpData *inputs) {
   if (inputs->index_ == kEntranceTensorIndex) {
     entrance_input_aid_ = inputs->op_id_;
   }
@@ -185,7 +185,7 @@ int LiteExitOpActor::CreateMappingInfo() {
       return RET_ERROR;
     }
     MappingInfo info(partial, call_node);
-    all_mapping_info_.emplace_back(info);
+    (void)all_mapping_info_.emplace_back(info);
   }
   return RET_OK;
 }
diff --git a/mindspore/lite/src/control_flow/actor/exit_actor.h b/mindspore/lite/src/control_flow/actor/exit_actor.h
index d507bbe0b6e..591090cdda9 100644
--- a/mindspore/lite/src/control_flow/actor/exit_actor.h
+++ b/mindspore/lite/src/control_flow/actor/exit_actor.h
@@ -49,7 +49,7 @@ class LiteExitOpActor : public LiteOpActor {
   int CreateMappingInfo();
   int RecordCallNodeOutputActor(std::vector> *actors);
   void RecordPartialNodeInputActor();
-  void SetEntranceInputAID(OpData *inputs);
+  void SetEntranceInputAID(const OpData *inputs);
   bool IsSubSet(const std::vector &all_set, const std::vector &sub_set);
 
   std::vector> *actors_{};
diff --git a/mindspore/lite/src/control_flow/actor/switch_actor.cc b/mindspore/lite/src/control_flow/actor/switch_actor.cc
index d3774b2a3e7..24a215858f8 100644
--- a/mindspore/lite/src/control_flow/actor/switch_actor.cc
+++ b/mindspore/lite/src/control_flow/actor/switch_actor.cc
@@ -145,7 +145,11 @@ int LiteSwitchOpActor::CompileArrow(const std::unordered_mapout_tensors().empty()) {
-    CompileArrowThroughOutputTensors(receivers_map);
+    ret = CompileArrowThroughOutputTensors(receivers_map);
+    if (ret != RET_OK) {
+      MS_LOG(ERROR) << "CompileArrowThroughOutputTensors failed.";
+      return ret;
+    }
   }
 
   AppendOutputTensors();
@@ -282,7 +286,7 @@ void LiteSwitchOpActor::RunOpData(OpData *inputs, OpContext *con
   auto ret = InitInputData();
   if (ret != RET_OK) {
-    input_op_datas_.erase(op_uuid);
+    (void)input_op_datas_.erase(op_uuid);
     context->SetFailed(ret);
     return;
   }
diff --git a/mindspore/lite/src/control_flow/actor/switch_actor.h b/mindspore/lite/src/control_flow/actor/switch_actor.h
index c4408c315b5..59b966d7d63 100644
--- a/mindspore/lite/src/control_flow/actor/switch_actor.h
+++ b/mindspore/lite/src/control_flow/actor/switch_actor.h
@@ -37,17 +37,17 @@ class LiteSwitchOpActor : public LiteOpActor {
   };
   void RunOpData(OpData *inputs, OpContext *context = nullptr) override;
   int CompileArrow(const std::unordered_map>> &receivers_map) override;
-  int PrepareOutputData() override;
   std::set GetPartialKernels() const override {
     std::set ret{};
     for (auto &item : partial_nodes_) {
-      ret.insert(item);
+      (void)ret.insert(item);
     }
     return ret;
   }
 
 protected:
   int UpdateActorOutput() override;
+  int PrepareOutputData() override;
 
 private:
   STATUS AsyncBranchOutput(const size_t &index, OpContext *context);
diff --git a/mindspore/lite/src/control_flow/control_flow_scheduler.cc b/mindspore/lite/src/control_flow/control_flow_scheduler.cc
index 72c7f8c1bb4..ddc153e43c6 100644
--- a/mindspore/lite/src/control_flow/control_flow_scheduler.cc
+++ b/mindspore/lite/src/control_flow/control_flow_scheduler.cc
@@ -111,7 +111,7 @@ std::set ControlFlowScheduler::GetNonTailCallSubGraphs(
     if (!kernel::KernelExecUtil::IsNonTailCallSubGraph(subgraph_kernel)) {
       continue;
     }
-    non_tail_subgraph_kernels.insert(kernel);
+    (void)non_tail_subgraph_kernels.insert(kernel);
   }
   return non_tail_subgraph_kernels;
 }
@@ -135,7 +135,7 @@ int ControlFlowScheduler::AdjustNodesForTailCallSubGraph(std::vectorbegin(), first_part_nodes->end(), is_need);
   MS_CHECK_TRUE_MSG(iter != first_part_nodes->end(), RET_ERROR, "graph is not right");
   second_part_nodes->insert(second_part_nodes->begin(), *iter);
@@ -225,7 +225,7 @@ void ControlFlowScheduler::AppendToProcessQ(std::vector *n
   for (auto &item : new_non_tail_call_subgraphs) {
     if (all_non_tail_subgraphs->find(item) == all_non_tail_subgraphs->end()) {
       to_process_q_.push(item);
-      all_non_tail_subgraphs->insert(item);
+      (void)all_non_tail_subgraphs->insert(item);
     }
   }
   return;
@@ -287,7 +287,7 @@ int ControlFlowScheduler::RecordAllNonTailCallLinkInfo(std::vector tmp_set{partial_node};
-    more_than_once_called_partial_nodes_.insert(
+    (void)more_than_once_called_partial_nodes_.insert(
       std::pair>{subgraph_index, tmp_set});
   } else {
     more_than_once_called_partial_nodes_[subgraph_index].insert(partial_node);
@@ -344,7 +344,7 @@ kernel::SubGraphKernel *ControlFlowScheduler::CreateExitSubGraph(kernel::SubGrap
     }
     src_tensors_->push_back(new_tensor);
     new_output_tensors.push_back(new_tensor);
-    kernel::KernelExecUtil::ReplaceSubGraphNodesOutTensor(subgraph, old_tensor, new_tensor);
+    (void)kernel::KernelExecUtil::ReplaceSubGraphNodesOutTensor(subgraph, old_tensor, new_tensor);
     subgraph->set_out_tensor(new_tensor, i);
   }
   auto exit_subgraph = kernel::KernelExecUtil::CreateSubGraphKernel({}, &new_output_tensors, &old_output_tensors,
@@ -374,11 +374,12 @@ kernel::SubGraphKernel *ControlFlowScheduler::AddOutputKernel(kernel::SubGraphKe
     }
     src_tensors_->push_back(new_tensor);
     new_output_tensors.push_back(new_tensor);
-    kernel::KernelExecUtil::ReplaceSubGraphNodesOutTensor(subgraph, old_tensor, new_tensor);
+    (void)kernel::KernelExecUtil::ReplaceSubGraphNodesOutTensor(subgraph, old_tensor, new_tensor);
     call_node->set_out_tensor(new_tensor, i);
     context_->ReplaceLinkInfoReceiverWithNewOne(new_tensor, old_tensor);
   }
   auto output_node = kernel::IdentityKernel::Create(new_output_tensors, old_output_tensors, this->context_);
+  MS_CHECK_FALSE_MSG(output_node == nullptr, nullptr, "Create Identity failed.");
   output_node->set_name(call_node->name() + "_output");
   kernel::KernelKey output_desc = call_node->desc();
   output_desc.type = PrimType_Inner_Identity;
@@ -546,7 +547,7 @@ int ControlFlowScheduler::RecordTailCallLinkInfo(kernel::KernelExec *tail_call)
     return ret;
   }
 
-  if (std::any_of(final_graphs.begin(), final_graphs.end(), [&tail_call](kernel::KernelExec *item) {
+  if (std::any_of(final_graphs.begin(), final_graphs.end(), [&tail_call](const kernel::KernelExec *item) {
         return item->out_tensors().size() != tail_call->out_tensors().size();
       })) {
     MS_LOG(DEBUG) << "not is mindir model, return ok.";
@@ -603,6 +604,7 @@ kernel::SubGraphKernel *ControlFlowScheduler::IsolatePartialInputs(kernel::SubGr
     new_partial_inputs.push_back(new_tensor);
   }
   auto identity_node = kernel::IdentityKernel::Create(old_partial_inputs, new_partial_inputs, this->context_);
+  MS_CHECK_TRUE_MSG(identity_node != nullptr, nullptr, "Create Identity kernel failed.");
   identity_node->set_name(partial->name() + "_input_identity");
   kernel::KernelKey identity_desc = partial->desc();
   identity_desc.type = PrimType_Inner_Identity;
@@ -617,7 +619,7 @@ kernel::SubGraphKernel *ControlFlowScheduler::IsolatePartialInputs(kernel::SubGr
   identity_node->AddOutKernel(partial);
   partial->set_in_kernels({identity_node});
   auto partial_iter = std::find(nodes.begin(), nodes.end(), partial);
-  nodes.insert(partial_iter, identity_node);
+  (void)nodes.insert(partial_iter, identity_node);
   auto subgraph_type = subgraph->subgraph_type();
   auto new_subgraph = kernel::KernelExecUtil::CreateSubGraphKernel(nodes, &inputs, &outputs, subgraph_type, *context_,
                                                                    schema_version_);
@@ -715,7 +717,7 @@ int ControlFlowScheduler::IsolateInputOfMultipleCalledGraph(std::vector *src_tensors)
+  ControlFlowScheduler(InnerContext *ctx, const mindspore::Context *, std::vector *src_tensors)
       : context_(ctx), src_tensors_(src_tensors) {}
   ~ControlFlowScheduler() = default;
   int Schedule(std::vector *dst_kernels);
diff --git a/mindspore/lite/src/control_flow/kernel/exit_subgraph_kernel.cc b/mindspore/lite/src/control_flow/kernel/exit_subgraph_kernel.cc
index ba438cd7dac..9ef5ce2edbc 100644
--- a/mindspore/lite/src/control_flow/kernel/exit_subgraph_kernel.cc
+++ b/mindspore/lite/src/control_flow/kernel/exit_subgraph_kernel.cc
@@ -18,7 +18,7 @@
 #include "src/tensor.h"
 
 namespace mindspore::kernel {
-int ExitSubGraphKernel::Execute(const KernelCallBack &before, const KernelCallBack &after) { return lite::RET_OK; }
+int ExitSubGraphKernel::Execute(const KernelCallBack &, const KernelCallBack &) { return lite::RET_OK; }
 
 SubGraphKernel *ExitSubGraphKernel::Create(Kernel *kernel) {
   auto sub_kernel = new kernel::ExitSubGraphKernel(kernel);
@@ -29,5 +29,5 @@ SubGraphKernel *ExitSubGraphKernel::Create(Kernel *kernel) {
   return sub_kernel;
 }
 
-void ExitSubGraphKernel::SetPartial(kernel::KernelExec *partial_node) { partials_.insert(partial_node); }
+void ExitSubGraphKernel::SetPartial(kernel::KernelExec *partial_node) { (void)partials_.insert(partial_node); }
 }  // namespace mindspore::kernel
diff --git a/mindspore/lite/src/litert/kernel/cpu/base/reduce_base.cc b/mindspore/lite/src/litert/kernel/cpu/base/reduce_base.cc
index fad32bb5f39..f187a09f736 100644
--- a/mindspore/lite/src/litert/kernel/cpu/base/reduce_base.cc
+++ b/mindspore/lite/src/litert/kernel/cpu/base/reduce_base.cc
@@ -119,11 +119,11 @@ int ReduceBaseCPUKernel::Prepare() {
       }
     } else {
       MS_CHECK_FALSE(axes_tensor->Size() == 0, RET_ERROR);
-      memcpy(axes_, axes_tensor->data(), axes_tensor->Size());
+      (void)memcpy(axes_, axes_tensor->data(), axes_tensor->Size());
     }
   } else {
     num_axes_ = reduce_param->num_axes_;
-    memcpy(axes_, reduce_param->axes_, sizeof(reduce_param->axes_));
+    (void)memcpy(axes_, reduce_param->axes_, sizeof(reduce_param->axes_));
   }
 
   mode_ = reduce_param->mode_;
diff --git a/mindspore/lite/src/litert/kernel/cpu/base/select.cc b/mindspore/lite/src/litert/kernel/cpu/base/select.cc
index f3ba5c0fbf1..6c84d7dab0f 100644
--- a/mindspore/lite/src/litert/kernel/cpu/base/select.cc
+++ b/mindspore/lite/src/litert/kernel/cpu/base/select.cc
@@ -65,8 +65,7 @@ int MoveTensorListData(lite::TensorList *dst_tensorlist, lite::TensorList *src_t
     MS_LOG(ERROR) << "CopyTensorListTensorDataType failed.";
     return ret;
   }
-  lite::MoveTensorListTensorData(dst_tensorlist, src_tensorlist);
-  return RET_OK;
+  return lite::MoveTensorListTensorData(dst_tensorlist, src_tensorlist);
 }
 
 int MoveData(const std::vector::iterator &dst_begin,
diff --git a/mindspore/lite/src/litert/kernel_exec_util.cc b/mindspore/lite/src/litert/kernel_exec_util.cc
index 9b5db45524c..43984a52c84 100644
--- a/mindspore/lite/src/litert/kernel_exec_util.cc
+++ b/mindspore/lite/src/litert/kernel_exec_util.cc
@@ -36,7 +36,7 @@ std::set KernelExecUtil::AllOutTensor(const std::vector all_out_tensors{};
   for (const auto &kernel_in_subgraph : kernels) {
     for (auto *tensor : kernel_in_subgraph->out_tensors()) {
-      all_out_tensors.insert(tensor);
+      (void)all_out_tensors.insert(tensor);
     }
   }
   return all_out_tensors;
@@ -260,7 +260,7 @@ void KernelExecUtil::FindAllInoutKernelsInSubgraphKernel(const std::vector(kernel);
     MS_ASSERT(sub_graph != nullptr);
     auto kernel_in_subgraph = sub_graph->nodes();
-    all_kernels.insert(all_kernels.end(), kernel_in_subgraph.begin(), kernel_in_subgraph.end());
+    (void)all_kernels.insert(all_kernels.end(), kernel_in_subgraph.begin(), kernel_in_subgraph.end());
   }
   KernelExecUtil::FindAllInoutKernels(all_kernels);
@@ -311,7 +311,7 @@ int KernelExecUtil::SetKernelTensorDataType(const kernel::KernelExec *kernel) {
   return RET_OK;
 }
 
-bool KernelExecUtil::IsOutputSubGraph(KernelExec *subgraph_kernel) {
+bool KernelExecUtil::IsOutputSubGraph(const KernelExec *subgraph_kernel) {
   return !subgraph_kernel->out_tensors().empty() &&
          std::all_of(subgraph_kernel->out_tensors().begin(), subgraph_kernel->out_tensors().end(),
                      [](lite::Tensor *tensor) { return tensor->IsGraphOutput(); });
@@ -456,7 +456,7 @@ SubGraphKernel *KernelExecUtil::BelongToWhichSubGraph(const std::vectornodes().begin(), subgraph->nodes().end(),
-                    [&kernel](KernelExec *node) { return node == kernel; })) {
+                    [&kernel](const KernelExec *node) { return node == kernel; })) {
       return subgraph;
     }
   }
@@ -483,12 +483,12 @@ bool KernelExecUtil::IsSwitchTypeCall(KernelExec *kernel) {
   return false;
 }
 
-bool KernelExecUtil::IsNonTailCall(KernelExec *node) {
+bool KernelExecUtil::IsNonTailCall(const KernelExec *node) {
   return node->type() == schema::PrimitiveType_Call &&
          !(reinterpret_cast(node->op_parameter())->is_tail_call);
 }
 
-bool KernelExecUtil::IsTailCall(KernelExec *node) {
+bool KernelExecUtil::IsTailCall(const KernelExec *node) {
   return node->type() == schema::PrimitiveType_Call &&
          (reinterpret_cast(node->op_parameter())->is_tail_call);
 }
@@ -574,7 +574,7 @@ std::vector KernelExecUtil::GetCallInputPartialsCorrespondingOutpu
   return all_subgraphs;
 }
 
-KernelExec *KernelExecUtil::GetPartialOutputCall(KernelExec *partial_node) {
+KernelExec *KernelExecUtil::GetPartialOutputCall(const KernelExec *partial_node) {
   if (partial_node->type() != schema::PrimitiveType_PartialFusion) {
     MS_LOG(ERROR) << "input node is not partial node.";
     return nullptr;
@@ -620,9 +620,9 @@ KernelExec *KernelExecUtil::GetPartialOutputCall(KernelExec *partial_node) {
 bool KernelExecUtil::IsSwitchTypeCall(KernelExec *kernel) { return false; }
 
-bool KernelExecUtil::IsNonTailCall(KernelExec *node) { return false; }
+bool KernelExecUtil::IsNonTailCall(const KernelExec *node) { return false; }
 
-bool KernelExecUtil::IsTailCall(KernelExec *node) { return false; }
+bool KernelExecUtil::IsTailCall(const KernelExec *node) { return false; }
 
 bool KernelExecUtil::IsNonTailCallSubGraph(KernelExec *kernel) { return false; }
@@ -634,7 +634,7 @@ std::vector KernelExecUtil::GetCallInputPartialsCorrespondingOutpu
   return {};
 }
 
-KernelExec *KernelExecUtil::GetPartialOutputCall(KernelExec *partial_node) { return nullptr; }
+KernelExec *KernelExecUtil::GetPartialOutputCall(const KernelExec *partial_node) { return nullptr; }
 #endif
 }  // namespace mindspore::kernel
diff --git a/mindspore/lite/src/litert/kernel_exec_util.h b/mindspore/lite/src/litert/kernel_exec_util.h
index 7d7d02dcf44..ed2f94bd2ed 100644
--- a/mindspore/lite/src/litert/kernel_exec_util.h
+++ b/mindspore/lite/src/litert/kernel_exec_util.h
@@ -33,10 +33,10 @@ class KernelExecUtil {
   static int TopologicalSortKernels(std::vector *kernels);
   static void InitTensorInitRefCount(const std::vector &kernels);
   static bool IsSwitchTypeCall(KernelExec *kernel);
-  static bool IsNonTailCall(KernelExec *node);
-  static bool IsTailCall(KernelExec *node);
+  static bool IsNonTailCall(const KernelExec *node);
+  static bool IsTailCall(const KernelExec *node);
   static std::vector GetCallInputPartials(KernelExec *call_node);
-  static KernelExec *GetPartialOutputCall(KernelExec *partial_node);
+  static KernelExec *GetPartialOutputCall(const KernelExec *partial_node);
   static bool IsNonTailCallSubGraph(KernelExec *kernel);
   static bool IsTailCallSubGraph(KernelExec *kernel);
   static std::vector GetCallInputPartialsCorrespondingOutputSubgraph(KernelExec *call_node);
@@ -55,7 +55,7 @@ class KernelExecUtil {
   static int ReplaceSubGraphNodesInTensor(KernelExec *kernel, const lite::Tensor *old_tensor, lite::Tensor *new_tensor);
   static int ReplaceSubGraphNodesOutTensor(KernelExec *kernel, const lite::Tensor *old_tensor, lite::Tensor *new_tensor);
-  static bool IsOutputSubGraph(KernelExec *subgraph_kernel);
+  static bool IsOutputSubGraph(const KernelExec *subgraph_kernel);
   static SubGraphKernel *BelongToWhichSubGraph(const std::vector &subgraphs, KernelExec *kernel);
 
  private:
diff --git a/mindspore/lite/src/litert/lite_mindrt.cc b/mindspore/lite/src/litert/lite_mindrt.cc
index 02804f4a797..79ebd7a46b2 100644
--- a/mindspore/lite/src/litert/lite_mindrt.cc
+++ b/mindspore/lite/src/litert/lite_mindrt.cc
@@ -221,8 +221,8 @@ int LiteOpActor::UpdateActorOutput() {
     return RET_ERROR;
   }
   partial_node_ = partial_nodes.front();
-  std::copy(partial_node_->in_tensors().begin(), partial_node_->in_tensors().end(),
-            std::back_inserter(origin_output_tensors));
+  (void)std::copy(partial_node_->in_tensors().begin(), partial_node_->in_tensors().end(),
+                  std::back_inserter(origin_output_tensors));
 
   kernel_->set_out_tensors(origin_output_tensors);
 
@@ -381,7 +381,7 @@ int LiteOpActor::InitInputData() {
     auto subgraph_kernel = reinterpret_cast(kernel_);
     ret = subgraph_kernel->ReSize();
     MS_CHECK_FALSE_MSG(ret != RET_OK, ret, "Subgraph kernel Resize failed.");
-    subgraph_kernel->MallocNodesOutputSpace();
+    ret = subgraph_kernel->MallocNodesOutputSpace();
     MS_CHECK_FALSE_MSG(ret != RET_OK, ret, "Subgraph kernel MallocSubgraphInputs failed.");
   }
   return RET_OK;
 }
diff --git a/mindspore/lite/src/litert/lite_mindrt.h b/mindspore/lite/src/litert/lite_mindrt.h
index 71334e63bed..9d1f625899b 100644
--- a/mindspore/lite/src/litert/lite_mindrt.h
+++ b/mindspore/lite/src/litert/lite_mindrt.h
@@ -65,7 +65,7 @@ class LiteOpActor : public OpActor {
  public:
   void AddResultIndex(size_t index);
-  const kernel::KernelExec *GetKernel() { return kernel_; }
+  const kernel::KernelExec *GetKernel() const { return kernel_; }
 
   // call this function after CompileArrow
   virtual std::set GetPartialKernels() const {
     if (partial_node_ == nullptr) {
diff --git a/mindspore/lite/src/litert/lite_session.cc b/mindspore/lite/src/litert/lite_session.cc
index b95af662e85..1d67a04ba84 100644
--- a/mindspore/lite/src/litert/lite_session.cc
+++ b/mindspore/lite/src/litert/lite_session.cc
@@ -682,13 +682,12 @@ int LiteSession::SetTensorInitRefCount(const Model *model) {
   }
 
   if (!non_tail_call_kernels_.empty()) {
-    return SetNonTaiCallSubgraphOutputInitRefCount(non_tail_call_kernels_);
+    return SetNonTaiCallSubgraphOutputInitRefCount();
   }
   return RET_OK;
 }
 
-int LiteSession::SetNonTaiCallSubgraphOutputInitRefCount(
-    const std::vector &non_tail_call_kernels) {
+int LiteSession::SetNonTaiCallSubgraphOutputInitRefCount() {
   for (auto call_kernel : non_tail_call_kernels_) {
     auto call_output = call_kernel->out_tensors();
     auto all_out_subgraphs = kernel::KernelExecUtil::GetCallInputPartialsCorrespondingOutputSubgraph(call_kernel);
diff --git a/mindspore/lite/src/litert/lite_session.h b/mindspore/lite/src/litert/lite_session.h
index 3b1c9d66f98..caaea953751 100644
--- a/mindspore/lite/src/litert/lite_session.h
+++ b/mindspore/lite/src/litert/lite_session.h
@@ -139,7 +139,7 @@ class LiteSession {
   int SetAllocatorForDelegateKernels(const kernel::KernelExec *kernel);
   int PrepareKernels(const Model *model);
   int SetTensorInitRefCount(const Model *model);
-  int SetNonTaiCallSubgraphOutputInitRefCount(const std::vector &non_tail_call_kernels);
+  int SetNonTaiCallSubgraphOutputInitRefCount();
   static int ReSizeKernels(
     const std::vector &kernels,
     const std::unordered_map &isolate_input_map = std::unordered_map());
diff --git a/mindspore/lite/src/litert/mindrt_executor.cc b/mindspore/lite/src/litert/mindrt_executor.cc
index f5dc417505b..33b6b8e964b 100644
--- a/mindspore/lite/src/litert/mindrt_executor.cc
+++ b/mindspore/lite/src/litert/mindrt_executor.cc
@@ -87,7 +87,7 @@ int MindrtExecutor::PrepareGraphInput(const std::vector &k
         MS_LOG(ERROR) << "new opdata failed.";
         return RET_NULL_PTR;
       }
-      input_data_.emplace_back(data);
+      (void)input_data_.emplace_back(data);
     }
   }
   return RET_OK;
@@ -124,7 +124,7 @@ int MindrtExecutor::PrepareGraphOutput(const std::vector &
         return RET_NULL_PTR;
       }
       op_actors_[j]->AddResultIndex(output_data_.size());
-      output_data_.emplace_back(data);
+      (void)output_data_.emplace_back(data);
     }
   }
 }
diff --git a/mindspore/lite/src/litert/scheduler.cc b/mindspore/lite/src/litert/scheduler.cc
index 6c7605a7ac9..b6b3719fa39 100644
--- a/mindspore/lite/src/litert/scheduler.cc
+++ b/mindspore/lite/src/litert/scheduler.cc
@@ -16,6 +16,7 @@
 #include "src/litert/scheduler.h"
 #include
+#include
 #include
 #include
 #include
@@ -528,7 +529,7 @@ int Scheduler::Schedule(std::vector *dst_kernels) {
   return RET_OK;
 }
 
-int Scheduler::CheckInputParam(std::vector *dst_kernels) {
+int Scheduler::CheckInputParam(const std::vector *dst_kernels) const {
   if (dst_kernels == nullptr) {
     return RET_ERROR;
   }
diff --git a/mindspore/lite/src/litert/scheduler.h b/mindspore/lite/src/litert/scheduler.h
index 22c1dc554ea..a1d0465f835 100644
--- a/mindspore/lite/src/litert/scheduler.h
+++ b/mindspore/lite/src/litert/scheduler.h
@@ -64,7 +64,7 @@ class Scheduler {
  private:
   bool CheckRunNCXPass();
   int SchedulePreProcess();
-  int CheckInputParam(std::vector *dst_kernels);
+  int CheckInputParam(const std::vector *dst_kernels) const;
   void FindNodeInoutTensors(const LiteGraph::Node &node, std::vector *inputs, std::vector *outputs);
   LiteGraph::Node *NodeInputIsPartial(const LiteGraph::Node *node);
   int InferPartialShape(const LiteGraph::Node *node);
diff --git a/mindspore/lite/src/tensorlist.cc b/mindspore/lite/src/tensorlist.cc
index a3088833cc4..d31e2595418 100644
--- a/mindspore/lite/src/tensorlist.cc
+++ b/mindspore/lite/src/tensorlist.cc
@@ -286,8 +286,8 @@ TensorList *TensorList::CopyTensorList(const TensorList &src, bool copy_data, Al
   result->set_tensor_name(src.tensor_name() + "_duplicate");
   auto src_tensor_dtype = src.tensors_data_type_;
   std::vector > tensor_shape{};
-  std::transform(src.tensors_.begin(), src.tensors_.end(), std::back_inserter(tensor_shape),
-                 [](const Tensor *tensor_item) { return tensor_item->shape(); });
+  (void)std::transform(src.tensors_.begin(), src.tensors_.end(), std::back_inserter(tensor_shape),
+                       [](const Tensor *tensor_item) { return tensor_item->shape(); });
 
   for (LiteQuantParam quant : src.quant_params()) {
     result->AddQuantParam(quant);
diff --git a/mindspore/lite/test/config_level0/cropped_size.cfg b/mindspore/lite/test/config_level0/cropped_size.cfg
index 9e9224c5ab9..befa5293668 100644
--- a/mindspore/lite/test/config_level0/cropped_size.cfg
+++ b/mindspore/lite/test/config_level0/cropped_size.cfg
@@ -1,2 +1,2 @@
 Note: This is the mindspore Lite inference framework size threshold. Offline review is required before modify this value!!!
-1100096
+1106192
diff --git a/mindspore/lite/test/config_level1/cropped_size.cfg b/mindspore/lite/test/config_level1/cropped_size.cfg
index 122131dd8a0..8e9efff7cbc 100644
--- a/mindspore/lite/test/config_level1/cropped_size.cfg
+++ b/mindspore/lite/test/config_level1/cropped_size.cfg
@@ -1,2 +1,2 @@
 Note: This is the mindspore Lite inference framework size threshold. Modifying this threshold requires meeting review.
-1100096
+1106192
diff --git a/mindspore/lite/tools/benchmark/benchmark_base.cc b/mindspore/lite/tools/benchmark/benchmark_base.cc
index b708562f340..a43283c63da 100644
--- a/mindspore/lite/tools/benchmark/benchmark_base.cc
+++ b/mindspore/lite/tools/benchmark/benchmark_base.cc
@@ -333,7 +333,7 @@ int BenchmarkBase::CheckDeviceTypeValid() {
   return RET_OK;
 }
 
-int BenchmarkBase::InitDumpConfigFromJson(char *path) {
+int BenchmarkBase::InitDumpConfigFromJson(const char *path) {
 #ifndef BENCHMARK_CLIP_JSON
   auto real_path = RealPath(path);
   std::ifstream ifs(real_path);
diff --git a/mindspore/lite/tools/benchmark/benchmark_base.h b/mindspore/lite/tools/benchmark/benchmark_base.h
index 48259058402..1ae22a5a807 100644
--- a/mindspore/lite/tools/benchmark/benchmark_base.h
+++ b/mindspore/lite/tools/benchmark/benchmark_base.h
@@ -241,7 +241,7 @@ class MS_API BenchmarkBase {
   int CompareStringData(const std::string &name, const std::vector &calib_strings,
                         const std::vector &output_strings);
 
-  int InitDumpConfigFromJson(char *path);
+  int InitDumpConfigFromJson(const char *path);
 
   int InitCallbackParameter();
 
@@ -450,7 +450,7 @@ class MS_API BenchmarkBase {
     MS_ASSERT(data != nullptr);
     size_t elements_num = size / sizeof(T);
     (void)std::generate_n(static_cast(data), elements_num,
-                          [&]() { return static_cast(distribution(random_engine_)); });
+                          [&, this]() { return static_cast(distribution(random_engine_)); });
 
   bool CheckShapeValid(const std::vector &calib_output_shape, const std::vector &real_output_shape) {
diff --git a/mindspore/lite/tools/benchmark/benchmark_unified_api.cc b/mindspore/lite/tools/benchmark/benchmark_unified_api.cc
index 31169d13665..398a476bbbf 100644
--- a/mindspore/lite/tools/benchmark/benchmark_unified_api.cc
+++ b/mindspore/lite/tools/benchmark/benchmark_unified_api.cc
@@ -862,27 +862,27 @@ int BenchmarkUnifiedApi::MarkPerformance() {
   if (flags_->time_profiling_) {
     const std::vector per_op_name = {"opName", "avg(ms)", "percent", "calledTimes", "opTotalTime"};
     const std::vector per_op_type = {"opType", "avg(ms)", "percent", "calledTimes", "opTotalTime"};
-    PrintResult(per_op_name, op_times_by_name_);
-    PrintResult(per_op_type, op_times_by_type_);
+    (void)PrintResult(per_op_name, op_times_by_name_);
+    (void)PrintResult(per_op_type, op_times_by_type_);
 #ifdef ENABLE_ARM64
   } else if (flags_->perf_profiling_) {
     if (flags_->perf_event_ == "CACHE") {
       const std::vector per_op_name = {"opName", "cache ref(k)", "cache ref(%)", "miss(k)", "miss(%)"};
       const std::vector per_op_type = {"opType", "cache ref(k)", "cache ref(%)", "miss(k)", "miss(%)"};
-      PrintPerfResult(per_op_name, op_perf_by_name_);
-      PrintPerfResult(per_op_type, op_perf_by_type_);
+      (void)PrintPerfResult(per_op_name, op_perf_by_name_);
+      (void)PrintPerfResult(per_op_type, op_perf_by_type_);
     } else if (flags_->perf_event_ == "STALL") {
       const std::vector per_op_name = {"opName", "frontend(k)", "frontend(%)", "backendend(k)",
                                        "backendend(%)"};
       const std::vector per_op_type = {"opType", "frontend(k)", "frontend(%)", "backendend(k)", "backendend(%)"};
-      PrintPerfResult(per_op_name, op_perf_by_name_);
-      PrintPerfResult(per_op_type, op_perf_by_type_);
+      (void)PrintPerfResult(per_op_name, op_perf_by_name_);
+      (void)PrintPerfResult(per_op_type, op_perf_by_type_);
     } else {
       const std::vector per_op_name = {"opName", "cycles(k)", "cycles(%)", "ins(k)", "ins(%)"};
       const std::vector per_op_type = {"opType", "cycles(k)", "cycles(%)", "ins(k)", "ins(%)"};
-      PrintPerfResult(per_op_name, op_perf_by_name_);
-      PrintPerfResult(per_op_type, op_perf_by_type_);
+      (void)PrintPerfResult(per_op_name, op_perf_by_name_);
+      (void)PrintPerfResult(per_op_type, op_perf_by_type_);
     }
 #endif
   }
diff --git a/mindspore/lite/tools/converter/import/mindir_control_flow_adjust.cc b/mindspore/lite/tools/converter/import/mindir_control_flow_adjust.cc
index abcddf75b05..361a8e2dbd5 100644
--- a/mindspore/lite/tools/converter/import/mindir_control_flow_adjust.cc
+++ b/mindspore/lite/tools/converter/import/mindir_control_flow_adjust.cc
@@ -201,7 +201,8 @@ int MindIRControlFlowAdjust::MoveCallInputsToPartialFusionInputs(const std::set<
     MS_ASSERT(call_first_input_cnode != nullptr);
     if (IsPartialFusion(call_first_input_cnode)) {
       auto partial_cnode_inputs = call_first_input_cnode->inputs();
-      std::copy(call_cnode_inputs.begin() + 1, call_cnode_inputs.end(), std::back_inserter(partial_cnode_inputs));
+      (void)std::copy(call_cnode_inputs.begin() + 1, call_cnode_inputs.end(),
+                      std::back_inserter(partial_cnode_inputs));
       call_first_input_cnode->set_inputs(partial_cnode_inputs);
     }
 
@@ -219,14 +220,14 @@ int MindIRControlFlowAdjust::MoveCallInputsToPartialFusionInputs(const std::set<
       auto true_partial_cnode = switch_cnode_inputs.at(kSwitchTruePartialIndex)->cast();
       auto true_partial_cnode_inputs = true_partial_cnode->inputs();
-      std::copy(call_cnode_inputs.begin() + 1, call_cnode_inputs.end(),
-                std::back_inserter(true_partial_cnode_inputs));
+      (void)std::copy(call_cnode_inputs.begin() + 1, call_cnode_inputs.end(),
+                      std::back_inserter(true_partial_cnode_inputs));
       true_partial_cnode->set_inputs(true_partial_cnode_inputs);
 
       auto false_partial_cnode = switch_cnode_inputs.at(kSwitchFalsePartialIndex)->cast();
       auto false_partial_cnode_inputs = false_partial_cnode->inputs();
-      std::copy(call_cnode_inputs.begin() + 1, call_cnode_inputs.end(),
-                std::back_inserter(false_partial_cnode_inputs));
+      (void)std::copy(call_cnode_inputs.begin() + 1, call_cnode_inputs.end(),
+                      std::back_inserter(false_partial_cnode_inputs));
       false_partial_cnode->set_inputs(false_partial_cnode_inputs);
     }
diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/infershape_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/infershape_pass.cc
index 317b1b334cd..969b06f1cdf 100644
--- a/mindspore/lite/tools/converter/legacy_optimizer/graph/infershape_pass.cc
+++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/infershape_pass.cc
@@ -516,7 +516,7 @@ int InferShapePass::InferSwitchOrSwitchLayerNode(const bool &is_tail_call, const
   for (auto &partial_node : all_partial_nodes) {
     if (partial_cnode_inferred_.find(partial_node) == partial_cnode_inferred_.end()) {
       to_process.push_back(partial_node);
-      partial_cnode_inferred_.insert(partial_node);
+      (void)partial_cnode_inferred_.insert(partial_node);
     }
   }
diff --git a/mindspore/lite/tools/mindir_exporter/mindir_serializer.cc b/mindspore/lite/tools/mindir_exporter/mindir_serializer.cc
index 31bcdd93b7c..3e7f79f8890 100644
--- a/mindspore/lite/tools/mindir_exporter/mindir_serializer.cc
+++ b/mindspore/lite/tools/mindir_exporter/mindir_serializer.cc
@@ -40,7 +40,7 @@ bool DeleteDirRecursively(const std::string &dir_name) {
   std::vector file_names{};
   while ((dirent = readdir(dir)) != 0) {
     if (strcmp(dirent->d_name, ".") != 0 && strcmp(dirent->d_name, "..") != 0) {
-      file_names.push_back(dirent->d_name);
+      file_names.emplace_back(dirent->d_name);
     }
   }
   for (auto &file_name : file_names) {
@@ -213,7 +213,10 @@ std::shared_ptr MindIRSerializer::GetFgParaAccordingToProtoName(const
 int MindIRSerializer::ChangeParaDataFile(const std::string &file) {
   auto real_path = CreateExternalPath(file);
   if (fs_->FileExist(real_path)) {
-    fs_->DeleteFile(real_path);
+    if (!fs_->DeleteFile(real_path)) {
+      MS_LOG(ERROR) << "delete file failed.";
+      return RET_ERROR;
+    }
   }
   ChangeFileMode(real_path, S_IWUSR);
   data_fs_ = OpenFile(real_path, std::ios::app);
@@ -223,18 +226,17 @@ int MindIRSerializer::ChangeParaDataFile(const std::string &file) {
   }
   char front_info[OFFSET]{0};
   front_info[0] = IsSystemLittleEndidan();
-  data_fs_->write(front_info, OFFSET);
+  (void)data_fs_->write(front_info, OFFSET);
   return RET_OK;
 }
 
-bool MindIRSerializer::IsSystemLittleEndidan() {
+bool MindIRSerializer::IsSystemLittleEndidan() const {
   int check = 0x01;
   auto address = reinterpret_cast(&check);
   return *address == 0x01;
 }
 
-int MindIRSerializer::GetDataFile(const std::string &data_file_name, std::ofstream *fout, int64_t *parameter_size,
-                                  int64_t *offset) {
+int MindIRSerializer::GetDataFile(const std::string &data_file_name, std::ofstream *fout, int64_t *, int64_t *offset) {
   if (offset == nullptr) {
     MS_LOG(ERROR) << "offset is nullptr.";
     return RET_NULL_PTR;
@@ -251,7 +253,7 @@ int MindIRSerializer::GetDataFile(const std::string &data_file_name, std::ofstre
   std::byte place_holder[OFFSET];
   fout = new std::ofstream;
-  fout->write(reinterpret_cast(place_holder), *offset);
+  (void)fout->write(reinterpret_cast(place_holder), *offset);
   return RET_OK;
 }
 
@@ -287,7 +289,7 @@ int MindIRSerializer::SplitSave() {
   data_fs_ = OpenFile(external_local_path, std::ios::out | std::ios::binary | std::ios::trunc);
   if (data_fs_ == nullptr) {
    MS_LOG(ERROR) << "Open " << external_local_path << " failed";
-    return false;
+    return RET_ERROR;
  }
   ret = ChangeParaDataFile(external_local);
   if (ret != RET_OK) {
diff --git a/mindspore/lite/tools/mindir_exporter/mindir_serializer.h b/mindspore/lite/tools/mindir_exporter/mindir_serializer.h
index 1167bb80113..c0bce6d7506 100644
--- a/mindspore/lite/tools/mindir_exporter/mindir_serializer.h
+++ b/mindspore/lite/tools/mindir_exporter/mindir_serializer.h
@@ -54,7 +54,7 @@ class MindIRSerializer {
   int CreateParameterDir();
   std::shared_ptr GetFgParaAccordingToProtoName(const std::string &proto_name);
   int ChangeParaDataFile(const std::string &file);
-  bool IsSystemLittleEndidan();
+  bool IsSystemLittleEndidan() const;
   int GetDataFile(const std::string &data_file_name, std::ofstream *fout, int64_t *parameter_size, int64_t *offset);
   std::string CreateExternalPath(const std::string &external_file);
   int RemoveQuantParameterHolder(FuncGraphPtr func_graph);
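The recurring change across the hunks above is return-value and const hygiene: results that are deliberately ignored (memset, memcpy, set insert, emplace_back, std::copy, std::transform) are cast to (void), read-only parameters and accessors gain const, and calls that can fail (IdentityKernel::Create, DeleteFile, MallocNodesOutputSpace, CompileArrowThroughOutputTensors) now have their return codes checked. The following is a minimal, self-contained C++ sketch of those conventions only; all names are hypothetical and none of this code belongs to the patch.

// Illustration of the conventions applied in this patch:
//  - cast intentionally discarded return values to (void),
//  - const-qualify read-only parameters,
//  - propagate error codes of fallible calls instead of dropping them.
#include <cstring>
#include <iostream>
#include <set>
#include <string>

namespace {
constexpr int kRetOk = 0;
constexpr int kRetError = -1;

struct DemoParameter {
  char group_[100];
  int rank_size_;
};

// Read-only access takes a const pointer, mirroring the const changes above.
bool IsConfigured(const DemoParameter *param) { return param != nullptr && param->rank_size_ > 0; }

int Populate(DemoParameter *param, const std::string &group, int rank_size) {
  if (param == nullptr || group.size() >= sizeof(param->group_)) {
    return kRetError;
  }
  // Return values of memset/memcpy are intentionally discarded.
  (void)memset(param, 0, sizeof(DemoParameter));
  (void)memcpy(param->group_, group.c_str(), group.size());
  param->rank_size_ = rank_size;
  return kRetOk;
}
}  // namespace

int main() {
  DemoParameter param;
  // Check the result of a call that can fail instead of ignoring it.
  if (Populate(&param, "demo_group", 2) != kRetOk) {
    std::cerr << "Populate failed." << std::endl;
    return kRetError;
  }
  std::set<std::string> groups;
  (void)groups.insert(param.group_);  // discard the pair<iterator, bool> result explicitly
  std::cout << "configured: " << std::boolalpha << IsConfigured(&param) << std::endl;
  return kRetOk;
}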