From af0d31527930d0c706d986c348e6d3fb958a3669 Mon Sep 17 00:00:00 2001
From: baihuawei
Date: Wed, 1 Sep 2021 20:24:33 +0800
Subject: [PATCH] fix pclint and error log

---
 .../kernel_compiler/aicpu/aicpu_kernel_build.cc    | 16 +++++++---------
 .../ccsrc/backend/kernel_compiler/rts/assign.cc    |  2 +-
 .../backend/kernel_compiler/rts/memcpy_async.cc    | 13 +++++++------
 .../backend/kernel_compiler/rts/stream_active.cc   |  2 +-
 .../kernel_compiler/rts/tensor_copy_slices.cc      |  5 +++--
 .../optimizer/cpu/insert_format_transform_op.cc    |  2 +-
 .../ccsrc/backend/session/ascend_session.cc        |  9 ++++-----
 mindspore/ccsrc/backend/session/cpu_session.cc     |  5 +++--
 mindspore/ccsrc/pipeline/jit/pipeline.cc           |  2 +-
 .../ascend/distribute/ascend_collective.cc         |  3 ++-
 .../device/ascend/kernel_select_ascend.cc          |  2 +-
 .../runtime/device/cpu/cpu_device_address.cc       |  4 ++--
 .../runtime/device/cpu/cpu_kernel_runtime.cc       | 14 ++++++++++----
 .../runtime/device/cpu/cpu_memory_manager.cc       | 10 ++--------
 .../runtime/device/cpu/cpu_memory_manager.h        |  2 +-
 mindspore/parallel/_utils.py                       |  2 +-
 16 files changed, 47 insertions(+), 46 deletions(-)

diff --git a/mindspore/ccsrc/backend/kernel_compiler/aicpu/aicpu_kernel_build.cc b/mindspore/ccsrc/backend/kernel_compiler/aicpu/aicpu_kernel_build.cc
index 4aa38d2ff76..9152e412130 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/aicpu/aicpu_kernel_build.cc
+++ b/mindspore/ccsrc/backend/kernel_compiler/aicpu/aicpu_kernel_build.cc
@@ -282,11 +282,11 @@ bool CreateNodeDefBytes(const std::shared_ptr &anf_node,
 uint64_t SetExtInfoShapeType(char *ext_info_buf, uint64_t ext_info_offset, UnknowShapeOpType type) {
   // deal1: unknown shape type
   auto *info = reinterpret_cast(ext_info_buf + ext_info_offset);
-  info->infoType = FWK_ADPT_EXT_SHAPE_TYPE;
+  info->infoType = static_cast(FWK_ADPT_EXT_SHAPE_TYPE);
   info->infoLen = sizeof(int32_t);
   ext_info_offset += kExtInfoHeadSize;
   auto *shape_type = reinterpret_cast(ext_info_buf + ext_info_offset);
-  *shape_type = type;
+  *shape_type = static_cast(type);
   ext_info_offset += info->infoLen;
   return ext_info_offset;
 }
@@ -295,8 +295,8 @@ uint64_t SetExtInfoInputShapeType(char *ext_info_buf, uint64_t ext_info_offset,
                                   const std::shared_ptr &anf_node, size_t input_num) {
   // deal2:input ShapeAndType
   auto *info = reinterpret_cast(ext_info_buf + ext_info_offset);
-  info->infoType = FWK_ADPT_EXT_INPUT_SHAPE;
-  info->infoLen = input_num * sizeof(ShapeAndType);
+  info->infoType = static_cast(FWK_ADPT_EXT_INPUT_SHAPE);
+  info->infoLen = SizeToInt(input_num * sizeof(ShapeAndType));
   ext_info_offset += kExtInfoHeadSize;

   auto *inputs = reinterpret_cast(ext_info_buf + ext_info_offset);
@@ -335,8 +335,8 @@ uint64_t SetExtInfoOutputShapeType(char *ext_info_buf, uint64_t ext_info_offset,
                                    const std::shared_ptr &anf_node, size_t output_num) {
   // deal3:output ShapeAndType
   auto *info = reinterpret_cast(ext_info_buf + ext_info_offset);
-  info->infoType = FWK_ADPT_EXT_OUTPUT_SHAPE;
-  info->infoLen = output_num * sizeof(ShapeAndType);
+  info->infoType = static_cast(FWK_ADPT_EXT_OUTPUT_SHAPE);
+  info->infoLen = SizeToInt(output_num * sizeof(ShapeAndType));
   ext_info_offset += kExtInfoHeadSize;

   auto *outputs = reinterpret_cast(ext_info_buf + ext_info_offset);
@@ -418,9 +418,7 @@ KernelModPtr AicpuOpBuild(const std::shared_ptr &anf_node) {
     MS_LOG(EXCEPTION) << "Create nodeDefBytes failed!";
   }

-  if (!CreateExtInfo(anf_node, kernel_mod_ptr)) {
-    MS_LOG(EXCEPTION) << "Create nodeDefBytes failed!";
-  }
+  CreateExtInfo(anf_node, kernel_mod_ptr);

   if (!SetIOSize(anf_node, kernel_mod_ptr)) {
MS_LOG(EXCEPTION) << "Set input output size list failed."; diff --git a/mindspore/ccsrc/backend/kernel_compiler/rts/assign.cc b/mindspore/ccsrc/backend/kernel_compiler/rts/assign.cc index 1a23e9cd033..47869d3a3cc 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/rts/assign.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/rts/assign.cc @@ -50,7 +50,7 @@ bool AssignKernel::Launch(const std::vector &inputs, const std::vect std::vector AssignKernel::GenTask(const std::vector &inputs, const std::vector &, const std::vector &, uint32_t stream_id) { if (inputs.size() != 2) { - MS_LOG(EXCEPTION) << "inputs size is not two"; + MS_LOG(EXCEPTION) << "inputs size is not two, got " << inputs.size(); } stream_id_ = stream_id; diff --git a/mindspore/ccsrc/backend/kernel_compiler/rts/memcpy_async.cc b/mindspore/ccsrc/backend/kernel_compiler/rts/memcpy_async.cc index 5e7da21df62..a9991d26c0a 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/rts/memcpy_async.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/rts/memcpy_async.cc @@ -52,7 +52,7 @@ bool MemCpyAsyncKernel::Launch(const std::vector &inputs, const std: return true; } if (outputs[0]->size < inputs[0]->size) { - MS_LOG(EXCEPTION) << "rtMemcpyAsync destMax < src size"; + MS_LOG(EXCEPTION) << "rtMemcpyAsync destMax " << outputs[0]->size << " is less than src size " << inputs[0]->size; } // input x -> memcpy_async -> AllReduce if (outputs[0]->size > inputs[0]->size) { @@ -78,7 +78,7 @@ void MemCpyAsyncKernel::GetInputOutputDataType(const AnfNodePtr &anf_node) { MS_EXCEPTION_IF_NULL(anf_node); size_t input_size = AnfAlgo::GetInputTensorNum(anf_node); if (input_size != 1) { - MS_LOG(EXCEPTION) << "MemCpyAsync input size is not 1"; + MS_LOG(EXCEPTION) << "MemCpyAsync input size is not 1, got " << input_size; } input_type_id_ = AnfAlgo::GetPrevNodeOutputDeviceDataType(anf_node, 0); } @@ -87,7 +87,7 @@ void MemCpyAsyncKernel::GetInputOutputTotalCount(const AnfNodePtr &anf_node) { MS_EXCEPTION_IF_NULL(anf_node); size_t input_size = AnfAlgo::GetInputTensorNum(anf_node); if (input_size != 1) { - MS_LOG(EXCEPTION) << "MemCpyAsync input size is not 1"; + MS_LOG(EXCEPTION) << "MemCpyAsync input size is not 1, got " << input_size; } size_t type_size = abstract::TypeIdSize(input_type_id_); std::vector shape_i = AnfAlgo::GetInputDeviceShape(anf_node, 0); @@ -134,15 +134,16 @@ device::DynamicKernelPtr MemCpyAsyncKernel::GenDynamicKernel(const CNodePtr &cno device::KernelRuntime::GenLaunchArgs(*this, cnode_ptr, &kernel_inputs, &kernel_workspaces, &kernel_outputs); if (kernel_inputs.size() != 1) { - MS_LOG(EXCEPTION) << "MemCpyAsync op inputs is not one"; + MS_LOG(EXCEPTION) << "MemCpyAsync op inputs is not one, got " << kernel_inputs.size(); } if (kernel_outputs.size() != 1) { - MS_LOG(EXCEPTION) << "MemCpyAsync op output is not one"; + MS_LOG(EXCEPTION) << "MemCpyAsync op output is not one, got " << kernel_outputs.size(); } if (kernel_outputs[0]->size < kernel_inputs[0]->size) { - MS_LOG(EXCEPTION) << "Check rtMemcpyAsync destMax < src size"; + MS_LOG(EXCEPTION) << "rtMemcpyAsync destMax " << kernel_outputs[0]->size << " is less than src size " + << kernel_inputs[0]->size; } // input x -> memcpy_async -> AllReduce if (kernel_outputs[0]->size > kernel_inputs[0]->size) { diff --git a/mindspore/ccsrc/backend/kernel_compiler/rts/stream_active.cc b/mindspore/ccsrc/backend/kernel_compiler/rts/stream_active.cc index a14707d13c0..70c7c620e08 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/rts/stream_active.cc +++ 
@@ -35,7 +35,7 @@ bool StreamActiveKernel::Init(const AnfNodePtr &anf_node) {
   auto primitive = AnfAlgo::GetCNodePrimitive(anf_node);
   MS_EXCEPTION_IF_NULL(primitive);
   if (!AnfAlgo::HasNodeAttr(kAttrActiveStreamList, anf_node->cast())) {
-    MS_LOG(EXCEPTION) << "StreamActiveKernel has no attr kAttrActiveStreamList";
+    MS_LOG(EXCEPTION) << "StreamActiveKernel " << anf_node->DebugString() << " has no attr kAttrActiveStreamList";
   }
   active_streams_index_ = GetValue>(primitive->GetAttr(kAttrActiveStreamList));
   return true;
diff --git a/mindspore/ccsrc/backend/kernel_compiler/rts/tensor_copy_slices.cc b/mindspore/ccsrc/backend/kernel_compiler/rts/tensor_copy_slices.cc
index 4bf9eee3343..c289ab96487 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/rts/tensor_copy_slices.cc
+++ b/mindspore/ccsrc/backend/kernel_compiler/rts/tensor_copy_slices.cc
@@ -85,7 +85,7 @@ void TensorCopySlices::GetInputOutputInfo(const AnfNodePtr &anf_node) {
   MS_EXCEPTION_IF_NULL(anf_node);
   size_t input_size = AnfAlgo::GetInputTensorNum(anf_node);
   if (input_size != 2) {
-    MS_LOG(EXCEPTION) << "TensorCopySlices input size is not 2";
+    MS_LOG(EXCEPTION) << "TensorCopySlices input size is not 2, got " << input_size;
   }
   input_type_id_ = AnfAlgo::GetPrevNodeOutputDeviceDataType(anf_node, 0);
   update_type_id_ = AnfAlgo::GetPrevNodeOutputDeviceDataType(anf_node, 0);
@@ -138,7 +138,8 @@ std::vector TensorCopySlices::GenTask(const std::vector
     MS_LOG(EXCEPTION) << "outputs size is not 1.";
   }
   if (outputs[0]->size != inputs[0]->size) {
-    MS_LOG(EXCEPTION) << "TensorCopySlices input size and output size not equal.";
+    MS_LOG(EXCEPTION) << "TensorCopySlices input size " << inputs[0]->size << " is not equal to output size "
+                      << outputs[0]->size;
   }

   stream_id_ = stream_id;
diff --git a/mindspore/ccsrc/backend/optimizer/cpu/insert_format_transform_op.cc b/mindspore/ccsrc/backend/optimizer/cpu/insert_format_transform_op.cc
index bd7d15792a1..2ce0ef0efc1 100644
--- a/mindspore/ccsrc/backend/optimizer/cpu/insert_format_transform_op.cc
+++ b/mindspore/ccsrc/backend/optimizer/cpu/insert_format_transform_op.cc
@@ -95,7 +95,7 @@ void ProcessForTupleItem(const FuncGraphPtr &graph, const AnfNodePtr &node, int
     auto used_node = used_node_list->at(i).first;
     auto used_node_index = used_node_list->at(i).second - 1;
     if (AnfAlgo::GetCNodeName(used_node) == prim::kPrimTupleGetItem->name()) {
-      MS_LOG(EXCEPTION) << "The used node of tuple item can't be tuple item.";
+      MS_LOG(EXCEPTION) << "The used node of tuple item " << used_node->DebugString() << " can't be tuple item.";
     }

     // node->used_node, if output format of node equals input format of used_node,
diff --git a/mindspore/ccsrc/backend/session/ascend_session.cc b/mindspore/ccsrc/backend/session/ascend_session.cc
index 13c69aef304..84aa125c455 100644
--- a/mindspore/ccsrc/backend/session/ascend_session.cc
+++ b/mindspore/ccsrc/backend/session/ascend_session.cc
@@ -225,14 +225,13 @@ size_t LoadCtrlInputTensor(const std::shared_ptr &graph, std::vecto
   *next_val = 0;
   next_loop_tensor->set_sync_status(kNeedSyncHostToDevice);
   // set loop_count to zero
-  MS_EXCEPTION_IF_NULL(inputs);
   inputs->push_back(next_loop_tensor);

   auto epoch_tensor = (*inputs_params)[kLoopSinkEpochIndex];
   MS_EXCEPTION_IF_NULL(epoch_tensor);
   auto *epoch_val = static_cast(epoch_tensor->data_c());
   MS_EXCEPTION_IF_NULL(epoch_val);
-  *epoch_val = graph->current_epoch();
+  *epoch_val = SizeToInt(graph->current_epoch());
   epoch_tensor->set_sync_status(kNeedSyncHostToDevice);
   inputs->push_back(epoch_tensor);
   MS_LOG(DEBUG) << "Load epoch_val:" << *epoch_val;
@@ -609,7 +608,7 @@ void AscendSession::PreExecuteGraph(const std::shared_ptr &kernel_g
 }

 void AscendSession::PostExecuteGraph(const std::shared_ptr &kernel_graph,
-                                     const std::vector &inputs, VectorRef *const) {
+                                     const std::vector &, VectorRef *const) {
   // summary
   Summary(kernel_graph.get());
   // load tensor from device for debugger
@@ -1503,7 +1502,7 @@ void AscendSession::SyncInitialTenosrToDevice() {
     auto backend_parameter = graph_inputs[input_idx];
     // sync data from host to device
     MS_EXCEPTION_IF_NULL(front_tensor);
-    size_t tensor_size = front_tensor->data().nbytes();
+    size_t tensor_size = LongToSize(front_tensor->data().nbytes());
     auto addr = AnfAlgo::GetOutputAddr(backend_parameter, 0);
     MS_EXCEPTION_IF_NULL(addr);
     if (!addr->SyncHostToDevice(trans::GetRuntimePaddingShape(backend_parameter, 0), tensor_size,
@@ -1546,7 +1545,7 @@ void AscendSession::SelectKernel(NotNull root_graph) {
   size_t reduce_precision_count = 0;

   std::set memo;
-  (void)RecurseSelectKernelInfo(root_graph, NOT_NULL(&memo), &raise_precision_count, &reduce_precision_count);
+  RecurseSelectKernelInfo(root_graph, NOT_NULL(&memo), &raise_precision_count, &reduce_precision_count);
   memo.clear();

   auto ms_context = MsContext::GetInstance();
diff --git a/mindspore/ccsrc/backend/session/cpu_session.cc b/mindspore/ccsrc/backend/session/cpu_session.cc
index aac2ca36fe9..d4089e77d4b 100644
--- a/mindspore/ccsrc/backend/session/cpu_session.cc
+++ b/mindspore/ccsrc/backend/session/cpu_session.cc
@@ -163,7 +163,8 @@ void CPUSession::LoadInputData(const std::shared_ptr &kernel_graph,
   MS_EXCEPTION_IF_NULL(kernel_graph);
   auto &input_nodes = kernel_graph->inputs();
   if (input_nodes.size() != inputs_const.size()) {
-    MS_LOG(EXCEPTION) << "Input size not equal to input node size!";
+    MS_LOG(EXCEPTION) << "Input size " << inputs_const.size() << " is not equal to input node size "
+                      << input_nodes.size();
   }
   for (size_t input_idx = 0; input_idx < input_nodes.size(); ++input_idx) {
     auto &input_node = input_nodes[input_idx];
@@ -201,7 +202,7 @@ void CPUSession::PreExecuteGraph(const std::shared_ptr &kernel_grap
 }

 void CPUSession::PostExecuteGraph(const std::shared_ptr &kernel_graph,
-                                  const std::vector &inputs, VectorRef *const outputs) {
+                                  const std::vector &, VectorRef *const) {
   Summary(kernel_graph.get());
 }
diff --git a/mindspore/ccsrc/pipeline/jit/pipeline.cc b/mindspore/ccsrc/pipeline/jit/pipeline.cc
index 3db37050b78..9e1f44436ed 100644
--- a/mindspore/ccsrc/pipeline/jit/pipeline.cc
+++ b/mindspore/ccsrc/pipeline/jit/pipeline.cc
@@ -1262,7 +1262,7 @@ void InitHccl() {
   if (!task_sink && mode == kGraphMode) {
     MS_LOG(INFO) << "mpi collective init.";
     if (!HcclCollectiveGroup::instance().InitCollective()) {
-      MS_LOG(EXCEPTION) << "HcclCollectiveGroup init failed.";
+      MS_LOG(EXCEPTION) << "Mpi init failed, please check if mpirun is used correctly.";
     }
     device_id = IntToUint(HcclCollectiveGroup::instance().GetDeviceId());
     ms_context->set_param(MS_CTX_DEVICE_ID, device_id);
diff --git a/mindspore/ccsrc/runtime/device/ascend/distribute/ascend_collective.cc b/mindspore/ccsrc/runtime/device/ascend/distribute/ascend_collective.cc
index fa28e1a2e17..6da127a7dc3 100644
--- a/mindspore/ccsrc/runtime/device/ascend/distribute/ascend_collective.cc
+++ b/mindspore/ccsrc/runtime/device/ascend/distribute/ascend_collective.cc
@@ -48,7 +48,8 @@ bool HcclCollectiveGroup::InitCollective() {
     MS_LOG(EXCEPTION) << "Loading libascend_collective.so failed. Many reasons could cause this:\n1.libascend_collective.so is not "
                          "installed.\n2.hccl is not "
-                         "installed or found.\n3.mpi is not installed or found";
+                         "installed or found.\n3.mpi is not installed or found, please check if lib files of OpenMPI are added to "
+                         "LD_LIBRARY_PATH.";
   }
   init_mpi_ = DlsymFuncObj(InitMPI, collective_handle_);
   finalize_mpi_ = DlsymFuncObj(FinalizeMPI, collective_handle_);
diff --git a/mindspore/ccsrc/runtime/device/ascend/kernel_select_ascend.cc b/mindspore/ccsrc/runtime/device/ascend/kernel_select_ascend.cc
index 56040efb0c6..b92da2c3fea 100644
--- a/mindspore/ccsrc/runtime/device/ascend/kernel_select_ascend.cc
+++ b/mindspore/ccsrc/runtime/device/ascend/kernel_select_ascend.cc
@@ -214,7 +214,7 @@ std::shared_ptr ChooseMatchedKernelInfo(
     UpdateCurMatchCounts(*kernel_info_ptr, kernel_node, &cur_kernel_info_match_counts);
     // Currently the selection policy is the match format count first, and then is datatype counts.
     if (PriorityChooseItem(cur_kernel_info_match_counts, &most_match_counts)) {
-      selected_index = SizeToInt(info_index);
+      selected_index = info_index;
     }
   }
   return kernel_info_list[selected_index];
diff --git a/mindspore/ccsrc/runtime/device/cpu/cpu_device_address.cc b/mindspore/ccsrc/runtime/device/cpu/cpu_device_address.cc
index 3807f7d0b10..342879dc916 100644
--- a/mindspore/ccsrc/runtime/device/cpu/cpu_device_address.cc
+++ b/mindspore/ccsrc/runtime/device/cpu/cpu_device_address.cc
@@ -96,8 +96,8 @@ bool CPUDeviceAddress::SyncDeviceToHost(const ShapeVector &, size_t size, TypeId
   return true;
 }

-bool CPUDeviceAddress::SyncHostToDevice(const ShapeVector & /* shape */, size_t size, TypeId type, const void *host_ptr,
-                                        const std::string &format) const {
+bool CPUDeviceAddress::SyncHostToDevice(const ShapeVector &, size_t size, TypeId type, const void *host_ptr,
+                                        const std::string &) const {
   // The input or output may be empty.
   if ((size == 0) || (size_ == 0)) {
     MS_LOG(INFO) << "No need sync, host size: " << size << ", device size: " << size_;
diff --git a/mindspore/ccsrc/runtime/device/cpu/cpu_kernel_runtime.cc b/mindspore/ccsrc/runtime/device/cpu/cpu_kernel_runtime.cc
index 3a00045ca14..561986b82b6 100644
--- a/mindspore/ccsrc/runtime/device/cpu/cpu_kernel_runtime.cc
+++ b/mindspore/ccsrc/runtime/device/cpu/cpu_kernel_runtime.cc
@@ -189,7 +189,8 @@ tensor::TensorPtr CPUKernelRuntime::CreatTensorForOutput(
   MS_EXCEPTION_IF_NULL(tensor_to_node);
   size_t output_size = AnfAlgo::GetOutputTensorNum(node);
   if (index >= output_size) {
-    MS_LOG(EXCEPTION) << "Invalid input index " << index;
+    MS_LOG(EXCEPTION) << "For node " << node->DebugString() << ", index " << index << " exceeds output size "
+                      << output_size;
   }
   auto address = AnfAlgo::GetMutableOutputAddr(node, index);
   MS_EXCEPTION_IF_NULL(address);
@@ -204,6 +205,9 @@ tensor::TensorPtr CPUKernelRuntime::CreatTensorForOutput(
     tensor = kernel_graph->GetInternalOutputTensor(node, index);
     if (tensor == nullptr) {
       size_t type_size = GetTypeByte(TypeIdToType(device_type_id));
+      if (type_size == 0) {
+        MS_LOG(EXCEPTION) << "Invalid type_size " << type_size;
+      }
       size_t tensor_size = std::accumulate(temp_shape.begin(), temp_shape.end(), type_size, std::multiplies());
       if (tensor_size < address->size_) {
         temp_shape.clear();
@@ -276,7 +280,7 @@ void CPUKernelRuntime::CreateOutputTensors(session::KernelGraph *kernel_graph,
   MS_EXCEPTION_IF_NULL(tensor_to_node);
   auto &input_nodes = kernel_graph->inputs();
   if (input_nodes.size() != inputs.size()) {
-    MS_LOG(EXCEPTION) << "Input size not equal to input node size!";
+    MS_LOG(EXCEPTION) << "Input size " << inputs.size() << " is not equal to input node size " << input_nodes.size();
   }

   size_t input_idx = 0;
@@ -300,7 +304,7 @@ void CPUKernelRuntime::BindInputTensorAddressPtr(const session::KernelGraph &ker
                                                  const std::vector &inputs) {
   auto &input_nodes = kernel_graph.inputs();
   if (input_nodes.size() != inputs.size()) {
-    MS_LOG(EXCEPTION) << "Input size not equal to input node size!";
+    MS_LOG(EXCEPTION) << "Input size " << inputs.size() << " is not equal to input node size " << input_nodes.size();
   }
   for (size_t input_idx = 0; input_idx < input_nodes.size(); ++input_idx) {
     auto &item = input_nodes[input_idx];
@@ -344,7 +348,9 @@ void CPUKernelRuntime::BindInputTensorAddressPtr(const session::KernelGraph &ker
       AnfAlgo::SetOutputInferTypeAndShape({AnfAlgo::GetOutputInferDataType(item, 0)}, {shape_tmp}, item.get());
     }
     address->ref_count_ = INIT_NODE_REF;
-    tensor->set_device_address(address);
+    if (AnfAlgo::IsParameterWeight(input_param)) {
+      tensor->set_device_address(address);
+    }
   }
 }
diff --git a/mindspore/ccsrc/runtime/device/cpu/cpu_memory_manager.cc b/mindspore/ccsrc/runtime/device/cpu/cpu_memory_manager.cc
index 460a9f77e3e..d31be809160 100644
--- a/mindspore/ccsrc/runtime/device/cpu/cpu_memory_manager.cc
+++ b/mindspore/ccsrc/runtime/device/cpu/cpu_memory_manager.cc
@@ -72,15 +72,9 @@ void CPUMemoryManager::ResetDynamicMemory() {
   dynamic_mem_.clear();
 }

-CPUMemoryManager::~CPUMemoryManager() {
-  try {
-    MemFree();
-  } catch (std::exception &e) {
-    MS_LOG(EXCEPTION) << "MemFree exception in ~CPUMemoryManager(), " << e.what();
-  }
-}
+CPUMemoryManager::~CPUMemoryManager() { MemFree(); }

-void CPUMemoryManager::MemFree() {
+void CPUMemoryManager::MemFree() noexcept {
   if (mem_ptr_ != nullptr) {
     mem_ptr_ = nullptr;
     mem_size_ = 0;
diff --git a/mindspore/ccsrc/runtime/device/cpu/cpu_memory_manager.h b/mindspore/ccsrc/runtime/device/cpu/cpu_memory_manager.h
index 483126bf05c..e9959e9fd34 100644
--- a/mindspore/ccsrc/runtime/device/cpu/cpu_memory_manager.h
+++ b/mindspore/ccsrc/runtime/device/cpu/cpu_memory_manager.h
@@ -58,7 +58,7 @@ class CPUMemoryManager : public MemoryManager {

  private:
   uint8_t *MemMalloc(size_t size);
-  void MemFree();
+  void MemFree() noexcept;
   CPUSimpleMemPlan mem_plan_;

   size_t mem_size_{0};
diff --git a/mindspore/parallel/_utils.py b/mindspore/parallel/_utils.py
index 9fb7397002f..760b9d53eec 100644
--- a/mindspore/parallel/_utils.py
+++ b/mindspore/parallel/_utils.py
@@ -50,7 +50,7 @@ def _check_task_sink_envs():
         return True if task_sink environment variables have been exported, False otherwise.
     """
     import os
-    task_sink = os.getenv("SINGLE_OP_MODE")
+    task_sink = os.getenv("GRAPH_OP_RUN")
     if task_sink:
         try:
             if int(task_sink) == 1: