forked from mindspore-Ecosystem/mindspore
!22665 fix pclint and error log
Merge pull request !22665 from baihuawei/fixcodedex
commit 1da5f2faa0
@@ -282,11 +282,11 @@ bool CreateNodeDefBytes(const std::shared_ptr<AnfNode> &anf_node,
 uint64_t SetExtInfoShapeType(char *ext_info_buf, uint64_t ext_info_offset, UnknowShapeOpType type) {
   // deal1: unknown shape type
   auto *info = reinterpret_cast<ExtInfo *>(ext_info_buf + ext_info_offset);
-  info->infoType = FWK_ADPT_EXT_SHAPE_TYPE;
+  info->infoType = static_cast<int32_t>(FWK_ADPT_EXT_SHAPE_TYPE);
   info->infoLen = sizeof(int32_t);
   ext_info_offset += kExtInfoHeadSize;
   auto *shape_type = reinterpret_cast<int32_t *>(ext_info_buf + ext_info_offset);
-  *shape_type = type;
+  *shape_type = static_cast<int32_t>(type);
   ext_info_offset += info->infoLen;
   return ext_info_offset;
 }

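Note on the hunk above: both changed lines are the same PC-lint fix — an enum constant and an enum value were being assigned to int32_t fields implicitly. A minimal standalone sketch of the pattern, with FwkExtType, ExtHeader, and WriteShapeType as hypothetical stand-ins for the real aicpu adapter types:

#include <cstdint>

// Hypothetical stand-ins for the aicpu adapter types (the real ExtInfo and
// FWK_ADPT_* constants live in the Ascend fwk_adpt headers).
enum FwkExtType { FWK_EXT_SHAPE_TYPE = 1 };
struct ExtHeader {
  int32_t infoType;
  int32_t infoLen;
};

uint64_t WriteShapeType(char *buf, uint64_t offset, int shape_type) {
  auto *head = reinterpret_cast<ExtHeader *>(buf + offset);
  // Without static_cast, assigning an enum (or a wider integer) to an
  // int32_t field is an implicit conversion that PC-lint reports.
  head->infoType = static_cast<int32_t>(FWK_EXT_SHAPE_TYPE);
  head->infoLen = static_cast<int32_t>(sizeof(int32_t));
  offset += sizeof(ExtHeader);
  *reinterpret_cast<int32_t *>(buf + offset) = static_cast<int32_t>(shape_type);
  offset += static_cast<uint64_t>(head->infoLen);
  return offset;
}
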
@@ -295,8 +295,8 @@ uint64_t SetExtInfoInputShapeType(char *ext_info_buf, uint64_t ext_info_offset,
                                   const std::shared_ptr<AnfNode> &anf_node, size_t input_num) {
   // deal2:input ShapeAndType
   auto *info = reinterpret_cast<ExtInfo *>(ext_info_buf + ext_info_offset);
-  info->infoType = FWK_ADPT_EXT_INPUT_SHAPE;
-  info->infoLen = input_num * sizeof(ShapeAndType);
+  info->infoType = static_cast<int32_t>(FWK_ADPT_EXT_INPUT_SHAPE);
+  info->infoLen = SizeToInt(input_num * sizeof(ShapeAndType));
   ext_info_offset += kExtInfoHeadSize;

   auto *inputs = reinterpret_cast<ShapeAndType *>(ext_info_buf + ext_info_offset);

@@ -335,8 +335,8 @@ uint64_t SetExtInfoOutputShapeType(char *ext_info_buf, uint64_t ext_info_offset,
                                    const std::shared_ptr<AnfNode> &anf_node, size_t output_num) {
   // deal3:output ShapeAndType
   auto *info = reinterpret_cast<ExtInfo *>(ext_info_buf + ext_info_offset);
-  info->infoType = FWK_ADPT_EXT_OUTPUT_SHAPE;
-  info->infoLen = output_num * sizeof(ShapeAndType);
+  info->infoType = static_cast<int32_t>(FWK_ADPT_EXT_OUTPUT_SHAPE);
+  info->infoLen = SizeToInt(output_num * sizeof(ShapeAndType));
   ext_info_offset += kExtInfoHeadSize;

   auto *outputs = reinterpret_cast<ShapeAndType *>(ext_info_buf + ext_info_offset);

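SizeToInt here, and the LongToSize that appears later in this commit, are MindSpore's checked-narrowing helpers. A rough sketch of the behavior being assumed — the real versions report failure through MS_LOG(EXCEPTION) rather than a standard exception:

#include <climits>
#include <cstddef>
#include <cstdint>
#include <stdexcept>

// Sketch of checked-narrowing helpers: narrow only after verifying the
// value fits, instead of letting an implicit conversion truncate silently.
inline int32_t SizeToInt(size_t u) {
  if (u > static_cast<size_t>(INT_MAX)) {
    throw std::runtime_error("size_t value cannot fit into int32_t");
  }
  return static_cast<int32_t>(u);
}

inline size_t LongToSize(int64_t i) {
  if (i < 0) {
    throw std::runtime_error("negative int64_t cannot convert to size_t");
  }
  return static_cast<size_t>(i);
}
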
@@ -418,9 +418,7 @@ KernelModPtr AicpuOpBuild(const std::shared_ptr<AnfNode> &anf_node) {
     MS_LOG(EXCEPTION) << "Create nodeDefBytes failed!";
   }

-  if (!CreateExtInfo(anf_node, kernel_mod_ptr)) {
-    MS_LOG(EXCEPTION) << "Create nodeDefBytes failed!";
-  }
+  CreateExtInfo(anf_node, kernel_mod_ptr);

   if (!SetIOSize(anf_node, kernel_mod_ptr)) {
     MS_LOG(EXCEPTION) << "Set input output size list failed.";

@@ -50,7 +50,7 @@ bool AssignKernel::Launch(const std::vector<AddressPtr> &inputs, const std::vect
 std::vector<TaskInfoPtr> AssignKernel::GenTask(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
                                                const std::vector<AddressPtr> &, uint32_t stream_id) {
   if (inputs.size() != 2) {
-    MS_LOG(EXCEPTION) << "inputs size is not two";
+    MS_LOG(EXCEPTION) << "inputs size is not two, got " << inputs.size();
   }
   stream_id_ = stream_id;

@@ -52,7 +52,7 @@ bool MemCpyAsyncKernel::Launch(const std::vector<AddressPtr> &inputs, const std:
     return true;
   }
   if (outputs[0]->size < inputs[0]->size) {
-    MS_LOG(EXCEPTION) << "rtMemcpyAsync destMax < src size";
+    MS_LOG(EXCEPTION) << "rtMemcpyAsync destMax " << outputs[0]->size << " is less than src size " << inputs[0]->size;
   }
   // input x -> memcpy_async -> AllReduce
   if (outputs[0]->size > inputs[0]->size) {

@@ -78,7 +78,7 @@ void MemCpyAsyncKernel::GetInputOutputDataType(const AnfNodePtr &anf_node) {
   MS_EXCEPTION_IF_NULL(anf_node);
   size_t input_size = AnfAlgo::GetInputTensorNum(anf_node);
   if (input_size != 1) {
-    MS_LOG(EXCEPTION) << "MemCpyAsync input size is not 1";
+    MS_LOG(EXCEPTION) << "MemCpyAsync input size is not 1, got " << input_size;
   }
   input_type_id_ = AnfAlgo::GetPrevNodeOutputDeviceDataType(anf_node, 0);
 }

@@ -87,7 +87,7 @@ void MemCpyAsyncKernel::GetInputOutputTotalCount(const AnfNodePtr &anf_node) {
   MS_EXCEPTION_IF_NULL(anf_node);
   size_t input_size = AnfAlgo::GetInputTensorNum(anf_node);
   if (input_size != 1) {
-    MS_LOG(EXCEPTION) << "MemCpyAsync input size is not 1";
+    MS_LOG(EXCEPTION) << "MemCpyAsync input size is not 1, got " << input_size;
   }
   size_t type_size = abstract::TypeIdSize(input_type_id_);
   std::vector<size_t> shape_i = AnfAlgo::GetInputDeviceShape(anf_node, 0);

@@ -134,15 +134,16 @@ device::DynamicKernelPtr MemCpyAsyncKernel::GenDynamicKernel(const CNodePtr &cno
   device::KernelRuntime::GenLaunchArgs(*this, cnode_ptr, &kernel_inputs, &kernel_workspaces, &kernel_outputs);

   if (kernel_inputs.size() != 1) {
-    MS_LOG(EXCEPTION) << "MemCpyAsync op inputs is not one";
+    MS_LOG(EXCEPTION) << "MemCpyAsync op inputs is not one, got " << kernel_inputs.size();
   }

   if (kernel_outputs.size() != 1) {
-    MS_LOG(EXCEPTION) << "MemCpyAsync op output is not one";
+    MS_LOG(EXCEPTION) << "MemCpyAsync op output is not one, got " << kernel_outputs.size();
   }

   if (kernel_outputs[0]->size < kernel_inputs[0]->size) {
-    MS_LOG(EXCEPTION) << "Check rtMemcpyAsync destMax < src size";
+    MS_LOG(EXCEPTION) << "rtMemcpyAsync destMax " << kernel_outputs[0]->size << " is less than src size "
+                      << kernel_inputs[0]->size;
   }
   // input x -> memcpy_async -> AllReduce
   if (kernel_outputs[0]->size > kernel_inputs[0]->size) {

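A recurring error-log change in this commit: every failed size check now prints the observed value next to the expectation, so the log is actionable without re-running under a debugger. The same pattern outside the MS_LOG machinery, names illustrative:

#include <cstddef>
#include <sstream>
#include <stdexcept>

// Illustrative stand-in for the MS_LOG(EXCEPTION) pattern: build the
// message with the offending values, then throw.
void CheckCopyFits(size_t dest_max, size_t src_size) {
  if (dest_max < src_size) {
    std::ostringstream oss;
    oss << "memcpy destMax " << dest_max << " is less than src size " << src_size;
    throw std::out_of_range(oss.str());
  }
}
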
@@ -35,7 +35,7 @@ bool StreamActiveKernel::Init(const AnfNodePtr &anf_node) {
   auto primitive = AnfAlgo::GetCNodePrimitive(anf_node);
   MS_EXCEPTION_IF_NULL(primitive);
   if (!AnfAlgo::HasNodeAttr(kAttrActiveStreamList, anf_node->cast<CNodePtr>())) {
-    MS_LOG(EXCEPTION) << "StreamActiveKernel has no attr kAttrActiveStreamList";
+    MS_LOG(EXCEPTION) << "StreamActiveKernel " << anf_node->DebugString() << " has no attr kAttrActiveStreamList";
   }
   active_streams_index_ = GetValue<std::vector<uint32_t>>(primitive->GetAttr(kAttrActiveStreamList));
   return true;

@@ -85,7 +85,7 @@ void TensorCopySlices::GetInputOutputInfo(const AnfNodePtr &anf_node) {
   MS_EXCEPTION_IF_NULL(anf_node);
   size_t input_size = AnfAlgo::GetInputTensorNum(anf_node);
   if (input_size != 2) {
-    MS_LOG(EXCEPTION) << "TensorCopySlices input size is not 2";
+    MS_LOG(EXCEPTION) << "TensorCopySlices input size is not 2, got " << input_size;
   }
   input_type_id_ = AnfAlgo::GetPrevNodeOutputDeviceDataType(anf_node, 0);
   update_type_id_ = AnfAlgo::GetPrevNodeOutputDeviceDataType(anf_node, 0);

@@ -138,7 +138,8 @@ std::vector<TaskInfoPtr> TensorCopySlices::GenTask(const std::vector<AddressPtr>
     MS_LOG(EXCEPTION) << "outputs size is not 1.";
   }
   if (outputs[0]->size != inputs[0]->size) {
-    MS_LOG(EXCEPTION) << "TensorCopySlices input size and output size not equal.";
+    MS_LOG(EXCEPTION) << "TensorCopySlices input size " << inputs[0]->size << " is not equal to output size "
+                      << outputs[0]->size;
   }

   stream_id_ = stream_id;

@@ -95,7 +95,7 @@ void ProcessForTupleItem(const FuncGraphPtr &graph, const AnfNodePtr &node, int
     auto used_node = used_node_list->at(i).first;
     auto used_node_index = used_node_list->at(i).second - 1;
     if (AnfAlgo::GetCNodeName(used_node) == prim::kPrimTupleGetItem->name()) {
-      MS_LOG(EXCEPTION) << "The used node of tuple item can't be tuple item.";
+      MS_LOG(EXCEPTION) << "The used node of tuple item " << used_node->DebugString() << " can't be tuple item.";
     }

     // node->used_node, if output format of node equals input format of used_node,

@@ -225,14 +225,13 @@ size_t LoadCtrlInputTensor(const std::shared_ptr<KernelGraph> &graph, std::vecto
   *next_val = 0;
   next_loop_tensor->set_sync_status(kNeedSyncHostToDevice);
-  // set loop_count to zero
   MS_EXCEPTION_IF_NULL(inputs);
   inputs->push_back(next_loop_tensor);

   auto epoch_tensor = (*inputs_params)[kLoopSinkEpochIndex];
   MS_EXCEPTION_IF_NULL(epoch_tensor);
   auto *epoch_val = static_cast<int32_t *>(epoch_tensor->data_c());
   MS_EXCEPTION_IF_NULL(epoch_val);
-  *epoch_val = graph->current_epoch();
+  *epoch_val = SizeToInt(graph->current_epoch());
   epoch_tensor->set_sync_status(kNeedSyncHostToDevice);
   inputs->push_back(epoch_tensor);
   MS_LOG(DEBUG) << "Load epoch_val:" << *epoch_val;

@@ -611,7 +610,7 @@ void AscendSession::PreExecuteGraph(const std::shared_ptr<KernelGraph> &kernel_g
 }

 void AscendSession::PostExecuteGraph(const std::shared_ptr<KernelGraph> &kernel_graph,
-                                     const std::vector<tensor::TensorPtr> &inputs, VectorRef *const) {
+                                     const std::vector<tensor::TensorPtr> &, VectorRef *const) {
   // summary
   Summary(kernel_graph.get());
   // load tensor from device for debugger

@@ -1517,7 +1516,7 @@ void AscendSession::SyncInitialTenosrToDevice() {
     auto backend_parameter = graph_inputs[input_idx];
     // sync data from host to device
     MS_EXCEPTION_IF_NULL(front_tensor);
-    size_t tensor_size = front_tensor->data().nbytes();
+    size_t tensor_size = LongToSize(front_tensor->data().nbytes());
     auto addr = AnfAlgo::GetOutputAddr(backend_parameter, 0);
     MS_EXCEPTION_IF_NULL(addr);
     if (!addr->SyncHostToDevice(trans::GetRuntimePaddingShape(backend_parameter, 0), tensor_size,

@@ -1560,7 +1559,7 @@ void AscendSession::SelectKernel(NotNull<KernelGraphPtr> root_graph) {
   size_t reduce_precision_count = 0;

   std::set<KernelGraphPtr> memo;
-  (void)RecurseSelectKernelInfo(root_graph, NOT_NULL(&memo), &raise_precision_count, &reduce_precision_count);
+  RecurseSelectKernelInfo(root_graph, NOT_NULL(&memo), &raise_precision_count, &reduce_precision_count);
   memo.clear();

   auto ms_context = MsContext::GetInstance();

@@ -163,7 +163,8 @@ void CPUSession::LoadInputData(const std::shared_ptr<KernelGraph> &kernel_graph,
   MS_EXCEPTION_IF_NULL(kernel_graph);
   auto &input_nodes = kernel_graph->inputs();
   if (input_nodes.size() != inputs_const.size()) {
-    MS_LOG(EXCEPTION) << "Input size not equal to input node size!";
+    MS_LOG(EXCEPTION) << "Input size " << inputs_const.size() << " is not equal to input node size "
+                      << input_nodes.size();
   }
   for (size_t input_idx = 0; input_idx < input_nodes.size(); ++input_idx) {
     auto &input_node = input_nodes[input_idx];

@@ -201,7 +202,7 @@ void CPUSession::PreExecuteGraph(const std::shared_ptr<KernelGraph> &kernel_grap
 }

 void CPUSession::PostExecuteGraph(const std::shared_ptr<KernelGraph> &kernel_graph,
-                                  const std::vector<tensor::TensorPtr> &inputs, VectorRef *const outputs) {
+                                  const std::vector<tensor::TensorPtr> &, VectorRef *const) {
   Summary(kernel_graph.get());
 }

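Both PostExecuteGraph overrides in this commit (here and in ascend_session above) drop the names of parameters they never read. Leaving a parameter unnamed keeps the override signature intact while silencing unused-parameter warnings; a minimal sketch with hypothetical stub types:

#include <vector>

struct KernelGraphStub {};

class SessionStub {
 public:
  virtual ~SessionStub() = default;
  virtual void PostExecuteGraph(const KernelGraphStub &graph, const std::vector<int> &inputs) = 0;
};

class CpuSessionStub : public SessionStub {
 public:
  // Parameters this override never reads are left unnamed: the signature
  // still matches the base class, but no unused-parameter warning fires.
  void PostExecuteGraph(const KernelGraphStub &graph, const std::vector<int> &) override {
    Summary(graph);
  }

 private:
  static void Summary(const KernelGraphStub &) {}
};
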
@@ -1262,7 +1262,7 @@ void InitHccl() {
   if (!task_sink && mode == kGraphMode) {
     MS_LOG(INFO) << "mpi collective init.";
     if (!HcclCollectiveGroup::instance().InitCollective()) {
-      MS_LOG(EXCEPTION) << "HcclCollectiveGroup init failed.";
+      MS_LOG(EXCEPTION) << "Mpi init failed, please check if mpirun is used correctly.";
     }
     device_id = IntToUint(HcclCollectiveGroup::instance().GetDeviceId());
     ms_context->set_param<uint32_t>(MS_CTX_DEVICE_ID, device_id);

@@ -48,7 +48,8 @@ bool HcclCollectiveGroup::InitCollective() {
     MS_LOG(EXCEPTION)
       << "Loading libascend_collective.so failed. Many reasons could cause this:\n1.libascend_collective.so is not "
          "installed.\n2.hccl is not "
-         "installed or found.\n3.mpi is not installed or found";
+         "installed or found.\n3.mpi is not installed or found, please check if the OpenMPI lib files are added to "
+         "LD_LIBRARY_PATH.";
   }
   init_mpi_ = DlsymFuncObj(InitMPI, collective_handle_);
   finalize_mpi_ = DlsymFuncObj(FinalizeMPI, collective_handle_);

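The reworded message concerns the dlopen of libascend_collective.so. When hand-rolling such a load, the usual pattern is to surface dlerror() next to remediation hints. A hedged sketch — the real code goes through MindSpore's DlsymFuncObj plugin machinery rather than raw dlfcn as here; link with -ldl:

#include <dlfcn.h>
#include <sstream>
#include <stdexcept>
#include <string>

// Sketch: load a collective-communication plugin and report why it failed.
void *LoadCollectiveLib(const std::string &path) {
  void *handle = dlopen(path.c_str(), RTLD_NOW | RTLD_LOCAL);
  if (handle == nullptr) {
    const char *err = dlerror();
    std::ostringstream oss;
    oss << "Loading " << path << " failed: " << (err ? err : "unknown error")
        << ". Check that HCCL and OpenMPI are installed and that their lib "
           "directories are on LD_LIBRARY_PATH.";
    throw std::runtime_error(oss.str());
  }
  return handle;
}
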
@@ -214,7 +214,7 @@ std::shared_ptr<kernel::KernelBuildInfo> ChooseMatchedKernelInfo(
     UpdateCurMatchCounts(*kernel_info_ptr, kernel_node, &cur_kernel_info_match_counts);
     // Currently the selection policy is the match format count first, and then is datatype counts.
     if (PriorityChooseItem(cur_kernel_info_match_counts, &most_match_counts)) {
-      selected_index = SizeToInt(info_index);
+      selected_index = info_index;
     }
   }
   return kernel_info_list[selected_index];

@@ -96,8 +96,8 @@ bool CPUDeviceAddress::SyncDeviceToHost(const ShapeVector &, size_t size, TypeId
   return true;
 }

-bool CPUDeviceAddress::SyncHostToDevice(const ShapeVector & /* shape */, size_t size, TypeId type, const void *host_ptr,
-                                        const std::string &format) const {
+bool CPUDeviceAddress::SyncHostToDevice(const ShapeVector &, size_t size, TypeId type, const void *host_ptr,
+                                        const std::string &) const {
   // The input or output may be empty.
   if ((size == 0) || (size_ == 0)) {
     MS_LOG(INFO) << "No need sync, host size: " << size << ", device size: " << size_;

@@ -189,7 +189,8 @@ tensor::TensorPtr CPUKernelRuntime::CreatTensorForOutput(
   MS_EXCEPTION_IF_NULL(tensor_to_node);
   size_t output_size = AnfAlgo::GetOutputTensorNum(node);
   if (index >= output_size) {
-    MS_LOG(EXCEPTION) << "Invalid input index " << index;
+    MS_LOG(EXCEPTION) << "For node " << node->DebugString() << ", index " << index << " exceeds output size "
+                      << output_size;
   }
   auto address = AnfAlgo::GetMutableOutputAddr(node, index);
   MS_EXCEPTION_IF_NULL(address);

@@ -204,6 +205,9 @@ tensor::TensorPtr CPUKernelRuntime::CreatTensorForOutput(
     tensor = kernel_graph->GetInternalOutputTensor(node, index);
     if (tensor == nullptr) {
       size_t type_size = GetTypeByte(TypeIdToType(device_type_id));
+      if (type_size == 0) {
+        MS_LOG(EXCEPTION) << "Invalid type_size " << type_size;
+      }
       size_t tensor_size = std::accumulate(temp_shape.begin(), temp_shape.end(), type_size, std::multiplies<size_t>());
       if (tensor_size < address->size_) {
         temp_shape.clear();

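The added zero check matters because type_size seeds the std::accumulate product: a zero seed silently yields a zero-byte tensor for any shape instead of an error. A standalone sketch of the computation, helper name illustrative:

#include <cstddef>
#include <functional>
#include <numeric>
#include <stdexcept>
#include <vector>

// Byte size of a tensor: element size times every dimension. Seeding
// std::accumulate with type_size means a zero element size would make the
// whole product 0 and hide the real error, hence the explicit check.
size_t TensorByteSize(const std::vector<size_t> &shape, size_t type_size) {
  if (type_size == 0) {
    throw std::invalid_argument("Invalid type_size 0");
  }
  return std::accumulate(shape.begin(), shape.end(), type_size, std::multiplies<size_t>());
}
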
@@ -276,7 +280,7 @@ void CPUKernelRuntime::CreateOutputTensors(session::KernelGraph *kernel_graph,
   MS_EXCEPTION_IF_NULL(tensor_to_node);
   auto &input_nodes = kernel_graph->inputs();
   if (input_nodes.size() != inputs.size()) {
-    MS_LOG(EXCEPTION) << "Input size not equal to input node size!";
+    MS_LOG(EXCEPTION) << "Input size " << inputs.size() << " is not equal to input node size " << input_nodes.size();
   }

   size_t input_idx = 0;

@@ -300,7 +304,7 @@ void CPUKernelRuntime::BindInputTensorAddressPtr(const session::KernelGraph &ker
                                                  const std::vector<tensor::TensorPtr> &inputs) {
   auto &input_nodes = kernel_graph.inputs();
   if (input_nodes.size() != inputs.size()) {
-    MS_LOG(EXCEPTION) << "Input size not equal to input node size!";
+    MS_LOG(EXCEPTION) << "Input size " << inputs.size() << " is not equal to input node size " << input_nodes.size();
   }
   for (size_t input_idx = 0; input_idx < input_nodes.size(); ++input_idx) {
     auto &item = input_nodes[input_idx];

@@ -344,9 +348,11 @@ void CPUKernelRuntime::BindInputTensorAddressPtr(const session::KernelGraph &ker
       AnfAlgo::SetOutputInferTypeAndShape({AnfAlgo::GetOutputInferDataType(item, 0)}, {shape_tmp}, item.get());
     }
     address->ref_count_ = INIT_NODE_REF;
+    if (AnfAlgo::IsParameterWeight(input_param)) {
+      tensor->set_device_address(address);
+    }
   }
 }

 void CPUKernelRuntime::BindOutputTensorAddressPtr(const VectorRef *outputs) {
   MS_EXCEPTION_IF_NULL(outputs);

@@ -72,15 +72,9 @@ void CPUMemoryManager::ResetDynamicMemory() {
   dynamic_mem_.clear();
 }

-CPUMemoryManager::~CPUMemoryManager() {
-  try {
-    MemFree();
-  } catch (std::exception &e) {
-    MS_LOG(EXCEPTION) << "MemFree exception in ~CPUMemoryManager(), " << e.what();
-  }
-}
+CPUMemoryManager::~CPUMemoryManager() { MemFree(); }

-void CPUMemoryManager::MemFree() {
+void CPUMemoryManager::MemFree() noexcept {
   if (mem_ptr_ != nullptr) {
     mem_ptr_ = nullptr;
     mem_size_ = 0;

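Context for the destructor change: the old code caught an exception only to re-throw it via MS_LOG(EXCEPTION) inside the destructor, which risks std::terminate during stack unwinding. Making the cleanup path noexcept and the destructor trivial is the conventional fix; a minimal sketch:

#include <cstdlib>

class PoolManager {
 public:
  // Destructors are implicitly noexcept; calling only noexcept cleanup keeps
  // that guarantee honest (a throw during unwinding would std::terminate).
  ~PoolManager() { Release(); }

  void Release() noexcept {
    if (mem_ptr_ != nullptr) {
      std::free(mem_ptr_);
      mem_ptr_ = nullptr;
      mem_size_ = 0;
    }
  }

 private:
  void *mem_ptr_{nullptr};
  std::size_t mem_size_{0};
};
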
@@ -58,7 +58,7 @@ class CPUMemoryManager : public MemoryManager {

  private:
   uint8_t *MemMalloc(size_t size);
-  void MemFree();
+  void MemFree() noexcept;
   CPUSimpleMemPlan mem_plan_;

   size_t mem_size_{0};

@@ -50,7 +50,7 @@ def _check_task_sink_envs():
     return True if task_sink environment variables have been exported, False otherwise.
     """
     import os
-    task_sink = os.getenv("SINGLE_OP_MODE")
+    task_sink = os.getenv("GRAPH_OP_RUN")
     if task_sink:
         try:
             if int(task_sink) == 1: