diff --git a/mindspore/ccsrc/backend/kernel_compiler/aicpu/aicpu_kernel_mod.cc b/mindspore/ccsrc/backend/kernel_compiler/aicpu/aicpu_kernel_mod.cc index 2a753bbc62b..b3b52c54839 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/aicpu/aicpu_kernel_mod.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/aicpu/aicpu_kernel_mod.cc @@ -194,7 +194,7 @@ std::vector AicpuOpKernelMod::GenTask(const std::vector } AicpuTaskInfoPtr task_info_ptr = std::make_shared( - kernel_name_, stream_id, node_so_, node_name_, node_def_str_, ext_info_, input_data_addrs, output_data_addrs, + unique_name_, stream_id, node_so_, node_name_, node_def_str_, ext_info_, input_data_addrs, output_data_addrs, NeedDump()); MS_LOG(INFO) << "AicpuOpKernelMod GenTask end"; diff --git a/mindspore/ccsrc/backend/kernel_compiler/akg/ascend/akg_ascend_kernel_mod.cc b/mindspore/ccsrc/backend/kernel_compiler/akg/ascend/akg_ascend_kernel_mod.cc index ffc26bafb01..4761f359ae5 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/akg/ascend/akg_ascend_kernel_mod.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/akg/ascend/akg_ascend_kernel_mod.cc @@ -119,7 +119,7 @@ std::vector AkgKernelMod::GenTask(const std::vector &in MS_LOG(DEBUG) << "The block_dim is:" << block_dim; TbeTaskInfoPtr task_info_ptr = std::make_shared( - kernel_name_, stream_id, stub_func, block_dim, args, args_size, sm_desc, binary, binary_size, meta_data, + unique_name_, stream_id, stub_func, block_dim, args, args_size, sm_desc, binary, binary_size, meta_data, input_data_addrs, output_data_addrs, workspace_addrs, NeedDump()); return {task_info_ptr}; } diff --git a/mindspore/ccsrc/backend/kernel_compiler/ascend_kernel_mod.h b/mindspore/ccsrc/backend/kernel_compiler/ascend_kernel_mod.h index 231539d7871..2ec92b2529e 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/ascend_kernel_mod.h +++ b/mindspore/ccsrc/backend/kernel_compiler/ascend_kernel_mod.h @@ -34,7 +34,8 @@ class AscendKernelMod : public KernelMod { uint32_t stream_id() { return 
stream_id_; } virtual bool NeedDump() { const auto &dump_json = DumpJsonParser::GetInstance(); - return dump_json.NeedDump(kernel_name_) && dump_json.async_dump_enabled() && dump_json.op_debug_mode() == 0; + return dump_json.NeedDump(fullname_) && dump_json.async_dump_enabled() && dump_json.op_debug_mode() == 0 && + !is_monad_; } protected: diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/reduce_cpu_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/cpu/reduce_cpu_kernel.cc index d218243837e..c9aad40e1af 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/cpu/reduce_cpu_kernel.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/reduce_cpu_kernel.cc @@ -52,7 +52,7 @@ void ReduceCPUKernel::InitKernel(const CNodePtr &kernel_node) { reduce_type_ = kReduceAny; reduce_func_ = [](const T *input, size_t pos, T *out) { *out |= input[pos]; }; } else { - MS_LOG(EXCEPTION) << "Unsupported reduce operation: " << kernel_name_ << " for bool."; + MS_LOG(EXCEPTION) << "Unsupported reduce operation: " << fullname_ << " for bool."; } } else { if (kernel_name == "ReduceMax") { diff --git a/mindspore/ccsrc/backend/kernel_compiler/hccl/hccl_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/hccl/hccl_kernel.cc index fb2976961d8..01ba0fdae9f 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/hccl/hccl_kernel.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/hccl/hccl_kernel.cc @@ -234,7 +234,7 @@ std::vector HcclKernel::GenTask(const std::vector &inpu } results.emplace_back( - std::make_shared(kernel_name_, stream_id, hccl::HcclAdapter::GetHcclType(anf_node), input_data_addr, + std::make_shared(unique_name_, stream_id, hccl::HcclAdapter::GetHcclType(anf_node), input_data_addr, output_data_addr, workspace_addr, task.workspace_size, task.stream_num, private_def, hccl::HcclAdapter::GetInstance().GetHcclOpsKernelInfoStore(), hccl_count_, root_id_, op_type_, data_type, group_, NeedDump())); diff --git a/mindspore/ccsrc/backend/kernel_compiler/kernel.h 
b/mindspore/ccsrc/backend/kernel_compiler/kernel.h index f0bfd247144..3a66a85d679 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/kernel.h +++ b/mindspore/ccsrc/backend/kernel_compiler/kernel.h @@ -142,10 +142,14 @@ class KernelMod { virtual void ReleaseResource() {} virtual ~KernelMod() = default; - void set_kernel_name(const std::string &kernel_name) { kernel_name_ = kernel_name; } + void set_unique_name(const std::string &unique_name) { unique_name_ = unique_name; } + void set_fullname(const std::string &fullname) { fullname_ = fullname; } + void set_is_monad(bool is_monad) { is_monad_ = is_monad; } protected: - std::string kernel_name_; + std::string unique_name_; + std::string fullname_; + bool is_monad_{false}; }; using KernelModPtr = std::shared_ptr; } // namespace kernel diff --git a/mindspore/ccsrc/backend/kernel_compiler/rts/assign.cc b/mindspore/ccsrc/backend/kernel_compiler/rts/assign.cc index b1f528576d4..1a23e9cd033 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/rts/assign.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/rts/assign.cc @@ -55,7 +55,7 @@ std::vector AssignKernel::GenTask(const std::vector &in stream_id_ = stream_id; std::shared_ptr task_info_ptr = - std::make_shared(kernel_name_, stream_id, inputs[0]->addr, inputs[0]->size, inputs[1]->addr, + std::make_shared(unique_name_, stream_id, inputs[0]->addr, inputs[0]->size, inputs[1]->addr, inputs[1]->size, RT_MEMCPY_DEVICE_TO_DEVICE, false); MS_EXCEPTION_IF_NULL(task_info_ptr); return {task_info_ptr}; diff --git a/mindspore/ccsrc/backend/kernel_compiler/rts/label_goto.cc b/mindspore/ccsrc/backend/kernel_compiler/rts/label_goto.cc index 82964cd4e31..1eec5ecbb52 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/rts/label_goto.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/rts/label_goto.cc @@ -54,7 +54,7 @@ std::vector LabelGotoKernel::GenTask(const std::vector MS_LOG(INFO) << "LabelGotoKernel GenTask label:" << label_ << ", stream id:" << stream_id; std::vector task_info_list; 
std::shared_ptr task_info_ptr = - std::make_shared(kernel_name_, stream_id, label_); + std::make_shared(unique_name_, stream_id, label_); MS_EXCEPTION_IF_NULL(task_info_ptr); task_info_list.emplace_back(task_info_ptr); return task_info_list; diff --git a/mindspore/ccsrc/backend/kernel_compiler/rts/label_set.cc b/mindspore/ccsrc/backend/kernel_compiler/rts/label_set.cc index 8dc947c0182..57e14cd4d56 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/rts/label_set.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/rts/label_set.cc @@ -53,7 +53,7 @@ std::vector LabelSetKernel::GenTask(const std::vector & const std::vector &, uint32_t stream_id) { MS_LOG(INFO) << "LabelSetKernel GenTask label:" << label_ << ", stream id:" << stream_id; std::vector task_info_list; - std::shared_ptr task_info_ptr = std::make_shared(kernel_name_, stream_id, label_); + std::shared_ptr task_info_ptr = std::make_shared(unique_name_, stream_id, label_); MS_EXCEPTION_IF_NULL(task_info_ptr); task_info_list.emplace_back(task_info_ptr); return task_info_list; diff --git a/mindspore/ccsrc/backend/kernel_compiler/rts/label_switch.cc b/mindspore/ccsrc/backend/kernel_compiler/rts/label_switch.cc index 50352e1c231..2f408385c00 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/rts/label_switch.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/rts/label_switch.cc @@ -64,7 +64,7 @@ std::vector LabelSwitchKernel::GenTask(const std::vector task_info_list; cond_ = inputs[0]->addr; - auto task_info_ptr = std::make_shared(kernel_name_, stream_id, label_size_, label_list_, cond_); + auto task_info_ptr = std::make_shared(unique_name_, stream_id, label_size_, label_list_, cond_); MS_EXCEPTION_IF_NULL(task_info_ptr); task_info_list.emplace_back(task_info_ptr); return task_info_list; diff --git a/mindspore/ccsrc/backend/kernel_compiler/rts/memcpy_async.cc b/mindspore/ccsrc/backend/kernel_compiler/rts/memcpy_async.cc index a7c0c927bc6..5e7da21df62 100644 --- 
a/mindspore/ccsrc/backend/kernel_compiler/rts/memcpy_async.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/rts/memcpy_async.cc @@ -122,7 +122,7 @@ std::vector MemCpyAsyncKernel::GenTask(const std::vector task_info_ptr = - std::make_shared(kernel_name_, stream_id, outputs[0]->addr, outputs[0]->size, inputs[0]->addr, + std::make_shared(unique_name_, stream_id, outputs[0]->addr, outputs[0]->size, inputs[0]->addr, inputs[0]->size, RT_MEMCPY_DEVICE_TO_DEVICE, NeedDump()); MS_EXCEPTION_IF_NULL(task_info_ptr); return {task_info_ptr}; diff --git a/mindspore/ccsrc/backend/kernel_compiler/rts/profiling_kernel_mod.cc b/mindspore/ccsrc/backend/kernel_compiler/rts/profiling_kernel_mod.cc index ad8e1beeeb1..6c57df7810d 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/rts/profiling_kernel_mod.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/rts/profiling_kernel_mod.cc @@ -63,7 +63,7 @@ std::vector ProfilingKernelMod::GenTask(const std::vector task_info_ptr = - std::make_shared(kernel_name_, stream_id, log_id_, notify_, flags_); + std::make_shared(unique_name_, stream_id, log_id_, notify_, flags_); return {task_info_ptr}; } diff --git a/mindspore/ccsrc/backend/kernel_compiler/rts/recv.cc b/mindspore/ccsrc/backend/kernel_compiler/rts/recv.cc index c6147982a39..d5359b5aeb3 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/rts/recv.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/rts/recv.cc @@ -57,7 +57,7 @@ std::vector RecvKernel::GenTask(const std::vector &, co const std::vector &, uint32_t stream_id) { MS_LOG(INFO) << "RecvKernel GenTask event_id_:" << event_id_ << ", stream_id_:" << stream_id; stream_id_ = stream_id; - EventWaitTaskInfoPtr task_info_ptr = std::make_shared(kernel_name_, stream_id, event_id_); + EventWaitTaskInfoPtr task_info_ptr = std::make_shared(unique_name_, stream_id, event_id_); MS_EXCEPTION_IF_NULL(task_info_ptr); return {task_info_ptr}; } diff --git a/mindspore/ccsrc/backend/kernel_compiler/rts/send.cc 
b/mindspore/ccsrc/backend/kernel_compiler/rts/send.cc index 81354ce4952..d88551cdedb 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/rts/send.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/rts/send.cc @@ -56,7 +56,7 @@ std::vector SendKernel::GenTask(const std::vector &, co const std::vector &, uint32_t stream_id) { MS_LOG(INFO) << "SendKernel GenTask event id:" << event_id_ << ", stream id:" << stream_id; stream_id_ = stream_id; - EventRecordTaskInfoPtr task_info_ptr = std::make_shared(kernel_name_, stream_id, event_id_); + EventRecordTaskInfoPtr task_info_ptr = std::make_shared(unique_name_, stream_id, event_id_); MS_EXCEPTION_IF_NULL(task_info_ptr); return {task_info_ptr}; } diff --git a/mindspore/ccsrc/backend/kernel_compiler/rts/stream_active.cc b/mindspore/ccsrc/backend/kernel_compiler/rts/stream_active.cc index f6dcb43b096..ebd26dadec6 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/rts/stream_active.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/rts/stream_active.cc @@ -71,7 +71,7 @@ std::vector StreamActiveKernel::GenTask(const std::vector task_info_list; for (auto &index : active_streams_index_) { std::shared_ptr task_info_ptr = - std::make_shared(kernel_name_, stream_id, index); + std::make_shared(unique_name_, stream_id, index); MS_EXCEPTION_IF_NULL(task_info_ptr); task_info_list.emplace_back(task_info_ptr); MS_LOG(INFO) << "StreamActiveKernel GenTask: streamId:" << stream_id << ", Active streamId:" << index; diff --git a/mindspore/ccsrc/backend/kernel_compiler/rts/stream_switch.cc b/mindspore/ccsrc/backend/kernel_compiler/rts/stream_switch.cc index 3c172560097..5b6fd0fa5d3 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/rts/stream_switch.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/rts/stream_switch.cc @@ -91,7 +91,7 @@ std::vector StreamSwitchKernel::GenTask(const std::vector(cond_) << ", true_stream_index_:" << true_stream_index_ << ", stream_id:" << stream_id; std::shared_ptr task_info_ptr = std::make_shared( - kernel_name_, 
stream_id, true_stream_index_, loop_cnt, ites_per_loop, cond_, data_type_); + unique_name_, stream_id, true_stream_index_, loop_cnt, ites_per_loop, cond_, data_type_); MS_EXCEPTION_IF_NULL(task_info_ptr); return {task_info_ptr}; } diff --git a/mindspore/ccsrc/backend/kernel_compiler/rts/tensor_copy_slices.cc b/mindspore/ccsrc/backend/kernel_compiler/rts/tensor_copy_slices.cc index c4f606f9221..fcbf2067a78 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/rts/tensor_copy_slices.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/rts/tensor_copy_slices.cc @@ -142,10 +142,10 @@ std::vector TensorCopySlices::GenTask(const std::vector stream_id_ = stream_id; std::shared_ptr task_info_ptr1 = - std::make_shared(kernel_name_, stream_id, outputs[0]->addr, outputs[0]->size, inputs[0]->addr, + std::make_shared(unique_name_, stream_id, outputs[0]->addr, outputs[0]->size, inputs[0]->addr, inputs[0]->size, RT_MEMCPY_DEVICE_TO_DEVICE, NeedDump()); std::shared_ptr task_info_ptr2 = std::make_shared( - kernel_name_, stream_id, VoidPointerOffset(outputs[0]->addr, offset_), copy_size_, inputs[1]->addr, copy_size_, + unique_name_, stream_id, VoidPointerOffset(outputs[0]->addr, offset_), copy_size_, inputs[1]->addr, copy_size_, RT_MEMCPY_DEVICE_TO_DEVICE, NeedDump()); return {task_info_ptr1, task_info_ptr2}; } diff --git a/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_kernel_mod.cc b/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_kernel_mod.cc index a49076e1b8b..64fc7e8a10e 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_kernel_mod.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_kernel_mod.cc @@ -103,7 +103,7 @@ std::vector TbeKernelMod::GenTask(const std::vector &in MS_LOG(INFO) << "block_dim is:" << block_dim_; TbeTaskInfoPtr task_info_ptr = std::make_shared( - kernel_name_, stream_id, stub_func, block_dim_, args, 0, sm_desc, nullptr, 0, meta_data, input_data_addrs, + unique_name_, stream_id, stub_func, block_dim_, args, 0, sm_desc, nullptr, 0, meta_data, 
input_data_addrs, output_data_addrs, workspace_addrs, NeedDump()); return {task_info_ptr}; } diff --git a/mindspore/ccsrc/backend/session/anf_runtime_algorithm.cc b/mindspore/ccsrc/backend/session/anf_runtime_algorithm.cc index 10efe648258..1ead50c9e95 100644 --- a/mindspore/ccsrc/backend/session/anf_runtime_algorithm.cc +++ b/mindspore/ccsrc/backend/session/anf_runtime_algorithm.cc @@ -2163,5 +2163,16 @@ bool AnfRuntimeAlgorithm::IsControlOpExecInBackend(const AnfNodePtr &node) { static std::set control_ops_exec_in_backend = {kBpropCutOpName}; return control_ops_exec_in_backend.find(AnfAlgo::GetCNodeName(node)) != control_ops_exec_in_backend.end(); } + +bool AnfRuntimeAlgorithm::IsNodeInputContainMonad(const AnfNodePtr &node) { + auto input_size = GetInputTensorNum(node); + for (size_t i = 0; i < input_size; ++i) { + auto input_with_index = GetPrevNodeOutput(node, i); + if (HasAbstractMonad(input_with_index.first)) { + return true; + } + } + return false; +} } // namespace session } // namespace mindspore diff --git a/mindspore/ccsrc/backend/session/anf_runtime_algorithm.h b/mindspore/ccsrc/backend/session/anf_runtime_algorithm.h index 3a1bea2d27e..a7a38f95bec 100644 --- a/mindspore/ccsrc/backend/session/anf_runtime_algorithm.h +++ b/mindspore/ccsrc/backend/session/anf_runtime_algorithm.h @@ -300,6 +300,8 @@ class AnfRuntimeAlgorithm { // executed in vm. For example, the operator "bprop_cut" will be compiled into kernel graph and be launch // in backend in PyNative mode. 
static bool IsControlOpExecInBackend(const AnfNodePtr &node); + + static bool IsNodeInputContainMonad(const AnfNodePtr &node); }; } // namespace session using AnfAlgo = session::AnfRuntimeAlgorithm; diff --git a/mindspore/ccsrc/runtime/device/ascend/ascend_kernel_runtime.cc b/mindspore/ccsrc/runtime/device/ascend/ascend_kernel_runtime.cc index 6f1bd7a9142..97d1dfda45d 100644 --- a/mindspore/ccsrc/runtime/device/ascend/ascend_kernel_runtime.cc +++ b/mindspore/ccsrc/runtime/device/ascend/ascend_kernel_runtime.cc @@ -549,7 +549,7 @@ CNodePtr AscendKernelRuntime::GetErrorNodeName(uint32_t streamid, uint32_t taski if (task_id == taskid && stream_id == streamid) { auto &execute_node = current_graph_->execution_order(); auto node = std::find_if(execute_node.begin(), execute_node.end(), - [&iter](const auto &node) { return node->fullname_with_scope() == iter.first; }); + [&iter](const auto &node) { return node->UniqueName() == iter.first; }); if (node != execute_node.end()) { return *node; } diff --git a/mindspore/ccsrc/runtime/device/ascend/dump/data_dumper.cc b/mindspore/ccsrc/runtime/device/ascend/dump/data_dumper.cc index 284524c684b..c115985a657 100644 --- a/mindspore/ccsrc/runtime/device/ascend/dump/data_dumper.cc +++ b/mindspore/ccsrc/runtime/device/ascend/dump/data_dumper.cc @@ -100,8 +100,8 @@ void DataDumper::LoadDumpInfo() { if (!KernelNeedDump(kernel)) { continue; } - MS_LOG(INFO) << "[DataDump] LoadDumpInfo kernel:" << kernel->fullname_with_scope(); - dump_kernel_names_.emplace_back(kernel->fullname_with_scope()); + MS_LOG(INFO) << "[DataDump] LoadDumpInfo kernel:" << kernel->UniqueName(); + dump_kernel_names_.emplace_back(kernel->UniqueName()); DumpJsonParser::GetInstance().MatchKernel(kernel->fullname_with_scope()); aicpu::dump::Task task; @@ -251,7 +251,7 @@ void DataDumper::ReleaseDevMem(void **ptr) const { void DataDumper::ConstructDumpTask(NotNull kernel, NotNull dump_task) const { dump_task->set_end_graph(false); - auto iter = 
runtime_info_map_.find(kernel->fullname_with_scope()); + auto iter = runtime_info_map_.find(kernel->UniqueName()); if (iter == runtime_info_map_.end()) { MS_LOG(EXCEPTION) << "[DataDump] kernel name not found in runtime_info_map"; } @@ -389,6 +389,10 @@ void DataDumper::DumpKernelOutput(const CNodePtr &kernel, void *args, NotNullfullname_with_scope(); + return; + } MS_LOG(INFO) << "[DataDump] DumpKernelOutput start. Kernel:" << kernel->fullname_with_scope(); auto input_size = AnfAlgo::GetInputTensorNum(kernel); auto output_size = AnfAlgo::GetOutputTensorNum(kernel); @@ -423,6 +427,10 @@ void DataDumper::DumpKernelInput(const CNodePtr &kernel, void *args, NotNullfullname_with_scope(); + return; + } MS_LOG(INFO) << "[DataDump] DumpKernelInput start. Kernel:" << kernel->fullname_with_scope(); auto input_size = AnfAlgo::GetInputTensorNum(kernel); uint64_t offset = 0; diff --git a/mindspore/ccsrc/runtime/device/ascend/ge_runtime/runtime_model.cc b/mindspore/ccsrc/runtime/device/ascend/ge_runtime/runtime_model.cc index 65604c7123b..51ecd80a6d4 100644 --- a/mindspore/ccsrc/runtime/device/ascend/ge_runtime/runtime_model.cc +++ b/mindspore/ccsrc/runtime/device/ascend/ge_runtime/runtime_model.cc @@ -203,7 +203,8 @@ void RuntimeModel::DistributeTask() { std::shared_ptr runtime_tuple = std::make_shared(task_id, stream_id, task->Args()); auto emplace_ret = runtime_info_map_.emplace(task->task_name(), runtime_tuple); if (!emplace_ret.second) { - MS_LOG(WARNING) << "Task name exist: " << task->task_name(); + // The task_name is (fullname_with_scope + UniqueId). There should be no duplication. 
+ MS_LOG(EXCEPTION) << "Task name exists: " << task->task_name(); } } } diff --git a/mindspore/ccsrc/runtime/device/ascend/ge_types_convert.cc b/mindspore/ccsrc/runtime/device/ascend/ge_types_convert.cc index 41b1add8a1f..d68cd3ab426 100644 --- a/mindspore/ccsrc/runtime/device/ascend/ge_types_convert.cc +++ b/mindspore/ccsrc/runtime/device/ascend/ge_types_convert.cc @@ -36,7 +36,7 @@ ge::proto::DataType GeTypesConvert::GetGeDataType(TypeId type_id) { MS_LOG(INFO) << "Vm origin type_id:" << type_id; auto iter = data_type_map.find(type_id); if (iter == data_type_map.end()) { - MS_LOG(EXCEPTION) << "Invalid data type:" << type_id; + MS_LOG(EXCEPTION) << "MindSpore data type:" << TypeIdLabel(type_id) << " can't be found in GE."; } return iter->second; } diff --git a/mindspore/ccsrc/runtime/device/ascend/tasksink/task_generator.cc b/mindspore/ccsrc/runtime/device/ascend/tasksink/task_generator.cc index 63c5455bceb..956c0ef1e75 100644 --- a/mindspore/ccsrc/runtime/device/ascend/tasksink/task_generator.cc +++ b/mindspore/ccsrc/runtime/device/ascend/tasksink/task_generator.cc @@ -147,7 +147,9 @@ bool TaskGenerator::LaunchKernel(const CNodePtr &anf_node_ptr, uint32_t stream_i AddressPtrList kernel_outputs; auto kernel_mod = AnfAlgo::GetKernelMod(anf_node_ptr); MS_EXCEPTION_IF_NULL(kernel_mod); - kernel_mod->set_kernel_name(anf_node_ptr->fullname_with_scope()); + kernel_mod->set_unique_name(anf_node_ptr->UniqueName()); + kernel_mod->set_fullname(anf_node_ptr->fullname_with_scope()); + kernel_mod->set_is_monad(AnfAlgo::IsNodeInputContainMonad(anf_node_ptr) && HasAbstractMonad(anf_node_ptr)); auto op_name = AnfAlgo::GetCNodeName(anf_node_ptr); constexpr size_t kNonePlaceholderIdx = 3; if ((op_name == kSplitOpName || op_name == kSplitVOpName) && AnfAlgo::HasNodeAttr(kAttrNonTask, anf_node_ptr)) { diff --git a/mindspore/core/ir/anf.h b/mindspore/core/ir/anf.h index d1c731d49f1..7d4a2607525 100644 --- a/mindspore/core/ir/anf.h +++ b/mindspore/core/ir/anf.h @@ -153,6 +153,7 @@
class AnfNode : public Base { std::size_t hash() const override { return this->hash_(this); } virtual std::string fullname_with_scope() { return ""; } + std::string UniqueName() { return fullname_with_scope() + "_" + UniqueId(); } virtual std::string DebugString(int recursive_level = 1) const { return ToString(); } virtual std::string DebugString(bool recursive) const { return DebugString(recursive ? 1 : 0); }