From ff753147196aafe5c685c62ae88050d6231d696e Mon Sep 17 00:00:00 2001
From: shenwei41
Date: Sat, 12 Feb 2022 15:31:38 +0800
Subject: [PATCH] upgrade ascend 20220211

---
 graphengine                                        |  2 +-
 .../optimizer/common_backend_optimization.cc       |  2 -
 .../pass/convert_const_input_to_attr.cc            | 11 +++
 .../common/session/anf_runtime_algorithm.cc        |  4 +-
 .../backend/common/session/session_basic.cc        | 18 ++++-
 .../ccsrc/backend/graph_compiler/backend.cc        |  1 -
 mindspore/ccsrc/kernel/kash/kernel_pack.cc         |  1 +
 mindspore/ccsrc/kernel/kernel.h                    |  1 +
 .../device/executor/ai_core_dynamic_kernel.cc      |  3 +-
 .../hal/device/ge_runtime/runtime_model.cc         |  2 +-
 .../device/ascend/kernel/aicpu/aicpu_util.h        |  8 +-
 .../kernel/tbe/dynamic_tbe_kernel_mod.cc           | 42 ++++++----
 .../kernel/tbe/dynamic_tbe_kernel_mod.h            |  1 +
 .../device/ascend/kernel/tbe/tbe_adapter.cc        |  1 +
 .../device/ascend/kernel/tbe/tbe_adapter.h         |  5 ++
 .../kernel/tbe/tbe_json/tbe_json_creator.cc        |  5 ++
 .../ascend/kernel/tbe/tbe_kernel_mod.cc            |  2 +-
 .../device/ascend/kernel/tbe/tbe_utils.cc          | 10 +--
 .../device/ascend/kernel/tbe/tbe_utils.h           |  3 +-
 .../optimizer/ascend_backend_optimization.cc       |  3 -
 mindspore/ccsrc/transform/graph_ir/convert.cc      |  7 +-
 mindspore/ccsrc/utils/utils.h                      |  3 +
 .../tbe_compiler/tbe_adapter.py                    |  6 +-
 .../ops/_op_impl/tbe/strided_slice_grad_d.py       | 24 +++---
 .../mindspore/run_check/_check_version.py          |  2 +-
 .../const_to_attr_strided_slice_grad_test.cc       | 77 ------------------
 tests/ut/cpp/stub/runtime/runtime_stub.cc          |  2 +
 tests/ut/cpp/stub/tdt/tdt_mock.cc                  | 39 ----------
 version.txt                                        |  2 +-
 29 files changed, 114 insertions(+), 173 deletions(-)
 delete mode 100644 tests/ut/cpp/pre_activate/pass/const_to_attr_strided_slice_grad_test.cc
 delete mode 100644 tests/ut/cpp/stub/tdt/tdt_mock.cc

diff --git a/graphengine b/graphengine
index 008fafbb15d..6b57c837b3f 160000
--- a/graphengine
+++ b/graphengine
@@ -1 +1 @@
-Subproject commit 008fafbb15d6e1126b424dbb8689a4795578b5b6
+Subproject commit 6b57c837b3f33a479817d9ddfeebfe4d19afe66b
diff --git a/mindspore/ccsrc/backend/common/optimizer/common_backend_optimization.cc b/mindspore/ccsrc/backend/common/optimizer/common_backend_optimization.cc
index 72795d0568d..17ec1e50eb3 100644
--- a/mindspore/ccsrc/backend/common/optimizer/common_backend_optimization.cc
+++ b/mindspore/ccsrc/backend/common/optimizer/common_backend_optimization.cc
@@ -24,7 +24,6 @@
 #include "backend/common/pass/convert_tuple_output_to_maketuple.h"
 #include "backend/common/pass/convert_const_input_to_tensor_input.h"
 #include "backend/common/pass/convert_tuple_input_to_dynamic_input.h"
-#include "backend/common/pass/const_to_attr_strided_slice_grad.h"
 #include "backend/common/pass/convert_const_scalar_to_tensor.h"
 #include "backend/common/pass/convert_attr_to_unify_mindir.h"
 #include "backend/common/pass/add_training_attr.h"
@@ -61,7 +60,6 @@ void BackendCommonOptimization(const std::shared_ptr<session::KernelGraph> &kern
   common_pm->AddPass(std::make_shared());
   common_pm->AddPass(std::make_shared());
   common_pm->AddPass(std::make_shared());
-  common_pm->AddPass(std::make_shared<ConstToAttrStridedSliceGradPass>());
   common_pm->AddPass(std::make_shared());
   common_pm->AddPass(std::make_shared());
   common_pm->AddPass(std::make_shared());
diff --git a/mindspore/ccsrc/backend/common/pass/convert_const_input_to_attr.cc b/mindspore/ccsrc/backend/common/pass/convert_const_input_to_attr.cc
index 084e5b37067..20140cfd488 100644
--- a/mindspore/ccsrc/backend/common/pass/convert_const_input_to_attr.cc
+++ b/mindspore/ccsrc/backend/common/pass/convert_const_input_to_attr.cc
@@ -67,6 +67,17 @@ const AnfNodePtr ConvertConstInputToAttr::Process(const FuncGraphPtr &, const An
       }
     }
   }
+  if (device == kAscendDevice &&
+      NeedConvertToValueNodeSet.find(AnfAlgo::GetCNodeName(cnode)) != NeedConvertToValueNodeSet.end() &&
+      !AnfAlgo::HasNodeAttr(kAttrNeedConvertToValueNode, cnode)) {
+    auto input_attrs = reg.GetConstInputAttrInfo();
+    std::vector<size_t> need_convert_to_constant;
+    std::transform(input_attrs.begin(), input_attrs.end(), std::back_inserter(need_convert_to_constant),
+                   [](size_t i) { return i + 1; });
+    AnfAlgo::SetNodeAttr(kAttrNeedConvertToValueNode, MakeValue(need_convert_to_constant), cnode);
+    return nullptr;
+  }
+
   ConstInputToAttr(cnode, reg.GetConstInputAttrInfo());
   return node;
diff --git a/mindspore/ccsrc/backend/common/session/anf_runtime_algorithm.cc b/mindspore/ccsrc/backend/common/session/anf_runtime_algorithm.cc
index 2e99849a52a..9f825f91a53 100644
--- a/mindspore/ccsrc/backend/common/session/anf_runtime_algorithm.cc
+++ b/mindspore/ccsrc/backend/common/session/anf_runtime_algorithm.cc
@@ -149,7 +149,9 @@ static std::map<std::string, std::pair<std::map<size_t, size_t>, std::map<size_
   {prim::kPrimMaximumGrad->name(), {{{0, 2}, {1, 0}, {2, 1}}, {{2, 0}, {0, 1}, {1, 2}}}},
   {prim::kPrimApplyCenteredRMSProp->name(),
    {{{0, 0}, {1, 1}, {2, 2}, {3, 3}, {4, 5}, {5, 6}, {6, 7}, {7, 8}, {8, 4}},
-    {{0, 0}, {1, 1}, {2, 2}, {3, 3}, {5, 4}, {6, 5}, {7, 6}, {8, 7}, {4, 8}}}}};
+    {{0, 0}, {1, 1}, {2, 2}, {3, 3}, {5, 4}, {6, 5}, {7, 6}, {8, 7}, {4, 8}}}},
+  {prim::kPrimStridedSliceGrad->name(),
+   {{{0, 1}, {1, 2}, {2, 3}, {3, 4}, {4, 0}}, {{1, 0}, {2, 1}, {3, 2}, {4, 3}, {0, 4}}}}};
 
 std::vector<KernelWithIndex> GetAllOutputWithIndexInner(const AnfNodePtr &node) {
   std::vector<KernelWithIndex> ret;
diff --git a/mindspore/ccsrc/backend/common/session/session_basic.cc b/mindspore/ccsrc/backend/common/session/session_basic.cc
index 9018a5c5a7e..45e5410fbc6 100644
--- a/mindspore/ccsrc/backend/common/session/session_basic.cc
+++ b/mindspore/ccsrc/backend/common/session/session_basic.cc
@@ -1454,7 +1454,9 @@ void SessionBasic::GetForwardOpOutputRefCount(const KernelGraph *graph, const st
     MS_EXCEPTION_IF_NULL(real_input);
     if (real_input->isa<ValueNode>()) {
       const auto &tensor = GetValueNodeOutputTensor(real_input, kernel_with_index.second);
-      MS_EXCEPTION_IF_NULL(tensor);
+      if (tensor == nullptr) {
+        continue;
+      }
       if (forward_op_output_id.find(tensor->id()) != forward_op_output_id.end()) {
         (*forward_op_output_tensor_id)[tensor->id()] += 1;
       }
@@ -1630,6 +1632,11 @@ void SessionBasic::GetOpInputTensors(const CNodePtr &cnode,
                                      InputTensorInfo *input_tensor_info) {
   MS_EXCEPTION_IF_NULL(cnode);
   MS_EXCEPTION_IF_NULL(input_tensor_info);
+  auto has_const_input_to_attr = AnfAlgo::HasNodeAttr(kAttrNeedConvertToValueNode, cnode);
+  std::vector<size_t> const_input_attr_index = {};
+  if (has_const_input_to_attr) {
+    const_input_attr_index = AnfAlgo::GetNodeAttr<std::vector<size_t>>(cnode, kAttrNeedConvertToValueNode);
+  }
   const auto input_tensor_num = AnfAlgo::GetInputTensorNum(cnode);
   for (size_t i = 1; i <= input_tensor_num; i += 1) {
     const auto &input = cnode->input(i);
@@ -1641,8 +1648,13 @@
       tensor = GetValueNodeOutputTensor(real_input, kernel_with_index.second);
       const auto &value_ptr = GetValueNode(real_input);
       MS_EXCEPTION_IF_NULL(value_ptr);
-      input_tensor_info->input_tensors_mask.emplace_back(value_ptr->isa() ? kValueNodeTensorMask
-                                                                          : kParameterDataTensorMask);
+      auto is_value_node = value_ptr->isa();
+      if (has_const_input_to_attr) {
+        is_value_node =
+          std::find(const_input_attr_index.begin(), const_input_attr_index.end(), i) != const_input_attr_index.end();
+      }
+      input_tensor_info->input_tensors_mask.emplace_back(is_value_node ? kValueNodeTensorMask
+                                                                       : kParameterDataTensorMask);
     } else if (real_input->isa<Parameter>()) {
       tensor = GetParameterOutputTensor(real_input, parameter_index, graph_inputs);
       input_tensor_info->input_tensors_mask.emplace_back(tensor->is_parameter() ? kParameterWeightTensorMask
diff --git a/mindspore/ccsrc/backend/graph_compiler/backend.cc b/mindspore/ccsrc/backend/graph_compiler/backend.cc
index f45c744b81f..a754da403c8 100644
--- a/mindspore/ccsrc/backend/graph_compiler/backend.cc
+++ b/mindspore/ccsrc/backend/graph_compiler/backend.cc
@@ -867,7 +867,6 @@ void MindRTBackend::RunGraphBySingleOp(const std::vector<KernelGraphPtr> &graphs
   for (const auto &kernel : graph->execution_order()) {
     InputTensorInfo input_tensor_info;
     VectorRef op_outputs;
-
     if (!AnfAlgo::IsControlOpExecInBackend(kernel)) {
       OpRunInfo op_run_info;
       GraphInfo graph_info;
diff --git a/mindspore/ccsrc/kernel/kash/kernel_pack.cc b/mindspore/ccsrc/kernel/kash/kernel_pack.cc
index aa8047b4a86..db259c40c71 100644
--- a/mindspore/ccsrc/kernel/kash/kernel_pack.cc
+++ b/mindspore/ccsrc/kernel/kash/kernel_pack.cc
@@ -203,6 +203,7 @@ void KernelPack::ParseKernelJson(const nlohmann::json &js) {
     }
   }
   kernel_json_info_.sha256 = js["sha256"];
+  kernel_json_info_.has_kernel_list = js.find("kernelList") != js.end();
 }
 
 bool KernelPack::LoadKernelMeta(const std::string &json_f) {
diff --git a/mindspore/ccsrc/kernel/kernel.h b/mindspore/ccsrc/kernel/kernel.h
index fe4e89e80c9..dfcd1a7d7d0 100644
--- a/mindspore/ccsrc/kernel/kernel.h
+++ b/mindspore/ccsrc/kernel/kernel.h
@@ -128,6 +128,7 @@ struct KernelJsonInfo {
   std::vector<size_t> parameters;
   std::string sha256;
   std::vector<size_t> workspaces;
+  bool has_kernel_list = false;
   uint32_t op_para_size;
   KernelJsonInfo() : block_dim(0), op_para_size(0) {}
 };
diff --git a/mindspore/ccsrc/plugin/device/ascend/hal/device/executor/ai_core_dynamic_kernel.cc b/mindspore/ccsrc/plugin/device/ascend/hal/device/executor/ai_core_dynamic_kernel.cc
index 2d1d0ebfd68..13a767d6536 100644
--- a/mindspore/ccsrc/plugin/device/ascend/hal/device/executor/ai_core_dynamic_kernel.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/hal/device/executor/ai_core_dynamic_kernel.cc
@@ -94,8 +94,7 @@ void AiCoreDynamicKernel::Execute() {
   rtL2Ctrl_t *l2ctrl = nullptr;
   auto args_size = static_cast<uint32_t>(UlongToUint(sizeof(void *)) * runtime_args_.size());
   if (handle_ != nullptr) {
-    const auto dev_func =
-        origin_key_.find("kernel0") != origin_key_.npos ? origin_key_ : origin_key_ + "_" + std::to_string(tiling_key_);
+    const auto dev_func = std::to_string(tiling_key_);
     const auto kernel_info = node_info + "/" + std::to_string(tiling_key_);
     if (RT_ERROR_NONE != rtKernelLaunchWithHandle(handle_, dev_func.c_str(), block_dim_, runtime_args_.data(),
                                                   args_size, l2ctrl, stream_, kernel_info.c_str())) {
diff --git a/mindspore/ccsrc/plugin/device/ascend/hal/device/ge_runtime/runtime_model.cc b/mindspore/ccsrc/plugin/device/ascend/hal/device/ge_runtime/runtime_model.cc
index 5fd9a144494..8c17e882b75 100644
--- a/mindspore/ccsrc/plugin/device/ascend/hal/device/ge_runtime/runtime_model.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/hal/device/ge_runtime/runtime_model.cc
@@ -122,7 +122,7 @@ void RuntimeModel::InitLabel(const std::shared_ptr<DavinciModel> &davinci_model)
     }
 
     rtLabel_t rt_label = nullptr;
-    rtError_t rt_ret = rtLabelCreateEx(&rt_label, stream_list_[label_set_task_info->stream_id()]);
+    rtError_t rt_ret = rtLabelCreateExV2(&rt_label, rt_model_handle_, stream_list_[label_set_task_info->stream_id()]);
     if (rt_ret != RT_ERROR_NONE) {
       MS_LOG(EXCEPTION) << "Call rt api rtLabelCreate failed, ret: " << rt_ret;
     }
diff --git a/mindspore/ccsrc/plugin/device/ascend/kernel/aicpu/aicpu_util.h b/mindspore/ccsrc/plugin/device/ascend/kernel/aicpu/aicpu_util.h
index d4e32921cb8..71554609482 100644
--- a/mindspore/ccsrc/plugin/device/ascend/kernel/aicpu/aicpu_util.h
+++ b/mindspore/ccsrc/plugin/device/ascend/kernel/aicpu/aicpu_util.h
@@ -77,10 +77,10 @@ constexpr auto kEnvironGet = "EnvironGet";
 constexpr auto kEnvironDestroyAll = "EnvironDestroyAll";
 const std::set<std::string> kCpuKernelOps{kIdentity, kMaskedSelect, kMaskedSelectGrad, kDynamicStitch, kSearchSorted,
                                           kResizeBilinear, kResizeBilinearGrad, kScatterElements};
-const std::set<std::string> kCacheKernelOps{kUpdateCache, kCacheSwapTable, kSubAndFilter, kPadAndShift,
-                                            kDropout3D, kDropout2D, kNonMaxSuppressionV3};
-const std::set<std::string> kCpuKernelBaseOps{kGetNext, kInitData, kRandomChoiceWithMask, kEnvironCreate,
-                                              kEnvironSet, kEnvironGet, kEnvironDestroyAll};
+const std::set<std::string> kCacheKernelOps{kUpdateCache, kCacheSwapTable, kSubAndFilter, kPadAndShift, kDropout3D,
+                                            kDropout2D, kNonMaxSuppressionV3, kGetNext, kInitData, kPrint};
+const std::set<std::string> kCpuKernelBaseOps{kRandomChoiceWithMask, kEnvironCreate, kEnvironSet, kEnvironGet,
+                                              kEnvironDestroyAll};
 const std::set<std::string> kDynamicInputOps{
     kPrint, kPack, kMeshgrid, kStackInitOpName, kStackDestroyOpName, kStackPushOpName, kStackPopOpName, kDynamicStitch};
 struct AicpuParamHead {
diff --git a/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/dynamic_tbe_kernel_mod.cc b/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/dynamic_tbe_kernel_mod.cc
index ab53fa258cc..75a508f45f6 100644
--- a/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/dynamic_tbe_kernel_mod.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/dynamic_tbe_kernel_mod.cc
@@ -102,10 +102,17 @@ void DynamicTbeKernelMod::InitOp() {
   }
 
   // gen FuncStub
-  if (handle_ == nullptr) {
+  if (func_stub_ == nullptr || handle_ == nullptr) {
     auto func_stub = KernelManager::GenFuncStub(*kernel_pack_, false, &block_dim_, true, &handle_, &origin_key_);
-    if (func_stub != 1) {
-      MS_LOG(EXCEPTION) << "GenFuncStub failed.";
+    if (kernel_pack_->kernel_json_info().has_kernel_list) {
+      if (func_stub != 1) {
+        MS_LOG(EXCEPTION) << "GenFuncStub failed.";
+      }
+    } else {
+      if (func_stub == 0) {
+        MS_LOG(EXCEPTION) << "GenFuncStub failed.";
+      }
+      func_stub_ = reinterpret_cast<void *>(func_stub);
     }
   }
 
@@ -286,16 +293,25 @@ bool DynamicTbeKernelMod::Launch(const std::vector<AddressPtr> &inputs, const st
   rtL2Ctrl_t *l2ctrl = nullptr;
   auto args_size = static_cast<uint32_t>(UlongToUint(sizeof(void *)) * runtimeargs.size());
   auto node_info = cnode->fullname_with_scope();
-  const auto dev_func =
-      origin_key_.find("kernel0") != origin_key_.npos ? origin_key_ : origin_key_ + "_" + std::to_string(tiling_key_);
-  const auto kernel_info = node_info + "/" + std::to_string(tiling_key_);
-  // cppcheck-suppress unreadVariable
-  auto lock = device::KernelRuntime::LockRuntime();
-  auto ret = rtKernelLaunchWithHandle(handle_, dev_func.c_str(), block_dim_, runtimeargs.data(), args_size, l2ctrl,
-                                      stream_ptr, kernel_info.c_str());
-  if (ret != RT_ERROR_NONE) {
-    MS_LOG(ERROR) << "Call runtime rtKernelLaunchWithHandle error. Node info: " << node_info;
-    return false;
+  if (kernel_pack_->kernel_json_info().has_kernel_list) {
+    const auto dev_func = std::to_string(tiling_key_);
+    const auto kernel_info = node_info + "/" + std::to_string(tiling_key_);
+    // cppcheck-suppress unreadVariable
+    auto lock = device::KernelRuntime::LockRuntime();
+    auto ret = rtKernelLaunchWithHandle(handle_, dev_func.c_str(), block_dim_, runtimeargs.data(), args_size, l2ctrl,
+                                        stream_ptr, kernel_info.c_str());
+    if (ret != RT_ERROR_NONE) {
+      MS_LOG(ERROR) << "Call runtime rtKernelLaunchWithHandle error. Node info: " << node_info;
+      return false;
+    }
+  } else {
+    // cppcheck-suppress unreadVariable
+    auto lock = device::KernelRuntime::LockRuntime();
+    auto ret = rtKernelLaunch(func_stub_, block_dim_, runtimeargs.data(), args_size, l2ctrl, stream_ptr);
+    if (ret != RT_ERROR_NONE) {
+      MS_LOG(ERROR) << "Call runtime rtKernelLaunch error. Node info: " << node_info;
+      return false;
+    }
   }
 
   return true;
diff --git a/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/dynamic_tbe_kernel_mod.h b/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/dynamic_tbe_kernel_mod.h
index 01ef831b0ec..3cda7fa93d3 100644
--- a/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/dynamic_tbe_kernel_mod.h
+++ b/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/dynamic_tbe_kernel_mod.h
@@ -57,6 +57,7 @@ class DynamicTbeKernelMod : public TbeKernelMod {
   void *tiling_data_ptr_ = nullptr;
   uint64_t tiling_key_{0};
   void *handle_ = nullptr;
+  void *func_stub_ = nullptr;
   std::string origin_key_{""};
   std::string op_compile_info_{};
   bool need_skip_execute_ = false;
diff --git a/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/tbe_adapter.cc b/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/tbe_adapter.cc
index c6c8f90d3d9..5791d4431a0 100644
--- a/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/tbe_adapter.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/tbe_adapter.cc
@@ -73,6 +73,7 @@ std::unordered_set<std::string> TbeAdapter::input_order_adjusted_ops_ = {kConv2D
                                                                          kLayerNormBetaGammaBackpropOpName,
                                                                          kMinimumGradOpName,
                                                                          kMaximumGradOpName,
+                                                                         kStridedSliceGradOpName,
                                                                          kApplyCenteredRMSPropOpName};
 
 bool TbeAdapter::IsSpecialFusionComputeNode(const std::vector<AnfNodePtr> &compute_nodes) {
diff --git a/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/tbe_adapter.h b/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/tbe_adapter.h
index 074a76bd912..34c79926381 100644
--- a/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/tbe_adapter.h
+++ b/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/tbe_adapter.h
@@ -102,6 +102,11 @@ class TbeAdapter {
       inputs_json->push_back(inputs_list[kIndex7]);
       inputs_json->push_back(inputs_list[kIndex8]);
       inputs_json->push_back(inputs_list[kIndex4]);
+    } else if (op_name == kStridedSliceGradOpName) {
+      for (size_t i = 1; i < inputs_list.size(); ++i) {
+        inputs_json->push_back(inputs_list[i]);
+      }
+      inputs_json->push_back(inputs_list[kIndex0]);
     } else {
       if (inputs_list.size() < kIndex2) {
         MS_LOG(EXCEPTION) << "Op " << op_name << " should have at least " << kIndex2 << " inputs, but got "
diff --git a/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/tbe_json/tbe_json_creator.cc b/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/tbe_json/tbe_json_creator.cc
index d2faf1765bb..8cddecea94e 100644
--- a/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/tbe_json/tbe_json_creator.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/tbe_json/tbe_json_creator.cc
@@ -522,6 +522,11 @@ void TbeJsonCreator::GenInputConstValue(const AnfNodePtr &anf_node, size_t real_
     auto value = value_node->value();
     MS_EXCEPTION_IF_NULL(value);
     ParseConstValue(value, input_desc);
+  } else if (input_node->isa<Parameter>()) {
+    auto param = input_node->cast<ParameterPtr>();
+    auto value = param->default_param();
+    MS_EXCEPTION_IF_NULL(value);
+    ParseConstValue(value, input_desc);
   } else {
     MS_LOG(ERROR) << "The operator " << anf_node->fullname_with_scope() << "'s input" << real_input_index
                   << "'s value depend is " << value_depend << ", but its input node is a " << input_node->type_name()
diff --git a/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/tbe_kernel_mod.cc b/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/tbe_kernel_mod.cc
index 164233aaf48..9172870aa1a 100644
--- a/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/tbe_kernel_mod.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/tbe_kernel_mod.cc
@@ -165,7 +165,7 @@ device::DynamicKernelPtr TbeKernelMod::GenDynamicKernel(const CNodePtr &cnode_pt
   std::string origin_key;
   void *handle = nullptr;
   auto func_stub = KernelManager::GenFuncStub(*kernel_pack_, false, &block_dim, dynamic_flag, &handle, &origin_key);
-  if (dynamic_flag) {
+  if (kernel_json_info.has_kernel_list) {
     if (func_stub != 1) {
       MS_LOG(EXCEPTION) << "GenFuncStub failed.";
     }
diff --git a/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/tbe_utils.cc b/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/tbe_utils.cc
index 1bd2f57d162..91f24a8ae7a 100644
--- a/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/tbe_utils.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/tbe_utils.cc
@@ -236,7 +236,7 @@ KernelPackPtr TbeUtils::InsertCache(const std::string &kernel_name, const std::s
 }
 
 int KernelManager::BinaryRegister(const mindspore::kernel::FlexArray &kernel_buffer, void **module, const string &magic,
-                                  const bool dynamic_flag) {
+                                  bool has_kernel_list) {
   static std::map<std::string, uint32_t> magic_maps = {{"RT_DEV_BINARY_MAGIC_PLAIN", RT_DEV_BINARY_MAGIC_PLAIN},
                                                        {"RT_DEV_BINARY_MAGIC_PLAIN_AICPU", RT_DEV_BINARY_MAGIC_PLAIN_AICPU},
                                                        {"RT_DEV_BINARY_MAGIC_PLAIN_AIVEC", RT_DEV_BINARY_MAGIC_PLAIN_AIVEC},
@@ -255,7 +255,7 @@ int KernelManager::BinaryRegister(const mindspore::kernel::FlexArray &kernel_buf
   dev_bin.magic = iter->second;
   dev_bin.length = kernel_buffer.len;
   dev_bin.version = 0;
-  auto ret = dynamic_flag ? rtRegisterAllKernel(&dev_bin, module) : rtDevBinaryRegister(&dev_bin, module);
+  auto ret = has_kernel_list ? rtRegisterAllKernel(&dev_bin, module) : rtDevBinaryRegister(&dev_bin, module);
   if (RT_ERROR_NONE != ret) {
     MS_LOG(INFO) << "Call runtime rtDevBinaryRegister error.";
     return -1;
   }
@@ -288,20 +288,20 @@ uintptr_t KernelManager::GenFuncStub(const mindspore::kernel::KernelPack &kernel
   if (iter != info_table_.end()) {
     auto kernelmeta = iter->second;
     *block_dim = kernelmeta->block_dim_;
-    if (!dynamic_flag) {
+    if (!kernel_json_info.has_kernel_list) {
       return kernelmeta->func_stub_;
     }
   }
 }
 void *module = nullptr;
-  if (BinaryRegister((*kernel_pack.GetKernel()), &module, magic, dynamic_flag) != 0) {
+  if (BinaryRegister((*kernel_pack.GetKernel()), &module, magic, kernel_json_info.has_kernel_list) != 0) {
     MS_LOG(INFO) << "Call runtime BinaryRegister error.";
     if (module != nullptr) {
       (void)rtDevBinaryUnRegister(module);
     }
     return 0;
   }
-  if (dynamic_flag) {
+  if (kernel_json_info.has_kernel_list) {
     *handle = module;
     *origin_key = func_name;
     return 1;
diff --git a/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/tbe_utils.h b/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/tbe_utils.h
index 85588611b85..3269a9aba88 100644
--- a/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/tbe_utils.h
+++ b/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/tbe_utils.h
@@ -104,8 +104,7 @@ class KernelManager {
  private:
  KernelManager() = default;
  ~KernelManager() = default;
-  static int BinaryRegister(const FlexArray &kernel_buffer, void **module, const string &magic,
-                            const bool dynamic_flag);
+  static int BinaryRegister(const FlexArray &kernel_buffer, void **module, const string &magic, bool has_kernel_list);
   static std::unordered_map<std::string, KernelMetaPtr> info_table_;
   static std::atomic<uintptr_t> kernel_stub_gen_;
   static std::mutex info_table_mutex_;
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/ascend_backend_optimization.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/ascend_backend_optimization.cc
index 4f177582be7..bd3f55664b0 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/ascend_backend_optimization.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/ascend_backend_optimization.cc
@@ -580,9 +580,6 @@ void AscendUnifyMindIR(const std::shared_ptr<session::KernelGraph> &graph) {
   unify_mindir_pm->AddPass(std::make_shared());
   unify_mindir_pm->AddPass(std::make_shared());
   unify_mindir_pm->AddPass(std::make_shared());
-  unify_mindir_pm->AddPass(std::make_shared());
-  unify_mindir_pm->AddPass(std::make_shared());
-  unify_mindir_pm->AddPass(std::make_shared());
   unify_mindir_pm->AddPass(std::make_shared());
   unify_mindir_pm->AddPass(std::make_shared());
   unify_mindir_pm->AddPass(std::make_shared());
diff --git a/mindspore/ccsrc/transform/graph_ir/convert.cc b/mindspore/ccsrc/transform/graph_ir/convert.cc
index 72500d6540b..b9a3db1d1a9 100644
--- a/mindspore/ccsrc/transform/graph_ir/convert.cc
+++ b/mindspore/ccsrc/transform/graph_ir/convert.cc
@@ -597,7 +597,12 @@ DfGraphConvertor &DfGraphConvertor::ConvertAllNode() {
   MS_LOG(INFO) << "Dataset param is " << param.ToString() << ".";
   // GetNext
   auto iter_getnext_op = make_shared<ge::op::GetNext>("get_next_tmp");
-  (void)iter_getnext_op->set_attr_output_types(param.ge_types());
+  std::vector<ge::DataType> getnext_types;
+  const auto &origin_ge_types = param.ge_types();
+  std::transform(
+    origin_ge_types.begin(), origin_ge_types.end(), std::back_inserter(getnext_types),
+    [](int64_t t_num) -> enum ge::DataType { return static_cast<ge::DataType>(t_num); });
+  (void)iter_getnext_op->set_attr_output_types(getnext_types);
   (void)iter_getnext_op->set_attr_output_shapes(param.shapes());
   (void)iter_getnext_op->set_attr_channel_name(param.queue_name());
diff --git a/mindspore/ccsrc/utils/utils.h b/mindspore/ccsrc/utils/utils.h
index b5e8a8bab8f..7c3efef2053 100644
--- a/mindspore/ccsrc/utils/utils.h
+++ b/mindspore/ccsrc/utils/utils.h
@@ -527,6 +527,7 @@ constexpr auto kAttrPlaceHolderIndex = "placeholder_index";
 constexpr auto kAttrMicro = "micro";
 constexpr auto kAttrJsonFileName = "json_file_name";
 constexpr auto kAttrNeedDropInput = "need_drop_input";
+constexpr auto kAttrNeedConvertToValueNode = "need_convert_to_value_node";
 constexpr auto kAttrSendSrcNodeName = "send_src_node_name";
 constexpr auto kAttrSendDstNodeName = "send_dst_node_name";
 constexpr auto kAttrSendDstRanks = "send_dst_ranks";
@@ -773,6 +774,8 @@ const std::set<std::string> DynamicShapeConstInputToAttr = {kCastOpName, kE
     kReduceMinOpName, kReduceMeanOpName, kReduceMaxOpName, kReduceAllOpName, kReduceAnyOpName, kConcatOpName};
 
+const std::set<std::string> NeedConvertToValueNodeSet = {kStridedSliceGradOpName};
+
 const std::set<std::string> DynamicShapeConstInputToAttrCPU = {
     kCastOpName,      kExpandDimsOpName, kEmbeddingLookupOpName, kReduceMinOpName, kReduceMeanOpName,
     kReduceMaxOpName, kReduceAllOpName,  kReduceAnyOpName,       kConcatOpName,    kReduceSumOpName};
diff --git a/mindspore/python/mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py b/mindspore/python/mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py
index d5523711a55..b780b6d5fd6 100644
--- a/mindspore/python/mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py
+++ b/mindspore/python/mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py
@@ -403,7 +403,6 @@ def _pre_build_compute_op_info(compute_op, job):
     _normalize_module_name(op_module_name, py_module_path)
     unknown_shape = compute_op["unknown_shape"]
     int64_mode = compute_op["int64mode"]
-    dynamic_compile_static = compute_op["dynamic_compile_static"]
     res = check_op_impl_mode(op_module_name, op_func_name)
     op_impl_mode = job.content["SocInfo"]["op_impl_mode"]
     op_impl_mode_list = job.content["SocInfo"]["op_impl_mode_list"]
@@ -416,7 +415,7 @@ def _pre_build_compute_op_info(compute_op, job):
     options = get_options_info(job.content)
     dispatch_prebuild_task(job.source_id, job.id, l1_size, op_module_name, op_full_name,
                            op_type, op_func_name, unknown_shape,
-                           (inputs, outputs, attrs, options), int64_mode, dynamic_compile_static, unknown_shape,
+                           (inputs, outputs, attrs, options), int64_mode, unknown_shape,
                            None, job.pass_list)
@@ -477,13 +476,12 @@ def build_single_pre_op(job: TbeJob):
     _normalize_module_name(op_module_name, py_module_path)
     unknown_shape = compute_op_info["unknown_shape"]
     int64_mode = compute_op_info["int64mode"]
-    dynamic_compile_static = compute_op_info["dynamic_compile_static"]
     op_pattern = compute_op_info["pattern"]
     options = get_options_info(job.content)
     fuzz_build_info = get_fuzz_build_info(job.content)
     dispatch_single_op_compile_task(job.source_id, job.id, l1_size, op_module_name, op_name, op_type, op_func_name,
                                     op_kernel_name, unknown_shape, (inputs, outputs, attrs, options), int64_mode,
-                                    None, None, dynamic_compile_static, unknown_shape, op_pattern,
+                                    None, None, unknown_shape, op_pattern,
                                     json.dumps(fuzz_build_info), None, job.pass_list)
     return True
diff --git a/mindspore/python/mindspore/ops/_op_impl/tbe/strided_slice_grad_d.py b/mindspore/python/mindspore/ops/_op_impl/tbe/strided_slice_grad_d.py
index d1f8a1e23ae..42309898f4b 100644
--- a/mindspore/python/mindspore/ops/_op_impl/tbe/strided_slice_grad_d.py
+++ b/mindspore/python/mindspore/ops/_op_impl/tbe/strided_slice_grad_d.py
@@ -19,26 +19,28 @@ from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
 
 strided_slice_grad_d_op_info = TBERegOp("StridedSliceGrad") \
     .fusion_type("OPAQUE") \
     .async_flag(False) \
-    .binfile_name("strided_slice_grad_d.so") \
+    .binfile_name("strided_slice_grad.so") \
     .compute_cost(10) \
-    .kernel_name("strided_slice_grad_d") \
+    .kernel_name("strided_slice_grad") \
     .partial_flag(True) \
-    .attr("shapex", "required", "listInt", "all") \
-    .attr("begin", "required", "listInt", "all") \
-    .attr("end", "required", "listInt", "all") \
-    .attr("strides", "required", "listInt", "all") \
+    .dynamic_compile_static(True) \
     .attr("begin_mask", "optional", "int", "all", "0") \
     .attr("end_mask", "optional", "int", "all", "0") \
     .attr("ellipsis_mask", "optional", "int", "all", "0") \
     .attr("new_axis_mask", "optional", "int", "all", "0") \
     .attr("shrink_axis_mask", "optional", "int", "all", "0") \
     .input(0, "dy", False, "required", "all") \
+    .input(1, "shapex", False, "required", "all", "optional") \
+    .input(2, "begin", False, "required", "all", "optional") \
+    .input(3, "end", False, "required", "all", "optional") \
+    .input(4, "strides", False, "required", "all", "optional") \
     .output(0, "output", False, "required", "all") \
-    .dtype_format(DataType.I8_Default, DataType.I8_Default) \
-    .dtype_format(DataType.U8_Default, DataType.U8_Default) \
-    .dtype_format(DataType.I32_Default, DataType.I32_Default) \
-    .dtype_format(DataType.F16_Default, DataType.F16_Default) \
-    .dtype_format(DataType.F32_Default, DataType.F32_Default) \
+    .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default, DataType.I32_Default,
+                  DataType.I32_Default, DataType.I32_Default) \
+    .dtype_format(DataType.F16_Default, DataType.I32_Default, DataType.I32_Default, DataType.I32_Default,
+                  DataType.I32_Default, DataType.F16_Default) \
+    .dtype_format(DataType.F32_Default, DataType.I32_Default, DataType.I32_Default, DataType.I32_Default,
+                  DataType.I32_Default, DataType.F32_Default) \
     .get_op_info()
diff --git a/mindspore/python/mindspore/run_check/_check_version.py b/mindspore/python/mindspore/run_check/_check_version.py
index 673f2b8c100..565df05f3f2 100644
--- a/mindspore/python/mindspore/run_check/_check_version.py
+++ b/mindspore/python/mindspore/run_check/_check_version.py
@@ -206,7 +206,7 @@ class AscendEnvChecker(EnvChecker):
     """ascend environment check"""
 
     def __init__(self):
-        self.version = ["1.80"]
+        self.version = ["1.81"]
         atlas_nnae_version = "/usr/local/Ascend/nnae/latest/fwkacllib/version.info"
         atlas_toolkit_version = "/usr/local/Ascend/ascend-toolkit/latest/fwkacllib/version.info"
         hisi_fwk_version = "/usr/local/Ascend/fwkacllib/version.info"
diff --git a/tests/ut/cpp/pre_activate/pass/const_to_attr_strided_slice_grad_test.cc b/tests/ut/cpp/pre_activate/pass/const_to_attr_strided_slice_grad_test.cc
deleted file mode 100644
index cd39a2d4c9d..00000000000
--- a/tests/ut/cpp/pre_activate/pass/const_to_attr_strided_slice_grad_test.cc
+++ /dev/null
@@ -1,77 +0,0 @@
-/**
- * Copyright 2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "common/backend_common_test.h"
-#include "frontend/operator/ops.h"
-#include "debug/anf_ir_dump.h"
-#include "common/py_func_graph_fetcher.h"
-#include "backend/common/session/anf_runtime_algorithm.h"
-#include "backend/common/optimizer/optimizer.h"
-#include "backend/common/optimizer/pass_manager.h"
-#include "backend/common/pass/const_to_attr_strided_slice_grad.h"
-#include "utils/utils.h"
-#include "utils/ms_utils.h"
-
-namespace mindspore {
-namespace opt {
-class TestHWConstToAttrStridedSliceGrad : public BackendCommon {
- public:
-  TestHWConstToAttrStridedSliceGrad() : getPyFun_("gtest_input.pre_activate.const_to_attr_strided_slice_grad", true) {}
-  ~TestHWConstToAttrStridedSliceGrad() override = default;
-
- public:
-  UT::PyFuncGraphFetcher getPyFun_;
-};
-
-TEST_F(TestHWConstToAttrStridedSliceGrad, test_strided_slice_grad) {
-  FuncGraphPtr g = getPyFun_.CallAndParseRet("test_const_to_attr_strided_slice_grad", "before");
-  ASSERT_TRUE(g != nullptr);
-  FuncGraphPtr g_after = getPyFun_.CallAndParseRet("test_const_to_attr_strided_slice_grad", "after");
-  ASSERT_TRUE(g_after != nullptr);
-
-  auto ret = g->get_return();
-  ASSERT_TRUE(ret != nullptr);
-  EXPECT_NE(ret->input(1), nullptr);
-  EXPECT_NE(ret->input(1)->cast<CNodePtr>(), nullptr);
-  auto cnode = ret->input(1)->cast<CNodePtr>();
-  EXPECT_FALSE(AnfAlgo::HasNodeAttr("shapex", cnode));
-  EXPECT_FALSE(AnfAlgo::HasNodeAttr("begin", cnode));
-  EXPECT_FALSE(AnfAlgo::HasNodeAttr("end", cnode));
-  EXPECT_FALSE(AnfAlgo::HasNodeAttr("strides", cnode));
-  EXPECT_FALSE(CheckEqualGraph(g, g_after));
-
-  std::vector<int64_t> shp_x{16, 1, 1024};
-  auto x_abstract = std::make_shared<abstract::AbstractTensor>(kFloat32, shp_x);
-  AbstractBasePtrList args_spec_list{x_abstract};
-  auto kg = GetKernelGraph(g, args_spec_list);
-  ASSERT_TRUE(kg != nullptr);
-
-  ret = kg->get_return();
-  ASSERT_TRUE(ret != nullptr);
-  EXPECT_NE(ret->input(1), nullptr);
-  EXPECT_NE(ret->input(1)->cast<CNodePtr>(), nullptr);
-  auto make_tuple = ret->input(1)->cast<CNodePtr>();
-  ASSERT_TRUE(make_tuple != nullptr);
-  EXPECT_NE(make_tuple->input(1), nullptr);
-  EXPECT_NE(make_tuple->input(1)->cast<CNodePtr>(), nullptr);
-  cnode = make_tuple->input(1)->cast<CNodePtr>();
-  EXPECT_TRUE(AnfAlgo::HasNodeAttr("shapex", cnode));
-  EXPECT_TRUE(AnfAlgo::HasNodeAttr("begin", cnode));
-  EXPECT_TRUE(AnfAlgo::HasNodeAttr("end", cnode));
-  EXPECT_TRUE(AnfAlgo::HasNodeAttr("strides", cnode));
-  EXPECT_TRUE(CheckEqualGraph(kg, g_after));
-}
-}  // namespace opt
-}  // namespace mindspore
diff --git a/tests/ut/cpp/stub/runtime/runtime_stub.cc b/tests/ut/cpp/stub/runtime/runtime_stub.cc
index fea2933ce51..ef0be5d7b1e 100644
--- a/tests/ut/cpp/stub/runtime/runtime_stub.cc
+++ b/tests/ut/cpp/stub/runtime/runtime_stub.cc
@@ -185,6 +185,8 @@ RTS_API rtError_t rtModelGetId(rtModel_t model, uint32_t *modelId) { return RT_E
 
 RTS_API rtError_t rtLabelCreateEx(rtLabel_t *label, rtStream_t stream) { return RT_ERROR_NONE; }
 
+RTS_API rtError_t rtLabelCreateExV2(rtLabel_t *lbl, rtModel_t mdl, rtStream_t stm) { return RT_ERROR_NONE; }
+
 RTS_API rtError_t rtCpuKernelLaunchWithFlag(const void *soName, const void *kernelName, uint32_t blockDim,
                                             const void *args, uint32_t argsSize, rtSmDesc_t *smDesc, rtStream_t stream,
                                             uint32_t flags) {
diff --git a/tests/ut/cpp/stub/tdt/tdt_mock.cc b/tests/ut/cpp/stub/tdt/tdt_mock.cc
deleted file mode 100644
index 6b9c6f95a4b..00000000000
--- a/tests/ut/cpp/stub/tdt/tdt_mock.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Copyright 2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef TDT_MOCK_H
-#define TDT_MOCK_H
-
-#include "tdt/tsd_client.h"
-
-namespace tdt {
-StatusFactory* StatusFactory::GetInstance() {
-  static StatusFactory instance;
-  return &instance;
-}
-
-void StatusFactory::RegisterErrorNo(const uint32_t err, const std::string& desc) { return; }
-
-std::string StatusFactory::GetErrDesc(const uint32_t err) { return "Error"; }
-
-std::string StatusFactory::GetErrCodeDesc(uint32_t errCode) { return "Error"; }
-
-StatusFactory::StatusFactory() {}
-
-std::mutex& StatusFactory::GetMutex() { return GetInstance()->rwMutex_; }
-
-}  // namespace tdt
-#endif  // TDT_MOCK_H
diff --git a/version.txt b/version.txt
index ce6a70b9d84..9dbb0c0052e 100644
--- a/version.txt
+++ b/version.txt
@@ -1 +1 @@
-1.6.0
\ No newline at end of file
+1.7.0
\ No newline at end of file
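
Reviewer note (appended after the patch, not part of it): the thread running through kernel_pack.cc,
tbe_utils.cc and dynamic_tbe_kernel_mod.cc is that kernel registration and launch are now keyed on
whether the compiled kernel's JSON carries a "kernelList" section (has_kernel_list) rather than on
dynamic_flag. A minimal C++ sketch of the resulting control flow, reduced from the hunks above --
the function name and the flattened parameter list are illustrative, and error handling plus the
KernelRuntime lock are elided; the rt* signatures are the ones exercised in the patch (see the
runtime_stub.cc hunk for the stubbed declarations):

#include <string>

rtError_t LaunchTbeKernel(const KernelJsonInfo &info, void *handle, void *func_stub,
                          uint64_t tiling_key, uint32_t block_dim, void **args,
                          uint32_t args_size, rtStream_t stream, const char *kernel_info) {
  rtL2Ctrl_t *l2ctrl = nullptr;
  if (info.has_kernel_list) {
    // Multi-kernel binary: registered once via rtRegisterAllKernel(); each
    // tiling variant is addressed by its tiling key as the device function name.
    const std::string dev_func = std::to_string(tiling_key);
    return rtKernelLaunchWithHandle(handle, dev_func.c_str(), block_dim, args, args_size,
                                    l2ctrl, stream, kernel_info);
  }
  // Single-kernel binary: registered via rtDevBinaryRegister() and launched
  // through the classic function stub now cached in func_stub_.
  return rtKernelLaunch(func_stub, block_dim, args, args_size, l2ctrl, stream);
}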