diff --git a/CMakeLists.txt b/CMakeLists.txt index c9db97a10b9..2b44a837aa8 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -110,17 +110,17 @@ include_directories(${PYTHON_INCLUDE_DIRS}) set(MS_CCSRC_PATH ${CMAKE_SOURCE_DIR}/mindspore/ccsrc) set(MS_CCSRC_BUILD_PATH ${BUILD_PATH}/mindspore/mindspore/ccsrc) -if(ENABLE_D OR ENABLE_ACL OR ENABLE_TESTCASES) - include(${CMAKE_SOURCE_DIR}/cmake/dependency_graphengine.cmake) -endif() - if(NOT MSVC) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=hidden") endif() include(${CMAKE_SOURCE_DIR}/cmake/init.cmake) -add_subdirectory_with_faster_option(mindspore/ccsrc) add_subdirectory(mindspore/core) +if(ENABLE_D OR ENABLE_ACL OR ENABLE_TESTCASES) + include(${CMAKE_SOURCE_DIR}/cmake/dependency_graphengine.cmake) +endif() + +add_subdirectory_with_faster_option(mindspore/ccsrc) if(ENABLE_TESTCASES OR ENABLE_CPP_ST) add_subdirectory(tests) diff --git a/cmake/ascend_variables.cmake b/cmake/ascend_variables.cmake index b5faa0a783e..b52efdff9e2 100644 --- a/cmake/ascend_variables.cmake +++ b/cmake/ascend_variables.cmake @@ -9,14 +9,16 @@ set(ASCEND_DRIVER_HAL_PATH ${ASCEND_PATH}/driver/lib64/driver) # CANN packages set(ASCEND_CANN_RUNTIME_PATH ${ASCEND_PATH}/latest/lib64) -set(ASCEND_CANN_OPP_PATH ${ASCEND_PATH}/latest/opp/op_impl/built-in/ai_core/tbe/op_tiling) -set(ASCEND_CANN_OPP_PATH_TEMP ${ASCEND_PATH}/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling) +set(ASCEND_CANN_OPP_PATH ${ASCEND_PATH}/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux) +set(ASCEND_CANN_OPP_AARCH64_PATH ${ASCEND_CANN_OPP_PATH}/aarch64) +set(ASCEND_CANN_OPP_X86_64_PATH ${ASCEND_CANN_OPP_PATH}/x86_64) set(ASCEND_CANN_PLUGIN_PATH ${ASCEND_CANN_RUNTIME_PATH}/plugin/opskernel) # Ascend-toolkit packages set(ASCEND_TOOLKIT_RUNTIME_PATH ${ASCEND_PATH}/ascend-toolkit/latest/lib64) -set(ASCEND_TOOLKIT_OPP_PATH ${ASCEND_PATH}/ascend-toolkit/latest/opp/op_impl/built-in/ai_core/tbe/op_tiling) -set(ASCEND_TOOLKIT_OPP_PATH_TEMP 
${ASCEND_PATH}/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling) +set(ASCEND_TOOLKIT_OPP_PATH ${ASCEND_PATH}/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux) +set(ASCEND_TOOLKIT_OPP_AARCH64_PATH ${ASCEND_TOOLKIT_OPP_PATH}/aarch64) +set(ASCEND_TOOLKIT_OPP_X86_64_PATH ${ASCEND_TOOLKIT_OPP_PATH}/x86_64) set(ASCEND_TOOLKIT_PLUGIN_PATH ${ASCEND_TOOLKIT_RUNTIME_PATH}/plugin/opskernel) # nnae packages (for rpath only) diff --git a/cmake/graphengine_variables.cmake b/cmake/graphengine_variables.cmake new file mode 100644 index 00000000000..001103ae59b --- /dev/null +++ b/cmake/graphengine_variables.cmake @@ -0,0 +1,14 @@ +# path variables for graphengine submodule, it has to be included after mindspore/core +# and mindspore/ccsrc to prevent conflict of op headers +if(ENABLE_D OR ENABLE_ACL OR ENABLE_TESTCASES) + include_directories(${CMAKE_SOURCE_DIR}/graphengine/inc) + include_directories(${CMAKE_SOURCE_DIR}/graphengine/inc/external) + include_directories(${CMAKE_SOURCE_DIR}/graphengine/inc/framework) + include_directories(${CMAKE_SOURCE_DIR}/graphengine/base) + include_directories(${CMAKE_SOURCE_DIR}/graphengine/third_party/fwkacllib/inc) + include_directories(${CMAKE_SOURCE_DIR}/graphengine/third_party/fwkacllib/inc/aicpu) + include_directories(${CMAKE_SOURCE_DIR}/graphengine/third_party/fwkacllib/inc/toolchain) + include_directories(${CMAKE_SOURCE_DIR}/graphengine/metadef/inc) + include_directories(${CMAKE_SOURCE_DIR}/graphengine/metadef/inc/external) + include_directories(${CMAKE_SOURCE_DIR}/graphengine/metadef/inc/external/graph) +endif() \ No newline at end of file diff --git a/cmake/mind_expression.cmake b/cmake/mind_expression.cmake index 0da14c56ef9..37aaac00081 100644 --- a/cmake/mind_expression.cmake +++ b/cmake/mind_expression.cmake @@ -64,18 +64,6 @@ if(ENABLE_GPU AND GPU_BACKEND_CUDA) endif() endif() -if(ENABLE_D OR ENABLE_ACL OR ENABLE_TESTCASES) - include_directories(${CMAKE_SOURCE_DIR}/graphengine/inc) - 
include_directories(${CMAKE_SOURCE_DIR}/graphengine/inc/external) - include_directories(${CMAKE_SOURCE_DIR}/graphengine/inc/framework) - include_directories(${CMAKE_SOURCE_DIR}/graphengine/third_party/fwkacllib/inc) - include_directories(${CMAKE_SOURCE_DIR}/graphengine/third_party/fwkacllib/inc/aicpu) - include_directories(${CMAKE_SOURCE_DIR}/graphengine/third_party/fwkacllib/inc/toolchain) - include_directories(${CMAKE_SOURCE_DIR}/graphengine/metadef/inc) - include_directories(${CMAKE_SOURCE_DIR}/graphengine/metadef/inc/external) - include_directories(${CMAKE_SOURCE_DIR}/graphengine/metadef/inc/external/graph) -endif() - if(ENABLE_MINDDATA) include(${CMAKE_SOURCE_DIR}/cmake/external_libs/icu4c.cmake) include(${CMAKE_SOURCE_DIR}/cmake/external_libs/opencv.cmake) diff --git a/config/super_bar_config.json b/config/super_bar_config.json index c311b05c8d7..b6c6c1e9775 100644 --- a/config/super_bar_config.json +++ b/config/super_bar_config.json @@ -415,7 +415,8 @@ "TransData ": "support boll", "ScatterNdD ": "Accuracy issues", "Trace": "Hadn't adapted tbe implementation", - "AssignAdd": "Frac_nz in pangu not support" + "AssignAdd": "Frac_nz in pangu not support", + "Range": "not support dynamic shape with tiling failed" }, "SkipNodes": [ "BroadcastTo", @@ -444,7 +445,8 @@ "ACos", "TransData", "ScatterNdD", - "AssignAdd" + "AssignAdd", + "Range" ], "FallbackOps": { "DeformableOffsets": [ diff --git a/graphengine b/graphengine index 23600180612..f5f74bb7a12 160000 --- a/graphengine +++ b/graphengine @@ -1 +1 @@ -Subproject commit 236001806129e36c0f48b240c4f61b2e1d92c470 +Subproject commit f5f74bb7a124f99bbaeaec17b55aa466fdd34285 diff --git a/mindspore/ccsrc/CMakeLists.txt b/mindspore/ccsrc/CMakeLists.txt index 86683f30c7b..b7534df8a5e 100644 --- a/mindspore/ccsrc/CMakeLists.txt +++ b/mindspore/ccsrc/CMakeLists.txt @@ -5,6 +5,10 @@ include_directories(${CMAKE_BINARY_DIR}) include_directories(${CMAKE_SOURCE_DIR}/mindspore/core/mindrt/include) 
include_directories(${CMAKE_SOURCE_DIR}/mindspore/core/mindrt/src) +# graphengine include directories +if(ENABLE_D OR ENABLE_ACL OR ENABLE_TESTCASES) + include(${CMAKE_SOURCE_DIR}/cmake/graphengine_variables.cmake) +endif() set(SERVER_FLATBUFFER_OUTPUT "${CMAKE_BINARY_DIR}/schema") diff --git a/mindspore/ccsrc/cxx_api/model/acl/model_converter.cc b/mindspore/ccsrc/cxx_api/model/acl/model_converter.cc index 4c821b4c601..886b5fcee05 100644 --- a/mindspore/ccsrc/cxx_api/model/acl/model_converter.cc +++ b/mindspore/ccsrc/cxx_api/model/acl/model_converter.cc @@ -19,6 +19,7 @@ #include "include/transform/graph_ir/utils.h" #include "cxx_api/model/model_converter_utils/multi_process.h" #include "graph/model.h" +#include "graph/utils/graph_utils_ex.h" #include "acl/acl_rt.h" #include "cxx_api/model/aoe/auto_tune_process.h" #include "plugin/device/ascend/optimizer/ge_optimization.h" @@ -194,7 +195,7 @@ Buffer ModelConverter::LoadMindIR(const FuncGraphPtr &func_graph) { } ge::Model model; ge::Buffer model_data; - model.SetGraph(*df_graph); + model.SetGraph(::ge::GraphUtilsEx::GetComputeGraph(*df_graph)); auto ge_ret = model.Save(model_data); if (ge_ret != ge::SUCCESS) { MS_LOG(ERROR) << "Save ge model to buffer failed."; @@ -263,7 +264,8 @@ Buffer ModelConverter::LoadAscendIRInner(const Buffer &model_data) { return Buffer(); } - transform::DfGraphPtr df_graph = std::make_shared(load_model.GetGraph()); + transform::DfGraphPtr df_graph = + std::make_shared(::ge::GraphUtilsEx::CreateGraphFromComputeGraph(load_model.GetGraph())); if (df_graph == nullptr) { MS_LOG(ERROR) << "Convert FuncGraph to AscendIR failed."; return Buffer(); diff --git a/mindspore/ccsrc/plugin/device/ascend/CMakeLists.txt b/mindspore/ccsrc/plugin/device/ascend/CMakeLists.txt index 759eea9902e..ddbccd88e26 100644 --- a/mindspore/ccsrc/plugin/device/ascend/CMakeLists.txt +++ b/mindspore/ccsrc/plugin/device/ascend/CMakeLists.txt @@ -6,11 +6,22 @@ 
include_directories(${CMAKE_SOURCE_DIR}/mindspore/ccsrc/minddata/dataset) set(ASCEND_RPATH ${ASCEND_RPATH}:/usr/local/Ascend/nnae/latest/lib64) set(ASCEND_RPATH ${ASCEND_RPATH}:/usr/local/Ascend/ascend-toolkit/latest/lib64) set(ASCEND_RPATH ${ASCEND_RPATH}:/usr/local/Ascend/latest/lib64) -set(ASCEND_RPATH ${ASCEND_RPATH}:/usr/local/Ascend/opp/built-in/op_impl/ai_core/tbe/op_tiling) -set(ASCEND_RPATH ${ASCEND_RPATH}:/usr/local/Ascend/nnae/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling) -set(ASCEND_RPATH - ${ASCEND_RPATH}:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling) -set(ASCEND_RPATH ${ASCEND_RPATH}:/usr/local/Ascend/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling) +if(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "x86_64") + set(ASCEND_RPATH +${ASCEND_RPATH}:/usr/local/Ascend/nnae/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64) + set(ASCEND_RPATH +${ASCEND_RPATH}:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64) + set(ASCEND_RPATH +${ASCEND_RPATH}:/usr/local/Ascend/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64) +else() + set(ASCEND_RPATH +${ASCEND_RPATH}:/usr/local/Ascend/nnae/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/aarch64) + set(ASCEND_RPATH +${ASCEND_RPATH}:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/aarch64) + set(ASCEND_RPATH +${ASCEND_RPATH}:/usr/local/Ascend/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/aarch64) +endif() + ### cxx api need file ### if(ENABLE_ACL) @@ -87,8 +98,11 @@ if(MODE_ASCEND_ALL) find_library(PLATFORM platform ${ASCEND_CANN_RUNTIME_PATH} ${ASCEND_TOOLKIT_RUNTIME_PATH}) find_library(OPT_FEATURE opt_feature ${ASCEND_CANN_RUNTIME_PATH} ${ASCEND_TOOLKIT_RUNTIME_PATH}) find_library(adump_server libadump_server.a ${ASCEND_CANN_RUNTIME_PATH} ${ASCEND_TOOLKIT_RUNTIME_PATH}) - find_library(OPTILING optiling ${ASCEND_CANN_OPP_PATH} 
${ASCEND_TOOLKIT_OPP_PATH} - ${ASCEND_CANN_OPP_PATH_TEMP} ${ASCEND_TOOLKIT_OPP_PATH_TEMP}) + if(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "x86_64") + find_library(OPTILING optiling ${ASCEND_CANN_OPP_X86_64_PATH} ${ASCEND_TOOLKIT_OPP_X86_64_PATH}) + else() + find_library(OPTILING optiling ${ASCEND_CANN_OPP_AARCH64_PATH} ${ASCEND_TOOLKIT_OPP_AARCH64_PATH}) + endif() find_library(ACL_OP_COMPILER acl_op_compiler ${ASCEND_CANN_RUNTIME_PATH} ${ASCEND_TOOLKIT_RUNTIME_PATH}) target_link_libraries(mindspore_ascend PRIVATE ${RUNTIME_LIB} ${TSDCLIENT} ${DATATRANSFER} ${ERROR_MANAGER} diff --git a/mindspore/ccsrc/plugin/device/ascend/hal/device/ascend_memory_adapter.cc b/mindspore/ccsrc/plugin/device/ascend/hal/device/ascend_memory_adapter.cc index e4d96aac194..91fecb5189c 100644 --- a/mindspore/ccsrc/plugin/device/ascend/hal/device/ascend_memory_adapter.cc +++ b/mindspore/ccsrc/plugin/device/ascend/hal/device/ascend_memory_adapter.cc @@ -254,7 +254,7 @@ size_t AscendMemAdapter::GetDeviceMemSizeFromContext() const { uint8_t *AscendMemAdapter::MallocFromRts(size_t size) const { uint8_t *ptr = nullptr; - auto ret = rtMalloc(reinterpret_cast(&ptr), size, RT_MEMORY_HBM); + auto ret = rtMalloc(reinterpret_cast(&ptr), size, RT_MEMORY_HBM, 0); if (ret != ACL_RT_SUCCESS) { if (ret == ACL_ERROR_RT_MEMORY_ALLOCATION) { auto context_ptr = MsContext::GetInstance(); diff --git a/mindspore/ccsrc/plugin/device/ascend/hal/device/dump/data_dumper.cc b/mindspore/ccsrc/plugin/device/ascend/hal/device/dump/data_dumper.cc index de7fed1a146..686d56bea33 100644 --- a/mindspore/ccsrc/plugin/device/ascend/hal/device/dump/data_dumper.cc +++ b/mindspore/ccsrc/plugin/device/ascend/hal/device/dump/data_dumper.cc @@ -277,12 +277,12 @@ void DataDumper::OpDebugRegister() { MS_LOG(EXCEPTION) << "[DataDump] Call rt api rtGetRtCapability failed, ret = " << rt_ret; } auto memory_type = (value == static_cast(RT_CAPABILITY_SUPPORT)) ? 
RT_MEMORY_TS : RT_MEMORY_HBM; - rt_ret = rtMalloc(&op_debug_buffer_addr_, kOpDebugHostMemSize, memory_type); + rt_ret = rtMalloc(&op_debug_buffer_addr_, kOpDebugHostMemSize, memory_type, 0); if (rt_ret != RT_ERROR_NONE) { MS_LOG(EXCEPTION) << "[DataDump] Call rt api rtMalloc failed, ret = " << rt_ret; } - rt_ret = rtMalloc(&op_debug_dump_args_, kOpDebugDevMemSize, RT_MEMORY_HBM); + rt_ret = rtMalloc(&op_debug_dump_args_, kOpDebugDevMemSize, RT_MEMORY_HBM, 0); if (rt_ret != RT_ERROR_NONE) { MS_LOG(EXCEPTION) << "[DataDump] Call rtMalloc failed, ret = " << rt_ret; } diff --git a/mindspore/ccsrc/plugin/device/ascend/hal/device/dump/dumper_base.cc b/mindspore/ccsrc/plugin/device/ascend/hal/device/dump/dumper_base.cc index f5f875af25a..57fb114c34b 100644 --- a/mindspore/ccsrc/plugin/device/ascend/hal/device/dump/dumper_base.cc +++ b/mindspore/ccsrc/plugin/device/ascend/hal/device/dump/dumper_base.cc @@ -101,7 +101,7 @@ void RtLoadDumpData(const aicpu::dump::OpMappingInfo &dump_info, void **ptr) { return; } - rtError_t rt_ret = rtMalloc(ptr, proto_size, RT_MEMORY_HBM); + rtError_t rt_ret = rtMalloc(ptr, proto_size, RT_MEMORY_HBM, 0); if (rt_ret != RT_ERROR_NONE) { MS_LOG(EXCEPTION) << "[DumperBase] Call rtMalloc failed"; } diff --git a/mindspore/ccsrc/plugin/device/ascend/hal/device/dump/kernel_dumper.cc b/mindspore/ccsrc/plugin/device/ascend/hal/device/dump/kernel_dumper.cc index 32f89b73f45..251748be993 100644 --- a/mindspore/ccsrc/plugin/device/ascend/hal/device/dump/kernel_dumper.cc +++ b/mindspore/ccsrc/plugin/device/ascend/hal/device/dump/kernel_dumper.cc @@ -184,7 +184,7 @@ void KernelDumper::ExecutorDumpOp(const aicpu::dump::OpMappingInfo &op_mapping_i } std::string proto_json; (void)google::protobuf::util::MessageToJsonString(op_mapping_info, &proto_json); - rtError_t rt_ret = rtMalloc(&proto_dev_mem_, proto_size, RT_MEMORY_HBM); + rtError_t rt_ret = rtMalloc(&proto_dev_mem_, proto_size, RT_MEMORY_HBM, 0); if (rt_ret != RT_ERROR_NONE) { MS_LOG(ERROR) << 
"[KernelDumper] Call rt api rtMalloc failed, ret = " << rt_ret; return; @@ -196,7 +196,7 @@ void KernelDumper::ExecutorDumpOp(const aicpu::dump::OpMappingInfo &op_mapping_i return; } - rt_ret = rtMalloc(&proto_size_dev_mem_, sizeof(size_t), RT_MEMORY_HBM); + rt_ret = rtMalloc(&proto_size_dev_mem_, sizeof(size_t), RT_MEMORY_HBM, 0); if (rt_ret != RT_ERROR_NONE) { MS_LOG(ERROR) << "[KernelDumper] Call rt api rtMalloc failed, ret = " << rt_ret; return; @@ -372,7 +372,7 @@ void KernelDumper::MallocP2PDebugMem(const void *const op_debug_addr) { MS_LOG(EXCEPTION) << "[KernelDumper] Call rt api rtGetRtCapability failed, ret = " << rt_ret; } auto memory_type = (value == static_cast(RT_CAPABILITY_SUPPORT)) ? RT_MEMORY_TS : RT_MEMORY_HBM; - rtMalloc(&p2p_debug_addr_, kDebugP2pSize, memory_type); + rtMalloc(&p2p_debug_addr_, kDebugP2pSize, memory_type, 0); rtMemcpy(p2p_debug_addr_, sizeof(uint64_t), &debug_addrs_tmp, sizeof(uint64_t), RT_MEMCPY_HOST_TO_DEVICE); } @@ -416,7 +416,7 @@ void KernelDumper::OpDebugRegisterForStream(const CNodePtr &kernel) { MS_LOG(EXCEPTION) << "[KernelDumper] Call rt api rtGetRtCapability failed, ret = " << rt_ret; } auto memory_type = (value == static_cast(RT_CAPABILITY_SUPPORT)) ? 
RT_MEMORY_TS : RT_MEMORY_HBM; - rt_ret = rtMalloc(&op_debug_task->op_debug_addr, kOpDebugMemorySize, memory_type); + rt_ret = rtMalloc(&op_debug_task->op_debug_addr, kOpDebugMemorySize, memory_type, 0); if (rt_ret != RT_ERROR_NONE) { MS_LOG(EXCEPTION) << "[KernelDumper] Call rt api rtMalloc failed, ret = " << rt_ret; } diff --git a/mindspore/ccsrc/plugin/device/ascend/hal/device/ge_runtime/task/aicpu_task.cc b/mindspore/ccsrc/plugin/device/ascend/hal/device/ge_runtime/task/aicpu_task.cc index 5b470b48be3..bcd17027e2b 100644 --- a/mindspore/ccsrc/plugin/device/ascend/hal/device/ge_runtime/task/aicpu_task.cc +++ b/mindspore/ccsrc/plugin/device/ascend/hal/device/ge_runtime/task/aicpu_task.cc @@ -91,7 +91,7 @@ void AicpuTask::Distribute() { sizeof(uint32_t); // Malloc device memory for args - rtError_t rt_ret = rtMalloc(&args_, args_size_, RT_MEMORY_HBM); + rtError_t rt_ret = rtMalloc(&args_, args_size_, RT_MEMORY_HBM, 0); if (rt_ret != RT_ERROR_NONE) { MS_LOG(EXCEPTION) << "Call rt api rtMalloc failed, ret: " << rt_ret; } @@ -165,7 +165,7 @@ void AicpuTask::SetAicpuParamHead(uint32_t args_size, uint32_t io_addrs_num) { } } // alloc extinfo address - rtError_t flag = rtMalloc(&ext_info_addr_, ext_info_handler->GetExtInfoLen(), RT_MEMORY_HBM); + rtError_t flag = rtMalloc(&ext_info_addr_, ext_info_handler->GetExtInfoLen(), RT_MEMORY_HBM, 0); if (flag != RT_ERROR_NONE) { MS_LOG(EXCEPTION) << "Call rt api rtMalloc failed, ret: " << flag; } diff --git a/mindspore/ccsrc/plugin/device/ascend/hal/device/ge_runtime/task/label_goto_task.cc b/mindspore/ccsrc/plugin/device/ascend/hal/device/ge_runtime/task/label_goto_task.cc index 732c7e7aab9..6ba8b30f4f5 100644 --- a/mindspore/ccsrc/plugin/device/ascend/hal/device/ge_runtime/task/label_goto_task.cc +++ b/mindspore/ccsrc/plugin/device/ascend/hal/device/ge_runtime/task/label_goto_task.cc @@ -61,7 +61,7 @@ void LabelGotoTask::Distribute() { MS_EXCEPTION_IF_NULL(label_info_); if (index_value_ == nullptr) { - rtError_t rt_ret = 
rtMalloc(&index_value_, sizeof(uint64_t), RT_MEMORY_HBM); + rtError_t rt_ret = rtMalloc(&index_value_, sizeof(uint64_t), RT_MEMORY_HBM, 0); if (rt_ret != RT_ERROR_NONE) { MS_LOG(EXCEPTION) << "Call rt api rtMalloc failed, ret: " << rt_ret; } diff --git a/mindspore/ccsrc/plugin/device/ascend/hal/device/ge_runtime/task/label_manager.cc b/mindspore/ccsrc/plugin/device/ascend/hal/device/ge_runtime/task/label_manager.cc index 9e34ea60a4a..3614a81606c 100644 --- a/mindspore/ccsrc/plugin/device/ascend/hal/device/ge_runtime/task/label_manager.cc +++ b/mindspore/ccsrc/plugin/device/ascend/hal/device/ge_runtime/task/label_manager.cc @@ -106,7 +106,7 @@ std::shared_ptr LabelManager::GetLabelInfo(rtModel_t model, const st return nullptr; } - rt_ret = rtMalloc(&label_info, label_info_size, (value == RT_CAPABILITY_SUPPORT) ? RT_MEMORY_TS : RT_MEMORY_HBM); + rt_ret = rtMalloc(&label_info, label_info_size, (value == RT_CAPABILITY_SUPPORT) ? RT_MEMORY_TS : RT_MEMORY_HBM, 0); if (rt_ret != RT_ERROR_NONE) { MS_LOG(ERROR) << "Call rt api rtMalloc failed, ret: " << rt_ret; return nullptr; diff --git a/mindspore/ccsrc/plugin/device/ascend/hal/device/ge_runtime/task/tbe_task.cc b/mindspore/ccsrc/plugin/device/ascend/hal/device/ge_runtime/task/tbe_task.cc index 54ab97f834d..0d1e1c946cb 100644 --- a/mindspore/ccsrc/plugin/device/ascend/hal/device/ge_runtime/task/tbe_task.cc +++ b/mindspore/ccsrc/plugin/device/ascend/hal/device/ge_runtime/task/tbe_task.cc @@ -78,7 +78,7 @@ void TbeTask::Distribute() { task_info_->workspace_addrs().cend()); args_size_ = static_cast(tensor_device_addrs.size() * sizeof(void *)); - rt_ret = rtMalloc(&args_, args_size_, RT_MEMORY_HBM); + rt_ret = rtMalloc(&args_, args_size_, RT_MEMORY_HBM, 0); if (rt_ret != RT_ERROR_NONE) { MS_LOG(EXCEPTION) << "Call rt api rtMalloc failed, ret: " << rt_ret << " mem size " << args_size_; } diff --git a/mindspore/ccsrc/plugin/device/ascend/hal/hardware/ascend_deprecated_interface.cc 
b/mindspore/ccsrc/plugin/device/ascend/hal/hardware/ascend_deprecated_interface.cc index 51bf918700b..15268d80256 100644 --- a/mindspore/ccsrc/plugin/device/ascend/hal/hardware/ascend_deprecated_interface.cc +++ b/mindspore/ccsrc/plugin/device/ascend/hal/hardware/ascend_deprecated_interface.cc @@ -31,6 +31,7 @@ #include "plugin/device/ascend/hal/profiler/parallel_strategy_profiling.h" #include "plugin/device/ascend/optimizer/enhancer/add_placeholder_for_dynamic_rnn.h" #include "cxx_api/graph/acl/acl_env_guard.h" +#include "graph/utils/graph_utils_ex.h" using mindspore::abstract::AbstractScalar; using mindspore::abstract::AbstractTensor; @@ -166,7 +167,7 @@ void AscendDeprecatedInterface::ExportDFGraph(const std::string &file_name, cons } // get model stream ::ge::Model model("", ""); - model.SetGraph(*ge_graph); + model.SetGraph(::ge::GraphUtilsEx::GetComputeGraph(*ge_graph)); ::ge::Buffer model_data; auto ge_ret = model.Save(model_data); if (ge_ret != ::ge::SUCCESS) { diff --git a/mindspore/ccsrc/plugin/device/ascend/kernel/aicpu/aicpu_kernel_load.cc b/mindspore/ccsrc/plugin/device/ascend/kernel/aicpu/aicpu_kernel_load.cc index 0eddc91b75e..669808881bf 100644 --- a/mindspore/ccsrc/plugin/device/ascend/kernel/aicpu/aicpu_kernel_load.cc +++ b/mindspore/ccsrc/plugin/device/ascend/kernel/aicpu/aicpu_kernel_load.cc @@ -224,14 +224,14 @@ bool AicpuOpKernelLoad::CacheBinaryFileToDevice(const uintptr_t &resource_id, st void *d_aicpu_data = nullptr; void *d_so_name = nullptr; - status = rtMalloc(&d_aicpu_data, aicpu_data_length, RT_MEMORY_HBM); + status = rtMalloc(&d_aicpu_data, aicpu_data_length, RT_MEMORY_HBM, 0); if (status != RT_ERROR_NONE) { MS_LOG(ERROR) << "Call rtMalloc failed, size:" << aicpu_data_length << ", ret = 0x" << status; return false; } allocated_mem->emplace_back(d_aicpu_data); - status = rtMalloc(&d_so_name, so_name.size(), RT_MEMORY_HBM); + status = rtMalloc(&d_so_name, so_name.size(), RT_MEMORY_HBM, 0); if (status != RT_ERROR_NONE) { MS_LOG(ERROR) << 
"Call rtMalloc failed, size:" << so_name.size() << ", ret = 0x" << status; return false; @@ -267,7 +267,7 @@ bool AicpuOpKernelLoad::CacheBinaryFileToDevice(const uintptr_t &resource_id, st void *args = nullptr; uint32_t args_size = sizeof(CustAicpuSoBuf) * v_cust_so.size(); - status = rtMalloc(&args, args_size, RT_MEMORY_HBM); + status = rtMalloc(&args, args_size, RT_MEMORY_HBM, 0); if (status != RT_ERROR_NONE) { MS_LOG(ERROR) << "Call rtMalloc failed, size:" << args_size << ", ret = 0x" << status; return false; diff --git a/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/tbe_convert_utils.cc b/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/tbe_convert_utils.cc index b303452eff9..1b555ea44a7 100644 --- a/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/tbe_convert_utils.cc +++ b/mindspore/ccsrc/plugin/device/ascend/kernel/tbe/tbe_convert_utils.cc @@ -78,7 +78,7 @@ const std::map type_id_str_maps = { const std::unordered_map type_nbyte_maps = { {"float16", sizeof(float) / 2}, {"float32", sizeof(float)}, {"float64", sizeof(float) * 2}, {"int8", sizeof(int) / 4}, - {"uint1", sizeof(int) / 4}, {"int16", sizeof(int) / 2}, + {"int1", sizeof(int) / 8}, {"int16", sizeof(int) / 2}, {"int32", sizeof(int)}, {"int64", sizeof(int) * 2}, {"uint8", sizeof(int) / 4}, {"uint16", sizeof(int) / 2}, {"uint32", sizeof(int)}, {"uint64", sizeof(int) * 2}, diff --git a/mindspore/ccsrc/utils/python_fallback_running.cc b/mindspore/ccsrc/utils/python_fallback_running.cc index f1967f7ed98..0b133652375 100644 --- a/mindspore/ccsrc/utils/python_fallback_running.cc +++ b/mindspore/ccsrc/utils/python_fallback_running.cc @@ -14,6 +14,9 @@ * limitations under the License. 
*/ #include "include/common/utils/python_fallback_running.h" +#include "ops/tuple_get_item.h" +#include "ops/primitive_c.h" +#include "mindapi/src/helper.h" namespace mindspore { ScopedFallbackRunning::ScopedFallbackRunning() { on_ = true; } diff --git a/mindspore/lite/tools/converter/adapter/acl/CMakeLists.txt b/mindspore/lite/tools/converter/adapter/acl/CMakeLists.txt index 2aa5e01f1b9..2767dd3a7a1 100644 --- a/mindspore/lite/tools/converter/adapter/acl/CMakeLists.txt +++ b/mindspore/lite/tools/converter/adapter/acl/CMakeLists.txt @@ -2,6 +2,7 @@ include_directories(${TOP_DIR}/graphengine/metadef/inc/external) include_directories(${TOP_DIR}/graphengine/inc) include_directories(${TOP_DIR}/graphengine/inc/external) include_directories(${TOP_DIR}/graphengine/ge) +include_directories(${TOP_DIR}/graphengine/base) include_directories(${TOP_DIR}/graphengine/metadef/inc) include_directories(${TOP_DIR}/graphengine/inc/framework) include_directories(${TOP_DIR}/graphengine/third_party/fwkacllib/inc) diff --git a/mindspore/python/mindspore/ops/_op_impl/_custom_op/batch_matmul_impl.py b/mindspore/python/mindspore/ops/_op_impl/_custom_op/batch_matmul_impl.py index 130af2a6553..59e43462748 100644 --- a/mindspore/python/mindspore/ops/_op_impl/_custom_op/batch_matmul_impl.py +++ b/mindspore/python/mindspore/ops/_op_impl/_custom_op/batch_matmul_impl.py @@ -16,7 +16,7 @@ from __future__ import absolute_import from te import tik -from topi.cce import util +from tbe.tvm.topi.cce import util from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType diff --git a/mindspore/python/mindspore/ops/_op_impl/_custom_op/batchnorm_fold.py b/mindspore/python/mindspore/ops/_op_impl/_custom_op/batchnorm_fold.py index 093bb050b22..7ad57f4b592 100644 --- a/mindspore/python/mindspore/ops/_op_impl/_custom_op/batchnorm_fold.py +++ b/mindspore/python/mindspore/ops/_op_impl/_custom_op/batchnorm_fold.py @@ -18,8 +18,8 @@ from __future__ import absolute_import import te from te import 
tvm -from topi import generic -from topi.cce import util +from tbe.tvm.topi import generic +from tbe.tvm.topi.cce import util from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType batch_norm_op_info = TBERegOp("BatchNormFoldD") \ diff --git a/mindspore/python/mindspore/ops/_op_impl/_custom_op/batchnorm_fold2.py b/mindspore/python/mindspore/ops/_op_impl/_custom_op/batchnorm_fold2.py index dd85f282b04..b13839f4b78 100644 --- a/mindspore/python/mindspore/ops/_op_impl/_custom_op/batchnorm_fold2.py +++ b/mindspore/python/mindspore/ops/_op_impl/_custom_op/batchnorm_fold2.py @@ -17,8 +17,8 @@ import te.lang.cce from te import tvm from te.platform.fusion_manager import fusion_manager -from topi import generic -from topi.cce import util +from tbe.tvm.topi import generic +from tbe.tvm.topi.cce import util from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType SHAPE_SIZE_LIMIT = 2147483648 diff --git a/mindspore/python/mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad.py b/mindspore/python/mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad.py index 303037050ff..83ae20a23f0 100644 --- a/mindspore/python/mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad.py +++ b/mindspore/python/mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad.py @@ -17,8 +17,8 @@ import te.lang.cce from te import tvm from te.platform.fusion_manager import fusion_manager -from topi import generic -from topi.cce import util +from tbe.tvm.topi import generic +from tbe.tvm.topi.cce import util from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType SHAPE_SIZE_LIMIT = 2147483648 diff --git a/mindspore/python/mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad_reduce.py b/mindspore/python/mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad_reduce.py index 7de09388315..98df652b93b 100644 --- a/mindspore/python/mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad_reduce.py +++ 
b/mindspore/python/mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad_reduce.py @@ -17,9 +17,9 @@ import te.lang.cce from te import tvm from te.platform.fusion_manager import fusion_manager -from te.platform.cce_build import build_config -from topi import generic -from topi.cce import util +from tbe.common.buildcfg import build_config +from tbe.tvm.topi import generic +from tbe.tvm.topi.cce import util from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType SHAPE_SIZE_LIMIT = 2147483648 @@ -101,5 +101,5 @@ def batchnorm_fold2_grad_reduce(dout, x, dout_reduce, dout_x_reduce, kernel_name return from impl.bn_training_reduce import bn_training_reduce_schedule_nd sch, tensor_list = bn_training_reduce_schedule_nd(res_list) - with build_config: + with build_config(): tvm.build(sch, tensor_list, "cce", name=kernel_name) diff --git a/mindspore/python/mindspore/ops/_op_impl/_custom_op/batchnorm_fold_grad.py b/mindspore/python/mindspore/ops/_op_impl/_custom_op/batchnorm_fold_grad.py index 09e36fd03b0..21a8a8d166e 100644 --- a/mindspore/python/mindspore/ops/_op_impl/_custom_op/batchnorm_fold_grad.py +++ b/mindspore/python/mindspore/ops/_op_impl/_custom_op/batchnorm_fold_grad.py @@ -18,8 +18,8 @@ from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType import te.lang.cce from te import tvm -from topi import generic -from topi.cce import util +from tbe.tvm.topi import generic +from tbe.tvm.topi.cce import util batch_norm_op_info = TBERegOp("BatchNormFoldGradD") \ .fusion_type("OPAQUE") \ diff --git a/mindspore/python/mindspore/ops/_op_impl/_custom_op/cholesky_trsm_impl.py b/mindspore/python/mindspore/ops/_op_impl/_custom_op/cholesky_trsm_impl.py index a18e378b2a8..40922cb07b2 100644 --- a/mindspore/python/mindspore/ops/_op_impl/_custom_op/cholesky_trsm_impl.py +++ b/mindspore/python/mindspore/ops/_op_impl/_custom_op/cholesky_trsm_impl.py @@ -17,7 +17,7 @@ from __future__ import absolute_import import logging from te import tik 
-from topi.cce import util +from tbe.tvm.topi.cce import util from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType logging.basicConfig(level=logging.DEBUG, format="%(asctime)s %(levelname)s %(message)s") diff --git a/mindspore/python/mindspore/ops/_op_impl/_custom_op/correction_mul.py b/mindspore/python/mindspore/ops/_op_impl/_custom_op/correction_mul.py index 9fb8cfb7c97..86e1b7a449d 100644 --- a/mindspore/python/mindspore/ops/_op_impl/_custom_op/correction_mul.py +++ b/mindspore/python/mindspore/ops/_op_impl/_custom_op/correction_mul.py @@ -17,8 +17,8 @@ import te.lang.cce from te import tvm from te.platform.fusion_manager import fusion_manager -from topi import generic -from topi.cce import util +from tbe.tvm.topi import generic +from tbe.tvm.topi.cce import util from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType SHAPE_SIZE_LIMIT = 2147483648 diff --git a/mindspore/python/mindspore/ops/_op_impl/_custom_op/correction_mul_grad.py b/mindspore/python/mindspore/ops/_op_impl/_custom_op/correction_mul_grad.py index 6c11ce68554..21ce63e42c8 100644 --- a/mindspore/python/mindspore/ops/_op_impl/_custom_op/correction_mul_grad.py +++ b/mindspore/python/mindspore/ops/_op_impl/_custom_op/correction_mul_grad.py @@ -17,8 +17,8 @@ import te.lang.cce from te import tvm from te.platform.fusion_manager import fusion_manager -from topi import generic -from topi.cce import util +from tbe.tvm.topi import generic +from tbe.tvm.topi.cce import util from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType SHAPE_SIZE_LIMIT = 2147483648 diff --git a/mindspore/python/mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py b/mindspore/python/mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py index 9e93806224a..9310502f5a7 100644 --- a/mindspore/python/mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +++ b/mindspore/python/mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py @@ -15,7 +15,7 @@ """dsd back impl""" from 
__future__ import absolute_import from te import tik -from topi.cce import util +from tbe.tvm.topi.cce import util from mindspore.ops.op_info_register import DataType, TBERegOp, op_info_register dsd_grad_info = TBERegOp('DSDGrad') \ diff --git a/mindspore/python/mindspore/ops/_op_impl/_custom_op/dsd_impl.py b/mindspore/python/mindspore/ops/_op_impl/_custom_op/dsd_impl.py index 2c406f13b96..a34d7e18820 100644 --- a/mindspore/python/mindspore/ops/_op_impl/_custom_op/dsd_impl.py +++ b/mindspore/python/mindspore/ops/_op_impl/_custom_op/dsd_impl.py @@ -15,7 +15,7 @@ """ dense sparse to densne matmul""" from __future__ import absolute_import from te import tik -from topi.cce import util +from tbe.tvm.topi.cce import util from mindspore.ops.op_info_register import DataType, TBERegOp, op_info_register dsd_matmul_info = TBERegOp('DSDMatmul') \ diff --git a/mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel.py b/mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel.py index b95c33547ec..0f367d30249 100644 --- a/mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel.py +++ b/mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel.py @@ -17,8 +17,8 @@ import te.lang.cce from te import tvm from te.platform.fusion_manager import fusion_manager -from topi import generic -from topi.cce import util +from tbe.tvm.topi import generic +from tbe.tvm.topi.cce import util from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType fake_learned_scale_quant_perchannel_op_info = TBERegOp("FakeLearnedScaleQuantPerChannel") \ diff --git a/mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py b/mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py index a3d9b4325ab..2537e9faa27 100644 --- 
a/mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py +++ b/mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py @@ -17,8 +17,8 @@ import te.lang.cce from te import tvm from te.platform.fusion_manager import fusion_manager -from topi import generic -from topi.cce import util +from tbe.tvm.topi import generic +from tbe.tvm.topi.cce import util from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType NEG_SCALAR_MIN_FP16 = -(2 ** (-24)) diff --git a/mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad_reduce.py b/mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad_reduce.py index 90a40457b19..1da1dc45189 100644 --- a/mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad_reduce.py +++ b/mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad_reduce.py @@ -17,8 +17,8 @@ import te.lang.cce from te import tvm from te.platform.fusion_manager import fusion_manager -from topi import generic -from topi.cce import util +from tbe.tvm.topi import generic +from tbe.tvm.topi.cce import util from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType diff --git a/mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer.py b/mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer.py index a53a8212431..44ba2656269 100644 --- a/mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer.py +++ b/mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer.py @@ -18,8 +18,8 @@ from functools import reduce as functools_reduce import te.lang.cce from te import tvm from te.platform.fusion_manager import fusion_manager -from topi import generic -from topi.cce import util +from tbe.tvm.topi import generic +from 
tbe.tvm.topi.cce import util from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType fake_learned_scale_quant_perlayer_op_info = TBERegOp("FakeLearnedScaleQuantPerLayer") \ diff --git a/mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad.py b/mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad.py index fcc134d35e5..b01adf4b0af 100644 --- a/mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad.py +++ b/mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad.py @@ -19,8 +19,8 @@ from functools import reduce as functools_reduce import te.lang.cce from te import tvm from te.platform.fusion_manager import fusion_manager -from topi import generic -from topi.cce import util +from tbe.tvm.topi import generic +from tbe.tvm.topi.cce import util from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType NEG_SCALAR_MIN_FP16 = -(2 ** (-24)) diff --git a/mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad_reduce.py b/mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad_reduce.py index d4af1f6381f..3596165afab 100644 --- a/mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad_reduce.py +++ b/mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad_reduce.py @@ -19,8 +19,8 @@ from functools import reduce as functools_reduce import te.lang.cce from te import tvm from te.platform.fusion_manager import fusion_manager -from topi import generic -from topi.cce import util +from tbe.tvm.topi import generic +from tbe.tvm.topi.cce import util from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType diff --git a/mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py 
b/mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py index 52d89ec0d2f..aaf6c30aeb3 100644 --- a/mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py +++ b/mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py @@ -19,8 +19,8 @@ from __future__ import absolute_import import te.lang.cce from te import tvm from te.platform.fusion_manager import fusion_manager -from topi import generic -from topi.cce import util +from tbe.tvm.topi import generic +from tbe.tvm.topi.cce import util from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType fake_quant_perchannel_op_info = TBERegOp("FakeQuantPerChannel") \ diff --git a/mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py b/mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py index 47df576857c..e971f71973c 100644 --- a/mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py +++ b/mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py @@ -19,8 +19,8 @@ from __future__ import absolute_import import te.lang.cce from te import tvm from te.platform.fusion_manager import fusion_manager -from topi import generic -from topi.cce import util +from tbe.tvm.topi import generic +from tbe.tvm.topi.cce import util from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType SHAPE_SIZE_LIMIT = 2147483648 diff --git a/mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py b/mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py index ee5177ed84f..c933349b2d5 100644 --- a/mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py +++ b/mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py @@ -20,8 +20,8 @@ from functools import reduce as functools_reduce import te.lang.cce from te import tvm from te.platform.fusion_manager import fusion_manager -from topi 
import generic -from topi.cce import util +from tbe.tvm.topi import generic +from tbe.tvm.topi.cce import util from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType fake_quant_per_layer_op_info = TBERegOp("FakeQuantPerLayer") \ diff --git a/mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py b/mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py index 341369d8bcb..c2fb35925a1 100644 --- a/mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py +++ b/mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py @@ -20,8 +20,8 @@ from functools import reduce as functools_reduce import te.lang.cce from te import tvm from te.platform.fusion_manager import fusion_manager -from topi import generic -from topi.cce import util +from tbe.tvm.topi import generic +from tbe.tvm.topi.cce import util from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType SHAPE_SIZE_LIMIT = 2147483648 diff --git a/mindspore/python/mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py b/mindspore/python/mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py index 01a36b23a43..af177799b02 100644 --- a/mindspore/python/mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py +++ b/mindspore/python/mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py @@ -16,7 +16,7 @@ from __future__ import absolute_import from te import tik -from topi.cce import util +from tbe.tvm.topi.cce import util from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType diff --git a/mindspore/python/mindspore/ops/_op_impl/_custom_op/img2col_impl.py b/mindspore/python/mindspore/ops/_op_impl/_custom_op/img2col_impl.py index 44152283d72..ef1220bb844 100644 --- a/mindspore/python/mindspore/ops/_op_impl/_custom_op/img2col_impl.py +++ b/mindspore/python/mindspore/ops/_op_impl/_custom_op/img2col_impl.py @@ -16,7 +16,7 @@ from __future__ import absolute_import from 
te import tik -from topi.cce import util +from tbe.tvm.topi.cce import util from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType cus_img2col_info = TBERegOp("CusImg2Col") \ diff --git a/mindspore/python/mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py b/mindspore/python/mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py index 402a9592238..f488a5303f3 100644 --- a/mindspore/python/mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +++ b/mindspore/python/mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py @@ -21,8 +21,8 @@ import te.lang.cce import te.platform.cce_params as cce from te import tik from te import tvm -from topi import generic -from topi.cce import util +from tbe.tvm.topi import generic +from tbe.tvm.topi.cce import util from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType from mindspore.ops._op_impl._custom_op._basic import _shape_check, _get_bias, _get_input_shape diff --git a/mindspore/python/mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py b/mindspore/python/mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py index d4984ffd663..0cbbbe7d0cc 100644 --- a/mindspore/python/mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py +++ b/mindspore/python/mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py @@ -20,7 +20,7 @@ matmul from __future__ import absolute_import from te import tik -from topi.cce import util +from tbe.tvm.topi.cce import util from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType matmul_cube_dense_right_op_info = TBERegOp("CusMatMulCubeDenseRight") \ diff --git a/mindspore/python/mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_left_cast_impl.py b/mindspore/python/mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_left_cast_impl.py index 383cb3ee7e1..7fa59553112 100644 --- 
a/mindspore/python/mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_left_cast_impl.py +++ b/mindspore/python/mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_left_cast_impl.py @@ -22,7 +22,7 @@ import collections import te.platform.cce_params as cce from te import tik -from topi.cce import util +from tbe.tvm.topi.cce import util from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType from mindspore.ops._op_impl._custom_op._basic import _shape_check, _get_bias, _get_input_shape diff --git a/mindspore/python/mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_right_mul_impl.py b/mindspore/python/mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_right_mul_impl.py index e8637418d42..1e2f6cd2ce6 100644 --- a/mindspore/python/mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_right_mul_impl.py +++ b/mindspore/python/mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_right_mul_impl.py @@ -23,7 +23,7 @@ from collections import namedtuple import logging from te import tik -from topi.cce import util +from tbe.tvm.topi.cce import util from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType # General limitation of the size for input shape: 2**31 diff --git a/mindspore/python/mindspore/ops/_op_impl/_custom_op/matmul_cube_impl.py b/mindspore/python/mindspore/ops/_op_impl/_custom_op/matmul_cube_impl.py index 5a69e8dc5c9..e4bdbc6f4a0 100644 --- a/mindspore/python/mindspore/ops/_op_impl/_custom_op/matmul_cube_impl.py +++ b/mindspore/python/mindspore/ops/_op_impl/_custom_op/matmul_cube_impl.py @@ -22,8 +22,8 @@ from impl.matmul_vector import matmul_vector_cce import te.platform.cce_params as cce import te.lang.cce from te import tvm -from topi import generic -from topi.cce import util +from tbe.tvm.topi import generic +from tbe.tvm.topi.cce import util from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType from mindspore.ops._op_impl._custom_op._basic import _shape_check, _get_bias, _get_input_shape 
diff --git a/mindspore/python/mindspore/ops/_op_impl/_custom_op/matrix_combine_impl.py b/mindspore/python/mindspore/ops/_op_impl/_custom_op/matrix_combine_impl.py index bfc79b91a09..6c750972618 100644 --- a/mindspore/python/mindspore/ops/_op_impl/_custom_op/matrix_combine_impl.py +++ b/mindspore/python/mindspore/ops/_op_impl/_custom_op/matrix_combine_impl.py @@ -16,7 +16,7 @@ from __future__ import absolute_import from te import tik -from topi.cce import util +from tbe.tvm.topi.cce import util from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType cus_matrix_combine_op_info = TBERegOp("CusMatrixCombine") \ diff --git a/mindspore/python/mindspore/ops/_op_impl/_custom_op/minmax_update_perchannel.py b/mindspore/python/mindspore/ops/_op_impl/_custom_op/minmax_update_perchannel.py index c414d81a940..02377fb8dd9 100644 --- a/mindspore/python/mindspore/ops/_op_impl/_custom_op/minmax_update_perchannel.py +++ b/mindspore/python/mindspore/ops/_op_impl/_custom_op/minmax_update_perchannel.py @@ -17,8 +17,8 @@ import te.lang.cce from te import tvm from te.platform.fusion_manager import fusion_manager -from topi import generic -from topi.cce import util +from tbe.tvm.topi import generic +from tbe.tvm.topi.cce import util from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType minmax_update_perchannel_op_info = TBERegOp("MinMaxUpdatePerChannel") \ diff --git a/mindspore/python/mindspore/ops/_op_impl/_custom_op/minmax_update_perlayer.py b/mindspore/python/mindspore/ops/_op_impl/_custom_op/minmax_update_perlayer.py index 4d2096d55ba..e5b387a9527 100644 --- a/mindspore/python/mindspore/ops/_op_impl/_custom_op/minmax_update_perlayer.py +++ b/mindspore/python/mindspore/ops/_op_impl/_custom_op/minmax_update_perlayer.py @@ -18,8 +18,8 @@ from functools import reduce as functools_reduce import te.lang.cce from te import tvm from te.platform.fusion_manager import fusion_manager -from topi import generic -from topi.cce import util +from 
tbe.tvm.topi import generic +from tbe.tvm.topi.cce import util from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType minmax_update_perlayer_op_info = TBERegOp("MinMaxUpdatePerLayer") \ diff --git a/mindspore/python/mindspore/ops/_op_impl/_custom_op/transpose02314_impl.py b/mindspore/python/mindspore/ops/_op_impl/_custom_op/transpose02314_impl.py index 3d914f38892..7e7c6716b75 100644 --- a/mindspore/python/mindspore/ops/_op_impl/_custom_op/transpose02314_impl.py +++ b/mindspore/python/mindspore/ops/_op_impl/_custom_op/transpose02314_impl.py @@ -16,7 +16,7 @@ from __future__ import absolute_import from te import tik -from topi.cce import util +from tbe.tvm.topi.cce import util from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType cus_transpose02314_op_info = TBERegOp("CusTranspose02314") \ diff --git a/mindspore/python/mindspore/run_check/_check_version.py b/mindspore/python/mindspore/run_check/_check_version.py index 1785fe476fb..7eeffb48646 100644 --- a/mindspore/python/mindspore/run_check/_check_version.py +++ b/mindspore/python/mindspore/run_check/_check_version.py @@ -255,7 +255,7 @@ class AscendEnvChecker(EnvChecker): def __init__(self, library_path): self.library_path = library_path - self.version = ["1.84"] + self.version = ["6.3"] atlas_nnae_version = "/usr/local/Ascend/nnae/latest/compiler/version.info" atlas_toolkit_version = "/usr/local/Ascend/ascend-toolkit/latest/compiler/version.info" hisi_fwk_version = "/usr/local/Ascend/latest/compiler/version.info" @@ -331,7 +331,7 @@ class AscendEnvChecker(EnvChecker): def check_deps_version(self): """ - te, topi, hccl wheel package version check + te and hccl wheel package version check in order to update the change of 'LD_LIBRARY_PATH' env, run a sub process """ @@ -345,12 +345,6 @@ class AscendEnvChecker(EnvChecker): attention_warning = True logger.warning(f"MindSpore version {mindspore_version} and \"te\" wheel package version {v} does not " "match, reference 
to the match info on: https://www.mindspore.cn/install") - from topi import version as topiver - v = '.'.join(topiver.version.split('.')[0:2]) - if v not in supported_version: - attention_warning = True - logger.warning(f"MindSpore version {mindspore_version} and \"topi\" wheel package version {v} does not " - "match, reference to the match info on: https://www.mindspore.cn/install") from hccl import sys_version as hccl_version v = '.'.join(hccl_version.__sys_version__.split('.')[0:2]) if v not in supported_version: @@ -361,7 +355,7 @@ class AscendEnvChecker(EnvChecker): # pylint: disable=broad-except except Exception as e: logger.error("CheckFailed:", e.args) - logger.error("MindSpore relies on the 3 whl packages of \"te\", \"topi\" and \"hccl\" in the \"latest\" " + logger.error("MindSpore relies on whl packages of \"te\" and \"hccl\" in the \"latest\" " "folder of the Ascend AI software package (Ascend Data Center Solution), please check whether" " they are installed correctly or not, reference to the match info on: " "https://www.mindspore.cn/install") diff --git a/tests/st/cpp/CMakeLists.txt b/tests/st/cpp/CMakeLists.txt index 019cdde3d17..d82506115ae 100644 --- a/tests/st/cpp/CMakeLists.txt +++ b/tests/st/cpp/CMakeLists.txt @@ -8,6 +8,11 @@ include_directories(${CMAKE_SOURCE_DIR}/mindspore/core) include_directories(${CMAKE_BINARY_DIR}) include_directories(${CUDA_INCLUDE_DIRS}) +# graphengine include directories +if(ENABLE_D OR ENABLE_ACL OR ENABLE_TESTCASES) + include(${CMAKE_SOURCE_DIR}/cmake/graphengine_variables.cmake) +endif() + if(ENABLE_ACL) add_definitions(-D ENABLE_ACL) endif() diff --git a/tests/st/networks/models/bert/bert_performance/test_bert_tdt_lossscale.py b/tests/st/networks/models/bert/bert_performance/test_bert_tdt_lossscale.py index 2efee342174..b5d058fa979 100644 --- a/tests/st/networks/models/bert/bert_performance/test_bert_tdt_lossscale.py +++ b/tests/st/networks/models/bert/bert_performance/test_bert_tdt_lossscale.py @@ -174,7 +174,7 @@ 
class TimeMonitor(Callback): self.per_step_mseconds_list.append(epoch_mseconds / self.data_size) -@pytest.mark.level0 +@pytest.mark.level1 @pytest.mark.platform_arm_ascend_training @pytest.mark.platform_x86_ascend_training @pytest.mark.env_onecard diff --git a/tests/st/ops/ascend/test_deformable_conv.py b/tests/st/ops/ascend/test_deformable_conv.py index ca9686a59e8..d5ce24a076c 100644 --- a/tests/st/ops/ascend/test_deformable_conv.py +++ b/tests/st/ops/ascend/test_deformable_conv.py @@ -23,7 +23,7 @@ from mindspore import Tensor context.set_context(device_target="Ascend") -@pytest.mark.level0 +@pytest.mark.level1 @pytest.mark.platform_x86_ascend_training @pytest.mark.platform_arm_ascend_training @pytest.mark.env_onecard diff --git a/tests/st/ops/ascend/test_tbe_ops/test_deformable_offsets_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_deformable_offsets_grad.py index 24affc52b41..b1a92f7496f 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_deformable_offsets_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_deformable_offsets_grad.py @@ -59,7 +59,7 @@ class Grad(nn.Cell): return self.grad(self.network)(x, w, offset, output_grad) -@pytest.mark.level0 +@pytest.mark.level1 @pytest.mark.platform_x86_ascend_training @pytest.mark.platform_arm_ascend_training @pytest.mark.env_onecard diff --git a/tests/st/ops/ascend/test_unsorted_segment_op.py b/tests/st/ops/ascend/test_unsorted_segment_op.py index 316cbc0c27e..63bf365f8f0 100644 --- a/tests/st/ops/ascend/test_unsorted_segment_op.py +++ b/tests/st/ops/ascend/test_unsorted_segment_op.py @@ -126,7 +126,7 @@ class TestUnsortedSegmentArithmeticNet(nn.Cell): return self.func(x, segment_ids, self.num_segments) -@pytest.mark.level0 +@pytest.mark.level1 @pytest.mark.platform_arm_ascend_training @pytest.mark.platform_x86_ascend_training @pytest.mark.env_onecard diff --git a/tests/st/ops/custom_ops_tbe/add3_impl.py b/tests/st/ops/custom_ops_tbe/add3_impl.py index 36f296d4c1e..33ddb7c17b4 100644 --- 
a/tests/st/ops/custom_ops_tbe/add3_impl.py +++ b/tests/st/ops/custom_ops_tbe/add3_impl.py @@ -16,8 +16,8 @@ from __future__ import absolute_import import te.lang.cce from te import tvm from te.platform.fusion_manager import fusion_manager -from topi import generic -from topi.cce import util +from tbe.tvm.topi import generic +from tbe.tvm.topi.cce import util from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType @fusion_manager.register("add3") diff --git a/tests/st/ops/custom_ops_tbe/conv_layer.py b/tests/st/ops/custom_ops_tbe/conv_layer.py index dad5a8c8697..67da3b6ffca 100755 --- a/tests/st/ops/custom_ops_tbe/conv_layer.py +++ b/tests/st/ops/custom_ops_tbe/conv_layer.py @@ -15,9 +15,9 @@ import te.lang.cce from te import tvm from te.platform import CUBE_MKN -from topi import generic -from topi.cce import util -from topi.cce.util import is_v200_version +from tbe.tvm.topi import generic +from tbe.tvm.topi.cce import util +from tbe.tvm.topi.cce.util import is_v200_version # pylint: disable=R0912,R0913,R0914,R0915,E1101 # the dim of shape in conv must be 4 diff --git a/tests/st/ops/custom_ops_tbe/square_impl.py b/tests/st/ops/custom_ops_tbe/square_impl.py index 4a87e9105b3..5d0260f94ed 100644 --- a/tests/st/ops/custom_ops_tbe/square_impl.py +++ b/tests/st/ops/custom_ops_tbe/square_impl.py @@ -17,8 +17,8 @@ from __future__ import absolute_import import te.lang.cce from te import tvm from te.platform.fusion_manager import fusion_manager -from topi import generic -from topi.cce import util +from tbe.tvm.topi import generic +from tbe.tvm.topi.cce import util from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType diff --git a/tests/st/ops/dynamic_shape/test_dynamic_setitem.py b/tests/st/ops/dynamic_shape/test_dynamic_setitem.py index 4d56540033f..93577272a8b 100644 --- a/tests/st/ops/dynamic_shape/test_dynamic_setitem.py +++ b/tests/st/ops/dynamic_shape/test_dynamic_setitem.py @@ -114,7 +114,7 @@ class 
TensorSetItem(nn.Cell): return tensor1, tensor2 -@pytest.mark.level0 +@pytest.mark.level1 @pytest.mark.platform_x86_cpu @pytest.mark.platform_x86_gpu_training @pytest.mark.platform_arm_ascend_training diff --git a/tests/st/ops/graph_kernel/custom/test_custom_tbe.py b/tests/st/ops/graph_kernel/custom/test_custom_tbe.py index 9c834c14813..9d51f85062c 100644 --- a/tests/st/ops/graph_kernel/custom/test_custom_tbe.py +++ b/tests/st/ops/graph_kernel/custom/test_custom_tbe.py @@ -33,7 +33,7 @@ from mindspore.ops.composite.multitype_ops.zeros_like_impl import zeros_like def square_with_bias(input_x, output_y, bias=0.0, kernel_name="square_with_bias"): import te.lang.cce from te import tvm - from topi.cce import util + from tbe.tvm.topi.cce import util shape = input_x.get("shape") dtype = input_x.get("dtype").lower() diff --git a/tests/ut/cpp/CMakeLists.txt b/tests/ut/cpp/CMakeLists.txt index 5d09d62accb..a4cbebb40db 100644 --- a/tests/ut/cpp/CMakeLists.txt +++ b/tests/ut/cpp/CMakeLists.txt @@ -34,6 +34,10 @@ include_directories(${CMAKE_BINARY_DIR}/proto/metadef_protos) include_directories(${CMAKE_BINARY_DIR}/proto/ge) include_directories(${CUDA_INCLUDE_DIRS}) include_directories(${CMAKE_SOURCE_DIR}/mindspore/ccsrc/plugin/device/cpu/kernel) +# graphengine include directories +if(ENABLE_D OR ENABLE_ACL OR ENABLE_TESTCASES) + include(${CMAKE_SOURCE_DIR}/cmake/graphengine_variables.cmake) +endif() MESSAGE("check ut_test ${CMAKE_BINARY_DIR}") link_directories(${MS_CCSRC_BUILD_PATH}) diff --git a/tests/ut/cpp/stub/runtime/runtime_stub.cc b/tests/ut/cpp/stub/runtime/runtime_stub.cc index 4f8f7a86257..787e6e2b9ee 100644 --- a/tests/ut/cpp/stub/runtime/runtime_stub.cc +++ b/tests/ut/cpp/stub/runtime/runtime_stub.cc @@ -30,7 +30,7 @@ rtError_t rtEventCreateWithFlag(rtEvent_t *event, uint32_t flag) { return RT_ERR rtError_t rtEventElapsedTime(float *time, rtEvent_t start, rtEvent_t end) { return RT_ERROR_NONE; } -rtError_t rtMalloc(void **devPtr, uint64_t size, rtMemType_t type) { 
return RT_ERROR_NONE; } +rtError_t rtMalloc(void **devPtr, uint64_t size, rtMemType_t type, const uint16_t moduleId) { return RT_ERROR_NONE; } rtError_t rtMemcpy(void *dst, uint64_t destMax, const void *src, uint64_t count, rtMemcpyKind_t kind) { return RT_ERROR_NONE;