Unify GE and VM backend

This commit is contained in:
xiao_yao1994 2022-01-06 15:29:15 +08:00
parent d094e689f8
commit 0843910260
35 changed files with 221 additions and 206 deletions

View File

@ -53,20 +53,7 @@ if(ENABLE_GPU)
endif()
endif()
if(ENABLE_GE)
include_directories(${CMAKE_SOURCE_DIR}/third_party/ge/include)
include_directories(${CMAKE_SOURCE_DIR}/third_party/ge/include/external)
include_directories(${CMAKE_SOURCE_DIR}/third_party/ge/include/external/graph)
link_directories(${CMAKE_SOURCE_DIR}/third_party/ge/lib)
elseif(ENABLE_D OR ENABLE_ACL OR ENABLE_TESTCASES)
include_directories(${CMAKE_SOURCE_DIR}/graphengine/inc)
include_directories(${CMAKE_SOURCE_DIR}/graphengine/inc/external)
include_directories(${CMAKE_SOURCE_DIR}/graphengine/metadef/inc)
include_directories(${CMAKE_SOURCE_DIR}/graphengine/metadef/inc/external)
include_directories(${CMAKE_SOURCE_DIR}/graphengine/metadef/inc/external/graph)
endif()
if(ENABLE_GE OR ENABLE_D OR ENABLE_ACL OR ENABLE_TESTCASES)
if(ENABLE_D OR ENABLE_ACL OR ENABLE_TESTCASES)
include_directories(${CMAKE_SOURCE_DIR}/graphengine/inc)
include_directories(${CMAKE_SOURCE_DIR}/graphengine/inc/external)
include_directories(${CMAKE_SOURCE_DIR}/graphengine/inc/framework)

View File

@ -1,7 +1,6 @@
option(ENABLE_D "Enable d" OFF)
option(ENABLE_GPU "Enable gpu" OFF)
option(ENABLE_CPU "Enable cpu" OFF)
option(ENABLE_GE "Enable graph engine as backend to execute" OFF)
option(ENABLE_MINDDATA "Enable minddata compile" OFF)
option(ENABLE_TRAIN "Enable ge train, default off(only infer)" OFF)
option(ENABLE_TESTCASES "Run testcases switch, default off" OFF)
@ -30,7 +29,7 @@ option(BUILD_DEV_MODE "MindSpore build nightly dev mode" OFF)
option(ENABLE_FAST_HASH_TABLE "Enable use fast hash table instead of std ones" ON)
option(USE_LLVM "use llvm" OFF)
if(NOT ENABLE_D AND NOT ENABLE_TESTCASES AND NOT ENABLE_ACL AND NOT ENABLE_GE)
if(NOT ENABLE_D AND NOT ENABLE_TESTCASES AND NOT ENABLE_ACL)
set(ENABLE_GLIBCXX ON)
endif()
@ -73,7 +72,7 @@ if(NOT BUILD_PATH)
set(BUILD_PATH "${CMAKE_SOURCE_DIR}/build")
endif()
if(ENABLE_GE OR ENABLE_D)
if(ENABLE_D)
set(ENABLE_TDTQUE ON)
endif()
@ -86,8 +85,8 @@ if(ENABLE_CPU)
add_compile_definitions(ENABLE_CPU)
endif()
if(ENABLE_GE)
add_compile_definitions(ENABLE_GE)
if(ENABLE_D)
add_compile_definitions(ENABLE_D)
add_compile_definitions(CUSTOM_OP)
endif()
@ -117,7 +116,7 @@ if(ENABLE_LOAD_ANF_IR)
add_compile_definitions(ENABLE_LOAD_ANF_IR)
endif()
if(ENABLE_TESTCASES OR (NOT ENABLE_D AND NOT ENABLE_GE))
if(ENABLE_TESTCASES OR (NOT ENABLE_D))
add_compile_definitions(NO_DLIB=1)
endif()

View File

@ -17,11 +17,7 @@ set(CPACK_CMAKE_BUILD_TYPE ${CMAKE_BUILD_TYPE})
set(CPACK_PYTHON_EXE ${Python3_EXECUTABLE})
set(CPACK_PYTHON_VERSION ${Python3_VERSION_MAJOR}.${Python3_VERSION_MINOR})
if(ENABLE_GE)
set(CPACK_MS_BACKEND "ge")
set(CPACK_MS_TARGET "ascend or cpu")
set(CPACK_MS_PACKAGE_NAME "mindspore")
elseif(ENABLE_GPU)
if(ENABLE_GPU)
set(CPACK_MS_BACKEND "ms")
set(CPACK_MS_TARGET "gpu or cpu")
if(BUILD_DEV_MODE)
@ -232,8 +228,7 @@ if(ENABLE_CPU AND NOT WIN32)
)
endif()
if(NOT ENABLE_GE)
if(ENABLE_D OR ENABLE_ACL)
if(ENABLE_D OR ENABLE_ACL)
if(DEFINED ENV{ASCEND_CUSTOM_PATH})
set(ASCEND_PATH $ENV{ASCEND_CUSTOM_PATH})
else()
@ -248,7 +243,7 @@ if(NOT ENABLE_GE)
COMPONENT mindspore
)
endif()
elseif(ENABLE_TESTCASES)
elseif(ENABLE_TESTCASES)
install(
FILES
${CMAKE_BINARY_DIR}/graphengine/metadef/graph/libgraph.so
@ -256,7 +251,6 @@ if(NOT ENABLE_GE)
DESTINATION ${INSTALL_LIB_DIR}
COMPONENT mindspore
)
endif()
endif()
if(MS_BUILD_GRPC)

View File

@ -297,7 +297,7 @@ if(MODE_ASCEND_ALL OR MODE_ASCEND_ACL)
endif()
endif()
if(ENABLE_GE)
if(ENABLE_D)
find_library(GE_RUNNER ge_runner ${ASCEND_RUNTIME_PATH} ${ASCEND_TOOLKIT_RUNTIME_PATH})
find_library(GRAPH graph ${ASCEND_RUNTIME_PATH} ${ASCEND_TOOLKIT_RUNTIME_PATH})
find_library(HCCL hccl ${ASCEND_RUNTIME_PATH} ${ASCEND_TOOLKIT_RUNTIME_PATH})

View File

@ -50,8 +50,7 @@ bool CreateSessionAndGraphRunner() {
options["ge.enablePrintOpPass"] = "0";
sess = transform::GraphRunner::NewSession(options);
if (sess == nullptr) {
MS_LOG(ERROR) << "Init data graph failed, because of create Ge session failed";
return false;
MS_LOG(WARNING) << "Init data graph failed, because of create Ge session failed";
} else {
transform::DfGraphManager::GetInstance().SetGeSession(sess);
}

View File

@ -20,7 +20,7 @@ set_property(SOURCE ${PARSER_SRC_FILES} PROPERTY COMPILE_DEFINITIONS SUBMODULE_I
file(GLOB_RECURSE ANALYZER_SRC_FILES "static_analysis/*.cc")
set_property(SOURCE ${ANALYZER_SRC_FILES} PROPERTY COMPILE_DEFINITIONS SUBMODULE_ID=mindspore::SubModuleId::SM_ANALYZER)
if(ENABLE_GE OR ENABLE_D)
if(ENABLE_D)
file(GLOB_RECURSE _PIPELINE_GE_SRC_FILES RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "pipeline_ge.cc")
list(APPEND _PIPELINE_SRC_FILES ${_PIPELINE_GE_SRC_FILES})
endif()

View File

@ -77,7 +77,7 @@
#include "distributed/cluster/cluster_context.h"
#endif
#if ((defined ENABLE_GE) || (defined ENABLE_D))
#ifdef ENABLE_D
#include "pipeline/jit/pipeline_ge.h"
#include "transform/graph_ir/convert.h"
#include "transform/graph_ir/df_graph_manager.h"
@ -85,6 +85,7 @@
#include "runtime/device/ascend/profiling/profiling_manager.h"
#include "runtime/device/ascend/distribute/ascend_collective.h"
#endif
#ifdef ENABLE_DUMP_IR
#include "debug/rdr/running_data_recorder.h"
#include "debug/rdr/recorder_manager.h"
@ -714,8 +715,15 @@ py::dict GraphExecutorPy::GetAllreduceFusion(const std::string &phase) {
// Not support multi thread, not support nested call too.
// Here using nested_called flg to avoid nested call.
void GraphExecutorPy::DelNetRes(const py::set &id) {
#ifdef ENABLE_GE
#ifdef ENABLE_D
auto ms_context = MsContext::GetInstance();
MS_EXCEPTION_IF_NULL(ms_context);
std::string backend = ms_context->backend_policy();
if (backend == "ge") {
FinalizeBackend();
} else {
ConfigManager::GetInstance().ResetIterNum();
}
#else
ConfigManager::GetInstance().ResetIterNum();
#endif
@ -728,9 +736,8 @@ void GraphExecutorPy::DelNetRes(const py::set &id) {
MS_LOG(ERROR) << "Expect string phase, but got " << py::str(item);
}
}
#ifdef ENABLE_GE
if (!id.empty() && info_.size() == 0) {
#ifdef ENABLE_D
if (backend == "ge" && !id.empty() && info_.size() == 0) {
// because Ge only support one Session exist at the same time ,so we delete the old one
transform::DfGraphManager::GetInstance().DeleteGraphRunner();
transform::DfGraphManager::GetInstance().EraseAnfGraph();
@ -986,9 +993,6 @@ bool GraphExecutorPy::CompileInner(const py::object &source_obj, const py::tuple
MS_LOG(INFO) << "Start compiling, phase: " << phase;
MS_LOG(DEBUG) << "source: {" << py::str(source_obj) << "}\nargs: " << py::str(const_cast<py::tuple &>(args));
#ifdef ENABLE_GE
GetGeBackendPolicy();
#endif
ExecutorInfoPtr executor_info = std::make_shared<ExecutorInfo>();
ResourcePtr resource = std::make_shared<Resource>(source_obj);
@ -1372,26 +1376,25 @@ py::object GraphExecutorPy::Run(const py::tuple &args, const py::object &phase_o
MS_LOG(EXCEPTION) << "Run failed, phase input is not a str";
}
auto phase = py::cast<std::string>(phase_obj);
std::string backend = MsContext::GetInstance()->backend_policy();
#ifdef ENABLE_GE
if (backend == "ge") {
auto ms_context = MsContext::GetInstance();
#ifdef ENABLE_D
if (ms_context->backend_policy() == "ge") {
return ExecDFGraph(info_, args, phase);
}
#else
#endif
auto ret_val = std::make_shared<py::object>();
if (info_.count(phase) != 0 && info_[phase]->func_graph != nullptr) {
if (IsGraphOutputValueNodeOrParameter(info_[phase]->func_graph->output(), args, ret_val)) {
return *ret_val;
}
}
if (backend == "ge") {
if (ms_context->backend_policy() == "ge") {
// Virtual output constructed for test cases.
if (!args.empty()) {
return args[0];
}
return args;
}
#endif
auto iter = info_.find(phase);
if (iter == info_.end()) {
MS_LOG(EXCEPTION) << "No executor info. found for phase: " << phase;
@ -1420,7 +1423,7 @@ py::object GraphExecutorPy::Run(const py::tuple &args, const py::object &phase_o
}
MS_LOG(INFO) << "VM loop size " << vm_loop << ", loopsink size " << vm_loop;
py::object ret;
MS_LOG(DEBUG) << "Eval run" << backend;
MS_LOG(DEBUG) << "Eval run" << ms_context->backend_policy();
auto output = execute_info->func_graph->output()->abstract();
MS_EXCEPTION_IF_NULL(output);
for (int64_t i = 0; i < vm_loop; i++) {
@ -1429,11 +1432,11 @@ py::object GraphExecutorPy::Run(const py::tuple &args, const py::object &phase_o
}
MS_LOG(DEBUG) << "Run end";
return ret;
}
} // namespace pipeline
FuncGraphPtr GraphExecutorPy::BuildGraph(const py::dict &init_params, const std::string &phase,
const py::object &broadcast_params) {
#if ((defined ENABLE_GE) || (defined ENABLE_D))
#ifdef ENABLE_D
return BuildDFGraph(info_, init_params, phase, broadcast_params);
#else
return nullptr;
@ -1459,8 +1462,13 @@ void GraphExecutorPy::UpdataParamNodeDefaultInput(
}
void GraphExecutorPy::RunInitGraph(const py::dict &init_params, const std::string &phase) const {
#ifdef ENABLE_GE
#ifdef ENABLE_D
auto ms_context = MsContext::GetInstance();
MS_EXCEPTION_IF_NULL(ms_context);
auto backend = ms_context->backend_policy();
if (backend == "ge") {
RunGEInitGraph(init_params, phase);
}
#endif
}
@ -1486,9 +1494,9 @@ bool InitExecDataset(const std::string &queue_name, int64_t iter_num, int64_t ba
const std::vector<TypePtr> &types, const std::vector<std::vector<int64_t>> &shapes,
const std::vector<int64_t> &input_indexes, const std::string &phase, bool need_run) {
std::string name = MsContext::GetInstance()->backend_policy();
#ifndef NO_DLIB
auto ms_context = MsContext::GetInstance();
MS_EXCEPTION_IF_NULL(ms_context);
#ifndef NO_DLIB
if (!context::IsTsdOpened(ms_context) || !context::IsGeInited(ms_context)) {
InitPipeline();
}
@ -1499,15 +1507,13 @@ bool InitExecDataset(const std::string &queue_name, int64_t iter_num, int64_t ba
if (name == kMsConvert || name == kMsVm) {
return InitExecDatasetVm(queue_name, iter_num, batch_size, types, shapes, input_indexes, need_run);
}
#ifdef ENABLE_GE
return InitExecDatasetGe(queue_name, iter_num, batch_size, types, shapes, input_indexes, phase);
#else
std::string backend = MsContext::GetInstance()->backend_policy();
std::string backend = ms_context->backend_policy();
#ifdef ENABLE_D
if (backend == "ge") {
return true;
return InitExecDatasetGe(queue_name, iter_num, batch_size, types, shapes, input_indexes, phase);
}
#endif
return false;
return backend == "ge" ? true : false;
}
bool InitExecDatasetVm(const std::string &queue_name, int64_t size, int64_t batch_size,
@ -1616,12 +1622,17 @@ std::string GetJitLevel() {
void ResetOpId() { mindspore::id_generator::reset_id(); }
void InitHccl() {
#ifdef ENABLE_GE
(void)InitPipeline();
#else
mindspore::parse::python_adapter::set_python_env_flag(true);
auto ms_context = MsContext::GetInstance();
MS_EXCEPTION_IF_NULL(ms_context);
#ifdef ENABLE_D
auto backend = ms_context->backend_policy();
if (backend == "ge") {
(void)InitPipeline();
return;
}
#endif
mindspore::parse::python_adapter::set_python_env_flag(true);
uint32_t device_id = ms_context->get_param<uint32_t>(MS_CTX_DEVICE_ID);
#if ENABLE_D
bool task_sink = true;
@ -1656,17 +1667,21 @@ void InitHccl() {
} else {
(void)context::OpenTsd(ms_context);
}
#endif
}
void FinalizeHccl() {
#ifdef ENABLE_GE
#ifdef ENABLE_D
auto ms_context = MsContext::GetInstance();
MS_EXCEPTION_IF_NULL(ms_context);
auto backend = ms_context->backend_policy();
if (backend == "ge") {
(void)FinalizeBackend();
#else
return;
}
#endif
session::ExecutorManager::Instance().Clear();
device::DeviceContextManager::GetInstance().ClearDeviceContexts();
device::KernelRuntimeManager::Instance().ClearRuntimeResource();
#endif
}
uint32_t GetHcclRankId() {
@ -1688,7 +1703,7 @@ uint32_t GetHcclRankSize() {
}
void ExportGraph(const std::string &file_name, const std::string &, const std::string &phase) {
#if ((defined ENABLE_GE) || (defined ENABLE_D))
#ifdef ENABLE_D
ExportDFGraph(file_name, phase);
#else
MS_EXCEPTION(ValueError) << "Only support export file in 'AIR' format with Ascend backend.";
@ -1811,14 +1826,23 @@ void ClearResAtexit() {
opt::python_pass::PyPassManager::GetInstance()->ClearRes();
MS_LOG(INFO) << "End clear PyPassManager.";
#ifdef ENABLE_GE
#ifdef ENABLE_D
auto ms_context = MsContext::GetInstance();
MS_EXCEPTION_IF_NULL(ms_context);
if (ms_context->backend_policy() == "ge") {
transform::DfGraphManager::GetInstance().ClearGraph();
transform::OpAdapterMap::get().clear();
} else {
MS_LOG(INFO) << "Start clear ConfigManager...";
ConfigManager::GetInstance().ResetIterNum();
MS_LOG(INFO) << "End clear ConfigManager.";
}
#else
MS_LOG(INFO) << "Start clear ConfigManager...";
ConfigManager::GetInstance().ResetIterNum();
MS_LOG(INFO) << "End clear ConfigManager.";
#endif
ReleaseGeTsd();
MS_LOG(INFO) << "Start clear python_adapter...";
parse::python_adapter::ResetPythonScope();

View File

@ -1,6 +1,6 @@
file(GLOB_RECURSE _PYNATIVE_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "pynative_execute.cc")
if(ENABLE_GE)
if(ENABLE_D)
file(GLOB_RECURSE _GE_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "pynative_execute_ge.cc")
list(APPEND _PYNATIVE_SRC_LIST ${_GE_SRC_LIST})
endif()

View File

@ -60,7 +60,7 @@ struct OpExecInfo {
PrimitivePyPtr py_primitive;
AbstractBasePtr abstract;
py::list op_inputs;
#ifdef ENABLE_GE
#ifdef ENABLE_D
py::dict op_attrs;
#endif
std::vector<int64_t> inputs_mask;

View File

@ -61,7 +61,7 @@
#include "runtime/hardware/device_context_manager.h"
#include "vm/transform.h"
#ifdef ENABLE_GE
#ifdef ENABLE_D
#include "pipeline/pynative/pynative_execute_ge.h"
#endif
@ -2011,10 +2011,16 @@ MsBackendPolicy ForwardExecutor::InitEnv(const OpExecInfoPtr &op_exec_info) {
MS_EXCEPTION_IF_NULL(op_exec_info);
MS_LOG(DEBUG) << "RunOp start, op name is: " << op_exec_info->op_name;
parse::python_adapter::set_python_env_flag(true);
MsBackendPolicy backend_policy;
#if (!defined ENABLE_GE)
auto ms_context = MsContext::GetInstance();
MS_EXCEPTION_IF_NULL(ms_context);
MsBackendPolicy backend_policy = kMsBackendMsPrior;
#ifdef ENABLE_D
if (ms_context->backend_policy() == "ge") {
context::PynativeInitGe(ms_context);
backend_policy = kMsBackendGeOnly;
}
#else
if (!context::IsTsdOpened(ms_context)) {
if (!context::OpenTsd(ms_context)) {
MS_LOG(EXCEPTION) << "Open tsd failed";
@ -2025,11 +2031,6 @@ MsBackendPolicy ForwardExecutor::InitEnv(const OpExecInfoPtr &op_exec_info) {
} else {
backend_policy = kMsBackendVmOnly;
}
#else
auto ms_context = MsContext::GetInstance();
MS_EXCEPTION_IF_NULL(ms_context);
context::PynativeInitGe(ms_context);
backend_policy = kMsBackendGeOnly;
#endif
if (kVmOperators.find(op_exec_info->op_name) != kVmOperators.end()) {
backend_policy = kMsBackendVmOnly;
@ -2049,7 +2050,7 @@ py::object ForwardExecutor::RunOpWithBackendPolicy(MsBackendPolicy backend_polic
break;
}
case kMsBackendGePrior: {
#ifdef ENABLE_GE
#ifdef ENABLE_D
// use GE first, use vm when GE fails
MS_LOG(DEBUG) << "RunOp use GE first backend";
result = RunOpInGE(op_exec_info, status);

View File

@ -216,7 +216,6 @@ PynativeStatusCode ConvertAttributes(const OpExecInfoPtr &op_exec_info, const st
MS_LOG(ERROR) << "Failed to AddGraph into graph manager";
return PYNATIVE_GRAPH_MANAGER_ERR;
}
return PYNATIVE_SUCCESS;
}

View File

@ -1,11 +1,8 @@
if(ENABLE_GE OR ENABLE_D OR ENABLE_ACL)
if(ENABLE_D OR ENABLE_ACL)
file(GLOB_RECURSE _TRANSFORM_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "*.cc")
list(REMOVE_ITEM _TRANSFORM_SRC_LIST "graph_ir/op_declare/hcom_ops_declare.cc")
set_property(SOURCE ${_TRANSFORM_SRC_LIST} PROPERTY COMPILE_DEFINITIONS
SUBMODULE_ID=mindspore::SubModuleId::SM_GE_ADPT)
add_library(_mindspore_transform_graph_ir_obj OBJECT ${_TRANSFORM_SRC_LIST})
if(NOT ENABLE_GE)
target_compile_definitions(_mindspore_transform_graph_ir_obj PRIVATE NO_GE_CLIENT)
endif()
endif()

View File

@ -35,7 +35,7 @@
#include "ops/array_ops.h"
#include "ops/elewise_calculation_ops.h"
#include "ops/math_ops.h"
#ifdef ENABLE_GE
#ifdef ENABLE_D
#include "ops/save_ops.h"
#endif
@ -422,7 +422,7 @@ DfGraphConvertor &DfGraphConvertor::InitParam(const TensorOrderMap &tensors) {
return *this;
}
#if (defined ENABLE_GE)
#if (defined ENABLE_D)
void DfGraphConvertor::BuildSaveCheckpointGraph() {
std::vector<Operator> graph_inputs;
ge::op::Save save_op("save_parms");
@ -545,9 +545,13 @@ DfGraphConvertor &DfGraphConvertor::GenerateCheckpointGraph() {
MS_LOG(ERROR) << "Invalid AnfGraph in GenerateCheckpointGraph";
return *this;
}
#if (defined ENABLE_GE)
#ifdef ENABLE_D
auto ms_context = MsContext::GetInstance();
MS_EXCEPTION_IF_NULL(ms_context);
if (ms_context->backend_policy() == "ge") {
BuildSaveCheckpointGraph();
// Restoring from checkpoint file is done by pyfront, not in graph now.
}
#endif
return *this;
}
@ -566,9 +570,13 @@ DfGraphConvertor &DfGraphConvertor::ConvertAllNode() {
compute_sout_ << "digraph {" << endl;
init_sout_.clear();
init_sout_ << "digraph {" << endl;
#if (defined ENABLE_GE)
#ifdef ENABLE_D
auto ms_context = MsContext::GetInstance();
MS_EXCEPTION_IF_NULL(ms_context);
if (ms_context->backend_policy() == "ge") {
checkpoint_sout_.clear();
checkpoint_sout_ << "digraph {" << endl;
}
#endif
restore_checkpoint_sout_.clear();
restore_checkpoint_sout_ << "digraph {" << endl;

View File

@ -30,6 +30,7 @@
#include <sstream>
#include "utils/hash_map.h"
#include "utils/ms_context.h"
#include "ir/anf.h"
#include "ir/func_graph.h"
#include "transform/graph_ir/util.h"
@ -55,10 +56,14 @@ class DfGraphConvertor {
explicit DfGraphConvertor(const AnfGraphPtr &anf_graph) : anf_graph_(anf_graph) {
MS_EXCEPTION_IF_NULL(anf_graph);
df_graph_ = std::make_shared<DfGraph>(anf_graph_->ToString());
#if (!defined ENABLE_GE) || (defined ENABLE_INFER)
training_ = anf_graph->has_flag("training");
#else
#if (defined ENABLE_D) && (!defined ENABLE_INFER)
auto ms_context = MsContext::GetInstance();
MS_EXCEPTION_IF_NULL(ms_context);
if (ms_context->backend_policy() == "ge") {
training_ = ENABLE_TRAIN;
}
#else
training_ = anf_graph->has_flag("training");
#endif
distribute_ = anf_graph->has_flag("broadcast_flag");
if (anf_graph->has_flag("broadcast_flag")) {

View File

@ -27,34 +27,34 @@
#include "sys/time.h"
#include "utils/utils.h"
#include "utils/callbacks.h"
#ifdef ENABLE_GE
#ifdef ENABLE_D
#include "utils/callbacks_ge.h"
#endif
#include "utils/ms_context.h"
#ifdef NO_GE_CLIENT
namespace ge {
Session::Session(const std::map<std::string, std::string> &options) {
if (options.empty()) {
MS_LOG(ERROR) << "session input options is empty";
}
sessionId_ = 0;
}
Session::~Session() {}
} // namespace ge
#endif
#ifndef ENABLE_LITE_ASCEND
namespace py = pybind11;
#endif
namespace mindspore {
namespace transform {
std::shared_ptr<ge::Session> GraphRunner::NewSession(const SessionOptions &sess_options) {
std::shared_ptr<ge::Session> ret = std::make_shared<ge::Session>(sess_options);
#ifdef ENABLE_D
std::shared_ptr<ge::Session> ret;
auto ms_context = MsContext::GetInstance();
MS_EXCEPTION_IF_NULL(ms_context);
if (ms_context->backend_policy() == "ge") {
ret = std::make_shared<ge::Session>(sess_options);
if (ret == nullptr) {
MS_LOG(ERROR) << "Create GE session failed";
MS_LOG(EXCEPTION) << "Create GE session failed!";
return nullptr;
}
MS_LOG(INFO) << "Create new GE session success";
MS_LOG(INFO) << "Create new GE session success!";
return ret;
}
#endif
MS_LOG(WARNING) << "no GE client, return nullptr!";
return nullptr;
}
GraphRunner::GraphRunner(const GraphRunnerOptions &options)
@ -68,12 +68,14 @@ GraphRunner::GraphRunner(const GraphRunnerOptions &options)
} else {
sess_ = NewSession(options.options);
if (sess_ == nullptr) {
MS_LOG(EXCEPTION) << "GraphRunner initialize failed!!";
return;
MS_LOG(WARNING) << "graph runner sess_ is nullptr!";
}
}
#if (defined ENABLE_GE)
auto ms_context = MsContext::GetInstance();
MS_EXCEPTION_IF_NULL(ms_context);
#ifdef ENABLE_D
if (ms_context->backend_policy() == "ge") {
// register the callback function
if (sess_->RegisterCallBackFunc(callbacks::kCheckPoint, callbacks::CheckpointSaveCallback) != ge::GRAPH_SUCCESS) {
MS_LOG(EXCEPTION) << "register callback failed!";
@ -84,15 +86,18 @@ GraphRunner::GraphRunner(const GraphRunnerOptions &options)
MS_LOG(EXCEPTION) << "register summary callback failed!";
return;
}
}
#endif
std::vector<DfGraphWrapperPtr> wrappers = graph_manager_.GetAllGraphs();
if (wrappers.empty()) {
MS_LOG(INFO) << "The GraphManager is empty!!";
return;
}
#ifdef ENABLE_D
if (ms_context->backend_policy() != "ge") {
return;
}
#ifdef ENABLE_GE
for (auto &it : wrappers) {
std::set<string> saved_graph = graph_manager_.GetSavedGraphs();
auto iter_find = saved_graph.find(std::to_string(it->id_));
@ -137,17 +142,20 @@ Status GraphRunner::RunGraph(const RunOptions &options, const std::vector<GeTens
struct timeval start_time, end_time;
(void)gettimeofday(&start_time, nullptr);
#ifdef ENABLE_GE
#ifdef ENABLE_D
auto ms_context = MsContext::GetInstance();
MS_EXCEPTION_IF_NULL(ms_context);
if (ms_context->backend_policy() == "ge") {
if (sess_ == nullptr) {
MS_LOG(ERROR) << "The GE session is null, can't run the graph!";
return Status::FAILED;
}
ge::Status ret = sess_->RunGraph(wrap_ptr->id_, ge_inputs, ge_outputs);
if (ret != ge::GRAPH_SUCCESS) {
MS_LOG(ERROR) << "Call GE RunGraph Failed, ret is: " << ret;
return Status::FAILED;
}
}
#else
ge_outputs.swap(ge_inputs);
#endif

View File

@ -29,7 +29,7 @@
#include "ir/primitive.h"
#include "ir/value.h"
#include "transform/graph_ir/types.h"
#ifdef ENABLE_GE
#ifdef ENABLE_D
#ifdef OPEN_SOURCE
#include "graph/types.h"
#endif

View File

@ -23,7 +23,7 @@ DYN_INPUT_MAP(Print) = {{1, DYN_INPUT_DESC(x)}};
ATTR_MAP(Print) = EMPTY_ATTR_MAP;
REG_ADPT_DESC(Print, kNamePrint, ADPT_DESC(Print))
#ifdef ENABLE_GE
#ifdef ENABLE_D
INPUT_MAP(Assert) = {{1, INPUT_DESC(input_condition)}};
DYN_INPUT_MAP(Assert) = {{2, DYN_INPUT_DESC(input_data)}};
ATTR_MAP(Assert) = {{"summarize", ATTR_DESC(summarize, AnyTraits<int64_t>())}};

View File

@ -26,7 +26,7 @@ namespace mindspore::transform {
DECLARE_OP_ADAPTER(Print)
DECLARE_OP_USE_DYN_INPUT(Print)
#ifdef ENABLE_GE
#ifdef ENABLE_D
DECLARE_OP_ADAPTER(Assert)
DECLARE_OP_USE_DYN_INPUT(Assert)
#endif

View File

@ -55,11 +55,8 @@ INPUT_MAP(ApplyAdamD) = {{1, INPUT_DESC(var)}, {2, INPUT_DESC(m)},
ATTR_MAP(ApplyAdamD) = {{"use_locking", ATTR_DESC(use_locking, AnyTraits<bool>())},
{"use_nesterov", ATTR_DESC(use_nesterov, AnyTraits<bool>())}};
OUTPUT_MAP(ApplyAdamD) = {{0, OUTPUT_DESC(var)}, {1, OUTPUT_DESC(m)}, {2, OUTPUT_DESC(v)}};
#ifdef ENABLE_GE
REG_ADPT_DESC(ApplyAdamD, kNameApplyAdam, ADPT_DESC(ApplyAdamD))
#else
REG_ADPT_DESC(ApplyAdam, kNameApplyAdam, ADPT_DESC(ApplyAdam))
#endif
// ApplyAdagradD
INPUT_MAP(ApplyAdagradD) = {{1, INPUT_DESC(var)}, {2, INPUT_DESC(accum)}, {3, INPUT_DESC(lr)}, {4, INPUT_DESC(grad)}};

View File

@ -4,10 +4,6 @@ if(CMAKE_SYSTEM_NAME MATCHES "Windows")
list(REMOVE_ITEM _UTILS_SRC_LIST ${_UTILS_SIGNAL_SRC_FILES})
endif()
if(NOT ENABLE_GE)
file(GLOB_RECURSE _UTILS_GE_SRC_FILES ./callbacks_ge.cc)
list(REMOVE_ITEM _UTILS_SRC_LIST ${_UTILS_GE_SRC_FILES})
endif()
if(NOT ENABLE_D AND NOT ENABLE_TESTCASES)
file(GLOB_RECURSE _UTILS_D_SRC_FILES ./runtime_error_codes.cc)
list(REMOVE_ITEM _UTILS_SRC_LIST ${_UTILS_D_SRC_FILES})

View File

@ -14,6 +14,7 @@
* limitations under the License.
*/
#ifdef ENABLE_D
#include "utils/callbacks_ge.h"
#include "pybind11/pybind11.h"
#include "ir/param_info.h"
@ -186,3 +187,4 @@ uint32_t MS_EXPORT SummarySaveCallback(uint32_t graph_id, const std::map<std::st
}
} // namespace callbacks
} // namespace mindspore
#endif

View File

@ -16,6 +16,7 @@
#ifndef MINDSPORE_CCSRC_UTILS_CALLBACKS_GE_H_
#define MINDSPORE_CCSRC_UTILS_CALLBACKS_GE_H_
#ifdef ENABLE_D
#include <map>
#include <vector>
#include <string>
@ -32,5 +33,5 @@ uint32_t CheckpointSaveCallback(uint32_t, const std::map<std::string, ge::Tensor
uint32_t SummarySaveCallback(uint32_t, const std::map<std::string, ge::Tensor> &);
} // namespace callbacks
} // namespace mindspore
#endif
#endif // MINDSPORE_CCSRC_UTILS_CALLBACKS_GE_H_

View File

@ -28,7 +28,7 @@
#include "toolchain/plog.h"
#include "common/util/error_manager/error_manager.h"
#endif
#ifdef ENABLE_GE
#ifdef ENABLE_D
#include "transform/graph_ir/df_graph_manager.h"
#endif
#include "profiler/device/profiling.h"
@ -37,7 +37,7 @@ namespace py = pybind11;
namespace mindspore {
namespace context {
#ifdef ENABLE_GE
#ifdef ENABLE_D
using mindspore::transform::DfGraphManager;
#endif
@ -157,7 +157,10 @@ void GetGeOptions(const std::shared_ptr<MsContext> &ms_context_ptr, std::map<std
if (ms_context_ptr == nullptr) {
MS_LOG(EXCEPTION) << "nullptr";
}
#ifdef ENABLE_GE
if (ms_context_ptr->backend_policy() != "ge") {
return;
}
#ifdef ENABLE_D
(*ge_options)["device_id"] = "0";
(*ge_options)["ge.exec.enableDump"] = std::to_string(ms_context_ptr->get_param<bool>(MS_CTX_ENABLE_DUMP));
(*ge_options)["ge.exec.dumpPath"] = ms_context_ptr->get_param<std::string>(MS_CTX_SAVE_DUMP_PATH);
@ -275,7 +278,10 @@ bool InitGe(const std::shared_ptr<MsContext> &ms_context_ptr) {
if (ms_context_ptr == nullptr) {
MS_LOG(EXCEPTION) << "nullptr";
}
#ifdef ENABLE_GE
#ifdef ENABLE_D
if (ms_context_ptr->backend_policy() != "ge") {
return true;
}
if (ms_context_ptr->get_param<bool>(MS_CTX_IS_PYNATIVE_GE_INIT)) {
return true;
}
@ -319,7 +325,10 @@ bool FinalizeGe(const std::shared_ptr<MsContext> &ms_context_ptr, bool force) {
if (ms_context_ptr == nullptr) {
MS_LOG(EXCEPTION) << "nullptr";
}
#ifdef ENABLE_GE
#ifdef ENABLE_D
if (ms_context_ptr->backend_policy() != "ge") {
return true;
}
if (ms_context_ptr->get_param<uint32_t>(MS_CTX_GE_REF) == 0) {
return true;
}
@ -365,9 +374,7 @@ bool IsGeInited(const std::shared_ptr<MsContext> &ms_context_ptr) {
struct DeviceTypeSetRegister {
DeviceTypeSetRegister() {
MsContext::device_type_seter([](std::shared_ptr<MsContext> &device_type_seter) {
#ifdef ENABLE_GE
device_type_seter.reset(new (std::nothrow) MsContext("ge", kAscendDevice));
#elif defined(ENABLE_D)
#if defined(ENABLE_D)
device_type_seter.reset(new (std::nothrow) MsContext("ms", kAscendDevice));
#elif defined(ENABLE_GPU)
device_type_seter.reset(new (std::nothrow) MsContext("ms", kGPUDevice));

View File

@ -35,7 +35,7 @@
#include "runtime/hardware/device_context_manager.h"
#include "runtime/framework/graph_compiler.h"
#include "utils/scoped_long_running.h"
#ifdef ENABLE_GE
#ifdef ENABLE_D
#include "utils/callbacks_ge.h"
#endif
#ifdef ENABLE_DEBUGGER

View File

@ -28,7 +28,7 @@
#include "utils/ms_context.h"
#include "ps/ps_context.h"
#include "utils/anf_utils.h"
#ifdef ENABLE_GE
#ifdef ENABLE_D
#include "transform/graph_ir/convert.h"
#endif
namespace mindspore {
@ -652,7 +652,7 @@ bool GraphPartition::IsCut(const AnfNodePtr &node) {
return true;
}
}
#ifdef ENABLE_GE
#ifdef ENABLE_D
if (backend_name_ == kGeVm) {
auto name = GetCNodeFuncName(cnode);
auto adpt = transform::DfGraphConvertor::FindAdapter(name);

View File

@ -25,7 +25,7 @@
#include <vector>
#include "abstract/abstract_value.h"
#ifdef ENABLE_GE
#ifdef ENABLE_D
#include "transform/graph_ir/convert.h"
#endif
#include "ir/graph_utils.h"

View File

@ -330,7 +330,8 @@ class _Context:
'max_device_memory': set_max_device_memory,
'mempool_block_size': set_mempool_block_size,
'print_file_path': set_print_file_path,
'env_config_path': set_env_config_path
'env_config_path': set_env_config_path,
'backend_policy': set_backend_policy
}
@property
@ -607,7 +608,8 @@ def _check_target_specific_cfgs(device, arg_key):
enable_graph_kernel=bool, reserve_class_name_in_scope=bool, check_bprop=bool,
max_device_memory=str, print_file_path=str, enable_sparse=bool, max_call_depth=int,
env_config_path=str, graph_kernel_flags=str, save_compile_cache=bool,
load_compile_cache=bool, grad_for_scalar=bool, pynative_synchronize=bool, mempool_block_size=str)
load_compile_cache=bool, grad_for_scalar=bool, pynative_synchronize=bool, mempool_block_size=str,
backend_policy=str)
def set_context(**kwargs):
"""
Set context for running environment.
@ -678,6 +680,8 @@ def set_context(**kwargs):
| | enable_compile_cache | CPU/GPU/Ascend |
| +------------------------------+----------------------------+
| | compile_cache_path | CPU/GPU/Ascend |
| +------------------------------+----------------------------+
| | backend_policy | Ascend |
+-------------------------+------------------------------+----------------------------+
Args:
@ -815,6 +819,9 @@ def set_context(**kwargs):
If the specified directory does not exist, the system will automatically create the directory.
The cache will be saved to the directory of `compile_cache_path/rank_${rank_id}/`. The `rank_id` is
the ID of the current device in the cluster.
backend_policy (str): Used to choose a backend. ("ge", "vm" or "ms").
Through context.set_context(backend_policy="ms")
Default: The value must be in ['ge', 'vm', 'ms'].
Raises:
ValueError: If input key is not an attribute in context.

View File

@ -25,7 +25,6 @@ from mindspore import log as logger
from ..version import __version__
from ..default_config import __package_name__
class EnvChecker(metaclass=ABCMeta):
"""basic class for environment check"""
@ -389,9 +388,6 @@ class AscendEnvChecker(EnvChecker):
def check_version_and_env_config():
"""check version and env config"""
if os.getenv("MS_DEV_CLOSE_VERSION_CHECK") == "ON":
return
os.environ["MS_DEV_CLOSE_VERSION_CHECK"] = "ON"
if __package_name__.lower() == "mindspore-ascend":
env_checker = AscendEnvChecker()
# Note: pre-load libgomp.so to solve error like "cannot allocate memory in statis TLS block"
@ -407,6 +403,9 @@ def check_version_and_env_config():
else:
logger.info(f"Package version {__package_name__} does not need to check any environment variable, skipping.")
return
if os.getenv("MS_DEV_CLOSE_VERSION_CHECK") == "ON":
return
os.environ["MS_DEV_CLOSE_VERSION_CHECK"] = "ON"
try:
# check version of ascend site or cuda

View File

@ -18,7 +18,7 @@ set -e
build_option_proc_b()
{
if [[ "X$OPTARG" != "Xge" && "X$OPTARG" != "Xcpu" ]]; then
if [ "X$OPTARG" != "Xcpu" ]; then
echo "Invalid value ${OPTARG} for option -b"
usage
exit 1

View File

@ -255,7 +255,7 @@ if(MINDSPORE_PROTO_LIST)
set_target_properties(proto_input_ut PROPERTIES COMPILE_FLAGS "-Wno-unused-variable")
endif()
if(ENABLE_GE)
if(ENABLE_D)
if(ENABLE_TRAIN)
target_link_libraries(ut_tests PRIVATE graph ge_runner)
else()

View File

@ -64,8 +64,6 @@ TEST_F(TestConvert, TestConstruct) {
ASSERT_NE(converter.ErrCode(), SUCCESS);
}
#if (!defined ENABLE_GE)
namespace {
bool MakeDfGraph(PrimitivePtr prim, unsigned int nparam) {
@ -599,8 +597,6 @@ TEST_F(TestConvert, TestNPUClearFloatStatusOps) {
ASSERT_TRUE(ret);
}
#endif
TEST_F(TestConvert, TestAddOps) {
auto prim = std::make_shared<Primitive>("Add");
auto func_graph = MakeFuncGraph(prim, 2);

View File

@ -131,8 +131,6 @@ TEST_F(TestGraphRunner, TestGeTensorConstructor) {
assert(memcmp(ge_tensor_data, tensor_tuple_array.data(), sizeof(ge_tensor_data)) == 0);
}
#if (!defined ENABLE_GE)
TEST_F(TestGraphRunner, TestRunGraphException) {
DfGraphManager &graph_manager = DfGraphManager::GetInstance();
graph_manager.ClearGraph();
@ -244,7 +242,6 @@ TEST_F(TestGraphRunner, TestAPI) {
PrintMeTensor(&(*tensor));
}
}
#endif
} // namespace transform
} // namespace mindspore

View File

@ -33,7 +33,6 @@ class TestOpAdapter : public UT::Common {
TestOpAdapter() {}
};
#if (!defined ENABLE_GE)
#if 0
// fix conv2d ut
TEST_F(TestOpAdapter, TestSpecilization_Conv2D) {
@ -94,6 +93,5 @@ TEST_F(TestOpAdapter, TestSetAttr_Conv2d_Primitive) {
delete adpt;
}
#endif
#endif
} // namespace transform
} // namespace mindspore

View File

@ -22,7 +22,7 @@
#include "pipeline/jit/parse/python_adapter.h"
#include "transform/graph_ir/df_graph_manager.h"
#include "debug/draw.h"
#ifdef ENABLE_GE
#ifdef ENABLE_D
#include "utils/callbacks_ge.h"
#endif

View File

@ -20,17 +20,11 @@
import os
import pytest
def is_enable_ge():
val = os.getenv("ENABLE_GE", "False")
if val in ('ON', 'on', 'TRUE', 'True', 'true'):
return True
return False
non_graph_engine = pytest.mark.skipif(is_enable_ge(), reason="Not support running on GE environment")
def is_enable_gpu():
val = os.getenv("ENABLE_GPU", "False")
if val in ('ON', 'on', 'TRUE', 'True', 'true'):