adapt code to windows gpu

taipingchangan 2022-08-24 15:54:20 +08:00
parent 9eee4f4a13
commit 122417987e
4 changed files with 27 additions and 0 deletions


@@ -60,6 +60,9 @@ set(tinyxml2_LIBPATH ${tinyxml2_LIBPATH}/../bin/)
message("offline debugger does not currently support Windows")
file(GLOB PROTO_PY_PB2_LST ${CMAKE_SOURCE_DIR}/build/mindspore/proto_py/proto/*_pb2.py)
file(COPY ${PROTO_PY_PB2_LST} DESTINATION ${CMAKE_SOURCE_DIR}/mindspore/python/mindspore/train)
# set package files
install(
TARGETS _c_expression


@@ -28,7 +28,9 @@
#ifdef WITH_BACKEND
#include "mindspore/ccsrc/include/backend/data_queue/data_queue_mgr.h"
#endif
#ifndef _WIN32
#include "mindspore/ccsrc/ps/ps_cache/ps_data/ps_data_prefetch.h"
#endif
#ifdef WITH_BACKEND
#include "utils/ms_context.h"
#endif
@@ -519,11 +521,13 @@ Status DeviceQueueOp::PushDataToGPU() {
    md_channel_info_->RecordPushStartTime();
#endif
    // Data prefetch only when PS mode enables cache.
#ifndef _WIN32
    if (!ps::PsDataPrefetch::GetInstance().PrefetchData(channel_name_, items[0].data_ptr_, items[0].data_len_,
                                                        items[0].data_type_)) {
      RETURN_STATUS_ERROR(StatusCode::kMDTimeOut,
                          "[Internal ERROR] Failed to prefetch data in current PS mode(cache data when sending).");
    }
#endif
    RETURN_IF_NOT_OK(RetryPushData(items, is_profiling_enable, &push_cost));
#ifndef ENABLE_SECURITY
    ProfilingRecorder(is_profiling_enable, profiling_node, send_batch, push_cost, &batch_start_time, &end_time,


@@ -512,7 +512,9 @@ void GPUKernelExecutor::OptimizeGraph(const FuncGraphPtr &graph) const {
  // Graph kernel fusion optimization
  if (graphkernel::GraphKernelFlags::GetInstance().IsEnableGraphKernel()) {
#if (defined(ENABLE_AKG) && !defined(_WIN32))
    graphkernel::GraphKernelOptimize(kernel_graph);
#endif
    kernel_graph->SetExecOrderByDefault();
  }
@@ -547,13 +549,16 @@ void GPUKernelExecutor::SetOperatorInfo(const KernelGraphPtr &graph) const {
    mng = Manage(graph, true);
    graph->set_manager(mng);
  }
#if (defined(ENABLE_AKG) && !defined(_WIN32))
  bool do_expand = false;
#endif
  auto &node_list = graph->execution_order();
  for (auto &node : node_list) {
    auto [msg, etype] = SetKernelInfoWithMsg(node);
    if (msg.empty()) {
      continue;
    }
#if (defined(ENABLE_AKG) && !defined(_WIN32))
    auto f = [](const CNodePtr &n) {
      auto res = SetKernelInfoWithMsg(n);
      return res.first.empty();
@@ -567,11 +572,14 @@ void GPUKernelExecutor::SetOperatorInfo(const KernelGraphPtr &graph) const {
    auto expand_fg = GetCNodeFuncGraph(cnode);
    graphkernel::InlineExpandFuncGraph(cnode, expand_fg);
    do_expand = true;
#endif
  }
#if (defined(ENABLE_AKG) && !defined(_WIN32))
  if (do_expand) {
    graphkernel::BindValueToGraph().Run(graph);
    graph->SetExecOrderByDefault();
  }
#endif
}

void GPUKernelExecutor::CreateKernel(const std::vector<CNodePtr> &nodes) const {


@@ -14,6 +14,7 @@
# ============================================================================
"""version and config check"""
import os
import platform
import sys
import time
import subprocess
@@ -455,5 +456,16 @@ def _set_pb_env():
    os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"


def _add_cuda_path():
    """Add the CUDA runtime's bin directory to the Windows DLL search path for mindspore-gpu."""
    if platform.system().lower() == 'windows':
        if __package_name__.lower() == "mindspore_gpu":
            cuda_home = os.environ.get('CUDA_PATH')
            if cuda_home is None:
                logger.error("mindspore-gpu on Windows requires the CUDA_PATH environment variable, but it is not set.")
            else:
                os.add_dll_directory(os.path.join(cuda_home, 'bin'))


check_version_and_env_config()
_set_pb_env()
_add_cuda_path()
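
Note: the _add_cuda_path hook added above relies on os.add_dll_directory, which is available on Windows since Python 3.8, so that the CUDA runtime DLLs can be found when the GPU extension modules are imported. Below is a minimal standalone sketch of the same pattern; the name _register_cuda_dll_dir is illustrative only and not part of this commit, and it assumes CUDA_PATH points at a CUDA toolkit installation.

import os
import platform

def _register_cuda_dll_dir():
    # Sketch only: mirrors the pattern used by _add_cuda_path in this commit.
    if platform.system().lower() != 'windows':
        return
    cuda_home = os.environ.get('CUDA_PATH')
    if cuda_home is None:
        raise EnvironmentError("CUDA_PATH is not set; CUDA runtime DLLs cannot be located.")
    # Registering the bin directory affects subsequent extension-module imports
    # that link against the CUDA DLLs.
    os.add_dll_directory(os.path.join(cuda_home, 'bin'))

_register_cuda_dll_dir()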