From 31ac782bf73cff248b621af920299e64a0a15108 Mon Sep 17 00:00:00 2001
From: taipingchangan <1196608768@qq.com>
Date: Sat, 3 Sep 2022 09:49:25 +0800
Subject: [PATCH] adapt code for msvc

---
 cmake/init.cmake | 26 +++++++++--------
 cmake/mind_expression.cmake | 2 ++
 mindspore/ccsrc/CMakeLists.txt | 28 ++++++++++++++++---
 .../backend/common/session/session_basic.cc | 25 +++++++++--------
 mindspore/ccsrc/cxx_api/CMakeLists.txt | 11 ++++++++
 .../persistent/storage/file_io_utils.cc | 15 ++++++++--
 .../persistent/storage/file_io_utils.h | 4 +++
 .../cache_embedding/cache_embedding.cc | 2 +-
 .../parallel/pynative_shard/pynative_shard.cc | 2 +-
 .../backend/data_queue/blocking_queue.h | 1 -
 .../include/backend/data_queue/data_queue.h | 1 -
 .../backend/data_queue/data_queue_mgr.h | 1 -
 mindspore/ccsrc/include/common/duplex_pipe.h | 1 +
 mindspore/ccsrc/kernel/kernel.h | 1 +
 .../dataset/audio/kernels/flanger_op.h | 3 +-
 .../dataset/engine/cache/CMakeLists.txt | 3 +-
 .../dataset/engine/cache/cache_request.cc | 1 +
 .../dataset/engine/cache/cache_server.cc | 1 +
 mindspore/ccsrc/pipeline/jit/pipeline.cc | 4 +++
 .../cpu/hal/device/cpu_kernel_runtime.cc | 1 -
 .../kernel/eigen/random_poisson_cpu_kernel.cc | 9 ++++++
 .../cpu/kernel/mkldnn/eltwise_cpu_kernel.cc | 21 +++++++++-----
 .../device/cpu/kernel/nnacl/CMakeLists.txt | 2 +-
 .../cpu/kernel/rl/buffer_sample_cpu_kernel.h | 6 ++--
 .../ccsrc/plugin/device/gpu/CMakeLists.txt | 12 ++++++++
 .../gpu/kernel/cuda_impl/CMakeLists.txt | 11 ++++++++
 .../request_process_result_code.h | 4 +--
 mindspore/core/ops/concat.cc | 5 ++--
 mindspore/core/utils/log_adapter.h | 1 +
 mindspore/core/utils/macros.h | 4 +--
 mindspore/core/utils/system/file_system.h | 2 +-
 31 files changed, 153 insertions(+), 57 deletions(-)

diff --git a/cmake/init.cmake b/cmake/init.cmake
index 3f4cc595783..a60f751d625 100644
--- a/cmake/init.cmake
+++ b/cmake/init.cmake
@@ -13,23 +13,25 @@ if(MSVC)
     cmake_host_system_information(RESULT CPU_CORES QUERY NUMBER_OF_LOGICAL_CORES)
     message("CPU_CORE number = ${CPU_CORES}")
     math(EXPR MP_NUM "${CPU_CORES} * 2")
-    set(CMAKE_C_FLAGS "/MD /O2 /Ob2 /DNDEBUG /MP${MP_NUM} /EHsc")
-    set(CMAKE_C_FLAGS_DEBUG "/MDd /Zi /Ob0 /Od /RTC1 /MP${MP_NUM} /EHsc")
-    set(CMAKE_C_FLAGS_RELEASE "/MD /O2 /Ob2 /DNDEBUG /MP${MP_NUM} /EHsc")
-    set(CMAKE_C_FLAGS_RELWITHDEBINFO "/MD /Zi /O2 /Ob1 /DNDEBUG /MP${MP_NUM} /EHsc")
-    set(CMAKE_C_FLAGS_MINSIZEREL "/MD /O1 /Ob1 /DNDEBUG /MP${MP_NUM} /EHsc")
+    set(CMAKE_C_FLAGS "/MD /O2 /Ob2 /DNDEBUG /MP${MP_NUM} /EHsc /bigobj")
+    set(CMAKE_C_FLAGS_DEBUG "/MDd /Zi /Ob0 /Od /RTC1 /MP${MP_NUM} /EHsc /bigobj")
+    set(CMAKE_C_FLAGS_RELEASE "/MD /O2 /Ob2 /DNDEBUG /MP${MP_NUM} /EHsc /bigobj")
+    set(CMAKE_C_FLAGS_RELWITHDEBINFO "/MD /Zi /O2 /Ob1 /DNDEBUG /MP${MP_NUM} /EHsc /bigobj")
+    set(CMAKE_C_FLAGS_MINSIZEREL "/MD /O1 /Ob1 /DNDEBUG /MP${MP_NUM} /EHsc /bigobj")
-    set(CMAKE_CXX_FLAGS "/MD /O2 /Ob2 /DNDEBUG /MP${MP_NUM} /EHsc")
-    set(CMAKE_CXX_FLAGS_DEBUG "/MDd /Zi /Ob0 /Od /RTC1 /MP${MP_NUM} /EHsc")
-    set(CMAKE_CXX_FLAGS_RELEASE "/MD /O2 /Ob2 /DNDEBUG /MP${MP_NUM} /EHsc")
-    set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "/MD /Zi /O2 /Ob1 /DNDEBUG /MP${MP_NUM} /EHsc")
-    set(CMAKE_CXX_FLAGS_MINSIZEREL "/MD /O1 /Ob1 /DNDEBUG /MP${MP_NUM} /EHsc")
+    set(CMAKE_CXX_FLAGS "/MD /O2 /Ob2 /DNDEBUG /MP${MP_NUM} /EHsc /bigobj")
+    set(CMAKE_CXX_FLAGS_DEBUG "/MDd /Zi /Ob0 /Od /RTC1 /MP${MP_NUM} /EHsc /bigobj")
+    set(CMAKE_CXX_FLAGS_RELEASE "/MD /O2 /Ob2 /DNDEBUG /MP${MP_NUM} /EHsc /bigobj")
+    set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "/MD /Zi /O2 /Ob1 /DNDEBUG /MP${MP_NUM} /EHsc /bigobj")
+    set(CMAKE_CXX_FLAGS_MINSIZEREL "/MD /O1 /Ob1 /DNDEBUG /MP${MP_NUM} /EHsc /bigobj")
     # resolve std::min/std::max and opencv::min opencv:max had defined in windows.h
     add_definitions(-DNOMINMAX)
-    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4251 /wd4819 /wd4715 /wd4244 /wd4267 /wd4716")
-    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4251 /wd4819 /wd4715 /wd4244 /wd4267 /wd4716")
+    # resolve ERROR had defined in windows.h
+    add_definitions(-DNOGDI)
+    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4251 /wd4819 /wd4715 /wd4244 /wd4267 /wd4716 /wd4566 /wd4273")
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4251 /wd4819 /wd4715 /wd4244 /wd4267 /wd4716 /wd4566 /wd4273")
     if(ENABLE_GPU)
         message("init cxx_flags on windows_gpu")
diff --git a/cmake/mind_expression.cmake b/cmake/mind_expression.cmake
index 7f4f902ad18..b6040e87f6e 100644
--- a/cmake/mind_expression.cmake
+++ b/cmake/mind_expression.cmake
@@ -58,7 +58,9 @@ endif()
 if(ENABLE_GPU)
     include(${CMAKE_SOURCE_DIR}/cmake/external_libs/cub.cmake)
+    if(NOT MSVC)
     include(${CMAKE_SOURCE_DIR}/cmake/external_libs/fast_transformers.cmake)
+    endif()
     if(ENABLE_MPI)
         include(${CMAKE_SOURCE_DIR}/cmake/external_libs/nccl.cmake)
     endif()
diff --git a/mindspore/ccsrc/CMakeLists.txt b/mindspore/ccsrc/CMakeLists.txt
index a15603c0083..0266c5f0972 100644
--- a/mindspore/ccsrc/CMakeLists.txt
+++ b/mindspore/ccsrc/CMakeLists.txt
@@ -115,10 +115,18 @@ if(ENABLE_GPU)
     if(DEFINED ENV{CUDNN_HOME} AND NOT $ENV{CUDNN_HOME} STREQUAL "")
         set(CUDNN_INCLUDE_DIR $ENV{CUDNN_HOME}/include)
+        if(WIN32)
+            set(CUDNN_LIBRARY_DIR $ENV{CUDNN_HOME}/lib)
+        else()
         set(CUDNN_LIBRARY_DIR $ENV{CUDNN_HOME}/lib64)
+        endif()
         find_path(CUDNN_INCLUDE_PATH cudnn.h HINTS ${CUDNN_INCLUDE_DIR} NO_DEFAULT_PATH)
         find_library(CUDNN_LIBRARY_PATH "cudnn" HINTS ${CUDNN_LIBRARY_DIR} NO_DEFAULT_PATH)
+        if(WIN32)
+            find_library(CUBLAS_LIBRARY_PATH "cublas" HINTS ${CUDA_PATH}/lib/x64)
+        else()
         find_library(CUBLAS_LIBRARY_PATH "cublas" HINTS ${CUDNN_LIBRARY_DIR})
+        endif()
         if(CUDNN_INCLUDE_PATH STREQUAL CUDNN_INCLUDE_PATH-NOTFOUND)
             message(FATAL_ERROR "Failed to find cudnn header file, please set environment variable CUDNN_HOME to \
                 cudnn installation position.")
@@ -162,7 +170,9 @@ if(ENABLE_GPU)
     ## set NVCC ARCH FLAG
     set(CUDA_NVCC_FLAGS)
     set_nvcc_flag(CUDA_NVCC_FLAGS)
+    if(NOT MSVC)
     add_definitions(-Wno-unknown-pragmas) # Avoid compilation warnings from cuda/thrust
+    endif()
     if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
         list(APPEND CUDA_NVCC_FLAGS -G)
         message("CUDA_NVCC_FLAGS" ${CUDA_NVCC_FLAGS})
@@ -203,7 +213,9 @@ if(CMAKE_SYSTEM_NAME MATCHES "Windows" AND NOT MSVC)
 endif()
 # Set compile flags to ensure float compute consistency.
+if(NOT MSVC)
 set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-fast-math")
+endif()
 if(ENABLE_MPI)
     add_compile_definitions(ENABLE_MPI)
@@ -453,8 +465,12 @@ endif()
 if(MODE_ASCEND_ALL)
     target_link_libraries(mindspore PUBLIC -Wl,--start-group proto_input mindspore::protobuf -Wl,--end-group)
 elseif(CMAKE_SYSTEM_NAME MATCHES "Windows")
-    target_link_libraries(mindspore PUBLIC -Wl,--start-group proto_input mindspore::protobuf mindspore::sentencepiece
-        -Wl,--end-group)
+    if(MSVC)
+        target_link_libraries(mindspore PUBLIC proto_input mindspore::protobuf mindspore::sentencepiece)
+    else()
+        target_link_libraries(mindspore PUBLIC -Wl,--start-group proto_input mindspore::protobuf
+            mindspore::sentencepiece -Wl,--end-group)
+    endif()
 elseif(CMAKE_SYSTEM_NAME MATCHES "Darwin")
     target_link_libraries(mindspore PUBLIC -Wl proto_input mindspore::protobuf mindspore::sentencepiece -Wl)
 else()
@@ -495,8 +511,12 @@ endif()
 if(CMAKE_SYSTEM_NAME MATCHES "Windows")
     target_link_libraries(mindspore PUBLIC mindspore::pybind11_module)
-    target_link_libraries(_c_expression PRIVATE -Wl,--whole-archive mindspore -Wl,--no-whole-archive mindspore_core
-        mindspore_common mindspore_backend)
+    if(NOT MSVC)
+        target_link_libraries(_c_expression PRIVATE -Wl,--whole-archive mindspore -Wl,--no-whole-archive mindspore_core
+            mindspore_common mindspore_backend)
+    else()
+        target_link_libraries(_c_expression PRIVATE mindspore_core mindspore_common mindspore_backend mindspore)
+    endif()
 elseif(CMAKE_SYSTEM_NAME MATCHES "Darwin")
     target_link_libraries(mindspore PUBLIC proto_input mindspore::protobuf mindspore::eigen mindspore::json)
     target_link_libraries(_c_expression PRIVATE -Wl,-all_load mindspore proto_input -Wl,-noall_load mindspore_core
diff --git a/mindspore/ccsrc/backend/common/session/session_basic.cc b/mindspore/ccsrc/backend/common/session/session_basic.cc
index e448cfe78c1..19bbdb7cc12 100644
--- a/mindspore/ccsrc/backend/common/session/session_basic.cc
+++ b/mindspore/ccsrc/backend/common/session/session_basic.cc
@@ -1414,18 +1414,19 @@ BackendOpRunInfoPtr SessionBasic::GetSingleOpRunInfo(const CNodePtr &cnode, cons
     [cnode](const std::pair>> &output_index) { return output_index.first.first == cnode; });
-  pynative::BaseOpRunInfo base_op_run_info = {.has_dynamic_input = common::AnfAlgo::IsNodeInputDynamicShape(cnode),
-                                              .has_dynamic_output = shape->IsDynamic(),
-                                              .is_mixed_precision_cast = false,
-                                              .lazy_build = !shape->IsDynamic(),
-                                              .op_name = primitive->name(),
-                                              .next_op_name = std::string(),
-                                              .graph_info = graph_info,
-                                              .device_target = GetOpRunDeviceTarget(primitive),
-                                              .next_input_index = 0,
-                                              .input_tensor = tensor_info.input_tensors,
-                                              .input_mask = tensor_info.input_tensors_mask,
-                                              .abstract = abstract};
+  pynative::BaseOpRunInfo base_op_run_info;
+  base_op_run_info.has_dynamic_input = common::AnfAlgo::IsNodeInputDynamicShape(cnode);
+  base_op_run_info.has_dynamic_output = shape->IsDynamic();
+  base_op_run_info.is_mixed_precision_cast = false;
+  base_op_run_info.lazy_build = !shape->IsDynamic();
+  base_op_run_info.op_name = primitive->name();
+  base_op_run_info.next_op_name = std::string();
+  base_op_run_info.graph_info = graph_info;
+  base_op_run_info.device_target = GetOpRunDeviceTarget(primitive);
+  base_op_run_info.next_input_index = 0;
+  base_op_run_info.input_tensor = tensor_info.input_tensors;
+  base_op_run_info.input_mask = tensor_info.input_tensors_mask;
+  base_op_run_info.abstract = abstract;
   return std::make_shared(base_op_run_info, primitive.get(), false, is_gradient_out);
 }
diff --git a/mindspore/ccsrc/cxx_api/CMakeLists.txt b/mindspore/ccsrc/cxx_api/CMakeLists.txt
index a3dabc68549..e7a83ebd96d 100644
--- a/mindspore/ccsrc/cxx_api/CMakeLists.txt
+++ b/mindspore/ccsrc/cxx_api/CMakeLists.txt
@@ -174,6 +174,16 @@ if(ENABLE_D)
 endif()
 if(ENABLE_GPU)
+    if(WIN32)
+        target_link_libraries(mindspore_shared_lib PRIVATE cuda_ops
+                              ${CUBLAS_LIBRARY_PATH}
+                              ${CUDA_PATH}/lib/x64/curand.lib
+                              ${CUDNN_LIBRARY_PATH}
+                              ${CUDA_PATH}/lib/x64/cudart.lib
+                              ${CUDA_PATH}/lib/x64/cuda.lib
+                              ${CUDA_PATH}/lib/x64/cusolver.lib
+                              ${CUDA_PATH}/lib/x64/cufft.lib)
+    else()
     target_link_libraries(mindspore_shared_lib PRIVATE cuda_ops
                           ${CUBLAS_LIBRARY_PATH}
                           ${CUDA_PATH}/lib64/libcurand.so
@@ -182,6 +192,7 @@ if(ENABLE_GPU)
                           ${CUDA_PATH}/lib64/stubs/libcuda.so
                           ${CUDA_PATH}/lib64/libcusolver.so
                           ${CUDA_PATH}/lib64/libcufft.so)
+    endif()
 endif()
 if(CMAKE_SYSTEM_NAME MATCHES "Linux")
diff --git a/mindspore/ccsrc/distributed/persistent/storage/file_io_utils.cc b/mindspore/ccsrc/distributed/persistent/storage/file_io_utils.cc
index 54b49a6dbc7..a9ed5d8f10c 100644
--- a/mindspore/ccsrc/distributed/persistent/storage/file_io_utils.cc
+++ b/mindspore/ccsrc/distributed/persistent/storage/file_io_utils.cc
@@ -15,14 +15,12 @@
  */
 #include "distributed/persistent/storage/file_io_utils.h"
-
-#include
-#include
 #include
 #include "mindspore/core/utils/file_utils.h"
 #include "utils/convert_utils_base.h"
 #include "utils/log_adapter.h"
+#include "utils/os.h"
 namespace mindspore {
 namespace distributed {
@@ -127,13 +125,16 @@ bool FileIOUtils::IsFileOrDirExist(const std::string &path) {
 }
 void FileIOUtils::CreateFile(const std::string &file_path, mode_t mode) {
+  (void)mode;
   if (IsFileOrDirExist(file_path)) {
     return;
   }
   std::ofstream output_file(file_path);
   output_file.close();
+#ifndef _MSC_VER
   ChangeFileMode(file_path, mode);
+#endif
 }
 void FileIOUtils::CreateDir(const std::string &dir_path, mode_t mode) {
@@ -142,7 +143,11 @@ void FileIOUtils::CreateDir(const std::string &dir_path, mode_t mode) {
   }
 #if defined(_WIN32) || defined(_WIN64)
+#ifndef _MSC_VER
   int ret = mkdir(dir_path.c_str());
+#else
+  int ret = _mkdir(dir_path.c_str());
+#endif
 #else
   int ret = mkdir(dir_path.c_str(), mode);
   if (ret == 0) {
@@ -173,7 +178,11 @@ void FileIOUtils::CreateDirRecursive(const std::string &dir_path, mode_t mode) {
   }
 #if defined(_WIN32) || defined(_WIN64)
+#ifndef _MSC_VER
   int32_t ret = mkdir(tmp_dir_path);
+#else
+  int32_t ret = _mkdir(tmp_dir_path);
+#endif
   if (ret != 0) {
Errno = " << errno; } diff --git a/mindspore/ccsrc/distributed/persistent/storage/file_io_utils.h b/mindspore/ccsrc/distributed/persistent/storage/file_io_utils.h index b27f78573e9..fb1e54c7fd0 100644 --- a/mindspore/ccsrc/distributed/persistent/storage/file_io_utils.h +++ b/mindspore/ccsrc/distributed/persistent/storage/file_io_utils.h @@ -22,6 +22,10 @@ #include #include #include +#include "utils/os.h" +#ifdef CreateFile +#undef CreateFile +#endif namespace mindspore { namespace distributed { diff --git a/mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc b/mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc index 4521341b75b..89132c932eb 100644 --- a/mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc +++ b/mindspore/ccsrc/frontend/parallel/cache_embedding/cache_embedding.cc @@ -216,7 +216,7 @@ void InitHashMapData(void *data, const int64_t host_size, const int64_t cache_si for (int64_t i = 0; i < host_size; ++i) { host_range.emplace_back(static_cast(i)); } -#if defined(__APPLE__) +#if defined(__APPLE__) || defined(_MSC_VER) std::random_device rd; std::mt19937 rng(rd()); std::shuffle(host_range.begin(), host_range.end(), rng); diff --git a/mindspore/ccsrc/frontend/parallel/pynative_shard/pynative_shard.cc b/mindspore/ccsrc/frontend/parallel/pynative_shard/pynative_shard.cc index d1696281681..a35da502c51 100644 --- a/mindspore/ccsrc/frontend/parallel/pynative_shard/pynative_shard.cc +++ b/mindspore/ccsrc/frontend/parallel/pynative_shard/pynative_shard.cc @@ -366,7 +366,7 @@ static std::set SetParameterLayout(const FuncGraphPtr &root, const Fun PrimitivePtr prim = GetCNodePrimitive(cnode); MS_EXCEPTION_IF_NULL(prim); auto attrs = prim->attrs(); - if (!attrs.contains(parallel::IN_STRATEGY)) { + if (attrs.count(parallel::IN_STRATEGY) == 0) { auto empty_strategies = GenerateEmptyStrategies(cnode); attrs[parallel::IN_STRATEGY] = ShapesToValueTuplePtr(empty_strategies); } diff --git a/mindspore/ccsrc/include/backend/data_queue/blocking_queue.h b/mindspore/ccsrc/include/backend/data_queue/blocking_queue.h index 8e276ee7e7c..2b40f57184e 100644 --- a/mindspore/ccsrc/include/backend/data_queue/blocking_queue.h +++ b/mindspore/ccsrc/include/backend/data_queue/blocking_queue.h @@ -17,7 +17,6 @@ #ifndef MINDSPORE_CCSRC_INCLUDE_BACKEND_DATA_QUEUE_BLOCKING_QUEUE_H #define MINDSPORE_CCSRC_INCLUDE_BACKEND_DATA_QUEUE_BLOCKING_QUEUE_H -#include #include #include #include diff --git a/mindspore/ccsrc/include/backend/data_queue/data_queue.h b/mindspore/ccsrc/include/backend/data_queue/data_queue.h index ca32bf20dc5..1a0a749e27f 100644 --- a/mindspore/ccsrc/include/backend/data_queue/data_queue.h +++ b/mindspore/ccsrc/include/backend/data_queue/data_queue.h @@ -17,7 +17,6 @@ #ifndef MINDSPORE_CCSRC_INCLUDE_BACKEND_DATA_QUEUE_DATA_QUEUE_H #define MINDSPORE_CCSRC_INCLUDE_BACKEND_DATA_QUEUE_DATA_QUEUE_H -#include #include #include #include diff --git a/mindspore/ccsrc/include/backend/data_queue/data_queue_mgr.h b/mindspore/ccsrc/include/backend/data_queue/data_queue_mgr.h index e988055d290..62fcf2f4b85 100644 --- a/mindspore/ccsrc/include/backend/data_queue/data_queue_mgr.h +++ b/mindspore/ccsrc/include/backend/data_queue/data_queue_mgr.h @@ -17,7 +17,6 @@ #ifndef MINDSPORE_CCSRC_INCLUDE_BACKEND_DATA_QUEUE_DATA_QUEUE_MGR_H #define MINDSPORE_CCSRC_INCLUDE_BACKEND_DATA_QUEUE_DATA_QUEUE_MGR_H -#include #include #include #include diff --git a/mindspore/ccsrc/include/common/duplex_pipe.h b/mindspore/ccsrc/include/common/duplex_pipe.h index 6b651fa1aa7..a2ba1ac8897 
index 6b651fa1aa7..a2ba1ac8897 100644
--- a/mindspore/ccsrc/include/common/duplex_pipe.h
+++ b/mindspore/ccsrc/include/common/duplex_pipe.h
@@ -24,6 +24,7 @@
 #include
 #include "utils/log_adapter.h"
+#include "utils/os.h"
 #include "include/common/visible.h"
 #define DP_DEBUG MS_LOG(DEBUG) << "[DuplexPipe] "
diff --git a/mindspore/ccsrc/kernel/kernel.h b/mindspore/ccsrc/kernel/kernel.h
index e35aa828827..54225070a35 100644
--- a/mindspore/ccsrc/kernel/kernel.h
+++ b/mindspore/ccsrc/kernel/kernel.h
@@ -20,6 +20,7 @@
 #include
 #include
 #include
+#include
 #include "nlohmann/json.hpp"
 #include "ir/anf.h"
 #include "ir/dtype.h"
diff --git a/mindspore/ccsrc/minddata/dataset/audio/kernels/flanger_op.h b/mindspore/ccsrc/minddata/dataset/audio/kernels/flanger_op.h
index fabc5b41145..cc03625e2f4 100644
--- a/mindspore/ccsrc/minddata/dataset/audio/kernels/flanger_op.h
+++ b/mindspore/ccsrc/minddata/dataset/audio/kernels/flanger_op.h
@@ -47,7 +47,8 @@ class FlangerOp : public TensorOp {
   void Print(std::ostream &out) const override {
     out << Name() << ": sample_rate: " << sample_rate_ << ", delay:" << delay_ << ", depth: " << depth_
         << ", regen: " << regen_ << ", width: " << width_ << ", speed: " << speed_ << ", phase: " << phase_
-        << ", Modulation: " << static_cast(Modulation_) << ", Interpolation: " << Interpolation_ << std::endl;
+        << ", Modulation: " << static_cast(Modulation_) << ", Interpolation: " << static_cast(Interpolation_)
+        << std::endl;
   }
   Status Compute(const std::shared_ptr &input, std::shared_ptr *output) override;
diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/CMakeLists.txt b/mindspore/ccsrc/minddata/dataset/engine/cache/CMakeLists.txt
index 853e9255119..874ac45dd7a 100644
--- a/mindspore/ccsrc/minddata/dataset/engine/cache/CMakeLists.txt
+++ b/mindspore/ccsrc/minddata/dataset/engine/cache/CMakeLists.txt
@@ -45,8 +45,9 @@ else()
         set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wl,-rpath,$ORIGIN:$ORIGIN/..:$ORIGIN/../lib")
     endif()
 endif()
-
+if(NOT MSVC)
 set(CMAKE_CXX_FLAGS "-fPIE ${CMAKE_CXX_FLAGS}")
+endif()
 if(ENABLE_CACHE)
     ms_grpc_generate(CACHE_GRPC_SRCS CACHE_GRPC_HDRS cache_grpc.proto)
diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_request.cc b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_request.cc
index 3547af60e68..f48dd462070 100644
--- a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_request.cc
+++ b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_request.cc
@@ -25,6 +25,7 @@
 #include "minddata/dataset/include/dataset/constants.h"
 #include "minddata/dataset/engine/cache/cache_client.h"
 #include "minddata/dataset/engine/cache/cache_fbb.h"
+#undef BitTest
 namespace mindspore {
 namespace dataset {
 Status BaseRequest::Wait() {
diff --git a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_server.cc b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_server.cc
index 98dfa8a4f7e..c7fbb2d49db 100644
--- a/mindspore/ccsrc/minddata/dataset/engine/cache/cache_server.cc
+++ b/mindspore/ccsrc/minddata/dataset/engine/cache/cache_server.cc
@@ -28,6 +28,7 @@
 #ifdef CACHE_LOCAL_CLIENT
 #include "minddata/dataset/util/sig_handler.h"
 #endif
+#undef BitTest
 namespace mindspore {
 namespace dataset {
diff --git a/mindspore/ccsrc/pipeline/jit/pipeline.cc b/mindspore/ccsrc/pipeline/jit/pipeline.cc
index 48f8ff63acc..1062c2b972e 100644
--- a/mindspore/ccsrc/pipeline/jit/pipeline.cc
+++ b/mindspore/ccsrc/pipeline/jit/pipeline.cc
@@ -1020,8 +1020,12 @@ bool GraphExecutorPy::Compile(const py::object &source_obj, const py::tuple &arg
     throw(std::runtime_error(ex.what()));
   } catch (...) {
     ReleaseResource(phase);
+#ifndef _MSC_VER
     std::string exName(abi::__cxa_current_exception_type()->name());
     MS_LOG(EXCEPTION) << "Error occurred when compile graph. Exception name: " << exName;
+#else
+    MS_LOG(EXCEPTION) << "Error occurred when compile graph. Exception name: ";
+#endif
   }
   return ret_value;
 }
diff --git a/mindspore/ccsrc/plugin/device/cpu/hal/device/cpu_kernel_runtime.cc b/mindspore/ccsrc/plugin/device/cpu/hal/device/cpu_kernel_runtime.cc
index d8727430faf..8e20e5a4afe 100644
--- a/mindspore/ccsrc/plugin/device/cpu/hal/device/cpu_kernel_runtime.cc
+++ b/mindspore/ccsrc/plugin/device/cpu/hal/device/cpu_kernel_runtime.cc
@@ -14,7 +14,6 @@
  * limitations under the License.
  */
 #include "plugin/device/cpu/hal/device/cpu_kernel_runtime.h"
-#include
 #include
 #include
 #include
diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/eigen/random_poisson_cpu_kernel.cc b/mindspore/ccsrc/plugin/device/cpu/kernel/eigen/random_poisson_cpu_kernel.cc
index ddfb267a96a..dab7f790d49 100644
--- a/mindspore/ccsrc/plugin/device/cpu/kernel/eigen/random_poisson_cpu_kernel.cc
+++ b/mindspore/ccsrc/plugin/device/cpu/kernel/eigen/random_poisson_cpu_kernel.cc
@@ -34,10 +34,19 @@ using KernelRunFunc = RandomPoissonCpuKernelMod::KernelRunFunc;
 }
 static unsigned int s_seed = static_cast(time(nullptr));
+#ifndef _MSC_VER
 EIGEN_DEVICE_FUNC uint64_t get_random_seed() {
   auto rnd = rand_r(&s_seed);
   return IntToSize(rnd);
 }
+#else
+EIGEN_DEVICE_FUNC uint64_t get_random_seed() {
+  std::random_device rd;
+  std::mt19937 gen(rd());
+  std::uniform_int_distribution distribution(0, std::numeric_limits::max());
+  return distribution(gen);
+}
+#endif
 static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE uint64_t PCG_XSH_RS_state(uint64_t seed) {
   seed = (seed == 0) ? get_random_seed() : seed;
diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/mkldnn/eltwise_cpu_kernel.cc b/mindspore/ccsrc/plugin/device/cpu/kernel/mkldnn/eltwise_cpu_kernel.cc
index d07871c857a..f33b9b8325d 100644
--- a/mindspore/ccsrc/plugin/device/cpu/kernel/mkldnn/eltwise_cpu_kernel.cc
+++ b/mindspore/ccsrc/plugin/device/cpu/kernel/mkldnn/eltwise_cpu_kernel.cc
@@ -61,7 +61,9 @@ class EltwiseCpuKernelFunc : public CpuKernelFunc {
     auto kernel_attr = GetKernelAttrFromTensors(inputs, outputs);
     auto iter = eltwise_func_map.find(kernel_name_);
     if (iter == eltwise_func_map.end()) {
-      MS_LOG(EXCEPTION) << "For 'EltWise Op', the kernel name must be in " << kernel::Map2Str(eltwise_func_map)
+      MS_LOG(EXCEPTION) << "For 'EltWise Op', the kernel name must be in "
+                        << kernel::Map2Str>>(
+                             eltwise_func_map)
                         << ", but got " << kernel_name_;
     }
     std::vector support_list;
@@ -165,8 +167,11 @@ bool EltWiseCpuKernelMod::Init(const BaseOperatorPtr &base_operator, const std::
   }
   auto iter = additional_kernel_attr_map_.find(kernel_name_);
   if (iter == additional_kernel_attr_map_.end()) {
-    MS_LOG(ERROR) << "For 'EltWise Op', the kernel name must be in " << kernel::Map2Str(additional_kernel_attr_map_)
-                  << ", but got " << kernel_name_;
+    MS_LOG(ERROR)
+      << "For 'EltWise Op', the kernel name must be in "
+      << kernel::Map2Str>>(
+           additional_kernel_attr_map_)
+      << ", but got " << kernel_name_;
     return false;
   }
   additional_func_ = iter->second[index].second();
@@ -180,8 +185,9 @@ bool EltWiseCpuKernelMod::Init(const BaseOperatorPtr &base_operator, const std::
   }
   auto iter = mkl_kernel_attr_map_.find(kernel_name_);
   if (iter == mkl_kernel_attr_map_.end()) {
-    MS_LOG(ERROR) << "For 'EltWise Op', the kernel name must be in " << kernel::Map2Str(mkl_kernel_attr_map_)
-                  << ", but got " << kernel_name_;
+    MS_LOG(ERROR) << "For 'EltWise Op', the kernel name must be in "
+                  << kernel::Map2Str>(mkl_kernel_attr_map_) << ", but got "
+                  << kernel_name_;
     return false;
   }
 }
@@ -247,8 +253,9 @@ std::vector EltWiseCpuKernelMod::GetOpSupport() {
   // only mkl_kernel_attr_map_ need to be checked since it contains all kind of ops
   auto iter = mkl_kernel_attr_map_.find(kernel_name_);
   if (iter == mkl_kernel_attr_map_.end()) {
-    MS_LOG(ERROR) << "For 'EltWise Op', the kernel name must be in " << kernel::Map2Str(mkl_kernel_attr_map_)
-                  << ", but got " << kernel_name_;
+    MS_LOG(ERROR) << "For 'EltWise Op', the kernel name must be in "
+                  << kernel::Map2Str>(mkl_kernel_attr_map_) << ", but got "
+                  << kernel_name_;
     return std::vector{};
   }
   std::vector support_list;
diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/CMakeLists.txt b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/CMakeLists.txt
index ba1c9a67760..7eef8172f51 100644
--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/CMakeLists.txt
+++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/CMakeLists.txt
@@ -51,7 +51,7 @@ generate_simd_code(AVX 8 "\"avx\", \"avx2\"")
 generate_simd_code(AVX512 16 \"avx512f\")
 generate_simd_header_code()
-if(ENABLE_CPU)
+if(ENABLE_CPU AND NOT MSVC)
     set(CMAKE_C_FLAGS "-Wno-attributes ${CMAKE_C_FLAGS}")
 endif()
diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/rl/buffer_sample_cpu_kernel.h b/mindspore/ccsrc/plugin/device/cpu/kernel/rl/buffer_sample_cpu_kernel.h
index 72c7635fbce..c02583290cd 100644
--- a/mindspore/ccsrc/plugin/device/cpu/kernel/rl/buffer_sample_cpu_kernel.h
+++ b/mindspore/ccsrc/plugin/device/cpu/kernel/rl/buffer_sample_cpu_kernel.h
@@ -80,10 +80,10 @@ class BufferCPUSampleKernelMod : public DeprecatedNativeCpuKernelMod {
       for (size_t i = 0; i < IntToSize(count_addr[0]); ++i) {
         (void)indexes.emplace_back(i);
       }
-#if !defined(__APPLE__)
-      random_shuffle(indexes.begin(), indexes.end(), [&](int i) { return std::rand() % i; });
-#else
+#if defined(__APPLE__) || defined(_MSC_VER)
       std::shuffle(indexes.begin(), indexes.end(), generator_);
+#else
+      random_shuffle(indexes.begin(), indexes.end(), [&](int i) { return std::rand() % i; });
 #endif
     } else {
       std::uniform_int_distribution<> distrib(0, count_addr[0] - 1);  // random integers in a range [a,b]
diff --git a/mindspore/ccsrc/plugin/device/gpu/CMakeLists.txt b/mindspore/ccsrc/plugin/device/gpu/CMakeLists.txt
index 258e6e624e6..f1ea468a240 100644
--- a/mindspore/ccsrc/plugin/device/gpu/CMakeLists.txt
+++ b/mindspore/ccsrc/plugin/device/gpu/CMakeLists.txt
@@ -35,6 +35,17 @@ target_link_libraries(mindspore_gpu PRIVATE mindspore::event mindspore::event_pt
 if(ENABLE_GPU)
     message("add gpu lib to mindspore_gpu")
+    if(WIN32)
+        target_link_libraries(mindspore_gpu PRIVATE cuda_ops
+                              ${CUBLAS_LIBRARY_PATH}
+                              ${CUDA_PATH}/lib/x64/curand.lib
+                              ${CUDNN_LIBRARY_PATH}
+                              ${CUDA_PATH}/lib/x64/cudart.lib
+                              ${CUDA_PATH}/lib/x64/cuda.lib
+                              ${CUDA_PATH}/lib/x64/cusolver.lib
+                              ${CUDA_PATH}/lib/x64/cufft.lib
+                              ${CUDA_PATH}/lib/x64/cusparse.lib)
+    else()
     target_link_libraries(mindspore_gpu PRIVATE cuda_ops
                           ${CUBLAS_LIBRARY_PATH}
                           ${CUDA_PATH}/lib64/libcurand.so
@@ -44,6 +55,7 @@ if(ENABLE_GPU)
                           ${CUDA_PATH}/lib64/libcusolver.so
                           ${CUDA_PATH}/lib64/libcufft.so
                           ${CUDA_PATH}/lib64/libcusparse.so)
+    endif()
 endif()
 if(ENABLE_DEBUGGER)
diff --git a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/CMakeLists.txt b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/CMakeLists.txt
index 0a0764007a2..8c26317bc70 100644
--- a/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/CMakeLists.txt
+++ b/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/CMakeLists.txt
@@ -28,6 +28,16 @@ if(ENABLE_GPU)
     endif()
     cuda_add_library(cuda_ops SHARED ${CUDA_OPS_SRC_LIST} $)
     message("add gpu lib to cuda_ops")
+    if(WIN32)
+        target_link_libraries(cuda_ops mindspore_core
+                              ${CUBLAS_LIBRARY_PATH}
+                              ${CUDA_PATH}/lib/x64/curand.lib
+                              ${CUDNN_LIBRARY_PATH}
+                              ${CUDA_PATH}/lib/x64/cudart.lib
+                              ${CUDA_PATH}/lib/x64/cuda.lib
+                              ${CUDA_PATH}/lib/x64/cusolver.lib
+                              ${CUDA_PATH}/lib/x64/cufft.lib)
+    else()
     target_link_libraries(cuda_ops mindspore_core
                           ${CUBLAS_LIBRARY_PATH}
                           ${CUDA_PATH}/lib64/libcurand.so
@@ -36,4 +46,5 @@ if(ENABLE_GPU)
                           ${CUDA_PATH}/lib64/stubs/libcuda.so
                           ${CUDA_PATH}/lib64/libcusolver.so
                           ${CUDA_PATH}/lib64/libcufft.so)
+    endif()
 endif()
\ No newline at end of file
diff --git a/mindspore/ccsrc/ps/core/communicator/request_process_result_code.h b/mindspore/ccsrc/ps/core/communicator/request_process_result_code.h
index e5863176777..852a64a6a40 100644
--- a/mindspore/ccsrc/ps/core/communicator/request_process_result_code.h
+++ b/mindspore/ccsrc/ps/core/communicator/request_process_result_code.h
@@ -83,11 +83,11 @@ class RequestProcessResult {
   operator bool() const = delete;
-  RequestProcessResult &operator<(const LogStream &stream) noexcept __attribute__((visibility("default"))) {
+  RequestProcessResult &operator<(const LogStream &stream) noexcept {
     msg_ = stream.stream()->str();
     return *this;
   }
-  RequestProcessResult &operator=(const std::string &message) noexcept __attribute__((visibility("default"))) {
+  RequestProcessResult &operator=(const std::string &message) noexcept {
     msg_ = message;
     return *this;
   }
diff --git a/mindspore/core/ops/concat.cc b/mindspore/core/ops/concat.cc
index 0fccc859c8c..1f98b08e1aa 100644
--- a/mindspore/core/ops/concat.cc
+++ b/mindspore/core/ops/concat.cc
@@ -75,8 +75,9 @@ TypePtr ConcatInferType(const PrimitivePtr &primitive, const std::vectorname();
   if (!input_args[0]->isa() && !input_args[0]->isa()) {
-    MS_EXCEPTION(TypeError) << "For '" << prim_name << "', the input must be a list or tuple of tensors. But got:" << input_args[0]->ToString() << ".";
+    MS_EXCEPTION(TypeError) << "For '" << prim_name
+                            << "', the input must be a list or tuple of tensors. But got: " << input_args[0]->ToString()
+                            << ".";
   }
   auto elements = input_args[0]->isa() ? input_args[0]->cast()->elements()
diff --git a/mindspore/core/utils/log_adapter.h b/mindspore/core/utils/log_adapter.h
index d67c79b48e5..991553d6da0 100644
--- a/mindspore/core/utils/log_adapter.h
+++ b/mindspore/core/utils/log_adapter.h
@@ -27,6 +27,7 @@
 #include
 #include
 #include "utils/macros.h"
+#include "utils/os.h"
 #include "utils/overload.h"
 #include "./securec.h"
 #ifdef USE_GLOG
diff --git a/mindspore/core/utils/macros.h b/mindspore/core/utils/macros.h
index 5841018c4d1..ecef0524293 100644
--- a/mindspore/core/utils/macros.h
+++ b/mindspore/core/utils/macros.h
@@ -38,13 +38,13 @@
 #endif
 #ifdef _MSC_VER
-#define NO_RETURN __declspec(noreturn)
+#define NO_RETURN
 #else
 #define NO_RETURN __attribute__((noreturn))
 #endif
 #ifdef _MSC_VER
-#define ALWAYS_INLINE __declspec(__forceinline)
+#define ALWAYS_INLINE
 #else
 #define ALWAYS_INLINE __attribute__((__always_inline__))
 #endif
diff --git a/mindspore/core/utils/system/file_system.h b/mindspore/core/utils/system/file_system.h
index 516b8176fa8..d5280c8ab68 100644
--- a/mindspore/core/utils/system/file_system.h
+++ b/mindspore/core/utils/system/file_system.h
@@ -17,7 +17,6 @@
 #ifndef MINDSPORE_CORE_UTILS_SYSTEM_FILE_SYSTEM_H_
 #define MINDSPORE_CORE_UTILS_SYSTEM_FILE_SYSTEM_H_
-#include
 #include
 #include
 #include
@@ -28,6 +27,7 @@
 #include
 #include "utils/system/base.h"
 #include "utils/log_adapter.h"
+#include "utils/os.h"
 #include "include/common/debug/common.h"
 namespace mindspore {