!31298 build libmindspore_backend.so

Merge pull request !31298 from zhoufeng/backend-frontend-decoupling
i-robot 2022-03-18 09:41:51 +00:00 committed by Gitee
commit dc5f77f585
No known key found for this signature in database
GPG Key ID: 173E9B9CA92EEF8F
353 changed files with 1456 additions and 1107 deletions

View File

@ -90,7 +90,7 @@ install(
)
install(
TARGETS mindspore_core mindspore_common
TARGETS mindspore_core mindspore_common mindspore_backend
DESTINATION ${INSTALL_LIB_DIR}
COMPONENT mindspore
)

View File

@ -67,7 +67,7 @@ install(
)
install(
TARGETS mindspore_core mindspore_common
TARGETS mindspore_core mindspore_common mindspore_backend
DESTINATION ${INSTALL_LIB_DIR}
COMPONENT mindspore
)

View File

@ -27,7 +27,7 @@ install(
)
install(
TARGETS mindspore_core mindspore_common
TARGETS mindspore_core mindspore_common mindspore_backend
DESTINATION ${INSTALL_LIB_DIR}
COMPONENT mindspore
)

View File

@ -74,7 +74,7 @@ install(
)
install(
TARGETS mindspore_core mindspore_common
TARGETS mindspore_core mindspore_common mindspore_backend
DESTINATION ${INSTALL_LIB_DIR}
COMPONENT mindspore
)

View File

@ -244,35 +244,7 @@ set(SUB_COMP
frontend/operator
pipeline/jit
pipeline/pynative
debug pybind_api
profiler ps fl distributed
kernel
common/mem_reuse
backend/common/optimizer
backend/common/pass
backend/common/session
backend/common/somas
common/graph_kernel
backend/graph_compiler
runtime/collective
runtime/device
runtime/graph_scheduler
runtime/hardware
runtime/recovery
runtime/pynative
plugin/device/ascend/hal/device
plugin/device/ascend/hal/hardware
plugin/device/ascend/hal/hccl_adapter
plugin/device/ascend/kernel
plugin/device/ascend/optimizer
plugin/device/cpu/hal/device
plugin/device/cpu/hal/hardware
plugin/device/cpu/kernel
plugin/device/cpu/optimizer
plugin/device/gpu/hal/device
plugin/device/gpu/hal/hardware
plugin/device/gpu/kernel
plugin/device/gpu/optimizer
pybind_api
)
foreach(_comp ${SUB_COMP})
@ -287,10 +259,12 @@ endforeach()
set_property(SOURCE ${SUB_OBJECTS_SRC} PROPERTY COMPILE_DEFINITIONS SUBMODULE_ID=mindspore::SubModuleId::SM_ME)
add_library(mindspore STATIC ${SUB_OBJECTS_SRC})
add_subdirectory(debug)
set(COMMON_SUB_COMP
transform/graph_ir
utils
common
common/debug
)
foreach(_comp ${COMMON_SUB_COMP})
@ -327,6 +301,90 @@ if(NOT WIN32)
target_link_libraries(mindspore PUBLIC dl)
endif()
set(BACKEND_SUB_COMP
profiler
ps
fl
distributed
kernel
common/mem_reuse
backend/common/optimizer
backend/common/pass
backend/common/session
backend/common/somas
common/graph_kernel
backend/graph_compiler
runtime/collective
runtime/device
runtime/graph_scheduler
runtime/hardware
runtime/pynative
runtime/recovery
plugin/device/ascend/hal/device
plugin/device/ascend/hal/hardware
plugin/device/ascend/hal/hccl_adapter
plugin/device/ascend/kernel
plugin/device/ascend/optimizer
plugin/device/cpu/hal/device
plugin/device/cpu/hal/hardware
plugin/device/cpu/kernel
plugin/device/cpu/optimizer
plugin/device/gpu/hal/device
plugin/device/gpu/hal/hardware
plugin/device/gpu/kernel
plugin/device/gpu/optimizer
)
foreach(_comp ${BACKEND_SUB_COMP})
add_subdirectory(${_comp})
string(REPLACE "/" "_" sub ${_comp})
if(TARGET _mindspore_${sub}_obj)
list(APPEND BACKEND_SUB_OBJECTS_SRC $<TARGET_OBJECTS:_mindspore_${sub}_obj>)
add_dependencies(_mindspore_${sub}_obj proto_input)
if(CMAKE_SYSTEM_NAME MATCHES "Windows")
target_compile_definitions(_mindspore_${sub}_obj PRIVATE BACKEND_DLL)
endif()
endif()
endforeach()
set_property(SOURCE ${BACKEND_SUB_OBJECTS_SRC} PROPERTY COMPILE_DEFINITIONS SUBMODULE_ID=mindspore::SubModuleId::SM_ME)
add_library(mindspore_backend SHARED ${BACKEND_SUB_OBJECTS_SRC})
if(MODE_ASCEND_ACL)
add_library(mindspore_backend_static STATIC ${BACKEND_SUB_OBJECTS_SRC})
endif()
if(CMAKE_SYSTEM_NAME MATCHES "Windows")
target_link_libraries(mindspore_backend PRIVATE mindspore::pybind11_module mindspore_debug)
else()
target_link_libraries(mindspore_backend PRIVATE mindspore_debug)
endif()
target_link_libraries(mindspore_backend PRIVATE mindspore_core mindspore_common proto_input mindspore::protobuf)
target_link_libraries(mindspore_backend PRIVATE securec)
if(CMAKE_SYSTEM_NAME MATCHES "Darwin")
set_target_properties(mindspore_backend PROPERTIES MACOSX_RPATH ON)
set_target_properties(mindspore_backend PROPERTIES INSTALL_RPATH @loader_path)
else()
set_target_properties(mindspore_backend PROPERTIES INSTALL_RPATH $ORIGIN)
endif()
if(ENABLE_CPU AND NOT WIN32)
target_link_libraries(mindspore_backend PRIVATE ps_cache)
endif()
if(ENABLE_CPU)
target_link_libraries(mindspore_backend PRIVATE mindspore::dnnl mindspore::mkldnn nnacl)
endif()
if(ENABLE_GPU)
message("add gpu lib to mindspore_backend")
target_link_libraries(mindspore_backend PRIVATE gpu_cuda_lib gpu_queue cublas cuda_ops
${CUDA_PATH}/lib64/libcurand.so
${CUDNN_LIBRARY_PATH}
${CUDA_PATH}/lib64/libcudart.so
${CUDA_PATH}/lib64/stubs/libcuda.so
${CUDA_PATH}/lib64/libcusolver.so
${CUDA_PATH}/lib64/libcufft.so)
if(${CMAKE_SYSTEM_NAME} MATCHES "Linux")
target_link_libraries(mindspore_backend PRIVATE mindspore::crypto mindspore::ssl)
endif()
endif()
if(MODE_ASCEND_ALL OR MODE_ASCEND_ACL)
# common env paths
if(DEFINED ENV{D_LINK_PATH})
@ -386,7 +444,7 @@ if(MODE_ASCEND_ALL)
find_library(OPT_FEATURE opt_feature ${ASCEND_RUNTIME_PATH} ${ASCEND_TOOLKIT_RUNTIME_PATH}
${ASCEND_RUNTIME_NEW_PATH} ${ASCEND_TOOLKIT_RUNTIME_NEW_PATH})
target_link_libraries(mindspore PUBLIC ${RUNTIME_LIB} ${TSDCLIENT} ${DATATRANSFER} ${ERROR_MANAGER}
target_link_libraries(mindspore_backend PUBLIC ${RUNTIME_LIB} ${TSDCLIENT} ${DATATRANSFER} ${ERROR_MANAGER}
-Wl,--no-as-needed ${OPTILING} ${PLATFORM} ${ACL} ${ACL_TDT_CHANNEL} ${OPT_FEATURE} ${PROFILING})
target_link_libraries(mindspore PUBLIC -Wl,--start-group proto_input mindspore::protobuf -Wl,--end-group)
elseif(CMAKE_SYSTEM_NAME MATCHES "Windows")
@ -437,13 +495,13 @@ set_target_properties(_c_expression PROPERTIES INSTALL_RPATH ${MINDSPORE_RPATH})
if(CMAKE_SYSTEM_NAME MATCHES "Windows")
target_link_libraries(mindspore PUBLIC mindspore::pybind11_module)
target_link_libraries(_c_expression PRIVATE -Wl,--whole-archive mindspore -Wl,--no-whole-archive mindspore_core
mindspore_common)
mindspore_common mindspore_backend)
elseif(CMAKE_SYSTEM_NAME MATCHES "Darwin")
target_link_libraries(mindspore PUBLIC proto_input mindspore::protobuf
mindspore::event mindspore::event_pthreads mindspore::event_openssl mindspore::eigen mindspore::json)
target_link_libraries(mindspore PUBLIC mindspore::event_core ps_cache)
target_link_libraries(_c_expression PRIVATE -Wl,-all_load mindspore proto_input -Wl,-noall_load mindspore_core
mindspore_common)
mindspore_common mindspore_backend)
target_link_libraries(_c_expression PRIVATE mindspore::pybind11_module)
else()
if(ENABLE_CPU AND NOT WIN32)
@ -455,7 +513,7 @@ else()
endif()
endif()
target_link_libraries(_c_expression PRIVATE -Wl,--whole-archive mindspore proto_input -Wl,--no-whole-archive
mindspore_core mindspore_common)
mindspore_core mindspore_common mindspore_backend)
target_link_libraries(_c_expression PRIVATE mindspore::pybind11_module)
endif()
@ -468,14 +526,6 @@ if(${CMAKE_SYSTEM_NAME} MATCHES "Linux")
endif()
if(ENABLE_GPU)
message("add gpu lib to c_expression")
target_link_libraries(_c_expression PRIVATE gpu_cuda_lib gpu_queue cublas cuda_ops
${CUDA_PATH}/lib64/libcurand.so
${CUDNN_LIBRARY_PATH}
${CUDA_PATH}/lib64/libcudart.so
${CUDA_PATH}/lib64/stubs/libcuda.so
${CUDA_PATH}/lib64/libcusolver.so
${CUDA_PATH}/lib64/libcufft.so)
if(${CMAKE_SYSTEM_NAME} MATCHES "Linux")
target_link_libraries(_c_expression PRIVATE mindspore::crypto mindspore::ssl)
endif()
@ -514,4 +564,3 @@ if(ENABLE_D)
endif()
add_subdirectory(cxx_api)
# include(${CMAKE_CURRENT_SOURCE_DIR}/sharedlib.cmake)

View File

@ -35,7 +35,7 @@
#include "backend/common/pass/sparse_process.h"
#include "backend/common/pass/insert_assign_for_custom_op.h"
#include "utils/ms_context.h"
#include "debug/anf_ir_dump.h"
#include "include/common/debug/anf_ir_dump.h"
namespace mindspore {
namespace opt {

View File

@ -21,6 +21,7 @@
#include "utils/hash_map.h"
#include "utils/hash_set.h"
#include "utils/ms_utils.h"
#include "include/backend/visible.h"
namespace mindspore {
namespace opt {
@ -47,7 +48,7 @@ class ConstInputToAttrInfoRegister {
mindspore::HashSet<size_t> input_attr_set_;
};
class ConstInputToAttrInfoRegistry {
class BACKEND_EXPORT ConstInputToAttrInfoRegistry {
public:
static ConstInputToAttrInfoRegistry &Instance();
void Register(const ConstInputToAttrInfoRegister &reg);
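Every symbol that must stay callable from _c_expression and other consumers of the new shared library gains a BACKEND_EXPORT annotation from the new header include/backend/visible.h, as on the registry class above. That header itself is not part of this diff; below is a minimal sketch of the usual export-macro idiom it presumably follows, keyed off the BACKEND_DLL define that the CMake earlier in this commit sets when building the Windows object libraries.

// Hypothetical sketch of include/backend/visible.h; the real header is
// not shown in this diff.
#ifndef MINDSPORE_CCSRC_INCLUDE_BACKEND_VISIBLE_H_
#define MINDSPORE_CCSRC_INCLUDE_BACKEND_VISIBLE_H_
#if defined(_WIN32)
#ifdef BACKEND_DLL
// Building libmindspore_backend itself: publish the symbol.
#define BACKEND_EXPORT __declspec(dllexport)
#else
// Consuming the library: import the symbol.
#define BACKEND_EXPORT __declspec(dllimport)
#endif
#else
// ELF/Mach-O: mark the symbol with default visibility.
#define BACKEND_EXPORT __attribute__((visibility("default")))
#endif
#endif  // MINDSPORE_CCSRC_INCLUDE_BACKEND_VISIBLE_H_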

View File

@ -26,6 +26,7 @@
#include "backend/common/session/kernel_graph.h"
#include "utils/ms_utils.h"
#include "backend/common/optimizer/pattern_engine.h"
#include "include/backend/visible.h"
namespace mindspore {
namespace opt {
@ -170,7 +171,7 @@ void CreateMultipleOutputsOfAnfNode(const FuncGraphPtr &kernel_graph, const AnfN
tensor::TensorPtr CreateTensorWithValueTuple(const ValueTuplePtr &value_tuple_ptr, const TypePtr &type_ptr,
size_t data_length);
tensor::TensorPtr CreateTupleTensor(const ValueTuplePtr &value_tuple);
BACKEND_EXPORT tensor::TensorPtr CreateTupleTensor(const ValueTuplePtr &value_tuple);
bool IsAllNopNode(const session::KernelGraph *const graph);
@ -238,7 +239,7 @@ std::vector<int64_t> GetNodeOutputUsedNum(const session::KernelGraph &kernel_gra
int64_t GetNodeOutputTotalUsedNum(const session::KernelGraph &kernel_graph, const AnfNodePtr &node);
// Get custom operator attr input indexes
void GetCustomOpAttrIndex(const PrimitivePtr &primitive, mindspore::HashSet<size_t> *indexes);
BACKEND_EXPORT void GetCustomOpAttrIndex(const PrimitivePtr &primitive, mindspore::HashSet<size_t> *indexes);
} // namespace opt
} // namespace mindspore
#endif // MINDSPORE_CCSRC_BACKEND_OPTIMIZER_COMMON_HELPER_H_

View File

@ -29,12 +29,13 @@
#include "ir/graph_utils.h"
#include "utils/ms_utils.h"
#include "backend/common/optimizer/helper.h"
#include "include/backend/visible.h"
namespace mindspore {
namespace opt {
using PatternListType = std::initializer_list<BaseRef>;
class PatternProcessPass : public NodePass {
class BACKEND_EXPORT PatternProcessPass : public NodePass {
public:
explicit PatternProcessPass(const std::string &name = "", bool multigraph = true);
~PatternProcessPass() override = default;
@ -77,7 +78,7 @@ class MultipleOutputPatternProcessPass : public PatternProcessPass {
EquivPtr child_equiv_;
};
class GraphOptimizer {
class BACKEND_EXPORT GraphOptimizer {
public:
explicit GraphOptimizer(const std::string &name = "graph_optimizer") : name_(name) {}
virtual ~GraphOptimizer() = default;

View File

@ -21,7 +21,7 @@
#include "ir/anf.h"
#include "ir/manager.h"
#include "utils/ms_context.h"
#include "debug/anf_ir_dump.h"
#include "include/common/debug/anf_ir_dump.h"
#include "include/common/utils/anfalgo.h"
namespace mindspore {

View File

@ -24,6 +24,7 @@
#include "backend/common/optimizer/pass.h"
#include "backend/common/optimizer/node_pass.h"
#include "include/backend/visible.h"
namespace mindspore {
namespace opt {
@ -41,7 +42,7 @@ class CacheManager {
};
// @brief For optimization passes management
class PassManager {
class BACKEND_EXPORT PassManager {
public:
explicit PassManager(const std::string &name = "pm", bool run_only_once = true)
: name_(name), passes_{}, run_only_once_(run_only_once), cache_manager_(std::make_shared<CacheManager>()) {}

View File

@ -17,7 +17,7 @@
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_PASS_COMMON_SUBEXPRESSION_ELIMINATION_H_
#include <string>
#include "backend/common/optimizer/pass.h"
#include "frontend/optimizer/cse.h"
#include "include/common/utils/cse.h"
namespace mindspore {
namespace opt {

View File

@ -6,6 +6,7 @@ file(GLOB_RECURSE _SESSION_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR}
"executor.cc"
"executor_manager.cc"
"anf_runtime_algorithm.cc"
"debug_register.cc"
"single_kernel_graph.cc"
)

View File

@ -26,6 +26,7 @@
#include "include/common/utils/utils.h"
#include "include/common/utils/parallel_context.h"
#include "include/common/utils/anfalgo.h"
#include "include/common/debug/anf_dump_utils.h"
#include "runtime/device/kernel_info.h"
#include "runtime/device/device_address.h"
#include "backend/common/optimizer/helper.h"
@ -81,6 +82,41 @@ static std::map<std::string, std::pair<std::map<size_t, size_t>, std::map<size_t
{{0, 0}, {1, 1}, {2, 2}, {3, 3}, {5, 4}, {6, 5}, {7, 6}, {8, 7}, {4, 8}}}},
{prim::kPrimStridedSliceGrad->name(),
{{{0, 1}, {1, 2}, {2, 3}, {3, 4}, {4, 0}}, {{1, 0}, {2, 1}, {3, 2}, {4, 3}, {0, 4}}}}};
std::string PrintKernelFormatAndType(const std::string &fmt, const TypeId &type, const std::vector<size_t> &shape) {
std::ostringstream buffer;
buffer << "<" << TypeToShortString(type);
if (!fmt.empty()) {
buffer << "x" << fmt << shape;
}
buffer << ">";
return buffer.str();
}
struct AnfDumpHandlerRegister {
AnfDumpHandlerRegister() {
AnfDumpHandler::SetPrintInputTypeShapeFormatHandler(
[](const std::shared_ptr<AnfNode> &node, size_t idx) -> std::string {
if (node == nullptr) {
return "";
}
auto format = AnfAlgo::GetInputFormat(node, idx);
auto type = AnfAlgo::GetInputDeviceDataType(node, idx);
auto shape = AnfAlgo::GetInputDeviceShape(node, idx);
return PrintKernelFormatAndType(format, type, shape);
});
AnfDumpHandler::SetPrintOutputTypeShapeFormatHandler(
[](const std::shared_ptr<AnfNode> &node, size_t idx) -> std::string {
if (node == nullptr) {
return "";
}
auto format = AnfAlgo::GetOutputFormat(node, idx);
auto type = AnfAlgo::GetOutputDeviceDataType(node, idx);
auto shape = AnfAlgo::GetOutputDeviceShape(node, idx);
return PrintKernelFormatAndType(format, type, shape);
});
}
} callback_register;
} // namespace
AnfNodePtr AnfRuntimeAlgorithm::MakeMonadValueNode(const KernelGraphPtr &kg) {
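The AnfDumpHandlerRegister global above is the backend half of a dependency inversion: the dump utilities that move into include/common/debug no longer call AnfRuntimeAlgorithm directly, they call handlers that the backend installs at static-initialization time. A minimal sketch of the common-side counterpart follows, with the exact AnfDumpHandler interface assumed from its call sites in this diff.

#include <functional>
#include <memory>
#include <string>

class AnfNode;  // defined in mindspore/core/ir

// Sketch only; the real declaration lives in
// include/common/debug/anf_dump_utils.h.
class AnfDumpHandler {
 public:
  using Handler = std::function<std::string(const std::shared_ptr<AnfNode> &, size_t)>;
  static void SetPrintInputTypeShapeFormatHandler(Handler h) { InputHandler() = std::move(h); }
  // Called from anf_ir_dump.cc; falls back to an empty string when the
  // backend is not loaded and no handler was registered.
  static std::string PrintInputTypeShapeFormat(const std::shared_ptr<AnfNode> &node, size_t idx) {
    auto &handler = InputHandler();
    return handler ? handler(node, idx) : "";
  }
 private:
  static Handler &InputHandler() {
    static Handler handler;
    return handler;
  }
};

With the handlers installed, PrintKernelFormatAndType renders a kernel input as something like <Float32xDefaultFormat[2, 3]>; the exact shape rendering depends on the vector operator<< overload in use.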

View File

@ -37,6 +37,7 @@
#include "include/common/utils/contract.h"
#include "utils/anf_utils.h"
#include "backend/common/session/kernel_graph.h"
#include "include/backend/visible.h"
namespace mindspore {
namespace session {
@ -45,7 +46,7 @@ using DeviceAddressPtr = device::DeviceAddressPtr;
using Address = kernel::Address;
using AddressPtr = kernel::AddressPtr;
class AnfRuntimeAlgorithm {
class BACKEND_EXPORT AnfRuntimeAlgorithm {
public:
static AnfNodePtr MakeMonadValueNode(const KernelGraphPtr &kg);
static void KeepOrder(const KernelGraphPtr &kg, const AnfNodePtr &former, const AnfNodePtr &latter);

View File

@ -28,7 +28,7 @@
#include "utils/ms_context.h"
#include "utils/ordered_map.h"
#include "base/core_ops.h"
#include "debug/anf_ir_dump.h"
#include "include/common/debug/anf_ir_dump.h"
#include "pipeline/jit/base.h"
#include "backend/common/session/anf_runtime_algorithm.h"
#include "include/common/utils/anfalgo.h"

View File

@ -51,11 +51,10 @@
#include "debug/data_dump/e2e_dump.h"
#include "debug/debugger/debugger_utils.h"
#endif
#include "debug/anf_ir_utils.h"
#include "common/graph_kernel/adapter/graph_kernel_optimization.h"
#include "backend/common/session/ascend_auto_monad.h"
#include "debug/anf_ir_dump.h"
#include "debug/dump_proto.h"
#include "include/common/debug/anf_ir_dump.h"
#include "include/common/debug/dump_proto.h"
#include "abstract/utils.h"
#ifdef ENABLE_DEBUGGER
#include "debug/tensor_load.h"
@ -67,8 +66,7 @@
#include "toolchain/adx_datadump_callback.h"
#include "toolchain/adx_datadump_server.h"
#ifdef ENABLE_DUMP_IR
#include "debug/rdr/running_data_recorder.h"
#include "debug/rdr/recorder_manager.h"
#include "include/common/debug/rdr/recorder_manager.h"
#include "debug/rdr/graph_recorder.h"
#endif
#if ENABLE_CPU && ENABLE_D

View File

@ -36,9 +36,9 @@
#include "common/graph_kernel/adapter/graph_kernel_optimization.h"
#include "backend/common/pass/replace_node_by_proxy.h"
#include "backend/common/pass/erase_visit_attr.h"
#include "debug/anf_ir_dump.h"
#include "include/common/debug/anf_ir_dump.h"
#include "backend/common/optimizer/common_backend_optimization.h"
#include "debug/dump_proto.h"
#include "include/common/debug/dump_proto.h"
#ifndef ENABLE_SECURITY
#include "debug/data_dump/dump_json_parser.h"
#endif
@ -48,7 +48,6 @@
#endif
#ifdef ENABLE_DUMP_IR
#include "debug/rdr/graph_recorder.h"
#include "debug/rdr/running_data_recorder.h"
#endif
namespace mindspore {

View File

@ -33,6 +33,7 @@
#include "utils/any.h"
#include "include/common/utils/comm_manager.h"
#include "include/common/utils/contract.h"
#include "include/backend/visible.h"
namespace mindspore {
namespace session {
@ -152,7 +153,7 @@ class ExitTask : public Task {
enum class ExecutorEvent { kClear, kRunGraphFinished, kException };
class Executor {
class BACKEND_EXPORT Executor {
public:
Executor(const std::string &device_name, uint32_t device_id);
~Executor();

View File

@ -17,6 +17,11 @@
#include "include/common/thread_pool.h"
namespace mindspore {
namespace session {
ExecutorManager &ExecutorManager::Instance() {
static ExecutorManager instance{};
return instance;
}
std::shared_ptr<Executor> ExecutorManager::GetExecutor(const std::string &device_name, uint32_t device_id) {
std::string device_key = device_name + "_" + std::to_string(device_id);
auto iter = executors_.find(device_key);

View File

@ -20,15 +20,14 @@
#include <string>
#include <memory>
#include "backend/common/session/executor.h"
#include "include/backend/visible.h"
namespace mindspore {
namespace session {
class Executor;
class ExecutorManager {
class BACKEND_EXPORT ExecutorManager {
public:
static ExecutorManager &Instance() {
static ExecutorManager instance{};
return instance;
}
static ExecutorManager &Instance();
std::shared_ptr<Executor> GetExecutor(const std::string &device_name, uint32_t device_id);
void OnEvent(const ExecutorEvent &event);
void Clear();
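Together with the executor_manager.cc hunk above, this moves the Meyers singleton out of the header. Defined inline, the function-local static can be materialized once per DLL on Windows, so libmindspore_backend.so and its consumers could each see a different "singleton". Declaring Instance() in the header and defining it once in the .cc pins the object to the exporting library. The shape of the fix, condensed from the two hunks:

// executor_manager.h (declaration only)
class BACKEND_EXPORT ExecutorManager {
 public:
  static ExecutorManager &Instance();
};

// executor_manager.cc (the one definition, compiled into
// libmindspore_backend.so, so one instance process-wide)
ExecutorManager &ExecutorManager::Instance() {
  static ExecutorManager instance{};
  return instance;
}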

View File

@ -59,8 +59,8 @@
#include "backend/common/pass/optimize_updatestate.h"
#include "backend/common/pass/adjust_depend_for_parallel_optimizer_recompute_all_gather.h"
#include "runtime/device/ms_device_shape_transfer.h"
#include "debug/anf_ir_dump.h"
#include "debug/dump_proto.h"
#include "include/common/debug/anf_ir_dump.h"
#include "include/common/debug/dump_proto.h"
#ifdef ENABLE_DEBUGGER
#include "debug/data_dump/e2e_dump.h"
#include "debug/data_dump/dump_json_parser.h"
@ -89,9 +89,6 @@
#include "ps/util.h"
#include "ps/ps_cache/ps_cache_manager.h"
#endif
#ifdef ENABLE_DUMP_IR
#include "debug/rdr/running_data_recorder.h"
#endif
namespace mindspore {
namespace session {

View File

@ -32,6 +32,7 @@
#include "ir/graph_utils.h"
#include "include/common/utils/contract.h"
#include "runtime/device/kernel_info.h"
#include "include/backend/visible.h"
namespace mindspore {
namespace session {
@ -52,7 +53,7 @@ struct KernelWithIndexCmp {
using DeviceAddressType = device::DeviceAddressType;
using KernelMapTensor = std::map<session::KernelWithIndex, BaseRef, session::KernelWithIndexCmp>;
class KernelGraph : public FuncGraph {
class BACKEND_EXPORT KernelGraph : public FuncGraph {
public:
KernelGraph() : graph_id_(0), start_label_(nullptr), end_goto_(nullptr), current_epoch_(0), is_dynamic_shape_(false) {
inputs_ = std::make_shared<std::vector<AnfNodePtr>>();

View File

@ -41,8 +41,8 @@
#include "ir/anf.h"
#include "ir/func_graph_cloner.h"
#include "include/common/utils/utils.h"
#include "debug/anf_ir_dump.h"
#include "debug/dump_proto.h"
#include "include/common/debug/anf_ir_dump.h"
#include "include/common/debug/dump_proto.h"
#include "utils/file_utils.h"
#include "utils/trace_base.h"
#include "include/common/utils/parallel_context.h"
@ -63,8 +63,8 @@
#include "debug/debugger/proto_exporter_stub.h"
#endif
#ifdef ENABLE_DUMP_IR
#include "debug/rdr/running_data_recorder.h"
#include "debug/rdr/recorder_manager.h"
#include "debug/rdr/graph_exec_order_recorder.h"
#include "include/common/debug/rdr/recorder_manager.h"
#include "debug/rdr/graph_recorder.h"
#include "runtime/hardware/device_context_manager.h"
#endif

View File

@ -39,6 +39,7 @@
#endif
#include "runtime/hardware/device_context.h"
#include "backend/common/session/pynative_task_manager.h"
#include "include/backend/visible.h"
namespace mindspore {
namespace runtime {
@ -96,7 +97,7 @@ struct GraphOutputInfo {
class Executor;
class SessionBasic : public std::enable_shared_from_this<SessionBasic> {
class BACKEND_EXPORT SessionBasic : public std::enable_shared_from_this<SessionBasic> {
public:
SessionBasic() : context_(nullptr), summary_callback_(nullptr), device_id_(0) {
#if defined(ENABLE_DEBUGGER) && !defined(_WIN32) && !defined(_WIN64)
@ -366,8 +367,8 @@ class SessionBasic : public std::enable_shared_from_this<SessionBasic> {
using SessionPtr = std::shared_ptr<session::SessionBasic>;
using NamedSummaryOutputs = std::map<std::string, std::pair<AnfNodePtr, int>>;
} // namespace session
void DumpGraphExeOrder(const std::string &file_name, const std::string &target_dir,
const std::vector<CNodePtr> &execution_order);
BACKEND_EXPORT void DumpGraphExeOrder(const std::string &file_name, const std::string &target_dir,
const std::vector<CNodePtr> &execution_order);
uint32_t GetRankId();
} // namespace mindspore
#endif // MINDSPORE_CCSRC_BACKEND_SESSION_SESSION_BASIC_H

View File

@ -23,10 +23,12 @@
#include <utility>
#include "utils/ms_utils.h"
#include "backend/common/session/session_basic.h"
#include "include/backend/visible.h"
namespace mindspore {
namespace session {
using SessionCreator = std::function<std::shared_ptr<SessionBasic>()>;
class SessionFactory {
class BACKEND_EXPORT SessionFactory {
public:
static SessionFactory &Get();
void Register(const std::string &device_name, SessionCreator &&session_creator);

View File

@ -31,9 +31,9 @@
#endif
#include "backend/common/optimizer/helper.h"
#include "utils/ms_context.h"
#include "debug/common.h"
#include "include/common/debug/common.h"
#ifdef ENABLE_DUMP_IR
#include "debug/rdr/running_data_recorder.h"
#include "debug/rdr/string_recorder.h"
#endif
#include "include/common/thread_pool.h"
#ifndef ENABLE_SECURITY

View File

@ -23,7 +23,7 @@
#include "backend/common/somas/somas_solver_core.h"
#include "backend/common/somas/somas_solver_pre.h"
#include "debug/common.h"
#include "include/common/debug/common.h"
namespace mindspore {
namespace somas {

View File

@ -734,9 +734,7 @@ void RunControlOperator(const std::shared_ptr<GraphCompiler> &graph_compiler, co
VectorRef args;
GetControlOpInput(graph_compiler, cnode, kernel, op_output_map, parameter_index, graph_inputs, input_tensor_info,
&args);
auto py_prim = prim->cast<PrimitivePyPtr>();
MS_EXCEPTION_IF_NULL(py_prim);
BaseRef out = py_prim->RunHookFunction(args);
BaseRef out = python_adapter::PyAdapterCallback::RunPrimitivePyHookFunction(prim, args);
// Convert pyobject output to tensor.
if (utils::isa<PyObjectRef>(out)) {
PyObjectRef py_ref = utils::cast<PyObjectRef>(out);
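This hunk removes the backend's direct downcast to PrimitivePy, a pybind11-backed type, and routes the hook call through python_adapter::PyAdapterCallback instead, so libmindspore_backend.so carries no hard dependency on the Python frontend; the same class also fronts RunPrimitivePyHookFunction and PyDataToValue in vm.cc later in this diff. The callback class is not shown here; a minimal sketch of the indirection, with names and signatures assumed from the call sites:

#include <functional>

// Sketch; the real PyAdapterCallback lives on the frontend side.
class PyAdapterCallback {
 public:
  using HookFunc = std::function<BaseRef(const PrimitivePtr &, const VectorRef &)>;
  // Installed once by the Python frontend during startup.
  static void SetRunPrimitivePyHookFunctionHandler(HookFunc fn) { Hook() = std::move(fn); }
  // Called by the backend without touching PrimitivePy or pybind11.
  static BaseRef RunPrimitivePyHookFunction(const PrimitivePtr &prim, const VectorRef &args) {
    return Hook()(prim, args);
  }
 private:
  static HookFunc &Hook() {
    static HookFunc fn;
    return fn;
  }
};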

View File

@ -34,6 +34,7 @@
#include "runtime/hardware/device_context.h"
#include "runtime/graph_scheduler/graph_scheduler.h"
#include "runtime/pynative/op_task.h"
#include "include/backend/visible.h"
namespace mindspore {
namespace compile {
@ -53,7 +54,7 @@ enum SwitchCondStatus {
kCondAlreadyRun,
};
class Backend {
class BACKEND_EXPORT Backend {
public:
explicit Backend(const std::string &name);
@ -75,7 +76,7 @@ class Backend {
bool is_multi_graph_sink_;
};
class MsBackend : public Backend {
class BACKEND_EXPORT MsBackend : public Backend {
public:
MsBackend(const std::string &name, const std::string &target, uint32_t device_id);
~MsBackend() override = default;
@ -101,7 +102,7 @@ class MsBackend : public Backend {
mindspore::HashMap<GraphId, LinConvertResult> graph_id_map_;
};
class MindRTBackend : public Backend {
class BACKEND_EXPORT MindRTBackend : public Backend {
public:
MindRTBackend(const std::string &backend_name, const std::string &device_name, uint32_t device_id);
~MindRTBackend() override = default;

View File

@ -23,11 +23,12 @@
#include "ir/func_graph.h"
#include "ir/graph_utils.h"
#include "base/base_ref.h"
#include "include/backend/visible.h"
namespace mindspore {
extern const char kMsVm[];
extern const char kGeVm[];
extern const char kMsConvert[];
BACKEND_EXPORT extern const char kMsVm[];
BACKEND_EXPORT extern const char kGeVm[];
BACKEND_EXPORT extern const char kMsConvert[];
namespace compile {
class GraphPartition {

View File

@ -25,13 +25,13 @@
#include <vector>
#include "abstract/abstract_value.h"
#include "abstract/abstract_function.h"
#ifdef ENABLE_D
#include "include/transform/graph_ir/convert.h"
#endif
#include "ir/graph_utils.h"
#include "utils/ms_context.h"
#include "debug/trace.h"
#include "debug/anf_ir_dump.h"
#include "utils/trace_base.h"
#if ((defined ENABLE_CPU) && (!defined _WIN32))
#include "ps/ps_context.h"
#endif

View File

@ -32,6 +32,7 @@
#include "backend/graph_compiler/segment_runner.h"
#include "backend/graph_compiler/backend.h"
#include "backend/graph_compiler/graph_partition.h"
#include "include/backend/visible.h"
// mindspore namespace is the top level namespace of MindSpore project.
// Other namespace should be a sub namespace of mindspore namespace in the ME project.
@ -42,14 +43,14 @@ extern const char kGeVm[];
// compile namespace
// A sub namespace in ME to support compile related definition.
namespace compile {
extern std::vector<PrimitivePtr> nonlinear_ops;
extern std::vector<PrimitivePtr> control_ops;
const std::vector<PrimitivePtr> &GetMsNonlinearOps();
BACKEND_EXPORT extern std::vector<PrimitivePtr> nonlinear_ops;
BACKEND_EXPORT extern std::vector<PrimitivePtr> control_ops;
BACKEND_EXPORT const std::vector<PrimitivePtr> &GetMsNonlinearOps();
FuncGraphPtr WrapPrimitives(const FuncGraphPtr &graph);
using VmEvalFunc = std::function<BaseRef(const VectorRef &)>;
using VmEvalFuncPtr = std::shared_ptr<std::function<BaseRef(const VectorRef &)>>;
class CompileGraph {
class BACKEND_EXPORT CompileGraph {
public:
explicit CompileGraph(const BackendPtr &backend, const std::vector<PrimitivePtr> &cut_list = nonlinear_ops);
@ -110,7 +111,7 @@ class CompileGraph {
using CompileGraphPtr = std::shared_ptr<CompileGraph>;
// CompileGraphs is used to Convert a graph cluster into instruction lists.
class CompileGraphs {
class BACKEND_EXPORT CompileGraphs {
public:
explicit CompileGraphs(const BackendPtr &backend, const std::vector<PrimitivePtr> &cut_list = nonlinear_ops);
@ -132,10 +133,10 @@ class CompileGraphs {
BackendPtr backend_;
};
BackendPtr CreateBackend();
BACKEND_EXPORT BackendPtr CreateBackend();
// Set mindRT whether enable. GPU and CPU use mindRT currently, and other hardwares will use it in the future.
void SetMindRTEnable();
BACKEND_EXPORT void SetMindRTEnable();
} // namespace compile
} // namespace mindspore

View File

@ -108,7 +108,7 @@ BaseRef FinalVM::Ref(int64_t i) {
if (utils::isa<PyObjectRef>(insts_stack_[sp_next])) {
py::object value = utils::cast<PyObjectRef>(insts_stack_[sp_next]).object_;
MS_LOG(DEBUG) << "VM ref python:" << py::str(value);
return parse::data_converter::PyDataToValue(value);
return python_adapter::PyAdapterCallback::PyDataToValue(value);
}
MS_LOG(DEBUG) << "Ref not python :" << insts_stack_[sp_next].ToString();
return insts_stack_[sp_next];
@ -471,9 +471,7 @@ void FinalVM::InstPushPrim(const VectorRef &args) {
}
if (prim->name() == kBpropCutOpName) {
auto py_prim = prim->cast<PrimitivePyPtr>();
MS_EXCEPTION_IF_NULL(py_prim);
auto outs = py_prim->RunHookFunction(tuple);
BaseRef outs = python_adapter::PyAdapterCallback::RunPrimitivePyHookFunction(prim, tuple);
Push(outs);
} else {
auto outs = RunOperation(prim, tuple);

View File

@ -33,6 +33,7 @@
#include "ir/anf.h"
#include "base/base_ref.h"
#include "include/backend/visible.h"
namespace py = pybind11;
@ -96,7 +97,7 @@ class StructSimuSwitch : public Base {
std::ostream &operator<<(std::ostream &os, const StructSimuSwitch &other);
bool operator==(const StructSimuSwitch &lhs, const StructSimuSwitch &rhs);
class FinalVM {
class BACKEND_EXPORT FinalVM {
public:
// Create a VM with the specified instructions and backend.
explicit FinalVM(const InstSet &insts, const BackendPtr &backend);

View File

@ -2,11 +2,13 @@ if(CMAKE_SYSTEM_NAME MATCHES "Windows")
file(GLOB_RECURSE _COMMON_ALL_SRC_FILES RELATIVE ${CMAKE_CURRENT_SOURCE_DIR}
"duplex_pipe_win.cc"
"thread_pool.cc"
"api_register.cc"
)
else()
file(GLOB_RECURSE _COMMON_ALL_SRC_FILES RELATIVE ${CMAKE_CURRENT_SOURCE_DIR}
"duplex_pipe.cc"
"thread_pool.cc"
"api_register.cc"
)
endif()

View File

@ -14,12 +14,12 @@
* limitations under the License.
*/
#include "pybind_api/api_register.h"
#include "include/common/pybind_api/api_register.h"
namespace mindspore {
PybindDefineRegister &PybindDefineRegister::GetSingleton() {
static PybindDefineRegister instance;
static PybindDefineRegister instance{};
return instance;
}

View File

@ -0,0 +1,4 @@
file(GLOB_RECURSE _COMMON_DEBUG_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "*.cc")
set_property(SOURCE ${_COMMON_DEBUG_SRC_LIST} PROPERTY COMPILE_DEFINITIONS
SUBMODULE_ID=mindspore::SubModuleId::SM_DEBUG)
add_library(_mindspore_common_debug_obj OBJECT ${_COMMON_DEBUG_SRC_LIST})

View File

@ -14,7 +14,7 @@
* limitations under the License.
*/
#include "debug/anf_dump_utils.h"
#include "include/common/debug/anf_dump_utils.h"
#include "abstract/abstract_function.h"
namespace mindspore {
@ -72,4 +72,32 @@ std::string GetNodeFuncStr(const AnfNodePtr &nd) {
}
return GetAbstractFuncStr(abs_func);
}
std::string TypeToShortString(const TypeId &typeId) {
std::string label = TypeIdLabel(typeId);
std::string prefix = "kNumberType";
if (prefix.length() > label.length()) {
return label;
}
auto position = label.find(prefix);
// Position is 0 when label begins with prefix
if (position != 0) {
return label;
}
auto sub_position = position + prefix.length();
if (sub_position >= label.length()) {
return label;
}
return label.substr(sub_position);
}
std::string GetKernelNodeName(const AnfNodePtr &anf_node) {
MS_EXCEPTION_IF_NULL(anf_node);
std::string kernel_name = anf_node->fullname_with_scope();
if (kernel_name.empty()) {
kernel_name = anf_node->ToString();
}
MS_LOG(DEBUG) << "Full scope kernel name is " << kernel_name << ".";
return kernel_name;
}
} // namespace mindspore
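TypeToShortString simply strips a leading "kNumberType" prefix from the TypeIdLabel spelling and leaves any other label untouched; for example (assuming TypeIdLabel returns the enum name verbatim):

// TypeToShortString(kNumberTypeFloat32) == "Float32"
// TypeToShortString(kNumberTypeInt64)   == "Int64"
// TypeToShortString(kObjectTypeTensorType) is returned unchanged,
// since the label does not start with "kNumberType".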

View File

@ -13,7 +13,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "debug/anf_ir_dump.h"
#include "include/common/debug/anf_ir_dump.h"
#if defined(_WIN32) || defined(_WIN64)
#include <stdlib.h>
#endif
@ -23,45 +23,13 @@
#include "utils/hash_map.h"
#include "ir/primitive.h"
#include "ir/func_graph.h"
#include "runtime/device/kernel_info.h"
#include "ir/graph_utils.h"
#include "backend/common/session/anf_runtime_algorithm.h"
#include "include/common/utils/anfalgo.h"
#include "frontend/parallel/ops_info/operator_info.h"
#include "pipeline/jit/base.h"
#include "debug/trace.h"
#include "debug/anf_dump_utils.h"
#include "utils/trace_base.h"
#include "utils/anf_utils.h"
#include "include/common/utils/anfalgo.h"
#include "include/common/debug/anf_dump_utils.h"
namespace mindspore {
const std::string ToShortString(const TypeId &typeId) {
std::string label = TypeIdLabel(typeId);
std::string prefix = "kNumberType";
if (prefix.length() > label.length()) {
return label;
}
auto position = label.find(prefix);
// Position is 0 when label begins with prefix
if (position != 0) {
return label;
}
auto sub_position = position + prefix.length();
if (sub_position >= label.length()) {
return label;
}
return label.substr(sub_position);
}
void PrintKernelFormatAndType(std::ostringstream &buffer, const std::string &fmt, const TypeId &type,
const std::vector<size_t> &shape) {
buffer << "<" << ToShortString(type);
if (!fmt.empty()) {
buffer << "x" << fmt << shape;
}
buffer << ">";
}
void PrintTupleNodeUsedFlags(std::ostringstream &buffer, const abstract::AbstractSequencePtr &sequence_abs) {
if (sequence_abs == nullptr || sequence_abs->sequence_nodes() == nullptr || sequence_abs->sequence_nodes()->empty()) {
return;
@ -190,10 +158,7 @@ void DumpKernelInfo(const CNodePtr &node, const std::shared_ptr<SubGraphIRInfo>
if (i != 0) {
gsub->buffer << ", ";
}
auto format = AnfAlgo::GetInputFormat(node, i);
auto type = AnfAlgo::GetInputDeviceDataType(node, i);
auto shape = AnfAlgo::GetInputDeviceShape(node, i);
PrintKernelFormatAndType(gsub->buffer, format, type, shape);
gsub->buffer << AnfDumpHandler::PrintInputTypeShapeFormat(node, i);
}
gsub->buffer << ") -> (";
size_t output_num = common::AnfAlgo::GetOutputTensorNum(node);
@ -201,10 +166,7 @@ void DumpKernelInfo(const CNodePtr &node, const std::shared_ptr<SubGraphIRInfo>
if (i != 0) {
gsub->buffer << ", ";
}
auto format = AnfAlgo::GetOutputFormat(node, i);
auto type = AnfAlgo::GetOutputDeviceDataType(node, i);
auto shape = AnfAlgo::GetOutputDeviceShape(node, i);
PrintKernelFormatAndType(gsub->buffer, format, type, shape);
gsub->buffer << AnfDumpHandler::PrintOutputTypeShapeFormat(node, i);
}
gsub->buffer << ")";
gsub->buffer << std::endl;
@ -235,10 +197,7 @@ int32_t DumpParams(const FuncGraphPtr &graph, std::ostringstream &buffer, Ordere
auto kernel_info = p->kernel_info();
if (kernel_info != nullptr && kernel_info->has_build_info()) {
buffer << " : ";
auto type = AnfAlgo::GetOutputDeviceDataType(p, 0);
auto format = AnfAlgo::GetOutputFormat(p, 0);
auto shape = AnfAlgo::GetOutputDeviceShape(p, 0);
PrintKernelFormatAndType(buffer, format, type, shape);
buffer << AnfDumpHandler::PrintOutputTypeShapeFormat(p, 0);
buffer << " : IsWeight:" << std::boolalpha << common::AnfAlgo::IsParameterWeight(parameter_ptr);
}
buffer << std::endl;
@ -376,23 +335,15 @@ void DumpParallelInfo(const CNodePtr &node, const std::shared_ptr<SubGraphIRInfo
return;
}
auto operator_info = node->user_data<parallel::OperatorInfo>();
if (operator_info == nullptr) {
ValuePtr in_tmp = AnfDumpHandler::InStrategyValue(node);
if (in_tmp == nullptr) {
return;
}
auto in_strategy = operator_info->strategy();
if (in_strategy == nullptr) {
return;
}
ValuePtr in_tmp = MakeValue(in_strategy->GetInputDim());
gsub->buffer << " {in_strategy: ";
gsub->buffer << in_tmp->ToString();
auto out_strategy = operator_info->out_strategy();
if (out_strategy) {
ValuePtr out_tmp = MakeValue(out_strategy->GetInputDim());
ValuePtr out_tmp = AnfDumpHandler::OutStrategyValue(node);
if (out_tmp != nullptr) {
gsub->buffer << ", out_strategy: ";
gsub->buffer << out_tmp->ToString();
}

View File

@ -14,7 +14,7 @@
* limitations under the License.
*/
#include "debug/common.h"
#include "include/common/debug/common.h"
#include <memory>
#include <iomanip>
@ -301,6 +301,15 @@ std::string Common::GetCompilerCachePath() {
return compile_cache_dir;
}
bool Common::GetDebugTerminate() { return debugger_terminate_; }
bool Common::GetDebugExitSuccess() { return exit_success_; }
void Common::DebugTerminate(bool val, bool exit_success) {
debugger_terminate_ = val;
exit_success_ = exit_success;
}
struct GlogLogDirRegister {
GlogLogDirRegister() {
const char *logtostderr = std::getenv("GLOG_logtostderr");

View File

@ -14,22 +14,20 @@
* limitations under the License.
*/
#include "debug/draw.h"
#include "include/common/debug/draw.h"
#include <iostream>
#include <iterator>
#include <vector>
#include <string>
#include <algorithm>
#include "ir/meta_func_graph.h"
#include "ir/param_info.h"
#include "ir/primitive.h"
#include "ir/graph_utils.h"
#include "include/common/utils/utils.h"
#include "frontend/operator/composite/composite.h"
#include "frontend/parallel/ops_info/operator_info.h"
#include "pipeline/jit/parse/resolve.h"
#include "ir/tensor.h"
#include "include/common/utils/utils.h"
#include "include/common/debug/anf_dump_utils.h"
namespace mindspore {
// namespace to support debug utils
@ -44,20 +42,6 @@ std::string ValueType(const ValueNodePtr &node) {
MS_EXCEPTION_IF_NULL(v);
return v->type_name();
}
std::string ReplaceSpecialChar(const std::string &str) {
std::ostringstream oss;
for (size_t i = 0; i < str.size(); i++) {
if (str[i] == '<') {
oss << "「";
} else if (str[i] == '>') {
oss << "」";
} else {
oss << str[i];
}
}
return oss.str();
}
} // namespace
// API of debug utils
@ -400,12 +384,8 @@ static void DrawValueNode(Graphviz *const graph_obj, const ValueNodePtr &node) {
<< "'>";
graph_obj->buffer() << "<tr><td bgcolor='white'>" << ValueType(node) << "</td></tr>"
<< "<tr><td>";
if (IsValueNode<MetaFuncGraph>(node)) {
graph_obj->buffer() << node->value()->cast<MetaFuncGraphPtr>()->name();
} else if (IsValueNode<parse::NameSpace>(node)) {
graph_obj->buffer() << node->value()->cast<parse::NameSpacePtr>()->name();
} else if (IsValueNode<parse::Symbol>(node)) {
graph_obj->buffer() << ReplaceSpecialChar(node->value()->cast<parse::SymbolPtr>()->name());
if (std::string value_node_str = AnfDumpHandler::ValueNodeStr(node); !value_node_str.empty()) {
graph_obj->buffer() << value_node_str;
} else {
std::ostringstream ss;
ss << node->value()->ToString();
@ -448,18 +428,16 @@ static void DrawParallelInfo(Graphviz *const graph_obj, const CNodePtr &node) {
if (graph_obj == nullptr || node == nullptr) {
return;
}
auto distributed_operation_info = node->user_data<parallel::OperatorInfo>();
if (distributed_operation_info != nullptr) {
auto strategyPtr = distributed_operation_info->strategy();
if (strategyPtr != nullptr) {
auto num = node->inputs().size();
graph_obj->buffer() << "<tr><td colspan='" << num << "' ";
graph_obj->buffer() << "bgcolor='" << graph_obj->Color(node) << "'>";
std::vector<ValuePtr> temp = {MakeValue(strategyPtr->GetInputStage()), MakeValue(strategyPtr->GetInputDim())};
ValueTuplePtr strategy_tuple = std::make_shared<ValueTuple>(temp);
graph_obj->buffer() << "Strategy " << strategy_tuple->ToString();
graph_obj->buffer() << "</td></tr>" << std::endl;
}
auto in_value = AnfDumpHandler::InStrategyValue(node);
auto in_stage_value = AnfDumpHandler::InStrategyStageValue(node);
if (in_value != nullptr && in_stage_value != nullptr) {
auto num = node->inputs().size();
graph_obj->buffer() << "<tr><td colspan='" << num << "' ";
graph_obj->buffer() << "bgcolor='" << graph_obj->Color(node) << "'>";
ValueTuplePtr strategy_tuple = std::make_shared<ValueTuple>(std::vector<ValuePtr>{in_stage_value, in_value});
graph_obj->buffer() << "Strategy " << strategy_tuple->ToString();
graph_obj->buffer() << "</td></tr>" << std::endl;
}
}

View File

@ -13,7 +13,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "debug/dump_proto.h"
#include "include/common/debug/dump_proto.h"
#include <algorithm>
#include <fstream>
@ -22,15 +22,14 @@
#include <utility>
#include <vector>
#include "debug/anf_ir_utils.h"
#include "proto/anf_ir.pb.h"
#include "ir/graph_utils.h"
#include "utils/ms_context.h"
#include "utils/symbolic.h"
#include "include/common/utils/utils.h"
#include "pipeline/jit/base.h"
#include "include/common/debug/anf_dump_utils.h"
#include "utils/anf_utils.h"
#include "mindspore/ccsrc/frontend/parallel/ops_info/operator_info.h"
#include "frontend/parallel/ops_info/ops_utils.h" // todo: use constant string now
namespace mindspore {
class ProtoExporter {
@ -368,15 +367,11 @@ void ProtoExporter::GetOpNodeTypeAndAttrs(const FuncGraphPtr &, const CNodePtr &
}
// Only CNode save the operator strategy
auto operator_info = cnode->user_data<parallel::OperatorInfo>();
if (operator_info != nullptr) {
auto strategy = operator_info->strategy();
if (strategy != nullptr) {
ValuePtr strategy_value = MakeValue(strategy->GetInputDim());
irpb::AttributeProto *attr_proto = node_proto->add_attribute();
attr_proto->set_name(mindspore::parallel::IN_STRATEGY);
SetValueToProto(strategy_value, attr_proto->mutable_value());
}
auto strategy_value = AnfDumpHandler::InStrategyValue(cnode);
if (strategy_value != nullptr) {
irpb::AttributeProto *attr_proto = node_proto->add_attribute();
attr_proto->set_name(mindspore::parallel::IN_STRATEGY);
SetValueToProto(strategy_value, attr_proto->mutable_value());
}
node_proto->set_scope(op_node->scope()->name());

View File

@ -13,12 +13,12 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "debug/env_config_parser.h"
#include "include/common/debug/env_config_parser.h"
#include <algorithm>
#include <fstream>
#include "nlohmann/json.hpp"
#include "utils/log_adapter.h"
#include "debug/common.h"
#include "include/common/debug/common.h"
#include "utils/ms_context.h"
#include "utils/convert_utils_base.h"
@ -37,6 +37,12 @@ constexpr auto KEY_MEM_REUSE = "mem_reuse";
} // namespace
namespace mindspore {
EnvConfigParser &EnvConfigParser::GetInstance() {
static EnvConfigParser instance = EnvConfigParser();
instance.Parse();
return instance;
}
#ifdef ENABLE_DUMP_IR
std::optional<bool> GetRdrEnableFromEnv() {
// get environment variable to configure RDR

View File

@ -13,12 +13,41 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "debug/rdr/base_recorder.h"
#include "debug/common.h"
#include "include/common/utils/utils.h"
#include "include/common/debug/rdr/base_recorder.h"
#include "include/common/debug/common.h"
#include "include/common/utils/comm_manager.h"
#include "include/common/debug/env_config_parser.h"
namespace mindspore {
namespace {
constexpr int kMaxNameLength = 32;
} // namespace
BaseRecorder::BaseRecorder() : module_(""), name_(""), directory_(""), filename_(""), timestamp_("") {}
BaseRecorder::BaseRecorder(const std::string &module, const std::string &name)
: module_(module), name_(name), filename_("") {
directory_ = mindspore::EnvConfigParser::GetInstance().RdrPath();
if (name.length() > kMaxNameLength) {
name_ = name.substr(0, kMaxNameLength);
MS_LOG(WARNING) << "The name length is " << name.length() << ", exceeding the limit " << kMaxNameLength
<< ". It will be intercepted as '" << name_ << "'.";
}
std::string err_msg = module_ + ":" + name_ + " set filename failed.";
if (!filename_.empty() && !Common::IsFilenameValid(filename_, MAX_FILENAME_LENGTH, err_msg)) {
filename_ = "";
}
auto sys_time = GetTimeString();
for (auto ch : sys_time) {
if (ch == '.') {
break;
}
if (ch != '-' && ch != ':') {
timestamp_.push_back(ch);
}
}
}
std::optional<std::string> BaseRecorder::GetFileRealPath(const std::string &suffix) const {
std::string filename;
if (filename_.empty()) {

View File

@ -13,12 +13,18 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "debug/rdr/recorder_manager.h"
#include "include/common/debug/rdr/recorder_manager.h"
#include <utility>
#include "debug/rdr/base_recorder.h"
#include "debug/env_config_parser.h"
#include "include/common/debug/rdr/base_recorder.h"
#include "include/common/debug/env_config_parser.h"
namespace mindspore {
RecorderManager &RecorderManager::Instance() {
static RecorderManager manager{};
manager.UpdateRdrEnable();
return manager;
}
void RecorderManager::UpdateRdrEnable() {
static bool updated = false;
if (updated) {
@ -131,4 +137,12 @@ void RecorderManager::ClearAll() {
rdr_has_record_mem_ = false;
MS_LOG(INFO) << "RDR clear all recorders.";
}
namespace RDR {
void TriggerAll() { mindspore::RecorderManager::Instance().TriggerAll(); }
void Snapshot() { mindspore::RecorderManager::Instance().Snapshot(); }
void ResetRecorder() { mindspore::RecorderManager::Instance().ClearAll(); }
} // namespace RDR
} // namespace mindspore
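The new mindspore::RDR free functions give code outside the backend a stable entry point to the running-data recorder without reaching into RecorderManager directly. A usage sketch (the surrounding function is hypothetical):

#include "include/common/debug/rdr/recorder_manager.h"

void FlushRdrOnFailure() {  // hypothetical caller
  mindspore::RDR::TriggerAll();     // fire every registered recorder
  mindspore::RDR::Snapshot();       // persist the current recorder state
  mindspore::RDR::ResetRecorder();  // then clear all recorders
}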

View File

@ -21,7 +21,7 @@
#include "ir/func_graph.h"
#include "ir/manager.h"
#include "utils/ms_context.h"
#include "debug/anf_ir_dump.h"
#include "include/common/debug/anf_ir_dump.h"
namespace mindspore::graphkernel {
void GraphKernelPassManager::Add(const opt::PassPtr &pass, unsigned int pass_level, bool supported_device) {

View File

@ -32,7 +32,7 @@
#include "common/graph_kernel/graph_kernel_helper.h"
#include "common/graph_kernel/core/graph_kernel_utils.h"
#include "backend/common/session/kernel_graph.h"
#include "debug/anf_ir_dump.h"
#include "include/common/debug/anf_ir_dump.h"
#include "kernel/common_utils.h"
namespace mindspore::graphkernel {

View File

@ -20,7 +20,7 @@
#include "ir/anf.h"
#include "ir/func_graph.h"
#include "ir/manager.h"
#include "debug/anf_ir_dump.h"
#include "include/common/debug/anf_ir_dump.h"
namespace mindspore::graphkernel {
void GraphKernelPassManager::Add(const opt::PassPtr &pass, unsigned int pass_level, bool supported_device) {

View File

@ -24,7 +24,7 @@
#include "utils/log_adapter.h"
#include "backend/common/session/anf_runtime_algorithm.h"
#include "include/common/utils/anfalgo.h"
#include "debug/anf_ir_dump.h"
#include "include/common/debug/anf_ir_dump.h"
#include "common/graph_kernel/core/graph_kernel_utils.h"
namespace mindspore::graphkernel {

View File

@ -25,7 +25,7 @@
#include <string>
#include "base/core_ops.h"
#include "ir/graph_utils.h"
#include "debug/common.h"
#include "include/common/debug/common.h"
#include "kernel/common_utils.h"
#include "backend/common/session/anf_runtime_algorithm.h"
#include "include/common/utils/anfalgo.h"

View File

@ -56,6 +56,15 @@ set(MSLIB_SRC ${CMAKE_CURRENT_SOURCE_DIR}/types.cc
${API_OPS_SRC}
)
if(ENABLE_D)
list(APPEND MSLIB_SRC
"${CMAKE_SOURCE_DIR}/mindspore/ccsrc/frontend/parallel/strategy_checkpoint/parallel_strategy_checkpoint.cc"
"${CMAKE_SOURCE_DIR}/mindspore/ccsrc/frontend/parallel/tensor_layout/array.cc"
"${CMAKE_SOURCE_DIR}/mindspore/ccsrc/frontend/parallel/tensor_layout/map.cc"
"${CMAKE_SOURCE_DIR}/mindspore/ccsrc/frontend/parallel/tensor_layout/arrangement.cc"
"${CMAKE_SOURCE_DIR}/mindspore/ccsrc/frontend/parallel/tensor_layout/shape_util.cc")
endif()
if(NOT ENABLE_TESTCASES AND NOT BUILD_LITE)
# users of shared_lib cannot find symbols in indirect dependency
set(MSLIB_SRC ${MSLIB_SRC} ${CMAKE_SOURCE_DIR}/mindspore/core/utils/status.cc)
@ -81,8 +90,9 @@ add_library(mindspore_shared_lib SHARED $<TARGET_OBJECTS:mindspore_shared_lib_ob
if(BUILD_LITE)
target_link_libraries(mindspore_shared_lib PRIVATE $<TARGET_OBJECTS:_mindspore_transform_graph_ir_obj>)
add_dependencies(mindspore_shared_lib _mindspore_transform_graph_ir_obj)
elseif(ENABLE_ACL AND NOT ENABLE_D)
target_link_libraries(mindspore_shared_lib PRIVATE $<TARGET_OBJECTS:_mindspore_backend_graph_compiler_obj>)
elseif(MODE_ASCEND_ACL)
target_link_libraries(mindspore_shared_lib PRIVATE $<TARGET_OBJECTS:_mindspore_backend_graph_compiler_obj>
mindspore_backend_static mindspore_debug)
endif()
if(NOT BUILD_LITE)
set_target_properties(mindspore_shared_lib PROPERTIES OUTPUT_NAME mindspore)
@ -90,7 +100,7 @@ endif()
if(ENABLE_D OR ENABLE_GPU)
target_link_libraries(mindspore_shared_lib PRIVATE -Wl,--as-needed ${PYTHON_LIBRARIES} ${SECUREC_LIBRARY}
-Wl,--whole-archive mindspore -Wl,--no-whole-archive mindspore_core mindspore_common proto_input
mindspore_backend mindspore_core mindspore_common proto_input
mindspore::protobuf)
else()
if(BUILD_LITE)

View File

@ -26,16 +26,53 @@
#include "backend/common/session/executor_manager.h"
#include "runtime/device/kernel_runtime_manager.h"
#include "runtime/dev.h"
#include "pipeline/jit/pipeline.h"
#include "frontend/parallel/step_parallel.h"
#include "pybind11/pybind11.h"
#include "frontend/parallel/strategy_checkpoint/parallel_strategy_checkpoint.h"
#include "include/common/utils/python_adapter.h"
namespace mindspore {
namespace {
API_FACTORY_REG(GraphCell::GraphImpl, AscendGraphImpl);
static constexpr const char *kHcclEnable = "MS_ENABLE_HCCL";
static constexpr const char *kHcclGroupFile = "PARA_GROUP_FILE";
constexpr const char *kHcclEnable = "MS_ENABLE_HCCL";
constexpr const char *kHcclGroupFile = "PARA_GROUP_FILE";
void InitHccl() {
auto ms_context = MsContext::GetInstance();
MS_EXCEPTION_IF_NULL(ms_context);
mindspore::python_adapter::set_python_env_flag(true);
uint32_t device_id = ms_context->get_param<uint32_t>(MS_CTX_DEVICE_ID);
if (ms_context->backend_policy() == "ms") {
auto runtime_instance = device::KernelRuntimeManager::Instance().GetKernelRuntime(kAscendDevice, device_id);
MS_EXCEPTION_IF_NULL(runtime_instance);
#ifndef ENABLE_SECURITY
runtime_instance->PreInit();
#endif
(void)context::OpenTsd(ms_context);
if (!runtime_instance->Init()) {
MS_LOG(EXCEPTION) << "Runtime init failed.";
}
} else {
(void)context::OpenTsd(ms_context);
}
}
bool CreateGroupsByCkptFile(const std::string &file) {
parallel::GroupInfoMap group_info_map;
if (parallel::StrategyCheckpoint::GetInstance().LoadGroupInfo(file, &group_info_map) != parallel::SUCCESS) {
return false;
}
for (const auto &[group_name, rank_ids] : group_info_map) {
if (!CommManager::GetInstance().CreateGroupSync(group_name, rank_ids)) {
MS_LOG(ERROR) << "Create group " << group_name << " rank ids " << rank_ids << " failed.";
return false;
}
}
MS_LOG(INFO) << "Create groups by checkpoint file success";
return true;
}
} // namespace
AscendGraphImpl::AscendGraphImpl()
: session_impl_(nullptr),
graph_id_(0),
@ -308,13 +345,13 @@ AscendGraphImpl::MsEnvGuard::MsEnvGuard(uint32_t device_id) {
ms_context->set_param<bool>(MS_CTX_IS_MULTI_GRAPH_SINK, true);
if (ms_context->get_param<bool>(MS_CTX_ENABLE_HCCL)) {
pipeline::InitHccl();
InitHccl();
auto para_group_file = common::GetEnv(kHcclGroupFile);
if (para_group_file.empty()) {
MS_LOG(INFO) << "Cannot get Env " << kHcclGroupFile << ", skip.";
} else {
MS_LOG(INFO) << "Get env " << kHcclGroupFile << " success: " << para_group_file;
if (!parallel::CreateGroupsByCkptFile(para_group_file)) {
if (!CreateGroupsByCkptFile(para_group_file)) {
MS_LOG(ERROR) << "CreateGroupsByCkptFile failed.";
errno_ = kMCFailed;
return;

View File

@ -19,7 +19,7 @@
#include <vector>
#include "cxx_api/model/acl/acl_model_options.h"
#include "cxx_api/model/acl/acl_vm/acl_multi_graph_session.h"
#include "debug/trace.h"
#include "utils/trace_base.h"
namespace mindspore {
namespace {

View File

@ -3,16 +3,7 @@ include_directories(${CMAKE_BINARY_DIR})
file(STRINGS "${CMAKE_SOURCE_DIR}/version.txt" MSVERSION)
add_definitions(-DMSVERSION=\"${MSVERSION}\")
set(_DEBUG_SRC_LIST
"${CMAKE_CURRENT_SOURCE_DIR}/anf_ir_dump.cc"
"${CMAKE_CURRENT_SOURCE_DIR}/anf_ir_utils.cc"
"${CMAKE_CURRENT_SOURCE_DIR}/anf_dump_utils.cc"
"${CMAKE_CURRENT_SOURCE_DIR}/draw.cc"
"${CMAKE_CURRENT_SOURCE_DIR}/dump_proto.cc"
"${CMAKE_CURRENT_SOURCE_DIR}/trace.cc"
"${CMAKE_CURRENT_SOURCE_DIR}/common.cc"
"${CMAKE_CURRENT_SOURCE_DIR}/env_config_parser.cc"
)
set(_DEBUG_SRC_LIST)
set(_OFFLINE_SRC_LIST
"${CMAKE_CURRENT_SOURCE_DIR}/debug_services.cc"
@ -57,7 +48,6 @@ if(NOT ENABLE_SECURITY)
)
if(NOT CMAKE_SYSTEM_NAME MATCHES "Windows")
list(APPEND _DEBUG_SRC_LIST
"${CMAKE_CURRENT_SOURCE_DIR}/common.cc"
"${CMAKE_CURRENT_SOURCE_DIR}/data_dump/e2e_dump.cc"
)
endif()
@ -65,7 +55,15 @@ endif()
set_property(SOURCE ${_DEBUG_SRC_LIST} ${_RDR_SRC_LIST} PROPERTY COMPILE_DEFINITIONS
SUBMODULE_ID=mindspore::SubModuleId::SM_DEBUG)
add_library(_mindspore_debug_obj OBJECT ${_DEBUG_SRC_LIST} ${_RDR_SRC_LIST})
list(APPEND _DEBUG_SRC_LIST ${_RDR_SRC_LIST})
list(LENGTH _DEBUG_SRC_LIST debug_files_size)
if(${debug_files_size} GREATER 0)
add_library(mindspore_debug STATIC ${_DEBUG_SRC_LIST})
if(CMAKE_SYSTEM_NAME MATCHES "Windows")
target_compile_definitions(mindspore_debug PRIVATE BACKEND_DLL)
endif()
add_dependencies(mindspore_debug proto_input)
endif()
if(ENABLE_DEBUGGER)
if(NOT CMAKE_SYSTEM_NAME MATCHES "Windows")
add_compile_options(-Wall -DOFFLINE_DBG_MODE -fPIC -O2)

View File

@ -16,10 +16,11 @@
#include "debug/data_dump/cpu_e2e_dump.h"
#include <map>
#include <fstream>
#include "backend/common/session/anf_runtime_algorithm.h"
#include "include/common/utils/anfalgo.h"
#include "debug/anf_ir_utils.h"
#include "debug/common.h"
#include "include/common/debug/anf_dump_utils.h"
#include "include/common/debug/common.h"
namespace mindspore {
void CPUE2eDump::DumpCNodeData(const CNodePtr &node, uint32_t graph_id) {

View File

@ -16,14 +16,14 @@
#include "debug/data_dump/dump_json_parser.h"
#include <fstream>
#include "utils/log_adapter.h"
#include "debug/common.h"
#include "include/common/debug/common.h"
#include "debug/utils.h"
#include "utils/ms_context.h"
#include "utils/convert_utils_base.h"
#include "backend/common/session/anf_runtime_algorithm.h"
#include "include/common/utils/anfalgo.h"
#include "debug/data_dump/npy_header.h"
#include "debug/anf_ir_utils.h"
#include "include/common/debug/anf_dump_utils.h"
#include "include/common/utils/comm_manager.h"
namespace {

View File

@ -22,17 +22,24 @@
#include <set>
#include <mutex>
#include <vector>
#include <memory>
#include "nlohmann/json.hpp"
#include "utils/ms_utils.h"
#include "backend/common/session/kernel_graph.h"
#include "include/backend/visible.h"
namespace mindspore {
class DumpJsonParser {
class BACKEND_EXPORT DumpJsonParser {
public:
static DumpJsonParser &GetInstance() {
static DumpJsonParser instance;
return instance;
std::lock_guard<std::mutex> lock(instance_mutex_);
if (instance_ == nullptr) {
instance_ = std::shared_ptr<DumpJsonParser>(new DumpJsonParser);
}
return *instance_;
}
~DumpJsonParser() = default;
void Parse();
static bool DumpToFile(const std::string &filename, const void *data, size_t len, const ShapeVector &shape,
TypeId type);
@ -75,9 +82,11 @@ class DumpJsonParser {
private:
DumpJsonParser() = default;
~DumpJsonParser() = default;
DISABLE_COPY_AND_ASSIGN(DumpJsonParser)
inline static std::shared_ptr<DumpJsonParser> instance_ = nullptr;
inline static std::mutex instance_mutex_;
std::mutex lock_;
bool async_dump_enabled_{false};
bool e2e_dump_enabled_{false};
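GetInstance here trades the function-local static for a mutex-guarded shared_ptr held in a C++17 inline static member, so construction is thread-safe and the single instance is tied to the BACKEND_EXPORT class even across DLL boundaries, where header-inline statics can otherwise be duplicated. Call sites are unchanged; for example:

// Usage is the same as before the change:
auto &parser = mindspore::DumpJsonParser::GetInstance();
parser.Parse();  // declared on the class above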

View File

@ -20,13 +20,13 @@
#include "runtime/device/ms_device_shape_transfer.h"
#include "utils/ms_context.h"
#include "debug/anf_ir_utils.h"
#include "pipeline/jit/debug/anf_ir_utils.h"
#include "debug/data_dump/dump_json_parser.h"
#include "backend/common/session/anf_runtime_algorithm.h"
#include "include/common/utils/anfalgo.h"
#include "runtime/device/kernel_runtime_manager.h"
#include "include/common/utils/utils.h"
#include "debug/common.h"
#include "include/common/debug/common.h"
#include "runtime/graph_scheduler/device_tensor_store.h"
using mindspore::runtime::DeviceTensorStore;

View File

@ -26,8 +26,8 @@
#include <vector>
#include "debug/data_dump/dump_json_parser.h"
#include "runtime/device/ms_device_shape_transfer.h"
#include "debug/anf_ir_utils.h"
#include "debug/common.h"
#include "include/common/debug/anf_dump_utils.h"
#include "include/common/debug/common.h"
#include "backend/common/session/anf_runtime_algorithm.h"
#include "include/common/utils/anfalgo.h"
#include "utils/ms_context.h"

View File

@ -30,6 +30,7 @@
#ifdef ENABLE_D
#include "proto/dump_data.pb.h"
#endif
#include "include/backend/visible.h"
using mindspore::kernel::KernelLaunchInfo;
#ifndef ENABLE_DEBUGGER
@ -127,7 +128,7 @@ class E2eDump {
static bool DumpTensorDataIfNeeded(const dump_data_t &dump_tensor_info);
#endif
inline static unsigned int starting_graph_id = INT32_MAX;
BACKEND_EXPORT inline static unsigned int starting_graph_id = INT32_MAX;
};
} // namespace mindspore
#endif // MINDSPORE_MINDSPORE_CCSRC_DEBUG_DATA_DUMP_E_2_E_DUMP_UTIL_H_

View File

@ -19,7 +19,7 @@
#include <memory>
#include <map>
#include "utils/file_utils.h"
#include "debug/common.h"
#include "include/common/debug/common.h"
#include "debug/debug_services.h"
#include "debug/debugger/debugger.h"

View File

@ -63,9 +63,6 @@ class TensorStatDump {
const Debugger *debugger);
private:
static const char CSV_HEADER[];
static const char CSV_FILE_NAME[];
const std::string op_type_;
const std::string op_name_;
const std::string task_id_;

View File

@ -31,10 +31,9 @@
#include "pybind11/embed.h"
#include "pybind11/stl.h"
#ifdef ONLINE_DBG_MODE
#include "debug/common.h"
#include "include/common/debug/common.h"
#include "debug/debugger/debugger.h"
#include "debug/anf_ir_utils.h"
#include "backend/common/session/anf_runtime_algorithm.h"
#include "include/common/debug/anf_dump_utils.h"
#include "include/common/utils/anfalgo.h"
#endif
#include "debug/utils.h"

View File

@ -27,18 +27,18 @@
#include <regex>
#include "debug/debugger/debugger.h"
#include "debug/data_dump/dump_json_parser.h"
#include "pipeline/jit/pipeline.h"
#include "backend/common/session/session_basic.h"
#include "backend/common/session/anf_runtime_algorithm.h"
#include "include/common/utils/anfalgo.h"
#include "runtime/device/kernel_runtime_manager.h"
#include "runtime/device/kernel_runtime.h"
#include "debug/data_dump/e2e_dump.h"
#include "include/common/utils/config_manager.h"
#include "debug/env_config_parser.h"
#include "include/common/debug/env_config_parser.h"
#include "include/common/utils/comm_manager.h"
#include "runtime/hardware/device_context_manager.h"
#include "debug/anf_ir_dump.h"
#include "debug/anf_ir_utils.h"
#include "include/common/debug/anf_ir_dump.h"
#include "include/common/debug/anf_dump_utils.h"
#include "runtime/graph_scheduler/device_tensor_store.h"
#ifdef ENABLE_DEBUGGER
#include "debug/debugger/proto_exporter.h"
@ -64,8 +64,6 @@ namespace mindspore {
static constexpr auto g_chunk_size = 1024 * 1024 * 3;
static constexpr int32_t heartbeat_period_second = 30;
DebuggerPtr Debugger::debugger_ = nullptr;
std::mutex Debugger::instance_lock_;
Debugger::Debugger()
: grpc_client_(nullptr),
@ -423,7 +421,7 @@ void Debugger::PreExecute(const KernelGraphPtr &graph_ptr) {
// For multiple graphs that are not at the initial step, stop only when the first
// subgraph of each step arrives (old runtime). If we already stopped at the last
// kernel, there is no need to stop again.
if (pipeline::GraphExecutorPy::GetDebugTerminate()) {
if (Common::GetDebugTerminate()) {
return;
}
if (!(run_level_ == "node" && suspended_at_last_kernel_)) {
@ -595,7 +593,7 @@ void Debugger::PostExecuteGraphDebugger() {
void Debugger::PostExecute() {
// access lock for public method
std::lock_guard<std::mutex> a_lock(access_lock_);
if (pipeline::GraphExecutorPy::GetDebugTerminate()) {
if (Common::GetDebugTerminate()) {
return;
}
if (debugger_ && debugger_->DebuggerBackendEnabled()) {
@ -648,7 +646,7 @@ bool Debugger::ReadNodeDataRequired(const CNodePtr &kernel) const {
void Debugger::PostExecuteNode(const CNodePtr &kernel, bool last_kernel) {
// access lock for public method
std::lock_guard<std::mutex> a_lock(access_lock_);
if (pipeline::GraphExecutorPy::GetDebugTerminate()) {
if (Common::GetDebugTerminate()) {
return;
}
if (debugger_enabled_ && !is_dataset_graph_) {
@ -1278,7 +1276,7 @@ void Debugger::Exit(bool exit_success) {
// The debugger notifies the main thread to exit because the main thread can only exit at a step boundary.
MS_LOG(INFO) << "Exit Debugger";
SetEnableHeartbeat(false);
pipeline::GraphExecutorPy::DebugTerminate(true, exit_success);
Common::DebugTerminate(true, exit_success);
}
std::list<WatchpointHit> Debugger::CheckWatchpoints(const std::string &watchnode, const CNodePtr &kernel,
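
Replacing pipeline::GraphExecutorPy with Common in these hunks removes the debugger's dependency on the frontend pipeline. Inferred purely from the call sites above, the hooks in include/common/debug/common.h presumably look roughly like this (a sketch, not the actual declarations):

// Presumed shape, inferred from the calls above -- the real header may differ.
class Common {
 public:
  static bool GetDebugTerminate();                          // polled in PreExecute/PostExecute/PostExecuteNode
  static void DebugTerminate(bool val, bool exit_success);  // raised by Debugger::Exit
};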

View File

@ -30,6 +30,7 @@
#include "debug/dump_data_builder.h"
#endif
#include "runtime/device/device_address.h"
#include "include/backend/visible.h"
using debugger::Chunk;
using debugger::DataType;
@ -61,7 +62,7 @@ enum class DebuggerCommand {
kUnknownCMD = -1
};
class Debugger : public std::enable_shared_from_this<Debugger> {
class BACKEND_EXPORT Debugger : public std::enable_shared_from_this<Debugger> {
public:
static std::shared_ptr<Debugger> GetInstance() {
std::lock_guard<std::mutex> i_lock(instance_lock_);
@ -331,8 +332,8 @@ class Debugger : public std::enable_shared_from_this<Debugger> {
#endif
// singleton
static std::mutex instance_lock_;
static std::shared_ptr<Debugger> debugger_;
inline static std::mutex instance_lock_ = {};
inline static std::shared_ptr<Debugger> debugger_ = nullptr;
uint32_t not_dataset_graph_sum_;
std::list<uint32_t> rungraph_id_list_;
bool ascend_kernel_by_kernel_;
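
Turning the singleton members into C++17 inline statics lets their definitions live in the header, so the out-of-line definitions removed from debugger.cc above become unnecessary -- one less symbol pair to manage when the class is exported from the new shared library. A minimal self-contained illustration of the pattern (hypothetical class name):

// Illustrative only; mirrors the Debugger::GetInstance locking scheme above.
#include <memory>
#include <mutex>

class Singleton {
 public:
  static std::shared_ptr<Singleton> GetInstance() {
    std::lock_guard<std::mutex> lock(instance_lock_);
    if (instance_ == nullptr) {
      instance_ = std::shared_ptr<Singleton>(new Singleton());
    }
    return instance_;
  }

 private:
  Singleton() = default;
  // C++17 inline statics: defined right here, no separate .cc definition required.
  inline static std::mutex instance_lock_ = {};
  inline static std::shared_ptr<Singleton> instance_ = nullptr;
};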

View File

@ -19,7 +19,7 @@
#include <vector>
#include <memory>
#include <string>
#include "debug/anf_ir_utils.h"
#include "include/common/debug/anf_dump_utils.h"
#include "debug/debugger/debugger.h"
#include "plugin/device/gpu/hal/device/gpu_device_address.h"
#include "debug/data_dump/dump_json_parser.h"

View File

@ -23,9 +23,9 @@
#include "utils/hash_map.h"
#include "utils/hash_set.h"
#include "debug/anf_ir_utils.h"
#include "include/common/debug/anf_dump_utils.h"
#include "debug/data_dump/dump_utils.h"
#include "debug/common.h"
#include "include/common/debug/common.h"
#include "debug/debugger/debugger.h"
#include "debug/data_dump/dump_json_parser.h"
#include "proto/debug_graph.pb.h"

View File

@ -20,7 +20,7 @@
#include <string>
#include <vector>
#include "debug/common.h"
#include "include/common/debug/common.h"
#include "debug/data_dump/dump_json_parser.h"
#include "ir/graph_utils.h"
#include "proto/debug_graph.pb.h"

View File

@ -15,11 +15,12 @@
*/
#include "debug/rdr/graph_exec_order_recorder.h"
#include <fstream>
#include <utility>
#include "mindspore/core/ir/anf.h"
#include "mindspore/core/utils/log_adapter.h"
#include "backend/common/session/anf_runtime_algorithm.h"
#include "include/common/utils/anfalgo.h"
#include "include/common/utils/utils.h"
#include "include/common/debug/rdr/recorder_manager.h"
namespace mindspore {
namespace {
@ -56,4 +57,18 @@ void GraphExecOrderRecorder::Export() {
std::string real_file_path = realpath.value() + ".txt";
DumpGraphExeOrder(real_file_path, exec_order_);
}
namespace RDR {
bool RecordGraphExecOrder(const SubModuleId module, const std::string &name,
const std::vector<CNodePtr> &final_exec_order) {
if (!mindspore::RecorderManager::Instance().RdrEnable()) {
return false;
}
std::string submodule_name = std::string(GetSubModuleName(module));
GraphExecOrderRecorderPtr graph_exec_order_recorder =
std::make_shared<GraphExecOrderRecorder>(submodule_name, name, final_exec_order);
bool ans = mindspore::RecorderManager::Instance().RecordObject(std::move(graph_exec_order_recorder));
return ans;
}
} // namespace RDR
} // namespace mindspore
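
With the registration helper now living next to the recorder it creates, a caller only needs this file's header. A hypothetical call site (function name, submodule id, and recorder name are illustrative):

// Hypothetical usage -- an assumption, not code from this change.
#include <vector>
#include "debug/rdr/graph_exec_order_recorder.h"

void OnGraphCompiled(const std::vector<mindspore::CNodePtr> &final_exec_order) {
  // Returns false immediately unless RDR is enabled, so the call is cheap on the hot path.
  (void)mindspore::RDR::RecordGraphExecOrder(mindspore::SubModuleId::SM_DEBUG, "exec_order", final_exec_order);
}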

View File

@ -19,7 +19,7 @@
#include <string>
#include <memory>
#include "debug/rdr/base_recorder.h"
#include "include/common/debug/rdr/base_recorder.h"
namespace mindspore {
class GraphExecOrderRecorder : public BaseRecorder {
@ -36,5 +36,10 @@ class GraphExecOrderRecorder : public BaseRecorder {
std::vector<CNodePtr> exec_order_;
};
using GraphExecOrderRecorderPtr = std::shared_ptr<GraphExecOrderRecorder>;
namespace RDR {
bool RecordGraphExecOrder(const SubModuleId module, const std::string &name,
const std::vector<CNodePtr> &final_exec_order);
} // namespace RDR
} // namespace mindspore
#endif // MINDSPORE_CCSRC_DEBUG_RDR_GRAPH_EXEC_ORDER_RECORDER_H_

View File

@ -14,14 +14,16 @@
* limitations under the License.
*/
#include "debug/rdr/graph_recorder.h"
#include <fstream>
#include <utility>
#include "mindspore/core/base/base.h"
#include "mindspore/core/ir/func_graph.h"
#include "backend/common/session/kernel_graph.h"
#include "mindspore/core/utils/log_adapter.h"
#include "debug/anf_ir_dump.h"
#include "debug/anf_ir_utils.h"
#include "debug/dump_proto.h"
#include "debug/common.h"
#include "include/common/debug/anf_ir_dump.h"
#include "include/common/debug/anf_dump_utils.h"
#include "include/common/debug/dump_proto.h"
#include "include/common/debug/common.h"
#include "include/common/debug/rdr/recorder_manager.h"
namespace mindspore {
namespace protobuf {
@ -64,11 +66,7 @@ void GraphRecorder::Export() {
std::string realpath = tmp_realpath.value();
if (graph_type_.find(".dat") != std::string::npos) {
save_flag = true;
AnfExporter exporter("");
std::string realpath_dat = realpath + ".dat";
ChangeFileMode(realpath_dat, S_IRWXU);
exporter.ExportFuncGraph(realpath_dat, func_graph_);
ChangeFileMode(realpath_dat, S_IRUSR);
AnfDumpHandler::DumpDat(realpath, func_graph_);
}
if (graph_type_.find(".ir") != std::string::npos) {
save_flag = true;
@ -90,4 +88,18 @@ void GraphRecorder::Export() {
MS_LOG(WARNING) << "Unknown save graph type: " << graph_type_;
}
}
namespace RDR {
bool RecordAnfGraph(const SubModuleId module, const std::string &name, const FuncGraphPtr &graph,
const DumpGraphParams &info, const std::string &file_type) {
if (!mindspore::RecorderManager::Instance().RdrEnable()) {
return false;
}
std::string submodule_name = std::string(GetSubModuleName(module));
GraphRecorderPtr graph_recorder = std::make_shared<GraphRecorder>(submodule_name, name, graph, file_type);
graph_recorder->SetDumpFlag(info);
bool ans = mindspore::RecorderManager::Instance().RecordObject(std::move(graph_recorder));
return ans;
}
} // namespace RDR
} // namespace mindspore
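
A hypothetical call site for the relocated entry point, showing how DumpGraphParams and the file_type filter drive Export() above (names and values are illustrative):

// Hypothetical usage -- an assumption, not code from this change.
#include "debug/rdr/graph_recorder.h"

void SnapshotGraph(const mindspore::FuncGraphPtr &graph) {
  mindspore::DumpGraphParams info{false, 0};  // same defaults as dump_graph_info_{false, 0} above
  // ".ir;.dat" makes Export() take both the .ir branch and the new AnfDumpHandler::DumpDat path.
  (void)mindspore::RDR::RecordAnfGraph(mindspore::SubModuleId::SM_DEBUG, "func_graph", graph, info, ".ir;.dat");
}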

View File

@ -19,7 +19,8 @@
#include <string>
#include <memory>
#include "debug/rdr/base_recorder.h"
#include "include/common/debug/rdr/base_recorder.h"
#include "include/backend/visible.h"
namespace mindspore {
struct DumpGraphParams {
@ -45,5 +46,10 @@ class GraphRecorder : public BaseRecorder {
DumpGraphParams dump_graph_info_{false, 0};
};
using GraphRecorderPtr = std::shared_ptr<GraphRecorder>;
namespace RDR {
BACKEND_EXPORT bool RecordAnfGraph(const SubModuleId module, const std::string &name, const FuncGraphPtr &graph,
const DumpGraphParams &info, const std::string &file_type = ".ir;.pb;.dat");
} // namespace RDR
} // namespace mindspore
#endif // MINDSPORE_CCSRC_DEBUG_RDR_GRAPH_RECORDER_H_

View File

@ -16,7 +16,9 @@
#include "debug/rdr/mem_address_recorder.h"
#include <fstream>
#include <sstream>
#include <utility>
#include "kernel/kernel.h"
#include "include/common/debug/rdr/recorder_manager.h"
namespace mindspore {
namespace {
@ -76,4 +78,48 @@ void MemAddressRecorder::CleanUp() {
mem_info_stream_.str("");
printed_ = false;
}
namespace RDR {
bool RecordMemAddressInfo(const SubModuleId module, const std::string &name) {
if (!mindspore::RecorderManager::Instance().RdrEnable()) {
return false;
}
std::string submodule_name = std::string(GetSubModuleName(module));
MemAddressRecorderPtr mem_info_recorder = std::make_shared<MemAddressRecorder>(submodule_name, name);
mem_info_recorder->Reset();
bool ans = mindspore::RecorderManager::Instance().RecordObject(std::move(mem_info_recorder));
return ans;
}
bool UpdateMemAddress(const SubModuleId module, const std::string &name, const std::string &op_name,
const kernel::KernelLaunchInfo &mem_info) {
if (!mindspore::RecorderManager::Instance().RdrEnable()) {
return false;
}
std::string submodule_name = std::string(GetSubModuleName(module));
auto recorder = mindspore::RecorderManager::Instance().GetRecorder(submodule_name, name);
bool ans = false;
if (recorder != nullptr) {
auto mem_recorder = std::dynamic_pointer_cast<MemAddressRecorder>(recorder);
mem_recorder->SaveMemInfo(op_name, mem_info);
ans = true;
}
return ans;
}
void ClearMemAddressInfo() {
if (!mindspore::RecorderManager::Instance().RdrEnable()) {
return;
}
if (RecorderManager::Instance().CheckRdrMemIsRecord()) {
std::string name = "mem_address_list";
std::string submodule_name = "KERNEL";
auto recorder = RecorderManager::Instance().GetRecorder(submodule_name, name);
if (recorder != nullptr) {
auto mem_recorder = std::dynamic_pointer_cast<MemAddressRecorder>(recorder);
mem_recorder->CleanUp();
}
}
}
} // namespace RDR
} // namespace mindspore
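
The three entry points above form a small lifecycle: register the recorder once, append per-launch addresses, and clean up after dumping. A hypothetical sketch of that flow (names are illustrative, and in practice the calls happen at different phases of execution rather than in one function):

// Hypothetical flow -- illustrative only, not code from this change.
#include <string>
#include "debug/rdr/mem_address_recorder.h"

void TraceLaunch(const std::string &op_name, const mindspore::kernel::KernelLaunchInfo &launch_info) {
  const auto module = mindspore::SubModuleId::SM_KERNEL;     // assumed id; matches the "KERNEL" lookup above
  const std::string name = "mem_address_list";               // the name ClearMemAddressInfo() searches for
  (void)mindspore::RDR::RecordMemAddressInfo(module, name);  // once, at initialization: registers and Reset()s
  (void)mindspore::RDR::UpdateMemAddress(module, name, op_name, launch_info);  // per kernel launch
  mindspore::RDR::ClearMemAddressInfo();                     // at teardown, after the info has been dumped
}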

View File

@ -22,7 +22,7 @@
#include <memory>
#include <mutex>
#include "debug/rdr/base_recorder.h"
#include "include/common/debug/rdr/base_recorder.h"
namespace mindspore {
namespace kernel {
@ -59,5 +59,12 @@ class MemAddressRecorder : public BaseRecorder {
std::ostringstream mem_info_stream_;
};
using MemAddressRecorderPtr = std::shared_ptr<MemAddressRecorder>;
namespace RDR {
bool RecordMemAddressInfo(const SubModuleId module, const std::string &name);
bool UpdateMemAddress(const SubModuleId module, const std::string &name, const std::string &op_name,
const kernel::KernelLaunchInfo &mem_info);
void ClearMemAddressInfo();
} // namespace RDR
} // namespace mindspore
#endif // MINDSPORE_CCSRC_DEBUG_RDR_MEM_ADDRESS_RECORDER_H_

View File

@ -1,140 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "debug/rdr/running_data_recorder.h"
#include <utility>
#include "debug/rdr/graph_exec_order_recorder.h"
#include "debug/rdr/recorder_manager.h"
#include "debug/rdr/string_recorder.h"
#include "debug/rdr/stream_exec_order_recorder.h"
#include "debug/rdr/mem_address_recorder.h"
#include "mindspore/core/ir/func_graph.h"
#include "mindspore/core/ir/anf.h"
#include "kernel/kernel.h"
#ifdef ENABLE_D
#include "plugin/device/ascend/hal/device/tasksink/task_generator.h"
#include "debug/rdr/task_debug_info_recorder.h"
#endif // ENABLE_D
namespace mindspore {
namespace RDR {
#ifdef ENABLE_D
bool RecordTaskDebugInfo(SubModuleId module, const std::string &name,
const std::vector<TaskDebugInfoPtr> &task_debug_info_list) {
if (!mindspore::RecorderManager::Instance().RdrEnable()) {
return false;
}
std::string submodule_name = std::string(GetSubModuleName(module));
TaskDebugInfoRecorderPtr task_debug_info_recorder =
std::make_shared<TaskDebugInfoRecorder>(submodule_name, name, task_debug_info_list);
bool ans = mindspore::RecorderManager::Instance().RecordObject(std::move(task_debug_info_recorder));
return ans;
}
#endif // ENABLE_D
bool RecordAnfGraph(const SubModuleId module, const std::string &name, const FuncGraphPtr &graph,
const DumpGraphParams &info, const std::string &file_type) {
if (!mindspore::RecorderManager::Instance().RdrEnable()) {
return false;
}
std::string submodule_name = std::string(GetSubModuleName(module));
GraphRecorderPtr graph_recorder = std::make_shared<GraphRecorder>(submodule_name, name, graph, file_type);
graph_recorder->SetDumpFlag(info);
bool ans = mindspore::RecorderManager::Instance().RecordObject(std::move(graph_recorder));
return ans;
}
bool RecordGraphExecOrder(const SubModuleId module, const std::string &name,
const std::vector<CNodePtr> &final_exec_order) {
if (!mindspore::RecorderManager::Instance().RdrEnable()) {
return false;
}
std::string submodule_name = std::string(GetSubModuleName(module));
GraphExecOrderRecorderPtr graph_exec_order_recorder =
std::make_shared<GraphExecOrderRecorder>(submodule_name, name, final_exec_order);
bool ans = mindspore::RecorderManager::Instance().RecordObject(std::move(graph_exec_order_recorder));
return ans;
}
bool RecordString(SubModuleId module, const std::string &name, const std::string &data) {
if (!mindspore::RecorderManager::Instance().RdrEnable()) {
return false;
}
std::string submodule_name = std::string(GetSubModuleName(module));
StringRecorderPtr string_recorder = std::make_shared<StringRecorder>(submodule_name, name, data);
bool ans = mindspore::RecorderManager::Instance().RecordObject(std::move(string_recorder));
return ans;
}
bool RecordStreamExecOrder(const SubModuleId module, const std::string &name, const std::vector<CNodePtr> &exec_order) {
if (!mindspore::RecorderManager::Instance().RdrEnable()) {
return false;
}
std::string submodule_name = std::string(GetSubModuleName(module));
StreamExecOrderRecorderPtr stream_exec_order_recorder =
std::make_shared<StreamExecOrderRecorder>(submodule_name, name, exec_order);
bool ans = mindspore::RecorderManager::Instance().RecordObject(std::move(stream_exec_order_recorder));
return ans;
}
bool RecordMemAddressInfo(const SubModuleId module, const std::string &name) {
if (!mindspore::RecorderManager::Instance().RdrEnable()) {
return false;
}
std::string submodule_name = std::string(GetSubModuleName(module));
MemAddressRecorderPtr mem_info_recorder = std::make_shared<MemAddressRecorder>(submodule_name, name);
mem_info_recorder->Reset();
bool ans = mindspore::RecorderManager::Instance().RecordObject(std::move(mem_info_recorder));
return ans;
}
bool UpdateMemAddress(const SubModuleId module, const std::string &name, const std::string &op_name,
const kernel::KernelLaunchInfo &mem_info) {
if (!mindspore::RecorderManager::Instance().RdrEnable()) {
return false;
}
std::string submodule_name = std::string(GetSubModuleName(module));
auto recorder = mindspore::RecorderManager::Instance().GetRecorder(submodule_name, name);
bool ans = false;
if (recorder != nullptr) {
auto mem_recorder = std::dynamic_pointer_cast<MemAddressRecorder>(recorder);
mem_recorder->SaveMemInfo(op_name, mem_info);
ans = true;
}
return ans;
}
void TriggerAll() { mindspore::RecorderManager::Instance().TriggerAll(); }
void Snapshot() { mindspore::RecorderManager::Instance().Snapshot(); }
void ResetRecorder() { mindspore::RecorderManager::Instance().ClearAll(); }
void ClearMemAddressInfo() {
if (!mindspore::RecorderManager::Instance().RdrEnable()) {
return;
}
if (RecorderManager::Instance().CheckRdrMemIsRecord()) {
std::string name = "mem_address_list";
std::string submodule_name = "KERNEL";
auto recorder = RecorderManager::Instance().GetRecorder(submodule_name, name);
if (recorder != nullptr) {
auto mem_recorder = std::dynamic_pointer_cast<MemAddressRecorder>(recorder);
mem_recorder->CleanUp();
}
}
}
} // namespace RDR
} // namespace mindspore

View File

@ -1,64 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_DEBUG_RDR_RUNNING_DATA_RECORDER_H_
#define MINDSPORE_CCSRC_DEBUG_RDR_RUNNING_DATA_RECORDER_H_
#include <vector>
#include <string>
#include <memory>
#include "mindspore/core/utils/log_adapter.h"
#include "debug/rdr/graph_recorder.h"
namespace mindspore {
namespace kernel {
class Address;
struct KernelLaunchInfo;
using AddressPtr = std::shared_ptr<Address>;
} // namespace kernel
using AddressPtrList = std::vector<kernel::AddressPtr>;
struct MemInfo;
#ifdef ENABLE_D
namespace device {
namespace ascend {
namespace tasksink {
class TaskDebugInfo;
} // namespace tasksink
} // namespace ascend
} // namespace device
using TaskDebugInfoPtr = std::shared_ptr<device::ascend::tasksink::TaskDebugInfo>;
#endif // ENABLE_D
namespace RDR {
bool RecordAnfGraph(const SubModuleId module, const std::string &name, const FuncGraphPtr &graph,
const DumpGraphParams &info, const std::string &file_type = ".ir;.pb;.dat");
bool RecordGraphExecOrder(const SubModuleId module, const std::string &name,
const std::vector<CNodePtr> &final_exec_order);
bool RecordString(SubModuleId module, const std::string &name, const std::string &data);
bool RecordStreamExecOrder(const SubModuleId module, const std::string &name, const std::vector<CNodePtr> &exec_order);
bool RecordMemAddressInfo(const SubModuleId module, const std::string &name);
bool UpdateMemAddress(const SubModuleId module, const std::string &name, const std::string &op_name,
const kernel::KernelLaunchInfo &mem_info);
#ifdef ENABLE_D
bool RecordTaskDebugInfo(SubModuleId module, const std::string &name,
const std::vector<TaskDebugInfoPtr> &task_debug_info_list);
#endif // ENABLE_D
void TriggerAll();
void Snapshot();
void ResetRecorder();
void ClearMemAddressInfo();
} // namespace RDR
} // namespace mindspore
#endif // MINDSPORE_CCSRC_DEBUG_RDR_RUNNING_DATA_RECORDER_H_
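
With the aggregate running_data_recorder.h gone, each declaration above now lives beside its recorder, so callers presumably switch to the per-recorder headers. An assumed include mapping, matching the hunks earlier in this diff (TriggerAll/Snapshot/ResetRecorder are not shown being relocated here, so their new home is not visible in this change):

// Assumed mapping after this deletion -- one header per entry point.
#include "debug/rdr/graph_recorder.h"              // RDR::RecordAnfGraph
#include "debug/rdr/graph_exec_order_recorder.h"   // RDR::RecordGraphExecOrder
#include "debug/rdr/string_recorder.h"             // RDR::RecordString
#include "debug/rdr/stream_exec_order_recorder.h"  // RDR::RecordStreamExecOrder
#include "debug/rdr/mem_address_recorder.h"        // RDR::RecordMemAddressInfo / UpdateMemAddress / ClearMemAddressInfo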

View File

@ -21,6 +21,7 @@
#include "backend/common/session/anf_runtime_algorithm.h"
#include "include/common/utils/anfalgo.h"
#include "include/common/utils/utils.h"
#include "include/common/debug/rdr/recorder_manager.h"
namespace mindspore {
std::string Vector2String(const std::vector<uint32_t> &v) {
@ -70,4 +71,17 @@ void StreamExecOrderRecorder::Export() {
fout.close();
ChangeFileMode(real_file_path, S_IRUSR);
}
namespace RDR {
bool RecordStreamExecOrder(const SubModuleId module, const std::string &name, const std::vector<CNodePtr> &exec_order) {
if (!mindspore::RecorderManager::Instance().RdrEnable()) {
return false;
}
std::string submodule_name = std::string(GetSubModuleName(module));
StreamExecOrderRecorderPtr stream_exec_order_recorder =
std::make_shared<StreamExecOrderRecorder>(submodule_name, name, exec_order);
bool ans = mindspore::RecorderManager::Instance().RecordObject(std::move(stream_exec_order_recorder));
return ans;
}
} // namespace RDR
} // namespace mindspore

View File

@ -22,7 +22,7 @@
#include "nlohmann/json.hpp"
#include "backend/common/session/anf_runtime_algorithm.h"
#include "include/common/utils/anfalgo.h"
#include "debug/rdr/base_recorder.h"
#include "include/common/debug/rdr/base_recorder.h"
using json = nlohmann::json;
@ -98,5 +98,8 @@ class StreamExecOrderRecorder : public BaseRecorder {
std::vector<ExecNodePtr> exec_order_;
};
using StreamExecOrderRecorderPtr = std::shared_ptr<StreamExecOrderRecorder>;
namespace RDR {
bool RecordStreamExecOrder(const SubModuleId module, const std::string &name, const std::vector<CNodePtr> &exec_order);
} // namespace RDR
} // namespace mindspore
#endif // MINDSPORE_CCSRC_DEBUG_RDR_STREAM_EXEC_ORDER_RECORDER_H_

View File

@ -16,9 +16,10 @@
#include "debug/rdr/string_recorder.h"
#include <sys/stat.h>
#include <fstream>
#include "debug/common.h"
#include <utility>
#include "include/common/utils/utils.h"
#include "mindspore/core/utils/log_adapter.h"
#include "include/common/debug/rdr/recorder_manager.h"
namespace mindspore {
void StringRecorder::Export() {
@ -38,4 +39,16 @@ void StringRecorder::Export() {
// set file mode to read only by user
ChangeFileMode(file_path, S_IRUSR);
}
namespace RDR {
bool RecordString(SubModuleId module, const std::string &name, const std::string &data) {
if (!mindspore::RecorderManager::Instance().RdrEnable()) {
return false;
}
std::string submodule_name = std::string(GetSubModuleName(module));
StringRecorderPtr string_recorder = std::make_shared<StringRecorder>(submodule_name, name, data);
bool ans = mindspore::RecorderManager::Instance().RecordObject(std::move(string_recorder));
return ans;
}
} // namespace RDR
} // namespace mindspore

View File

@ -19,7 +19,7 @@
#include <string>
#include <memory>
#include "debug/rdr/base_recorder.h"
#include "include/common/debug/rdr/base_recorder.h"
namespace mindspore {
class StringRecorder : public BaseRecorder {
public:
@ -33,5 +33,8 @@ class StringRecorder : public BaseRecorder {
std::string data_;
};
using StringRecorderPtr = std::shared_ptr<StringRecorder>;
namespace RDR {
bool RecordString(SubModuleId module, const std::string &name, const std::string &data);
} // namespace RDR
} // namespace mindspore
#endif // MINDSPORE_CCSRC_DEBUG_RDR_STRING_RECORDER_H_

View File

@ -14,7 +14,9 @@
* limitations under the License.
*/
#include "debug/rdr/task_debug_info_recorder.h"
#include <utility>
#include "plugin/device/ascend/hal/device/tasksink/task_generator.h"
#include "include/common/debug/rdr/recorder_manager.h"
namespace mindspore {
void TaskDebugInfoRecorder::Export() {
@ -25,4 +27,18 @@ void TaskDebugInfoRecorder::Export() {
std::string file_path = realpath.value() + ".ir";
device::ascend::tasksink::TaskGenerator::DumpTaskInfo(file_path, task_debug_info_);
}
namespace RDR {
bool RecordTaskDebugInfo(SubModuleId module, const std::string &name,
const std::vector<TaskDebugInfoPtr> &task_debug_info_list) {
if (!mindspore::RecorderManager::Instance().RdrEnable()) {
return false;
}
std::string submodule_name = std::string(GetSubModuleName(module));
TaskDebugInfoRecorderPtr task_debug_info_recorder =
std::make_shared<TaskDebugInfoRecorder>(submodule_name, name, task_debug_info_list);
bool ans = mindspore::RecorderManager::Instance().RecordObject(std::move(task_debug_info_recorder));
return ans;
}
} // namespace RDR
} // namespace mindspore

View File

@ -19,7 +19,7 @@
#include <string>
#include <memory>
#include "debug/rdr/base_recorder.h"
#include "include/common/debug/rdr/base_recorder.h"
namespace mindspore {
namespace device {
@ -44,5 +44,9 @@ class TaskDebugInfoRecorder : public BaseRecorder {
std::vector<TaskDebugInfoPtr> task_debug_info_;
};
using TaskDebugInfoRecorderPtr = std::shared_ptr<TaskDebugInfoRecorder>;
namespace RDR {
bool RecordTaskDebugInfo(SubModuleId module, const std::string &name,
const std::vector<TaskDebugInfoPtr> &task_debug_info_list);
} // namespace RDR
} // namespace mindspore
#endif // MINDSPORE_CCSRC_DEBUG_RDR_TASK_DEBUG_INFO_RECORDER_H_

View File

@ -22,7 +22,7 @@
#include "distributed/collective/collective_manager.h"
#include "utils/ms_context.h"
#include "ps/ps_context.h"
#include "debug/common.h"
#include "include/common/debug/common.h"
namespace mindspore {
namespace distributed {

View File

@ -37,6 +37,7 @@
#include "ps/core/ps_server_node.h"
#include "ps/core/ps_scheduler_node.h"
#include "distributed/cluster/actor_route_table_proxy.h"
#include "include/backend/visible.h"
namespace mindspore {
namespace distributed {
@ -49,7 +50,7 @@ constexpr char kDetailedFailureReason[] =
"https://www.mindspore.cn/docs/programming_guide/zh-CN/master/distributed_training_gpu.html#openmpi.";
// A node-role-based cluster built by the MindSpore communication framework.
class ClusterContext {
class BACKEND_EXPORT ClusterContext {
public:
~ClusterContext();
DISABLE_COPY_AND_ASSIGN(ClusterContext)

View File

@ -24,6 +24,7 @@
#include "utils/ms_utils.h"
#include "distributed/constants.h"
#include "runtime/hardware/device_context_manager.h"
#include "include/backend/visible.h"
namespace mindspore {
namespace distributed {
@ -37,7 +38,7 @@ using CommunicationGroupPtr = device::CommunicationGroupPtr;
// The collective communication API.
// MindSpore uses OpenMPI on CPU, NCCL on GPU, and HCCL on Ascend to achieve distributed training.
// Besides these, MindSpore also has its own communication library, implemented on the CPU side.
class CollectiveManager {
class BACKEND_EXPORT CollectiveManager {
public:
~CollectiveManager();
DISABLE_COPY_AND_ASSIGN(CollectiveManager);

View File

@ -25,13 +25,14 @@
#else
#include "distributed/cluster/dummy_cluster_context.h"
#endif
#include "include/backend/visible.h"
namespace mindspore {
namespace distributed {
// Static methods for MindSpore distributed execution; they can be exported by Pybind.
// Initialize and finalize distributed execution.
bool Initialize();
BACKEND_EXPORT bool Initialize();
bool Finalize();
// Initialize and finalize the cluster based on MindSpore communication framework.

View File

@ -18,7 +18,7 @@
#include <string>
#include <fstream>
#include "utils/file_utils.h"
#include "debug/common.h"
#include "include/common/debug/common.h"
#include "ps/constants.h"
namespace mindspore {

View File

@ -49,6 +49,11 @@ void SignalHandler(int signal) {
(void)g_communicator_with_server->Stop();
}
Server &Server::GetInstance() {
static Server instance;
return instance;
}
void Server::Initialize(bool use_tcp, bool use_http, uint16_t http_port, const std::vector<RoundConfig> &rounds_config,
const CipherConfig &cipher_config, const FuncGraphPtr &func_graph, size_t executor_threshold) {
MS_EXCEPTION_IF_NULL(func_graph);

View File

@ -30,6 +30,7 @@
#include "fl/server/common.h"
#include "fl/server/executor.h"
#include "fl/server/iteration.h"
#include "include/backend/visible.h"
namespace mindspore {
namespace fl {
@ -38,12 +39,9 @@ namespace server {
constexpr uint32_t kServerSleepTimeForNetworking = 1000;
constexpr uint64_t kDefaultReplayAttackTimeDiff = 600000;
// Class Server is the entry point of MindSpore's parameter server training mode and federated learning.
class Server {
class BACKEND_EXPORT Server {
public:
static Server &GetInstance() {
static Server instance;
return instance;
}
static Server &GetInstance();
void Initialize(bool use_tcp, bool use_http, uint16_t http_port, const std::vector<RoundConfig> &rounds_config,
const CipherConfig &cipher_config, const FuncGraphPtr &func_graph, size_t executor_threshold);

View File

@ -16,7 +16,7 @@
#include "fl/server/server_recovery.h"
#include "fl/server/local_meta_store.h"
#include "debug/common.h"
#include "include/common/debug/common.h"
namespace mindspore {
namespace fl {

View File

@ -25,6 +25,11 @@
namespace mindspore {
namespace fl {
namespace worker {
FLWorker &FLWorker::GetInstance() {
static FLWorker instance;
return instance;
}
void FLWorker::Run() {
if (running_.load()) {
return;

View File

@ -28,6 +28,7 @@
#include "ps/core/worker_node.h"
#include "ps/core/cluster_metadata.h"
#include "ps/core/communicator/tcp_communicator.h"
#include "include/backend/visible.h"
struct EncryptPublicKeys {
std::string flID;
@ -67,12 +68,9 @@ enum class IterationState {
namespace worker {
// This class is used for hybrid training mode for now. In a later version, parameter server mode will also use this
// class as the worker.
class FLWorker {
class BACKEND_EXPORT FLWorker {
public:
static FLWorker &GetInstance() {
static FLWorker instance;
return instance;
}
static FLWorker &GetInstance();
void Run();
void Finalize();
bool SendToServer(uint32_t server_rank, const void *data, size_t size, ps::core::TcpUserCommand command,

View File

@ -28,9 +28,9 @@
#include "frontend/operator/cc_implementations.h"
#include "frontend/optimizer/opt.h"
#include "utils/symbolic.h"
#include "pybind_api/api_register.h"
#include "include/common/pybind_api/api_register.h"
#include "ir/signature.h"
#include "debug/trace.h"
#include "pipeline/jit/debug/trace.h"
#include "utils/ms_context.h"
#include "include/common/utils/utils.h"

View File

@ -25,7 +25,7 @@
#include "abstract/param_validator.h"
#include "frontend/operator/cc_implementations.h"
#include "frontend/optimizer/opt.h"
#include "pybind_api/api_register.h"
#include "include/common/pybind_api/api_register.h"
namespace mindspore {
// namespace to support composite operators definition

View File

@ -22,7 +22,7 @@
#include "abstract/param_validator.h"
#include "frontend/optimizer/opt.h"
#include "pybind_api/api_register.h"
#include "include/common/pybind_api/api_register.h"
namespace mindspore {
// namespace to support composite operators definition

View File

@ -22,7 +22,7 @@
#include "abstract/param_validator.h"
#include "frontend/optimizer/opt.h"
#include "pybind_api/api_register.h"
#include "include/common/pybind_api/api_register.h"
namespace mindspore {
// namespace to support composite operators definition

View File

@ -25,8 +25,8 @@
#include "abstract/abstract_value.h"
#include "abstract/abstract_function.h"
#include "abstract/dshape.h"
#include "pybind_api/api_register.h"
#include "debug/trace.h"
#include "include/common/pybind_api/api_register.h"
#include "pipeline/jit/debug/trace.h"
#include "frontend/operator/ops.h"
namespace mindspore {

View File

@ -23,10 +23,10 @@
#include "abstract/dshape.h"
#include "frontend/optimizer/opt.h"
#include "utils/ms_context.h"
#include "pybind_api/api_register.h"
#include "include/common/pybind_api/api_register.h"
#include "ir/signature.h"
#include "ir/dtype.h"
#include "debug/trace.h"
#include "pipeline/jit/debug/trace.h"
namespace mindspore {
// namespace to support composite operators definition

View File

@ -23,7 +23,7 @@
#include "frontend/operator/cc_implementations.h"
#include "ir/anf.h"
#include "frontend/optimizer/opt.h"
#include "pybind_api/api_register.h"
#include "include/common/pybind_api/api_register.h"
namespace mindspore {
// namespace to support composite operators definition

Some files were not shown because too many files have changed in this diff.