forked from mindspore-Ecosystem/mindspore

commit bcf1dfe963 ("compile macro")
parent d41b1ab22d
@@ -27,6 +27,7 @@
"mindspore/mindspore/ccsrc/pipeline/jit/static_analysis/auto_monad.cc" "containerOutOfBounds"
"mindspore/mindspore/core/load_mindir/anf_model_parser.cc" "stlIfStrFind"
"mindspore/mindspore/ccsrc/frontend/optimizer/ad/kpynative.cc" "containerOutOfBounds"
"mindspore/mindspore/ccsrc/pipeline/jit/action.cc" "unreadVariable"

# MindData
"mindspore/mindspore/ccsrc/minddata/dataset/engine/dataset_iterator.cc" "useStlAlgorithm"

@@ -122,6 +122,11 @@ if(ENABLE_TESTCASES OR (NOT ENABLE_D))
add_compile_definitions(NO_DLIB=1)
endif()

if(NOT (ENABLE_TESTCASES OR ENABLE_TEST) AND NOT (CMAKE_SYSTEM_NAME MATCHES "Windows" OR
    CMAKE_SYSTEM_NAME MATCHES "Darwin"))
add_compile_definitions(WITH_BACKEND)
endif()

if(ENABLE_DUMP_IR)
add_compile_definitions(ENABLE_DUMP_IR)
endif()
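
The C++ hunks below then replace long repeated platform checks with this single macro. A minimal before/after sketch of the guard pattern (the ps_context.h include is just an example site taken from later hunks in this commit):

    // Before: every site repeated the full platform condition.
    #if ((defined ENABLE_CPU) && (!defined _WIN32) && !defined(__APPLE__))
    #include "ps/ps_context.h"
    #endif

    // After: CMake decides once; sources test one macro.
    #ifdef WITH_BACKEND
    #include "ps/ps_context.h"
    #endif
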
@@ -343,6 +343,15 @@ foreach(_comp ${BACKEND_SUB_COMP})
endif()
endforeach()

if(ENABLE_TEST OR ENABLE_TESTCASES)
include_directories(${CMAKE_BINARY_DIR})
list(APPEND STUB_BACKEND_SOURCE ${CMAKE_SOURCE_DIR}/tests/ut/cpp/stub/fl/fl_stub.cc)
list(APPEND STUB_BACKEND_SOURCE ${CMAKE_SOURCE_DIR}/tests/ut/cpp/stub/fl/server_stub.cc)
list(APPEND STUB_BACKEND_SOURCE ${CMAKE_SOURCE_DIR}/tests/ut/cpp/stub/ps/ps_core_stub.cc)
add_library(stub_backend_obj OBJECT ${STUB_BACKEND_SOURCE})
list(APPEND BACKEND_SUB_OBJECTS_SRC $<TARGET_OBJECTS:stub_backend_obj>)
endif()

set_property(SOURCE ${BACKEND_SUB_OBJECTS_SRC} PROPERTY COMPILE_DEFINITIONS SUBMODULE_ID=mindspore::SubModuleId::SM_ME)
add_library(mindspore_backend SHARED ${BACKEND_SUB_OBJECTS_SRC})
if(MODE_ASCEND_ACL)

@@ -585,4 +594,36 @@ if(ENABLE_D)
endif()
endif()

if(ENABLE_TEST OR ENABLE_TESTCASES)
include_directories(${CMAKE_BINARY_DIR})
list(APPEND STUB_COMMON_SOURCE ${CMAKE_SOURCE_DIR}/tests/ut/cpp/stub/ge/ge_operator_stub.cc)
list(APPEND STUB_COMMON_SOURCE ${CMAKE_SOURCE_DIR}/tests/ut/cpp/stub/transform/util.cc)
list(APPEND STUB_COMMON_SOURCE ${CMAKE_SOURCE_DIR}/tests/ut/cpp/stub/pipeline/action_stub.cc)
list(APPEND STUB_COMMON_SOURCE ${CMAKE_SOURCE_DIR}/tests/ut/cpp/stub/ps/ps_stub.cc)
list(APPEND STUB_COMMON_SOURCE ${CMAKE_SOURCE_DIR}/tests/ut/cpp/stub/cluster/cluster_stub.cc)
list(APPEND STUB_COMMON_SOURCE ${CMAKE_SOURCE_DIR}/tests/ut/cpp/stub/profiling/parallel_strategy_profiling_stub.cc)

list(APPEND EXPRESSION_STUB_SOURCE ${CMAKE_SOURCE_DIR}/tests/ut/cpp/stub/fl/fl_stub.cc)
list(APPEND EXPRESSION_STUB_SOURCE ${CMAKE_SOURCE_DIR}/tests/ut/cpp/stub/ps/ps_core_stub.cc)

add_library(stub_common STATIC ${STUB_COMMON_SOURCE})
target_link_libraries(mindspore_common PUBLIC stub_common)

add_library(expression_ STATIC ${EXPRESSION_STUB_SOURCE})
target_link_libraries(_c_expression PUBLIC expression_)
endif()

if(NOT ENABLE_TESTCASES AND NOT (ENABLE_D OR ENABLE_CPU OR ENABLE_GPU))
include_directories(${CMAKE_BINARY_DIR})
list(APPEND EXPRESSION_STUB_SOURCE ${CMAKE_SOURCE_DIR}/tests/ut/cpp/stub/cluster/cluster_stub.cc)
list(APPEND EXPRESSION_STUB_SOURCE ${CMAKE_SOURCE_DIR}/tests/ut/cpp/stub/ps/ps_stub.cc)
list(APPEND EXPRESSION_STUB_SOURCE ${CMAKE_SOURCE_DIR}/tests/ut/cpp/stub/fl/fl_stub.cc)
list(APPEND EXPRESSION_STUB_SOURCE ${CMAKE_SOURCE_DIR}/tests/ut/cpp/stub/fl/server_stub.cc)
list(APPEND EXPRESSION_STUB_SOURCE ${CMAKE_SOURCE_DIR}/tests/ut/cpp/stub/ps/ps_core_stub.cc)
list(APPEND EXPRESSION_STUB_SOURCE
    ${CMAKE_SOURCE_DIR}/tests/ut/cpp/stub/profiling/parallel_strategy_profiling_stub.cc)

add_library(expression_ STATIC ${EXPRESSION_STUB_SOURCE})
target_link_libraries(_c_expression PUBLIC expression_)
endif()
add_subdirectory(cxx_api)

@@ -29,7 +29,7 @@
#include "ps/ps_context.h"
#include "utils/anf_utils.h"
#ifdef ENABLE_D
#include "include/transform/graph_ir/convert.h"
#include "include/transform/graph_ir/utils.h"
#endif
namespace mindspore {
const char kMsConvert[] = "ms";

@@ -658,7 +658,7 @@ bool GraphPartition::IsCut(const AnfNodePtr &node) {
#ifdef ENABLE_D
if (backend_name_ == kGeVm) {
auto name = GetCNodeFuncName(cnode);
auto adpt = transform::DfGraphConvertor::FindAdapter(name);
auto adpt = transform::FindAdapter(name);
if (adpt == nullptr) {
return true;
}

@@ -23,12 +23,8 @@
#include <queue>
#include <string>
#include <vector>

#include "abstract/abstract_value.h"
#include "abstract/abstract_function.h"
#ifdef ENABLE_D
#include "include/transform/graph_ir/convert.h"
#endif
#include "ir/graph_utils.h"
#include "utils/ms_context.h"
#include "utils/trace_base.h"

@@ -20,12 +20,11 @@
#include "utils/log_adapter.h"
#include "utils/convert_utils_base.h"
#include "utils/ms_exception.h"
#include "utils/ms_context.h"

namespace mindspore {
namespace common {
#if ENABLE_D || ENABLE_GPU
constexpr size_t kDeviceNum = 8;
#endif
constexpr size_t kMaxThreadNum = 23;
constexpr size_t kYieldThreshold = 1000;

@@ -34,11 +33,13 @@ ThreadPool::ThreadPool() {
if (process_core_num < 1) {
process_core_num = 1;
}
#if ENABLE_D || ENABLE_GPU
max_thread_num_ = process_core_num / kDeviceNum;
#else
max_thread_num_ = process_core_num;
#endif
auto ms_context = MsContext::GetInstance();
auto device_target = ms_context->get_param<std::string>(MS_CTX_DEVICE_TARGET);
if (device_target == kAscendDevice || device_target == kGPUDevice) {
max_thread_num_ = process_core_num / kDeviceNum;
} else {
max_thread_num_ = process_core_num;
}
if (max_thread_num_ < 1) {
max_thread_num_ = 1;
}

@@ -16,8 +16,7 @@

#include "cxx_api/model/acl/model_converter.h"
#include <memory>
#include "include/transform/graph_ir/convert.h"
#include "include/transform/graph_ir/graph_runner.h"
#include "include/transform/graph_ir/utils.h"
#include "mindspore/core/utils/ms_context.h"
#include "include/api/serialization.h"
#include "graph/model.h"

@@ -43,23 +42,23 @@ transform::TensorOrderMap GetParams(const FuncGraphPtr &anf_graph) {
}

bool CreateSessionAndGraphRunner() {
std::shared_ptr<ge::Session> sess = transform::DfGraphManager::GetInstance().GetGeSession();
std::shared_ptr<ge::Session> sess = transform::GetGeSession();
if (sess == nullptr) {
transform::SessionOptions options;
options["ge.trainFlag"] = "0";
options["ge.enablePrintOpPass"] = "0";
sess = transform::GraphRunner::NewSession(options);
transform::DfGraphManager::GetInstance().SetGeSession(sess);
sess = transform::NewSession(options);
transform::SetGeSession(sess);
}

transform::GraphRunnerOptions options;
options.sess_ptr = sess;
auto graph_runner = std::make_shared<transform::GraphRunner>(options);
auto graph_runner = transform::NewGraphRunner(options);
if (graph_runner == nullptr) {
MS_LOG(ERROR) << "Create new graph runner failed";
return false;
} else {
transform::DfGraphManager::GetInstance().SetGraphRunner(graph_runner);
transform::SetGraphRunner(graph_runner);
}

return true;

@@ -68,27 +67,29 @@ bool CreateSessionAndGraphRunner() {

transform::DfGraphPtr ModelConverter::ConvertFuncGraphToAIR(const FuncGraphPtr &anf_graph) {
MS_EXCEPTION_IF_NULL(anf_graph);
transform::DfGraphConvertor converter(anf_graph);
auto converter = transform::NewConverter(anf_graph);
std::string net_id = "0";
std::string init_graph = "init_subgraph." + net_id;
std::string checkpoint_name = "save." + net_id;

converter.set_training(false);
(void)converter.ConvertAllNode().InitParam(GetParams(anf_graph)).BuildGraph();
(void)converter.GenerateCheckpointGraph();
if (converter.ErrCode() != 0) {
transform::DfGraphManager::GetInstance().ClearGraph();
MS_LOG(ERROR) << "Convert df graph failed, err:" << converter.ErrCode();
transform::SetTraining(converter, false);

(void)transform::BuildGraph(converter, GetParams(anf_graph));

(void)transform::GenerateCheckpointGraph(converter);
auto err_code = transform::ErrCode(converter);
if (err_code != 0) {
transform::ClearGraph();
MS_LOG(ERROR) << "Convert df graph failed, err:" << err_code;
return nullptr;
}
(void)transform::DfGraphManager::GetInstance().AddGraph(anf_graph->ToString(), converter.GetComputeGraph());
(void)transform::DfGraphManager::GetInstance().AddGraph(init_graph, converter.GetInitGraph());
(void)transform::DfGraphManager::GetInstance().AddGraph(BROADCAST_GRAPH_NAME, converter.GetBroadcastGraph());
(void)transform::AddGraph(anf_graph->ToString(), transform::GetComputeGraph(converter));
(void)transform::AddGraph(init_graph, transform::GetInitGraph(converter));
(void)transform::AddGraph(BROADCAST_GRAPH_NAME, transform::GetBroadcastGraph(converter));

transform::Status ret =
transform::DfGraphManager::GetInstance().AddGraph(checkpoint_name, converter.GetSaveCheckpointGraph());
transform::Status ret = transform::AddGraph(checkpoint_name, transform::GetSaveCheckpointGraph(converter));
if (ret == transform::Status::SUCCESS) {
transform::DfGraphManager::GetInstance().SetAnfGraph(checkpoint_name, anf_graph);
transform::SetAnfGraph(checkpoint_name, anf_graph);
}

(void)setenv("GE_TRAIN", "0", 1);

@@ -98,7 +99,7 @@ transform::DfGraphPtr ModelConverter::ConvertFuncGraphToAIR(const FuncGraphPtr &
return nullptr;
}

auto wrap_ptr = transform::DfGraphManager::GetInstance().GetGraphByName(anf_graph->ToString());
auto wrap_ptr = transform::GetGraphByName(anf_graph->ToString());
if (wrap_ptr == nullptr) {
MS_LOG(ERROR) << "Get graph form DfGraphManager failed!";
return nullptr;

@@ -29,7 +29,7 @@
#include "include/common/utils/utils.h"
#include "ir/func_graph.h"
#include "distributed/constants.h"
#if ((defined ENABLE_CPU) && (!defined _WIN32) && !defined(__APPLE__))
#ifdef WITH_BACKEND
#include "distributed/cluster/cluster_context.h"
#else
#include "distributed/cluster/dummy_cluster_context.h"

@@ -18,7 +18,7 @@
#include <algorithm>
#include <vector>
#include <utility>
#if !defined(NO_DLIB) || defined(ENABLE_GPU)
#if (!defined(_WIN32) && !defined(__APPLE__) && !(defined(ENABLE_TESTCASES) || defined(ENABLE_TEST)))
#include "backend/common/session/executor_manager.h"
#else
#include "frontend/parallel/parallel_stub/executor_manager_stub.h"

@@ -70,7 +70,7 @@ Status Group::GetIndex(size_t *index) {

GroupManager::GroupManager() { groups_.clear(); }

#if !defined(NO_DLIB) || defined(ENABLE_GPU)
#if (!defined(_WIN32) && !defined(__APPLE__) && !(defined(ENABLE_TESTCASES) || defined(ENABLE_TEST)))
bool GroupManager::CreateGroupByExecutor(const std::string &device_name, const std::string &group_name,
    const std::vector<uint32_t> ranks, uint32_t device_id) {
// The group operation thread must be same with nccl init thread in the GPU device.

@@ -25,7 +25,7 @@
#include "frontend/parallel/device_matrix.h"
#include "frontend/parallel/graph_util/generate_graph.h"
#include "include/common/utils/parallel_context.h"
#if ((defined ENABLE_CPU) && (!defined _WIN32) && !defined(__APPLE__))
#ifdef WITH_BACKEND
#include "ps/ps_cache/ps_cache_manager.h"
#include "utils/ms_context.h"
#endif

@@ -160,7 +160,7 @@ Status GatherInfo::GetAttrs() {
if (std::find(inputs_shape_[1].begin(), inputs_shape_[1].end(), -1) != inputs_shape_[1].end()) {
dynamic_shape_indices_ = true;
}
#if ((defined ENABLE_CPU) && (!defined _WIN32) && !defined(__APPLE__))
#ifdef WITH_BACKEND
if (ps::PsDataPrefetch::GetInstance().cache_enable()) {
dynamic_shape_indices_ = true;
}

@@ -713,7 +713,7 @@ Status GatherInfo::InferBias() {
rank = rank % (params_strategy[0] * params_strategy[1]);
}
}
#if ((defined ENABLE_CPU) && (!defined _WIN32) && !defined(__APPLE__))
#ifdef WITH_BACKEND
if (ps::PsDataPrefetch::GetInstance().cache_enable()) {
bias_ = static_cast<int64_t>(ps::PsCacheManager::GetInstance().cache_indices_lower_bound());
return SUCCESS;

@@ -28,7 +28,7 @@
#include "frontend/parallel/strategy.h"
#include "include/common/utils/parallel_context.h"
#include "frontend/parallel/tensor_layout/tensor_redistribution.h"
#if ((defined ENABLE_CPU) && (!defined _WIN32) && (!defined(__APPLE__)))
#ifdef WITH_BACKEND
#include "ps/ps_cache/ps_cache_manager.h"
#endif

@@ -99,8 +99,7 @@ std::vector<StrategyPtr> UniqueInfo::GenerateOpStrategies(int64_t stage_id) {

return sp_vector;
}

#if ((defined ENABLE_CPU) && (!defined _WIN32) && (!defined(__APPLE__)))
#ifdef WITH_BACKEND
Status UniqueInfo::ComputeReplaceGraph(const CNodePtr &cnode) {
GenerateGraph gen_g = GenerateGraph(attrs_);
if (gen_g.Init(cnode) != SUCCESS) {

@@ -135,9 +134,8 @@ Status UniqueInfo::ComputeReplaceGraph(const CNodePtr &cnode) {
return SUCCESS;
}
#endif

ReplaceGraphPtr UniqueInfo::replace_graph(const CNodePtr &cnode) {
#if ((defined ENABLE_CPU) && (!defined _WIN32) && !defined(__APPLE__))
#ifdef WITH_BACKEND
if (ps::PsDataPrefetch::GetInstance().cache_enable()) {
auto inputs = cnode->inputs();
if (inputs.empty()) {

@@ -47,7 +47,7 @@ class UniqueInfo : public OperatorInfo {
Status InferDevMatrixShape() override;
Status InferForwardCommunication() override { return SUCCESS; }
Status InferAsLossDivisor() override { return SUCCESS; }
#if ((defined ENABLE_CPU) && (!defined _WIN32) && (!defined(__APPLE__)))
#ifdef WITH_BACKEND
Status ComputeReplaceGraph(const CNodePtr &cnode);
#endif

@@ -48,14 +48,30 @@
#include "ir/anf.h"
#include "ir/param_info.h"
#include "ir/tensor.h"
#if ((defined ENABLE_CPU) && (!defined _WIN32))
#ifdef WITH_BACKEND
#include "ps/util.h"
#endif

namespace mindspore {
namespace parallel {
void SearchParallelStrategy(const std::string &strategy_search_mode, const FuncGraphPtr &root,
    const std::vector<AnfNodePtr> &all_nodes) {
if ((strategy_search_mode == kDynamicProgramming) || (strategy_search_mode == kShardingPropagation)) {
if (ParallelStrategySearch(all_nodes, root) != SUCCESS) {
MS_LOG(EXCEPTION) << "Auto-parallel strategy search failed when using " << strategy_search_mode
    << " searching mode";
}
} else if (strategy_search_mode == kRecursiveProgramming) {
if (ParallelStrategyRecSearch(all_nodes, root) != SUCCESS) {
MS_LOG(EXCEPTION) << "Auto-parallel strategy search failed when using RP searching mode";
}
} else {
MS_LOG(EXCEPTION) << "Auto-parallel strategy searching mode unexpected: " << strategy_search_mode;
}
}

bool StepAutoParallel(const FuncGraphPtr &root, const opt::OptimizerPtr &) {
#if ((defined ENABLE_CPU) && (!defined _WIN32) && !defined(__APPLE__))
#ifdef WITH_BACKEND
if (ps::Util::IsRoleOfPServer() || ps::Util::IsRoleOfScheduler()) {
return false;
}

@@ -108,19 +124,7 @@ bool StepAutoParallel(const FuncGraphPtr &root, const opt::OptimizerPtr &) {
}

// search parallelization strategy
if ((strategy_search_mode == kDynamicProgramming) || (strategy_search_mode == kShardingPropagation)) {
if (ParallelStrategySearch(all_nodes, root) != SUCCESS) {
MS_LOG(EXCEPTION) << "Auto-parallel strategy search failed when using " << strategy_search_mode
    << " searching mode";
}
} else if (strategy_search_mode == kRecursiveProgramming) {
if (ParallelStrategyRecSearch(all_nodes, root) != SUCCESS) {
MS_LOG(EXCEPTION) << "Auto-parallel strategy search failed when using RP searching mode";
}
} else {
MS_LOG(EXCEPTION) << "Auto-parallel strategy searching mode unexpected: " << strategy_search_mode;
}

SearchParallelStrategy(strategy_search_mode, root, all_nodes);
(void)gettimeofday(&end_time, nullptr);
uint64_t time = kUSecondInSecond * static_cast<uint64_t>(end_time.tv_sec - start_time.tv_sec);
time += static_cast<uint64_t>(end_time.tv_usec - start_time.tv_usec);

@@ -50,7 +50,7 @@
#include "utils/symbolic.h"
#include "mindspore/core/utils/parallel_node_check.h"
#include "frontend/parallel/parallel_optimizer/opt_param_mgr.h"
#if ((defined ENABLE_CPU) && (!defined _WIN32))
#ifdef WITH_BACKEND
#include "ps/util.h"
#include "ps/ps_context.h"
#endif

@@ -2924,7 +2924,7 @@ CommInfo GetCommInfo() {
device_num = UintToInt(world_rank_size);
MS_LOG(INFO) << "Get device num from communication model, the device num is " << device_num;
}
#if ENABLE_D || ENABLE_GPU
#if (!defined(_WIN32) && !defined(__APPLE__) && !(defined(ENABLE_TESTCASES) || defined(ENABLE_TEST)))
if (ParallelContext::GetInstance()->device_num_is_set() && world_rank_size != device_num &&
    !ParallelContext::GetInstance()->hccl_test_available()) {
// hccl_test_available is used when we compile graphs in real ascend card environment, but with hccl_test.

@@ -3284,7 +3284,7 @@ static void HandlGlobalNormScale(const FuncGraphPtr &root, const std::vector<Anf
}

bool StepParallel(const FuncGraphPtr &root, const opt::OptimizerPtr &optimizer) {
#if ((defined ENABLE_CPU) && (!defined _WIN32) && !defined(__APPLE__))
#ifdef WITH_BACKEND
if (ps::PSContext::instance()->is_server() || ps::PSContext::instance()->is_scheduler()) {
return false;
}

@@ -15,14 +15,12 @@
*/
#ifndef MINDSPORE_CCSRC_INCLUDE_COMMON_UTILS_CALLBACKS_GE_H_
#define MINDSPORE_CCSRC_INCLUDE_COMMON_UTILS_CALLBACKS_GE_H_

#ifdef ENABLE_D
#include <map>
#include <vector>
#include <string>
#include <memory>
#include "include/transform/graph_ir/types.h"
#include "include/transform/graph_ir/util.h"
#include "ir/tensor.h"
#include "include/common/visible.h"

@@ -50,6 +50,21 @@ using OperatorPtr = std::shared_ptr<ge::Operator>;
using DfGraph = ge::Graph;
using DfGraphPtr = std::shared_ptr<DfGraph>;
using TensorMap = mindspore::HashMap<std::string, std::shared_ptr<MeTensor>>;
using OptionMap = std::map<std::string, std::string>;
using TensorOrderMap = std::map<std::string, std::shared_ptr<tensor::Tensor>>;

struct DfGraphWrapper {
 public:
DfGraphWrapper(const std::string &name, const int &id, const DfGraphPtr &graph_ptr, const OptionMap &options);
~DfGraphWrapper() {}

std::string name_;
int id_;
DfGraphPtr graph_ptr_;
OptionMap options_ = {};
};

using DfGraphWrapperPtr = std::shared_ptr<DfGraphWrapper>;

struct OutHandler {
OperatorPtr op;

@@ -64,6 +79,20 @@ struct ControlEdge {
OperatorPtr src_op;
OperatorPtr dest_op;
};

using SessionOptions = std::map<std::string, std::string>;

struct GraphRunnerOptions {
std::string target{"default_graph_runner"};
SessionOptions options;
// if sess_ptr is nullptr, GraphRunner will create a new ge session
std::shared_ptr<ge::Session> sess_ptr{nullptr};
};

struct RunOptions {
// graph's name
std::string name;
};
} // namespace transform
} // namespace mindspore
#endif // MINDSPORE_CCSRC_INCLUDE_TRANSFORM_GRAPH_IR_TYPES_H_
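
An illustrative (not authoritative) use of the option structs added above, with values mirrored from the model_converter.cc and pipeline_ge.cc hunks elsewhere in this commit; the graph name here is hypothetical:

    transform::SessionOptions sess_options;
    sess_options["ge.trainFlag"] = "0";        // same option key used in model_converter.cc
    transform::GraphRunnerOptions runner_options;
    runner_options.options = sess_options;
    runner_options.sess_ptr = nullptr;         // nullptr: GraphRunner creates a new ge session
    transform::RunOptions run_options;
    run_options.name = "init_subgraph.0";      // hypothetical name in the "init_subgraph." + net_id scheme
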
@@ -0,0 +1,103 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_CCSRC_INCLUDE_TRANSFORM_GRAPH_IR_UTILS_H_
#define MINDSPORE_CCSRC_INCLUDE_TRANSFORM_GRAPH_IR_UTILS_H_
#include <string>
#include <map>
#include <memory>
#include <vector>
#include "transform/graph_ir/convert.h"
#include "transform/graph_ir/graph_runner.h"
#include "include/transform/graph_ir/types.h"
#include "transform/graph_ir/op_adapter_base.h"
#include "include/common/utils/config_manager.h"
#include "include/common/visible.h"

namespace mindspore {
const char BROADCAST_GRAPH_NAME[] = "broadcast_subgraph";
namespace transform {
using OpAdapterPtr = std::shared_ptr<transform::BaseOpAdapter>;
using GraphRunnerPtr = std::shared_ptr<transform::GraphRunner>;
using DfGraphConvertorPtr = std::shared_ptr<transform::DfGraphConvertor>;
COMMON_EXPORT OpAdapterPtr FindAdapter(const std::string &op_name, bool train = false);
COMMON_EXPORT OpAdapterPtr FindAdapter(AnfNodePtr node, bool train = false);

COMMON_EXPORT bool IsPartialSuccNode(const AnfNodePtr node);
COMMON_EXPORT bool IsWhileNode(const AnfNodePtr &node);
COMMON_EXPORT std::string GetCNodeTargetFuncName(const CNodePtr cnode);
COMMON_EXPORT bool IsCaseNode(const CNodePtr node);
COMMON_EXPORT bool IsPartialCNode(const AnfNodePtr node);

COMMON_EXPORT void EraseGeResource();
COMMON_EXPORT void ClearGraphWrapper();
COMMON_EXPORT void ClearGeSessionAndRunner();
// convert_type
COMMON_EXPORT std::vector<GeTensorPtr> ConvertInputTensors(const std::vector<MeTensorPtr> &me_tensors,
    const std::string &format);
COMMON_EXPORT std::vector<MeTensorPtr> ConvertGeTensors(const std::vector<GeTensorPtr> &ge_tensors);
COMMON_EXPORT GeDataType ConvertDataType(const MeDataType &type);

COMMON_EXPORT MeTensorPtr ConvertGeTensor(GeTensorPtr ge_tensor, const ShapeVector &request_dims);
COMMON_EXPORT MeTensorPtr ConvertGeTensor(const GeTensorPtr &tensor);
COMMON_EXPORT MeTensorPtr ConvertGeTensor(const GeTensorPtr &tensor, const TypeId &me_type);
// df graph manager
COMMON_EXPORT std::shared_ptr<transform::GraphRunner> GetGraphRunner();
COMMON_EXPORT std::shared_ptr<ge::Session> GetGeSession();
COMMON_EXPORT void SetGeSession(const std::shared_ptr<ge::Session> &sess_ptr);
COMMON_EXPORT GraphRunnerPtr NewGraphRunner(const GraphRunnerOptions &options);
COMMON_EXPORT void SetGraphRunner(const GraphRunnerPtr &runner);
COMMON_EXPORT void ClearGraph();
COMMON_EXPORT Status AddGraph(const std::string &name, const DfGraphPtr &graph, const OptionMap &options = {});
COMMON_EXPORT void SetAnfGraph(const std::string &name, const AnfGraphPtr &anf_graph_ptr);
COMMON_EXPORT DfGraphWrapperPtr GetGraphByName(const std::string &name);

COMMON_EXPORT FuncGraphPtr GetAnfGraph(uint32_t graph_id);

// convert
COMMON_EXPORT DfGraphConvertorPtr NewConverter(const FuncGraphPtr &graph);

COMMON_EXPORT void SetTraining(DfGraphConvertorPtr converter, bool training);
COMMON_EXPORT void BuildGraph(DfGraphConvertorPtr converter,
    const std::map<std::string, std::shared_ptr<tensor::Tensor>> &maps);
COMMON_EXPORT void GenerateBroadcastGraph(DfGraphConvertorPtr converter, const TensorOrderMap &tensors);
COMMON_EXPORT void GenerateCheckpointGraph(DfGraphConvertorPtr converter);
COMMON_EXPORT int ErrCode(DfGraphConvertorPtr converter);
COMMON_EXPORT void DrawComputeGraph(DfGraphConvertorPtr converter, const std::string &name);
COMMON_EXPORT void DrawInitGraph(DfGraphConvertorPtr converter, const std::string &name);
COMMON_EXPORT void DrawSaveCheckpointGraph(DfGraphConvertorPtr converter, const std::string &name);
COMMON_EXPORT DfGraphPtr GetComputeGraph(DfGraphConvertorPtr converter);
COMMON_EXPORT DfGraphPtr GetInitGraph(DfGraphConvertorPtr converter);
COMMON_EXPORT DfGraphPtr GetSaveCheckpointGraph(DfGraphConvertorPtr converter);
COMMON_EXPORT DfGraphPtr GetBroadcastGraph(DfGraphConvertorPtr converter);

// new session
COMMON_EXPORT std::shared_ptr<ge::Session> NewSession(const SessionOptions &sess_options);

COMMON_EXPORT Status RunGraph(const std::shared_ptr<GraphRunner> &runner, const RunOptions &options,
    const std::vector<GeTensorPtr> &inputs, std::vector<GeTensorPtr> *outputs);

COMMON_EXPORT Status RunGraph(const std::shared_ptr<GraphRunner> &runner, const RunOptions &options,
    const std::vector<GeTensorPtr> &inputs, std::vector<MeTensorPtr> *outputs,
    const std::vector<TypeId> &me_types);
COMMON_EXPORT void ClearOpAdapterMap();

COMMON_EXPORT transform::Status CompileDatasetGraph(const DatasetGraphParam &param,
    const std::string &phase = "dataset");
} // namespace transform
} // namespace mindspore

#endif  // MINDSPORE_CCSRC_INCLUDE_TRANSFORM_GRAPH_IR_UTILS_H_
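
This new header is the facade that lets callers stop reaching into DfGraphManager and DfGraphConvertor directly. A condensed sketch of the calling pattern, assembled from the model_converter.cc hunks earlier in this commit (illustrative, not a complete function; GetParams is the helper defined in model_converter.cc):

    auto converter = transform::NewConverter(anf_graph);
    transform::SetTraining(converter, false);
    (void)transform::BuildGraph(converter, GetParams(anf_graph));
    (void)transform::GenerateCheckpointGraph(converter);
    if (transform::ErrCode(converter) != 0) {
      transform::ClearGraph();  // abandon the partially built graphs
      return nullptr;
    }
    (void)transform::AddGraph(anf_graph->ToString(), transform::GetComputeGraph(converter));
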
@@ -53,7 +53,7 @@
#include "backend/graph_compiler/transform.h"
#include "load_mindir/infer_mindir.h"
#include "debug/data_dump/dump_json_parser.h"
#if ((defined ENABLE_CPU) && (!defined _WIN32) && !defined(__APPLE__))
#ifdef WITH_BACKEND
#include "ps/parameter_server.h"
#include "ps/scheduler.h"
#include "ps/worker.h"

@@ -667,7 +667,7 @@ bool OptInlineAction(const ResourcePtr &resource) {
bool GeOptimizeAction(const ResourcePtr &resource) { return OptimizeAction(resource, kGePasses); }

bool VmOptimizeAction(const ResourcePtr &resource) {
#if ((defined ENABLE_CPU) && (!defined _WIN32) && !defined(__APPLE__))
#ifdef WITH_BACKEND
if (ps::PSContext::instance()->is_ps_mode()) {
(void)kVmPasses.emplace_back(PassItem("server_communication_op_fusion", ps::Util::FuseServerCommOps));
}

@@ -1093,7 +1093,7 @@ bool ExecuteAction(const ResourcePtr &resource) {
return true;
}

#if ((defined ENABLE_CPU) && (!defined _WIN32) && !defined(__APPLE__))
#ifdef WITH_BACKEND
bool StartPSWorkerAction(const ResourcePtr &) {
ps::Worker::GetInstance().Run();
return true;

@@ -1479,7 +1479,7 @@ std::vector<ActionItem> VmPipeline(const ResourcePtr &resource) {
(void)actions.emplace_back(std::make_pair("validate", ValidateAction));
}

#if ((defined ENABLE_CPU) && (!defined _WIN32) && !defined(__APPLE__))
#ifdef WITH_BACKEND
(void)actions.emplace_back(std::make_pair("distribtued_split", DistributedSplitAction));
if (ps::PSContext::instance()->is_worker()) {
if (distributed::cluster::ClusterContext::instance()->initialized()) {

@@ -1521,7 +1521,7 @@ std::vector<ActionItem> MindIRPipeline() {
return actions;
}

#if ((defined ENABLE_CPU) && (!defined _WIN32) && !defined(__APPLE__))
#ifdef WITH_BACKEND
std::vector<ActionItem> ServerPipeline(const ResourcePtr &resource) {
if (resource->EnableCompileCache() && resource->func_graph() != nullptr) {
return {std::make_pair("server", StartServerAction)};

@@ -29,7 +29,7 @@
#include "include/common/utils/utils.h"
#include "frontend/parallel/step_parallel.h"

#if ((defined ENABLE_CPU) && (!defined _WIN32) && !defined(__APPLE__))
#ifdef WITH_BACKEND
#include "ps/ps_context.h"
#include "ps/core/node.h"
#include "distributed/cluster/cluster_context.h"

@@ -69,7 +69,7 @@ std::string GetCompileCacheDir() {
}

std::string GetRole() {
#if ((defined ENABLE_CPU) && (!defined _WIN32) && !defined(__APPLE__))
#ifdef WITH_BACKEND
const std::string &server_mode = ps::PSContext::instance()->server_mode();
if ((server_mode == ps::kServerModeFL || server_mode == ps::kServerModeHybrid) &&
    ps::PSContext::instance()->is_server()) {

@@ -57,7 +57,7 @@
#include "frontend/optimizer/irpass/updatestate_eliminate.h"
#include "frontend/optimizer/irpass/expand_dump_flag.h"
#include "frontend/optimizer/irpass/ge/batchnorm_transform.h"
#if ((defined ENABLE_CPU) && (!defined _WIN32))
#ifdef WITH_BACKEND
#include "ps/util.h"
#include "ps/ps_context.h"
#endif

@@ -246,7 +246,7 @@ namespace {
bool ReAutoMonadWrapper(const FuncGraphPtr &root, const opt::OptimizerPtr &) { return ReAutoMonad(root); }

bool parallel_mode() {
#if ((defined ENABLE_CPU) && (!defined _WIN32))
#ifdef WITH_BACKEND
if (ps::PSContext::instance()->is_server() || ps::PSContext::instance()->is_scheduler()) {
return false;
}

@@ -680,7 +680,7 @@ bool CommOpAddAttrs(const ResourcePtr &resource) {

bool AddCacheEmbeddingPass(const ResourcePtr &resource) {
MS_EXCEPTION_IF_NULL(resource);
#if ((defined ENABLE_CPU) && (!defined _WIN32))
#ifdef WITH_BACKEND
if (ps::PSContext::instance()->is_ps_mode()) {
return true;
}

@@ -69,7 +69,7 @@
#endif
#endif

#if ((defined ENABLE_CPU) && (!defined _WIN32) && !defined(__APPLE__))
#ifdef WITH_BACKEND
#include "ps/constants.h"
#include "ps/util.h"
#include "ps/worker.h"

@@ -82,10 +82,8 @@

#ifdef ENABLE_D
#include "pipeline/jit/pipeline_ge.h"
#include "include/transform/graph_ir/convert.h"
#include "include/transform/graph_ir/df_graph_manager.h"
#include "include/transform/graph_ir/op_adapter_map.h"
#include "plugin/device/ascend/hal/device/profiling/profiling_manager.h"
#include "transform/graph_ir/op_adapter_map.h"
#include "include/transform/graph_ir/utils.h"
#include "plugin/device/ascend/hal/device/distribute/ascend_collective.h"
#endif

@@ -107,14 +105,9 @@ using mindspore::abstract::AbstractTensor;
using mindspore::abstract::AbstractTensorPtr;
using mindspore::abstract::AbstractTuple;
using mindspore::abstract::AbstractTuplePtr;

#ifdef ENABLE_D
#ifndef ENABLE_SECURITY
using mindspore::device::ascend::ProfilingManager;
#endif
using HcclCollectiveGroup = mindspore::device::ascend::collective::HcclCollectiveGroup;
#endif

const char IR_TYPE_ANF[] = "anf_ir";
const char IR_TYPE_ONNX[] = "onnx_ir";
const char IR_TYPE_MINDIR[] = "mind_ir";

@@ -529,27 +522,26 @@ py::dict GraphExecutorPy::GetAllreduceFusion(const std::string &phase) {
// Not support multi thread, not support nested call too.
// Here using nested_called flg to avoid nested call.
void GraphExecutorPy::DelNetRes(const py::set &id) {
#ifdef ENABLE_D
auto ms_context = MsContext::GetInstance();
MS_EXCEPTION_IF_NULL(ms_context);
auto device_target = ms_context->get_param<std::string>(MS_CTX_DEVICE_TARGET);
std::string backend = ms_context->backend_policy();
if (backend == "ge") {
FinalizeBackend();
if (device_target == kAscendDevice) {
if (backend == "ge") {
FinalizeBackend();
} else {
ConfigManager::GetInstance().ResetIterNum();
}
} else {
ConfigManager::GetInstance().ResetIterNum();
}
#else
ConfigManager::GetInstance().ResetIterNum();
#endif
for (auto item : id) {
DelOneNetRes(item);
}
#ifdef ENABLE_D
if (backend == "ge" && !id.empty() && info_.size() == 0) {
// because Ge only support one Session exist at the same time ,so we delete the old one
transform::DfGraphManager::GetInstance().DeleteGraphRunner();
transform::DfGraphManager::GetInstance().EraseAnfGraph();
transform::DfGraphManager::GetInstance().DeleteGeSession();
transform::EraseGeResource();
}
#endif
}

@@ -754,10 +746,10 @@ bool IsPhaseLoadFromMindIR(const std::string &phase) {
std::vector<ActionItem> GetPipeline(const ResourcePtr &resource, const std::string &phase, bool use_vm) {
MS_EXCEPTION_IF_NULL(resource);
bool is_air = IsPhaseExportAir(phase);

std::string backend = MsContext::GetInstance()->backend_policy();

#if ((defined ENABLE_CPU) && (!defined _WIN32) && !defined(__APPLE__))
auto ms_context = MsContext::GetInstance();
MS_EXCEPTION_IF_NULL(ms_context);
std::string backend = ms_context->backend_policy();
#ifdef WITH_BACKEND
if (distributed::cluster::ClusterContext::instance()->initialized()) {
auto node = distributed::cluster::ClusterContext::instance()->node();
MS_EXCEPTION_IF_NULL(node);

@@ -1107,7 +1099,6 @@ void Pipeline::Run() {
MS_LOG(INFO) << "Pipeline run";
MS_EXCEPTION_IF_NULL(resource_);
FuncGraphPtr user_graph = nullptr;

WITH(MsProfile::GetProfile())[&user_graph, this]() {
size_t i = 0;
for (auto &action : actions_) {

@@ -1238,6 +1229,7 @@ py::object GraphExecutorPy::Run(const py::tuple &args, const py::object &phase_o
}
auto phase = py::cast<std::string>(phase_obj);
auto ms_context = MsContext::GetInstance();
MS_EXCEPTION_IF_NULL(ms_context);
#ifdef ENABLE_D
if (ms_context->backend_policy() == "ge") {
return ExecDFGraph(info_, args, phase);

@@ -1380,7 +1372,7 @@ bool InitExecDataset(const std::string &queue_name, int64_t iter_num, int64_t ba
bool InitExecDatasetVm(const std::string &queue_name, int64_t size, int64_t batch_size,
    const std::vector<TypePtr> &types, const std::vector<std::vector<int64_t>> &shapes,
    const std::vector<int64_t> &input_indexes, bool need_run) {
#if ((defined ENABLE_CPU) && (!defined _WIN32) && !defined(__APPLE__))
#ifdef WITH_BACKEND
if ((ps::PSContext::instance()->is_ps_mode()) && (!ps::PSContext::instance()->is_worker())) {
return true;
}

@@ -1449,7 +1441,7 @@ bool InitExecDatasetVm(const std::string &queue_name, int64_t size, int64_t batc
auto runner = convert_fn(segment, "");
ConfigManager::GetInstance().set_iter_num(queue_name, size);
// PS cache does not support loop sink.
#if ((defined ENABLE_CPU) && (!defined _WIN32) && !defined(__APPLE__))
#ifdef WITH_BACKEND
if (ps::PSContext::instance()->is_worker() && ps::PsDataPrefetch::GetInstance().cache_enable()) {
ps::PsDataPrefetch::GetInstance().CreateDataChannel(queue_name, LongToSize(size));
ConfigManager::GetInstance().set_iter_num(queue_name, 1);

@@ -1530,9 +1522,9 @@ void InitHccl() {
}

void FinalizeHccl() {
#ifdef ENABLE_D
auto ms_context = MsContext::GetInstance();
MS_EXCEPTION_IF_NULL(ms_context);
#ifdef ENABLE_D
auto backend = ms_context->backend_policy();
if (backend == "ge") {
(void)FinalizeBackend();

@@ -1641,7 +1633,7 @@ void ClearResAtexit() {
device::DeviceContextManager::GetInstance().WaitTaskFinishOnDevice();

RecordExitStatus();
#if ((defined ENABLE_CPU) && (!defined _WIN32) && !defined(__APPLE__))
#ifdef WITH_BACKEND
if (distributed::cluster::ClusterContext::instance()->initialized()) {
(void)distributed::cluster::ClusterContext::instance()->Finalize(UINT32_MAX);
} else if (ps::PSContext::instance()->is_ps_mode() && ps::PSContext::instance()->is_worker()) {

@@ -1702,8 +1694,8 @@ void ClearResAtexit() {
auto ms_context = MsContext::GetInstance();
MS_EXCEPTION_IF_NULL(ms_context);
if (ms_context->backend_policy() == "ge") {
transform::DfGraphManager::GetInstance().ClearGraph();
transform::OpAdapterMap::get().clear();
transform::ClearGraphWrapper();
transform::ClearOpAdapterMap();
} else {
MS_LOG(INFO) << "Start clear ConfigManager...";
ConfigManager::GetInstance().ResetIterNum();

@@ -24,10 +24,7 @@
#include "utils/hash_map.h"
#include "include/common/debug/anf_ir_dump.h"
#include "ir/tensor.h"
#include "include/transform/graph_ir/convert.h"
#include "include/transform/graph_ir/df_graph_manager.h"
#include "include/transform/graph_ir/graph_builder.h"
#include "include/transform/graph_ir/graph_runner.h"
#include "include/transform/graph_ir/utils.h"
#include "include/common/debug/draw.h"
#include "abstract/abstract_value.h"
#include "include/common/utils/convert_utils_py.h"

@@ -42,19 +39,16 @@ using mindspore::abstract::AbstractScalar;
using mindspore::abstract::AbstractTensor;
using mindspore::abstract::AbstractTuple;
using mindspore::abstract::AbstractTuplePtr;
using mindspore::transform::DfGraphConvertor;
using mindspore::transform::DfGraphManager;
using mindspore::transform::GeTensorPtr;
using mindspore::transform::MeTensorPtr;
using mindspore::transform::Status;
using mindspore::transform::TransformUtil;

void DoExecNonInputGraph(const std::string &phase) {
std::vector<GeTensorPtr> ge_tensors;
std::vector<GeTensorPtr> ge_outputs;
transform::RunOptions run_options;
run_options.name = phase;
auto graph_runner = DfGraphManager::GetInstance().GetGraphRunner();
auto graph_runner = transform::GetGraphRunner();
if (graph_runner == nullptr) {
MS_LOG(ERROR) << "Can not found GraphRunner";
return;

@@ -63,7 +57,7 @@ void DoExecNonInputGraph(const std::string &phase) {
{
// Release GIL before calling into (potentially long-running) C++ code
py::gil_scoped_release release;
Status ret = graph_runner->RunGraph(run_options, ge_tensors, &ge_outputs);
Status ret = transform::RunGraph(graph_runner, run_options, ge_tensors, &ge_outputs);
if (ret != Status::SUCCESS) {
MS_LOG(ERROR) << "Exec graph:" << run_options.name << " failed";
return;

@@ -76,7 +70,7 @@ void SetGeOption(const std::map<std::string, std::string> &options) {
}

Status CreateSessionAndGraphRunner(bool is_training = true) {
std::shared_ptr<ge::Session> sess = DfGraphManager::GetInstance().GetGeSession();
std::shared_ptr<ge::Session> sess = transform::GetGeSession();
if (sess == nullptr) {
transform::SessionOptions options;
if (is_training) {

@@ -89,14 +83,14 @@ Status CreateSessionAndGraphRunner(bool is_training = true) {
}

options["ge.enablePrintOpPass"] = "0";
sess = transform::GraphRunner::NewSession(options);
DfGraphManager::GetInstance().SetGeSession(sess);
sess = transform::NewSession(options);
transform::SetGeSession(sess);
}

transform::GraphRunnerOptions options;
options.sess_ptr = sess;
auto graph_runner = std::make_shared<transform::GraphRunner>(options);
DfGraphManager::GetInstance().SetGraphRunner(graph_runner);
auto graph_runner = transform::NewGraphRunner(options);
transform::SetGraphRunner(graph_runner);
return Status::SUCCESS;
}

@@ -104,9 +98,8 @@ bool InitExecDatasetGe(const std::string &queue_name, int64_t size, int64_t batc
    const std::vector<TypePtr> &types, const std::vector<std::vector<int64_t>> &shapes,
    const std::vector<int64_t> &input_indexes, const std::string &phase) {
std::vector<int64_t> ge_types;
(void)std::transform(types.begin(), types.end(), std::back_inserter(ge_types), [](const TypePtr &i) -> int64_t {
return transform::TransformUtil::ConvertDataType(i->type_id());
});
(void)std::transform(types.begin(), types.end(), std::back_inserter(ge_types),
    [](const TypePtr &i) -> int64_t { return transform::ConvertDataType(i->type_id()); });

ConfigManager::GetInstance().set_dataset_mode(DatasetMode::DS_SINK_MODE);
ConfigManager::GetInstance().set_iter_num(queue_name, size);

@@ -115,7 +108,7 @@ bool InitExecDatasetGe(const std::string &queue_name, int64_t size, int64_t batc
DatasetGraphParam param(queue_name, size, batch_size, ge_types, shapes, input_indexes);
ConfigManager::GetInstance().set_dataset_param(param);

if (transform::BuildDatasetGraph(param, phase) != transform::SUCCESS) {
if (transform::CompileDatasetGraph(param, phase) != transform::SUCCESS) {
MS_LOG(ERROR) << "Build dateset graph failed.";
return false;
}

@@ -174,19 +167,19 @@ void ConvertObjectToTensors(const py::dict &dict, TensorOrderMap *const tensors)
bool AddDFGraph(const std::map<std::string, ExecutorInfoPtr> &info, const py::dict &init_params,
    const std::string &phase, const py::object &broadcast_params) {
FuncGraphPtr anf_graph = info.at(phase)->func_graph;
DfGraphConvertor converter(anf_graph);
auto converter = transform::NewConverter(anf_graph);

size_t pos = phase.find('.');
std::string net_id = ((pos == std::string::npos || pos == phase.size() - 1) ? phase : phase.substr(pos + 1));
std::string phase_prefix = phase.substr(0, pos);
if (phase_prefix == "export") {
MS_LOG(INFO) << "Set DfGraphConvertor training : false";
converter.set_training(false);
transform::SetTraining(converter, false);
}

TensorOrderMap init_tensors{};
ConvertObjectToTensors(init_params, &init_tensors);
(void)converter.ConvertAllNode().InitParam(init_tensors).BuildGraph();
transform::BuildGraph(converter, init_tensors);

if (!broadcast_params.is_none()) {
if (!py::isinstance<py::dict>(broadcast_params)) {

@@ -195,41 +188,45 @@ bool AddDFGraph(const std::map<std::string, ExecutorInfoPtr> &info, const py::di
}
py::dict broadcast = broadcast_params.cast<py::dict>();
if (broadcast.empty()) {
(void)converter.GenerateBroadcastGraph(init_tensors);
transform::GenerateBroadcastGraph(converter, init_tensors);
} else {
TensorOrderMap broadcast_tensors{};
ConvertObjectToTensors(broadcast, &broadcast_tensors);
(void)converter.GenerateBroadcastGraph(broadcast_tensors);
transform::GenerateBroadcastGraph(converter, broadcast_tensors);
}
MS_LOG(INFO) << "Generate broadcast graph with params and broadcast_empty is " << broadcast.empty();
}

(void)converter.GenerateCheckpointGraph();
if (converter.ErrCode() != 0) {
DfGraphManager::GetInstance().ClearGraph();
MS_LOG(ERROR) << "Convert df graph failed, err:" << converter.ErrCode();
transform::GenerateCheckpointGraph(converter);
auto err_code = transform::ErrCode(converter);
if (err_code != 0) {
transform::ClearGraph();
MS_LOG(ERROR) << "Convert df graph failed, err:" << err_code;
return false;
}
#ifdef ENABLE_DUMP_IR
if (MsContext::GetInstance()->get_param<bool>(MS_CTX_SAVE_GRAPHS_FLAG)) {
converter.DrawComputeGraph(GetSaveGraphsPathName("ge_graph.dot"));  // for debug
converter.DrawInitGraph(GetSaveGraphsPathName("init_graph.dot"));  // for debug
converter.DrawSaveCheckpointGraph(GetSaveGraphsPathName("save_checkpoint_graph.dot"));  // for debug
// for debug
transform::DrawComputeGraph(converter, GetSaveGraphsPathName("ge_graph.dot"));
// for debug
transform::DrawInitGraph(converter, GetSaveGraphsPathName("init_graph.dot"));
// for debug
transform::DrawSaveCheckpointGraph(converter, GetSaveGraphsPathName("save_checkpoint_graph.dot"));
}
#endif
std::string init_graph = "init_subgraph." + net_id;
std::string checkpoint_name = "save." + net_id;
if (phase.find("train") != std::string::npos) {
(void)DfGraphManager::GetInstance().AddGraph(phase, converter.GetComputeGraph(), {{"ge.exec.variable_acc", "1"}});
(void)transform::AddGraph(phase, transform::GetComputeGraph(converter), {{"ge.exec.variable_acc", "1"}});
} else {
(void)DfGraphManager::GetInstance().AddGraph(phase, converter.GetComputeGraph());
(void)transform::AddGraph(phase, transform::GetComputeGraph(converter));
}
(void)DfGraphManager::GetInstance().AddGraph(init_graph, converter.GetInitGraph());
(void)DfGraphManager::GetInstance().AddGraph(BROADCAST_GRAPH_NAME, converter.GetBroadcastGraph());

Status ret = DfGraphManager::GetInstance().AddGraph(checkpoint_name, converter.GetSaveCheckpointGraph());
(void)transform::AddGraph(init_graph, transform::GetInitGraph(converter));
(void)transform::AddGraph(BROADCAST_GRAPH_NAME, transform::GetBroadcastGraph(converter));

Status ret = transform::AddGraph(checkpoint_name, transform::GetSaveCheckpointGraph(converter));
if (ret == Status::SUCCESS) {
DfGraphManager::GetInstance().SetAnfGraph(checkpoint_name, anf_graph);
transform::SetAnfGraph(checkpoint_name, anf_graph);
}

return true;

@@ -281,7 +278,7 @@ void RunGEInitGraph(const py::dict &init_params, const std::string &phase) {
(void)std::transform(inputs_with_name.begin(), inputs_with_name.end(), std::back_inserter(inputs),
    [](const std::pair<std::string, tensor::TensorPtr> &item) { return item.second; });

std::vector<GeTensorPtr> ge_tensors = TransformUtil::ConvertInputTensors(inputs, kOpFormat_NCHW);
std::vector<GeTensorPtr> ge_tensors = transform::ConvertInputTensors(inputs, kOpFormat_NCHW);
if (ge_tensors.size() != inputs.size()) {
MS_LOG(ERROR) << "Args convert to ge tensor error.";
return;

@@ -292,18 +289,18 @@ void RunGEInitGraph(const py::dict &init_params, const std::string &phase) {
transform::RunOptions run_options;

run_options.name = phase;
if (DfGraphManager::GetInstance().GetGraphByName(phase) == nullptr) {
if (transform::GetGraphByName(phase) == nullptr) {
MS_LOG(WARNING) << "Can not find " << phase << " sub graph, don't need data init subgraph in INFER mode.";
return;
}
auto graph_runner = DfGraphManager::GetInstance().GetGraphRunner();
auto graph_runner = transform::GetGraphRunner();
if (graph_runner == nullptr) {
MS_LOG(EXCEPTION) << "Can not found GraphRunner.";
}
{
// Release GIL before calling into (potentially long-running) C++ code
py::gil_scoped_release release;
Status ret = graph_runner->RunGraph(run_options, ge_tensors, &ge_outputs);
Status ret = transform::RunGraph(graph_runner, run_options, ge_tensors, &ge_outputs);
if (ret != Status::SUCCESS) {
MS_LOG(EXCEPTION) << "Exec " << phase << " graph failed.";
}

@@ -311,9 +308,9 @@ void RunGEInitGraph(const py::dict &init_params, const std::string &phase) {
MS_LOG(INFO) << "Exec " << phase << " graph success.";

if ((ConfigManager::GetInstance().parallel_strategy() == ParallelStrategy::DISTRIBUTION) &&
    (DfGraphManager::GetInstance().GetGraphByName(BROADCAST_GRAPH_NAME) != nullptr)) {
    (transform::GetGraphByName(BROADCAST_GRAPH_NAME) != nullptr)) {
run_options.name = BROADCAST_GRAPH_NAME;
ret = graph_runner->RunGraph(run_options, ge_tensors, &ge_outputs);
ret = transform::RunGraph(graph_runner, run_options, ge_tensors, &ge_outputs);
if (ret != Status::SUCCESS) {
MS_LOG(EXCEPTION) << "Exec BROADCAST_GRAPH_NAME failed.";
}

@@ -423,14 +420,13 @@ void GetMeRetDataType(const AbstractBasePtr &cnode_data, std::vector<TypeId> *me

std::shared_ptr<py::object> DoExecGraphAsync(const FuncGraphPtr &graph, const std::vector<MeTensorPtr> &inputs,
    const std::string &phase) {
std::vector<GeTensorPtr> ge_tensors = TransformUtil::ConvertInputTensors(inputs, kOpFormat_NCHW);
std::vector<GeTensorPtr> ge_tensors = transform::ConvertInputTensors(inputs, kOpFormat_NCHW);
if (ge_tensors.size() != inputs.size()) {
MS_LOG(EXCEPTION) << "Convert me args to ge tensor error.";
}

transform::RunOptions run_options;
run_options.name = phase;
auto graph_runner = DfGraphManager::GetInstance().GetGraphRunner();
auto graph_runner = transform::GetGraphRunner();
if (graph_runner == nullptr) {
MS_LOG(EXCEPTION) << "Can not found GraphRunner.";
}

@@ -448,7 +444,7 @@ std::shared_ptr<py::object> DoExecGraphAsync(const FuncGraphPtr &graph, const st
py::gil_scoped_release release;
MS_LOG(DEBUG) << "Run graph begin, inputs size is: " << inputs.size();
try {
Status ret = graph_runner->RunGraph(run_options, ge_tensors, &me_outputs, me_types);
Status ret = transform::RunGraph(graph_runner, run_options, ge_tensors, &me_outputs, me_types);
MS_LOG(DEBUG) << "Run graph finish, outputs size is: " << me_outputs.size();
if (ret != Status::SUCCESS) {
MS_LOG(ERROR) << "Exec graph failed";

@@ -543,7 +539,7 @@ py::object ExecDFGraph(const std::map<std::string, ExecutorInfoPtr> &info, const

void ExportDFGraph(const std::string &file_name, const std::string &phase) {
MS_LOG(DEBUG) << "Export graph begin.";
transform::DfGraphWrapperPtr wrap_ptr = DfGraphManager::GetInstance().GetGraphByName(phase);
transform::DfGraphWrapperPtr wrap_ptr = transform::GetGraphByName(phase);
if (wrap_ptr == nullptr) {
MS_LOG(ERROR) << "Get graph form DfGraphManager failed!";
return;

@@ -25,7 +25,7 @@
#include "include/common/utils/parallel_context.h"
#include "frontend/parallel/pipeline_transformer/pipeline_transformer.h"
#include "frontend/parallel/step_parallel.h"
#if ((defined ENABLE_CPU) && (!defined _WIN32) && !defined(__APPLE__))
#ifdef WITH_BACKEND
#include "ps/util.h"
#include "ps/ps_context.h"
#endif

@@ -529,7 +529,7 @@ void SetStrategyForShard(const FuncGraphPtr &root, const std::vector<AnfNodePtr>

// Only auto_parallel and semi_auto_parallel support PipelineSplit
bool PipelineSplit(const ResourcePtr &res) {
#if ((defined ENABLE_CPU) && (!defined _WIN32) && !defined(__APPLE__))
#ifdef WITH_BACKEND
if (ps::PSContext::instance()->is_server() || ps::PSContext::instance()->is_scheduler()) {
return true;
}

@@ -36,7 +36,6 @@ namespace pynative {
namespace py = pybind11;

enum RunOpArgsEnum { PY_PRIM = 0, PY_NAME, PY_INPUTS, PY_ARGS_NUM };

struct OpExecInfo {
bool is_nop_prim = false;
bool is_dynamic_shape = false;

@@ -56,9 +55,7 @@ struct OpExecInfo {
// Tensor input and with its value
std::vector<std::pair<size_t, ValuePtr>> index_with_value;
std::vector<tensor::TensorPtr> input_tensors;
#ifdef ENABLE_D
py::dict op_attrs;
#endif
std::vector<int64_t> inputs_mask;
bool lazy_build = false;
};

@@ -19,7 +19,8 @@
#include <string>
#include "backend/common/session/anf_runtime_algorithm.h"
#include "include/common/utils/anfalgo.h"
#include "include/transform/graph_ir/util.h"
#include "abstract/utils.h"
#include "transform/graph_ir/transform_util.h"
#include "runtime/device/memory_manager.h"

namespace mindspore::hccl {

@@ -60,7 +61,7 @@ void AllToAllvCalcParam::CalcOpParam() {
std::vector<size_t> output_real_mem_size(output_num);
for (size_t i = 0; i < input_num; ++i) {
auto ms_shape = AnfAlgo::GetInputDeviceShape(cnode, i);
auto type_size = transform::TransformUtil::GetDataTypeSize(AnfAlgo::GetInputDeviceDataType(cnode, i));
auto type_size = abstract::TypeIdSize(AnfAlgo::GetInputDeviceDataType(cnode, i));
if (type_size == 0) {
MS_LOG(EXCEPTION) << "Invalid type_size 0 of node: " << cnode->fullname_with_scope();
}

@@ -71,7 +72,7 @@ void AllToAllvCalcParam::CalcOpParam() {
}
for (size_t i = 0; i < output_num; ++i) {
auto ms_shape = AnfAlgo::GetOutputDeviceShape(cnode, i);
auto type_size = transform::TransformUtil::GetDataTypeSize(AnfAlgo::GetOutputDeviceDataType(cnode, i));
auto type_size = abstract::TypeIdSize(AnfAlgo::GetOutputDeviceDataType(cnode, i));
if (type_size == 0) {
MS_LOG(EXCEPTION) << "Invalid type_size 0 of node: " << cnode->fullname_with_scope();
}

@@ -26,7 +26,7 @@
#include "include/common/utils/anfalgo.h"
#include "utils/log_adapter.h"
#include "mindspore/core/ops/core_ops.h"
#include "include/transform/graph_ir/util.h"
#include "include/transform/graph_ir/utils.h"
#include "plugin/device/ascend/hal/hccl_adapter/all_to_all_v_calc_param.h"

namespace mindspore::hccl {

@@ -180,18 +180,16 @@ std::tuple<ge::NodePtr, ge::ComputeGraphPtr> GenerateStubGeNode(const AnfNodePtr
auto ms_shape = AnfAlgo::GetInputDeviceShape(cnode, i);
std::transform(ms_shape.begin(), ms_shape.end(), std::back_inserter(ge_shape),
    [](size_t in) { return static_cast<int64_t>(in); });
op_desc->AddInputDesc(
    ge::GeTensorDesc(ge::GeShape(ge_shape), ge::Format::FORMAT_NCHW,
    transform::TransformUtil::ConvertDataType(AnfAlgo::GetInputDeviceDataType(cnode, i))));
op_desc->AddInputDesc(ge::GeTensorDesc(ge::GeShape(ge_shape), ge::Format::FORMAT_NCHW,
    transform::ConvertDataType(AnfAlgo::GetInputDeviceDataType(cnode, i))));
}
for (size_t i = 0; i < output_num; ++i) {
std::vector<int64_t> ge_shape;
auto ms_shape = AnfAlgo::GetOutputDeviceShape(cnode, i);
std::transform(ms_shape.begin(), ms_shape.end(), std::back_inserter(ge_shape),
    [](size_t in) { return static_cast<int64_t>(in); });
op_desc->AddOutputDesc(
    ge::GeTensorDesc(ge::GeShape(ge_shape), ge::Format::FORMAT_NCHW,
    transform::TransformUtil::ConvertDataType(AnfAlgo::GetOutputDeviceDataType(cnode, i))));
op_desc->AddOutputDesc(ge::GeTensorDesc(ge::GeShape(ge_shape), ge::Format::FORMAT_NCHW,
    transform::ConvertDataType(AnfAlgo::GetOutputDeviceDataType(cnode, i))));
}

// set node data type

@@ -31,8 +31,8 @@
#include "common/util/error_manager/error_manager.h"
#endif
#ifdef ENABLE_D
#include "include/transform/graph_ir/df_graph_manager.h"
#include "debug/data_dump/dump_json_parser.h"
#include "include/transform/graph_ir/utils.h"
#endif
#include "profiler/device/profiling.h"

@@ -45,11 +45,9 @@ namespace {
constexpr auto kMindsporeDumpConfig = "MINDSPORE_DUMP_CONFIG";
const std::vector<std::string> kGeDumpMode = {"all", "input", "output"};
} // namespace
using mindspore::transform::DfGraphManager;
#endif

constexpr auto kUnknowErrorString = "Unknown error occurred";

#ifndef NO_DLIB
// Open tdt dataset
bool OpenTsd(const std::shared_ptr<MsContext> &ms_context_ptr) {

@@ -367,8 +365,7 @@ bool FinalizeGe(const std::shared_ptr<MsContext> &ms_context_ptr, bool force) {
if (force || ms_context_ptr->get_param<uint32_t>(MS_CTX_GE_REF) == 0) {
ms_context_ptr->set_param<uint32_t>(MS_CTX_GE_REF, 0);
try {
DfGraphManager::GetInstance().DeleteGraphRunner();
DfGraphManager::GetInstance().DeleteGeSession();
transform::ClearGeSessionAndRunner();
} catch (const std::exception &e) {
MS_LOG(ERROR) << "Error occurred when deleting the GE graph runner and session. Error: " << e.what();
} catch (...) {

@@ -51,7 +51,7 @@
#include "include/common/debug/common.h"
#include "distributed/recovery/recovery_context.h"
#include "distributed/collective/collective_manager.h"
#if ((defined ENABLE_CPU) && (!defined _WIN32) && (!defined _WIN64) && !defined(__APPLE__))
#ifdef WITH_BACKEND
#include "distributed/cluster/cluster_context.h"
#else
#include "distributed/cluster/dummy_cluster_context.h"

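For context, this swap replaces a platform check that had to be repeated at every include site with a single macro defined centrally by the build system; a minimal before/after sketch of the guard (illustrative only, mirroring the lines above):

// Before: each include site repeated the full platform test.
#if ((defined ENABLE_CPU) && (!defined _WIN32) && (!defined _WIN64) && !defined(__APPLE__))
#include "distributed/cluster/cluster_context.h"
#else
#include "distributed/cluster/dummy_cluster_context.h"
#endif

// After: one macro, defined once by the build system for backend builds.
#ifdef WITH_BACKEND
#include "distributed/cluster/cluster_context.h"
#else
#include "distributed/cluster/dummy_cluster_context.h"
#endif
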
@@ -14,7 +14,7 @@
* limitations under the License.
*/

#include "include/transform/graph_ir/convert.h"
#include "transform/graph_ir/convert.h"

#include <cinttypes>
#include <algorithm>

@@ -30,16 +30,15 @@
#include "utils/symbolic.h"
#include "include/common/utils/config_manager.h"
#include "include/common/utils/convert_utils.h"
#include "include/transform/graph_ir/utils.h"
#include "utils/ms_context.h"
#include "utils/check_convert_utils.h"
#include "include/transform/graph_ir/op_adapter_map.h"
#include "transform/graph_ir/op_adapter_map.h"
#include "ops/state_ops.h"
#include "ops/array_ops.h"
#include "ops/elewise_calculation_ops.h"
#include "ops/math_ops.h"
#ifdef ENABLE_D
#include "ops/save_ops.h"
#endif
#include "transform/graph_ir/op_adapter.h"
#include "transform/graph_ir/op_adapter_desc.h"

@@ -97,101 +96,6 @@ std::vector<AnfNodePtr> GetOrderedCNodes(const FuncGraphPtr fg, const AnfNodePtr
} // namespace

// ---------------implement of DfGraphConvertor-------------
bool IsCaseNode(const CNodePtr node) {
MS_EXCEPTION_IF_NULL(node);
if (!node->inputs().empty() && node->input(0)->isa<CNode>() &&
GetCNodeFuncName(node->input(0)->cast<CNodePtr>()) == "switch_layer") {
return true;
}
return false;
}

bool IsPartialCNode(const AnfNodePtr node) {
MS_EXCEPTION_IF_NULL(node);
if (!node->isa<CNode>()) {
return false;
}
auto cnode = node->cast<CNodePtr>();
if (GetCNodeFuncName(cnode) == prim::kPrimPartial->name()) {
return true;
}
return false;
}

bool IsPartialSuccNode(const AnfNodePtr node) {
MS_EXCEPTION_IF_NULL(node);
if (!node->isa<CNode>()) {
return false;
}
auto cnode = node->cast<CNodePtr>();
if (!cnode->inputs().empty()) {
for (size_t i = 0; i < cnode->inputs().size(); i++) {
if (IsPartialCNode(cnode->input(i))) {
return true;
}
}
}
return false;
}

bool IsWhileNode(const AnfNodePtr &node) {
if (!node->isa<CNode>()) {
return false;
}
if (!IsPartialSuccNode(node)) {
return false;
}
auto cnode = node->cast<CNodePtr>();
if (!IsPartialCNode(cnode->input(0))) {
return false;
}
auto partial_node = cnode->input(0);
MS_EXCEPTION_IF_NULL(partial_node);

auto c_partial_node = partial_node->cast<CNodePtr>();
MS_EXCEPTION_IF_NULL(c_partial_node);

auto graph_node_input = c_partial_node->input(1);
MS_EXCEPTION_IF_NULL(graph_node_input);
auto graph_node = graph_node_input->cast<ValueNodePtr>();
MS_EXCEPTION_IF_NULL(graph_node);
auto graph_node_value = graph_node->value();
MS_EXCEPTION_IF_NULL(graph_node_value);
auto cond_graph = graph_node_value->cast<FuncGraphPtr>();
MS_EXCEPTION_IF_NULL(cond_graph);
if (!cond_graph->recursive()) {
return false;
}
const auto &cond_set = cond_graph->nodes();
for (auto beg = cond_set.begin(); beg != cond_set.end(); beg++) {
if (!((*beg)->isa<CNode>())) {
continue;
}
auto c_beg = (*beg)->cast<CNodePtr>();
if (IsPartialSuccNode(c_beg) && c_beg->inputs().size() == kSwitchInputSize &&
IsPartialCNode(c_beg->input(kSwitchBodyIndex)) && IsPartialCNode(c_beg->input(kSwitchAfterIndex)) &&
GetCNodeFuncName(c_beg) == prim::kPrimSwitch->name()) {
auto func_graph = node->func_graph();
MS_LOG(DEBUG) << "there is while node: " << node->ToString() << " in graph: " << func_graph->ToString();
return true;
}
}
return false;
}

std::string GetCNodeTargetFuncName(const CNodePtr cnode) {
if (IsCaseNode(cnode)) {
return string(kNameCase);
}
if (IsWhileNode(cnode)) {
return string(kNameWhile);
}
auto name = GetCNodeFuncName(cnode);
if (name == "switch_layer") {
name = "";
}
return name;
}

bool IsDynamicShapeNode(const AnfNodePtr node) {
auto shape = node->Shape();

@@ -207,32 +111,6 @@ bool IsDynamicShapeNode(const AnfNodePtr node) {
return false;
}

OpAdapterPtr DfGraphConvertor::FindAdapter(const AnfNodePtr node, bool train) {
MS_EXCEPTION_IF_NULL(node);
if (node->isa<CNode>()) {
auto cnode = node->cast<CNodePtr>();

std::string name = kNameCustomOp;
if (!IsCustomCNode(cnode)) {
name = GetCNodeTargetFuncName(cnode);
}

auto it_adpt = OpAdapterMap::get().find(name);
if (it_adpt != OpAdapterMap::get().end()) {
return it_adpt->second->Get(train);
}
MS_LOG(EXCEPTION) << "Can't find OpAdapter for " << name;
}

if (node->isa<ValueNode>()) {
return OpAdapterMap::get()[kNameConst]->Get(train);
}
if (node->isa<Parameter>()) {
return OpAdapterMap::get()[kNameParam]->Get(train);
}
return OpAdapterPtr(nullptr);
}

void DfGraphConvertor::InitLoopVar(std::vector<ge::Operator> *init_input) {
MS_EXCEPTION_IF_NULL(init_input);
if (this->training_) {

@@ -306,14 +184,6 @@ void DfGraphConvertor::InitLoopVar(std::vector<ge::Operator> *init_input) {
}
}

OpAdapterPtr DfGraphConvertor::FindAdapter(const std::string &name, bool train) {
auto it = OpAdapterMap::get().find(name);
if (it != OpAdapterMap::get().end()) {
return it->second->Get(train);
}
MS_LOG(EXCEPTION) << "Can't find OpAdapter for " << name;
}

void DfGraphConvertor::DrawParamInitSubGraph(const std::string &name, const AnfNodePtr &it) {
// draw init subgraph
init_sout_ << "op_assign" << it.get() << "[label=<";

@@ -14,8 +14,8 @@
* limitations under the License.
*/

#ifndef MINDSPORE_CCSRC_INCLUDE_TRANSFORM_GRAPH_IR_CONVERT_H_
#define MINDSPORE_CCSRC_INCLUDE_TRANSFORM_GRAPH_IR_CONVERT_H_
#ifndef MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_CONVERT_H_
#define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_CONVERT_H_

#define DRAW_GE_GRAPH

@@ -36,9 +36,8 @@
#include "ops/core_ops.h"
#include "ir/anf.h"
#include "ir/func_graph.h"
#include "include/transform/graph_ir/util.h"
#include "ir/tensor.h"
#include "include/transform/graph_ir/df_graph_manager.h"
#include "transform/graph_ir/df_graph_manager.h"
#include "transform/graph_ir/op_adapter.h"
#include "graph/operator_reg.h"
#include "external/ge/ge_api.h"

@@ -46,18 +45,16 @@
#include "graph/utils/op_desc_utils.h"
#include "graph/utils/tensor_utils.h"
#include "ops/hcom_ops.h"
#include "include/common/visible.h"

namespace mindspore {
namespace transform {
class BaseOpAdapter;
using TensorOrderMap = std::map<std::string, std::shared_ptr<tensor::Tensor>>;
using HcomBroadcast = ge::op::HcomBroadcast;
using OpAdapterPtr = std::shared_ptr<BaseOpAdapter>;

using ParamIndexMap = std::map<std::size_t, std::size_t>;
enum class GraphType { kNormal, kCond, kBody, kAfter, kBranch };
class COMMON_EXPORT DfGraphConvertor {
class DfGraphConvertor {
public:
explicit DfGraphConvertor(const AnfGraphPtr &anf_graph) : anf_graph_(anf_graph) {
MS_EXCEPTION_IF_NULL(anf_graph);

@@ -138,8 +135,6 @@ class COMMON_EXPORT DfGraphConvertor {
DfGraphPtr GetInitGraph();
DfGraphPtr GetSaveCheckpointGraph();
DfGraphPtr GetBroadcastGraph();
static OpAdapterPtr FindAdapter(const std::string &op_name, bool train = false);
static OpAdapterPtr FindAdapter(AnfNodePtr node, bool train = false);
int ErrCode() const { return static_cast<int>(error_); }

bool is_training() const { return training_; }

@@ -292,4 +287,4 @@ class COMMON_EXPORT DfGraphConvertor {
} // namespace transform
} // namespace mindspore

#endif // MINDSPORE_CCSRC_INCLUDE_TRANSFORM_GRAPH_IR_CONVERT_H_
#endif // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_CONVERT_H_

@@ -14,7 +14,7 @@
* limitations under the License.
*/

#include "include/transform/graph_ir/df_graph_manager.h"
#include "transform/graph_ir/df_graph_manager.h"

#include <sstream>

@@ -13,8 +13,8 @@
* limitations under the License.
*/

#ifndef MINDSPORE_CCSRC_INCLUDE_TRANSFORM_GRAPH_IR_DF_GRAPH_MANAGER_H_
#define MINDSPORE_CCSRC_INCLUDE_TRANSFORM_GRAPH_IR_DF_GRAPH_MANAGER_H_
#ifndef MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_DF_GRAPH_MANAGER_H_
#define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_DF_GRAPH_MANAGER_H_

#include <set>
#include <string>

@@ -25,29 +25,12 @@
#include <utility>
#include "include/transform/graph_ir/types.h"
#include "ir/anf.h"
#include "include/common/visible.h"

namespace mindspore {
const char BROADCAST_GRAPH_NAME[] = "broadcast_subgraph";

namespace transform {
class GraphRunner;
using OptionMap = std::map<std::string, std::string>;

struct DfGraphWrapper {
public:
DfGraphWrapper(const std::string &name, const int &id, const DfGraphPtr &graph_ptr, const OptionMap &options);
~DfGraphWrapper() {}

std::string name_;
int id_;
DfGraphPtr graph_ptr_;
OptionMap options_ = {};
};

using DfGraphWrapperPtr = std::shared_ptr<DfGraphWrapper>;

class COMMON_EXPORT DfGraphManager {
class DfGraphManager {
public:
~DfGraphManager();
void ClearGraph() noexcept;

@@ -84,4 +67,4 @@ class COMMON_EXPORT DfGraphManager {
} // namespace transform
} // namespace mindspore

#endif // MINDSPORE_CCSRC_INCLUDE_TRANSFORM_GRAPH_IR_DF_GRAPH_MANAGER_H_
#endif // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_DF_GRAPH_MANAGER_H_

@@ -14,7 +14,7 @@
* limitations under the License.
*/

#include "include/transform/graph_ir/graph_builder.h"
#include "transform/graph_ir/graph_builder.h"

#include <sstream>

@@ -23,12 +23,11 @@
#include <map>
#include <utility>
#include "include/transform/graph_ir/types.h"
#include "include/transform/graph_ir/convert.h"
#include "include/common/visible.h"
#include "transform/graph_ir/convert.h"

namespace mindspore {
namespace transform {
COMMON_EXPORT Status BuildDatasetGraph(const DatasetGraphParam &param, const std::string &phase = "dataset");
Status BuildDatasetGraph(const DatasetGraphParam &param, const std::string &phase = "dataset");
} // namespace transform
} // namespace mindspore

@@ -14,7 +14,7 @@
* limitations under the License.
*/

#include "include/transform/graph_ir/graph_runner.h"
#include "transform/graph_ir/graph_runner.h"
#include <algorithm>
#include <string>
#include <memory>

@@ -71,7 +71,6 @@ GraphRunner::GraphRunner(const GraphRunnerOptions &options)
MS_LOG(WARNING) << "graph runner sess_ is nullptr!";
}
}

#ifdef ENABLE_D
auto ms_context = MsContext::GetInstance();
MS_EXCEPTION_IF_NULL(ms_context);

@@ -14,8 +14,8 @@
* limitations under the License.
*/

#ifndef MINDSPORE_CCSRC_INCLUDE_TRANSFORM_GRAPH_IR_GRAPH_RUNNER_H_
#define MINDSPORE_CCSRC_INCLUDE_TRANSFORM_GRAPH_IR_GRAPH_RUNNER_H_
#ifndef MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_GRAPH_RUNNER_H_
#define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_GRAPH_RUNNER_H_

#include <set>
#include <string>

@@ -23,29 +23,13 @@
#include <map>
#include <memory>

#include "include/transform/graph_ir/types.h"
#include "include/transform/graph_ir/util.h"
#include "include/transform/graph_ir/df_graph_manager.h"
#include "include/common/visible.h"
#include "transform/graph_ir/transform_util.h"
#include "transform/graph_ir/df_graph_manager.h"
#include "ir/tensor.h"

namespace mindspore {
namespace transform {
using SessionOptions = std::map<std::string, std::string>;

struct GraphRunnerOptions {
std::string target{"default_graph_runner"};
SessionOptions options;
// if sess_ptr is nullptr, GraphRunner will create a new ge session
std::shared_ptr<ge::Session> sess_ptr{nullptr};
};

struct RunOptions {
// graph's name
std::string name;
};

class COMMON_EXPORT GraphRunner {
class GraphRunner {
public:
explicit GraphRunner(const GraphRunnerOptions &options);
~GraphRunner() { sess_ = nullptr; }

@@ -63,4 +47,4 @@ class COMMON_EXPORT GraphRunner {
} // namespace transform
} // namespace mindspore

#endif // MINDSPORE_CCSRC_INCLUDE_TRANSFORM_GRAPH_IR_GRAPH_RUNNER_H_
#endif // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_GRAPH_RUNNER_H_

@@ -24,11 +24,10 @@
#include <sstream>

#include "utils/hash_map.h"
#include "include/transform/graph_ir/util.h"
#include "transform/graph_ir/transform_util.h"
#include "ir/anf.h"
#include "ir/primitive.h"
#include "ir/value.h"
#include "include/transform/graph_ir/types.h"
#include "graph/operator_reg.h"
#include "external/ge/ge_api.h"
#include "graph/tensor.h"

@@ -13,7 +13,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "include/transform/graph_ir/op_adapter_map.h"
#include "transform/graph_ir/op_adapter_map.h"
#include <memory>
#include "graph/operator.h"
#include "transform/graph_ir/op_adapter_desc.h"

@@ -20,7 +20,6 @@
#include <string>
#include <memory>
#include "utils/hash_map.h"
#include "include/common/visible.h"

namespace mindspore {
namespace transform {

@@ -359,7 +358,7 @@ constexpr const char kNameKLDiv[] = "KLDivLoss";

class OpAdapterDesc;

class COMMON_EXPORT OpAdapterMap {
class OpAdapterMap {
public:
static mindspore::HashMap<std::string, std::shared_ptr<OpAdapterDesc>> &get();
};

@@ -23,10 +23,8 @@ DYN_INPUT_MAP(Print) = {{1, DYN_INPUT_DESC(x)}};
ATTR_MAP(Print) = EMPTY_ATTR_MAP;
REG_ADPT_DESC(Print, kNamePrint, ADPT_DESC(Print))

#ifdef ENABLE_D
INPUT_MAP(Assert) = {{1, INPUT_DESC(input_condition)}};
DYN_INPUT_MAP(Assert) = {{2, DYN_INPUT_DESC(input_data)}};
ATTR_MAP(Assert) = {{"summarize", ATTR_DESC(summarize, AnyTraits<int64_t>())}};
REG_ADPT_DESC(Assert, kNameAssert, ADPT_DESC(Assert))
#endif
} // namespace mindspore::transform

@@ -26,9 +26,7 @@ namespace mindspore::transform {
DECLARE_OP_ADAPTER(Print)
DECLARE_OP_USE_DYN_INPUT(Print)

#ifdef ENABLE_D
DECLARE_OP_ADAPTER(Assert)
DECLARE_OP_USE_DYN_INPUT(Assert)
#endif
} // namespace mindspore::transform
#endif // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_LOGGING_OPS_DECLARE_H_

@@ -22,7 +22,7 @@
#include "utils/hash_map.h"
#include "transform/graph_ir/op_adapter.h"
#include "transform/graph_ir/op_adapter_desc.h"
#include "include/transform/graph_ir/op_adapter_map.h"
#include "transform/graph_ir/op_adapter_map.h"
#include "mindspore/core/ops/core_ops.h"

namespace mindspore::transform {

@@ -14,8 +14,7 @@
* limitations under the License.
*/

#include "include/transform/graph_ir/util.h"

#include "transform/graph_ir/transform_util.h"
#include <utility>
#include <map>

@@ -31,6 +30,10 @@ using std::string;
using std::vector;

const size_t kErrorSize = 0;
const size_t kIdx0 = 0;
const size_t kIdx1 = 1;
const size_t kIdx2 = 2;
const size_t kIdx3 = 3;

vector<int64_t> TransformUtil::ConvertIntToList(int64_t data, int size) {
vector<int64_t> list{};

@@ -287,8 +290,8 @@ bool IsGeShapeCompatible(const GeShape &ge_shape, const ShapeVector &request_dim
}

// convert NHWC to NCHW
if ((request_dims.size() == 1) && (ge_dims.size() == GE_DIMS) && (request_dims[0] == ge_dims[1]) &&
(ge_dims[0] == 1) && (ge_dims[2] == 1) && (ge_dims[3] == 1)) {
if ((request_dims.size() == 1) && (ge_dims.size() == GE_DIMS) && (request_dims[kIdx0] == ge_dims[kIdx1]) &&
(ge_dims[kIdx0] == 1) && (ge_dims[kIdx2] == 1) && (ge_dims[kIdx3] == 1)) {
MS_LOG(INFO) << "Ge tensor shape and request shape is compatible";
return true;
}

@@ -14,8 +14,8 @@
* limitations under the License.
*/

#ifndef MINDSPORE_CCSRC_INCLUDE_TRANSFORM_GRAPH_IR_UTIL_H_
#define MINDSPORE_CCSRC_INCLUDE_TRANSFORM_GRAPH_IR_UTIL_H_
#ifndef MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_UTIL_H_
#define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_UTIL_H_

#include <string>
#include <vector>

@@ -26,13 +26,11 @@
#include "ir/dtype.h"
#include "ir/tensor.h"
#include "include/transform/graph_ir/types.h"
#include "graph/tensor.h"
#include "utils/shape_utils.h"
#include "include/common/visible.h"

namespace mindspore {
namespace transform {
class COMMON_EXPORT TransformUtil {
class TransformUtil {
public:
/*
* Parameters:

@@ -255,4 +253,4 @@ class COMMON_EXPORT TransformUtil {
} // namespace transform
} // namespace mindspore

#endif // MINDSPORE_CCSRC_INCLUDE_TRANSFORM_GRAPH_IR_UTIL_H_
#endif // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_UTIL_H_

@@ -0,0 +1,305 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "include/transform/graph_ir/utils.h"
#include "transform/graph_ir/convert.h"
#include "transform/graph_ir/op_adapter_map.h"
#include "transform/graph_ir/op_adapter_util.h"
#include "transform/graph_ir/df_graph_manager.h"
#include "transform/graph_ir/op_adapter_desc.h"
#include "transform/graph_ir/transform_util.h"
#include "transform/graph_ir/graph_builder.h"

namespace mindspore {
namespace transform {
namespace {
constexpr size_t kSwitchInputSize = 4;
constexpr size_t kSwitchBodyIndex = 2;
constexpr size_t kSwitchAfterIndex = 3;
} // namespace

OpAdapterPtr FindAdapter(const AnfNodePtr node, bool train) {
MS_EXCEPTION_IF_NULL(node);
if (node->isa<CNode>()) {
auto cnode = node->cast<CNodePtr>();

std::string name = kNameCustomOp;
if (!IsCustomCNode(cnode)) {
name = GetCNodeTargetFuncName(cnode);
}

auto it_adpt = OpAdapterMap::get().find(name);
if (it_adpt != OpAdapterMap::get().end()) {
return it_adpt->second->Get(train);
}
MS_LOG(EXCEPTION) << "Can't find OpAdapter for " << name;
}

if (node->isa<ValueNode>()) {
return OpAdapterMap::get()[kNameConst]->Get(train);
}
if (node->isa<Parameter>()) {
return OpAdapterMap::get()[kNameParam]->Get(train);
}
return OpAdapterPtr(nullptr);
}

OpAdapterPtr FindAdapter(const std::string &name, bool train) {
auto it = OpAdapterMap::get().find(name);
if (it != OpAdapterMap::get().end()) {
return it->second->Get(train);
}
MS_LOG(EXCEPTION) << "Can't find OpAdapter for " << name;
}

void EraseGeResource() {
DfGraphManager::GetInstance().DeleteGraphRunner();
DfGraphManager::GetInstance().EraseAnfGraph();
DfGraphManager::GetInstance().DeleteGeSession();
}

void ClearGraphWrapper() { DfGraphManager::GetInstance().ClearGraph(); }

void ClearGeSessionAndRunner() {
DfGraphManager::GetInstance().DeleteGraphRunner();
DfGraphManager::GetInstance().DeleteGeSession();
}

bool IsPartialSuccNode(const AnfNodePtr node) {
MS_EXCEPTION_IF_NULL(node);
if (!node->isa<CNode>()) {
return false;
}
auto cnode = node->cast<CNodePtr>();
if (!cnode->inputs().empty()) {
for (size_t i = 0; i < cnode->inputs().size(); i++) {
if (IsPartialCNode(cnode->input(i))) {
return true;
}
}
}
return false;
}

bool IsPartialCNode(const AnfNodePtr node) {
MS_EXCEPTION_IF_NULL(node);
if (!node->isa<CNode>()) {
return false;
}
auto cnode = node->cast<CNodePtr>();
if (GetCNodeFuncName(cnode) == prim::kPrimPartial->name()) {
return true;
}
return false;
}

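// Detects the frontend's while-loop lowering: a CNode whose first input is a
// Partial carrying a recursive condition graph, where that graph contains a
// Switch whose body and after branches are themselves Partials (see
// kSwitchInputSize, kSwitchBodyIndex, and kSwitchAfterIndex above).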
bool IsWhileNode(const AnfNodePtr &node) {
if (!node->isa<CNode>()) {
return false;
}
if (!IsPartialSuccNode(node)) {
return false;
}
auto cnode = node->cast<CNodePtr>();
if (!IsPartialCNode(cnode->input(0))) {
return false;
}
auto partial_node = cnode->input(0);
MS_EXCEPTION_IF_NULL(partial_node);

auto c_partial_node = partial_node->cast<CNodePtr>();
MS_EXCEPTION_IF_NULL(c_partial_node);

auto graph_node_input = c_partial_node->input(1);
MS_EXCEPTION_IF_NULL(graph_node_input);
auto graph_node = graph_node_input->cast<ValueNodePtr>();
MS_EXCEPTION_IF_NULL(graph_node);
auto graph_node_value = graph_node->value();
MS_EXCEPTION_IF_NULL(graph_node_value);
auto cond_graph = graph_node_value->cast<FuncGraphPtr>();
MS_EXCEPTION_IF_NULL(cond_graph);
if (!cond_graph->recursive()) {
return false;
}
const auto &cond_set = cond_graph->nodes();
for (auto beg = cond_set.begin(); beg != cond_set.end(); beg++) {
if (!((*beg)->isa<CNode>())) {
continue;
}
auto c_beg = (*beg)->cast<CNodePtr>();
if (IsPartialSuccNode(c_beg) && c_beg->inputs().size() == kSwitchInputSize &&
IsPartialCNode(c_beg->input(kSwitchBodyIndex)) && IsPartialCNode(c_beg->input(kSwitchAfterIndex)) &&
GetCNodeFuncName(c_beg) == prim::kPrimSwitch->name()) {
auto func_graph = node->func_graph();
MS_LOG(DEBUG) << "there is while node: " << node->ToString() << " in graph: " << func_graph->ToString();
return true;
}
}
return false;
}

std::string GetCNodeTargetFuncName(const CNodePtr cnode) {
if (IsCaseNode(cnode)) {
return string(kNameCase);
}
if (IsWhileNode(cnode)) {
return string(kNameWhile);
}
auto name = GetCNodeFuncName(cnode);
if (name == "switch_layer") {
name = "";
}
return name;
}

bool IsCaseNode(const CNodePtr node) {
MS_EXCEPTION_IF_NULL(node);
if (!node->inputs().empty() && node->input(0)->isa<CNode>() &&
GetCNodeFuncName(node->input(0)->cast<CNodePtr>()) == "switch_layer") {
return true;
}
return false;
}

std::vector<GeTensorPtr> ConvertInputTensors(const std::vector<MeTensorPtr> &me_tensors, const std::string &format) {
return TransformUtil::ConvertInputTensors(me_tensors, format);
}

std::vector<MeTensorPtr> ConvertGeTensors(const std::vector<GeTensorPtr> &ge_tensors) {
return TransformUtil::ConvertGeTensors(ge_tensors);
}

GeDataType ConvertDataType(const MeDataType &type) { return TransformUtil::ConvertDataType(type); }

MeTensorPtr ConvertGeTensor(GeTensorPtr ge_tensor, const ShapeVector &request_dims) {
return TransformUtil::ConvertGeTensor(ge_tensor, request_dims);
}

MeTensorPtr ConvertGeTensor(const GeTensorPtr &tensor) { return TransformUtil::ConvertGeTensor(tensor); }

MeTensorPtr ConvertGeTensor(const GeTensorPtr &tensor, const TypeId &me_type) {
return TransformUtil::ConvertGeTensor(tensor, me_type);
}

std::shared_ptr<transform::GraphRunner> GetGraphRunner() { return DfGraphManager::GetInstance().GetGraphRunner(); }

std::shared_ptr<ge::Session> GetGeSession() { return DfGraphManager::GetInstance().GetGeSession(); }

void SetGeSession(const std::shared_ptr<ge::Session> &sess_ptr) {
DfGraphManager::GetInstance().SetGeSession(sess_ptr);
}

GraphRunnerPtr NewGraphRunner(const GraphRunnerOptions &options) {
auto graph_runner = std::make_shared<transform::GraphRunner>(options);
return graph_runner;
}

void SetGraphRunner(const GraphRunnerPtr &runner) { DfGraphManager::GetInstance().SetGraphRunner(runner); }
void ClearGraph() { DfGraphManager::GetInstance().ClearGraph(); }
Status AddGraph(const std::string &name, const DfGraphPtr &graph, const OptionMap &options) {
return DfGraphManager::GetInstance().AddGraph(name, graph, options);
}
void SetAnfGraph(const std::string &name, const AnfGraphPtr &anf_graph_ptr) {
DfGraphManager::GetInstance().SetAnfGraph(name, anf_graph_ptr);
}

FuncGraphPtr GetAnfGraph(uint32_t graph_id) { return DfGraphManager::GetInstance().GetAnfGraph(graph_id); }

DfGraphWrapperPtr GetGraphByName(const std::string &name) { return DfGraphManager::GetInstance().GetGraphByName(name); }

// convert

DfGraphConvertorPtr NewConverter(const FuncGraphPtr &graph) {
auto converter = std::make_shared<transform::DfGraphConvertor>(graph);
return converter;
}

void SetTraining(DfGraphConvertorPtr converter, bool training) {
MS_EXCEPTION_IF_NULL(converter);
converter->set_training(training);
}

void BuildGraph(DfGraphConvertorPtr converter, const std::map<std::string, std::shared_ptr<tensor::Tensor>> &maps) {
MS_EXCEPTION_IF_NULL(converter);
(void)converter->ConvertAllNode().InitParam(maps).BuildGraph();
}

void GenerateBroadcastGraph(DfGraphConvertorPtr converter, const TensorOrderMap &tensors) {
MS_EXCEPTION_IF_NULL(converter);
(void)converter->GenerateBroadcastGraph(tensors);
}
void GenerateCheckpointGraph(DfGraphConvertorPtr converter) {
MS_EXCEPTION_IF_NULL(converter);
(void)converter->GenerateCheckpointGraph();
}
int ErrCode(DfGraphConvertorPtr converter) {
MS_EXCEPTION_IF_NULL(converter);
return converter->ErrCode();
}

void DrawComputeGraph(DfGraphConvertorPtr converter, const std::string &name) {
MS_EXCEPTION_IF_NULL(converter);
return converter->DrawComputeGraph(name);
}
void DrawInitGraph(DfGraphConvertorPtr converter, const std::string &name) {
MS_EXCEPTION_IF_NULL(converter);
return converter->DrawInitGraph(name);
}
void DrawSaveCheckpointGraph(DfGraphConvertorPtr converter, const std::string &name) {
MS_EXCEPTION_IF_NULL(converter);
return converter->DrawSaveCheckpointGraph(name);
}

DfGraphPtr GetComputeGraph(DfGraphConvertorPtr converter) {
MS_EXCEPTION_IF_NULL(converter);
return converter->GetComputeGraph();
}
DfGraphPtr GetInitGraph(DfGraphConvertorPtr converter) {
MS_EXCEPTION_IF_NULL(converter);
return converter->GetInitGraph();
}
DfGraphPtr GetSaveCheckpointGraph(DfGraphConvertorPtr converter) {
MS_EXCEPTION_IF_NULL(converter);
return converter->GetSaveCheckpointGraph();
}
DfGraphPtr GetBroadcastGraph(DfGraphConvertorPtr converter) {
MS_EXCEPTION_IF_NULL(converter);
return converter->GetBroadcastGraph();
}

std::shared_ptr<ge::Session> NewSession(const SessionOptions &sess_options) {
return transform::GraphRunner::NewSession(sess_options);
}

Status RunGraph(const std::shared_ptr<transform::GraphRunner> &runner, const RunOptions &options,
const std::vector<GeTensorPtr> &inputs, std::vector<GeTensorPtr> *outputs) {
MS_EXCEPTION_IF_NULL(runner);
return runner->RunGraph(options, inputs, outputs);
}

Status RunGraph(const std::shared_ptr<GraphRunner> &runner, const RunOptions &options,
const std::vector<GeTensorPtr> &inputs, std::vector<MeTensorPtr> *outputs,
const std::vector<TypeId> &me_types) {
MS_EXCEPTION_IF_NULL(runner);
return runner->RunGraph(options, inputs, outputs, me_types);
}

void ClearOpAdapterMap() { transform::OpAdapterMap::get().clear(); }

transform::Status CompileDatasetGraph(const DatasetGraphParam &param, const std::string &phase) {
return BuildDatasetGraph(param, phase);
}
} // namespace transform
} // namespace mindspore

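Taken together, these free functions form a facade over DfGraphConvertor and DfGraphManager, so callers no longer touch the concrete classes. A minimal usage sketch (hedged: func_graph, init_tensors, and the graph name are illustrative placeholders, not part of this commit):

auto converter = transform::NewConverter(func_graph);   // DfGraphConvertorPtr
transform::SetTraining(converter, false);
transform::BuildGraph(converter, init_tensors);         // init_tensors: TensorOrderMap
if (transform::ErrCode(converter) != 0) {
  MS_LOG(ERROR) << "Graph conversion failed";
} else {
  (void)transform::AddGraph("sample_graph", transform::GetComputeGraph(converter), {});
}
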
@@ -13,13 +13,11 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifdef ENABLE_D
#include "include/common/utils/callbacks_ge.h"
#include "pybind11/pybind11.h"
#include "ir/param_info.h"
#include "include/transform/graph_ir/df_graph_manager.h"
#include "include/transform/graph_ir/util.h"
#include "include/transform/graph_ir/utils.h"
#include "pipeline/jit/parse/data_converter.h"
#include "include/common/utils/python_adapter.h"
#include "utils/shape_utils.h"

@@ -34,7 +32,6 @@ const char kCheckPoint[] = "Save";
const int ONE_SHAPE = 1;

using mindspore::transform::Status;
using mindspore::transform::TransformUtil;

bool GetParameterShape(const FuncGraphPtr &graph, const std::string &param_name,
const std::shared_ptr<ShapeVector> &shape) {

@@ -69,7 +66,7 @@ bool GetParameterShape(const FuncGraphPtr &graph, const std::string &param_name,

static TensorPtr GetMeTensorTransformed(uint32_t graph_id, const std::string &parameter_name,
const std::shared_ptr<ge::Tensor> &ge_tensor_ptr) {
FuncGraphPtr anf_graph = transform::DfGraphManager::GetInstance().GetAnfGraph(graph_id);
FuncGraphPtr anf_graph = transform::GetAnfGraph(graph_id);
if (anf_graph == nullptr) {
MS_LOG(ERROR) << "Get anf graph failed during callback";
return nullptr;

@@ -81,7 +78,7 @@ static TensorPtr GetMeTensorTransformed(uint32_t graph_id, const std::string &pa
return nullptr;
}

return TransformUtil::ConvertGeTensor(ge_tensor_ptr, *parameter_shape_ptr);
return transform::ConvertGeTensor(ge_tensor_ptr, *parameter_shape_ptr);
}

uint32_t CheckpointSaveCallback(uint32_t graph_id, const std::map<std::string, ge::Tensor> &params_list) {

@@ -134,19 +131,19 @@ static TensorPtr GetMeTensorForSummary(const std::string &name, const std::share
// Because the GE tensor is dim = 4, reshape (1,1,1,1) --> (1,)
// We treat the (1,) shape as a scalar
auto shape = ShapeVector({ONE_SHAPE});
return TransformUtil::ConvertGeTensor(ge_tensor_ptr, shape);
return transform::ConvertGeTensor(ge_tensor_ptr, shape);
}
if (tname == "[:Tensor]" || tname == "[:Histogram]") {
MS_LOG(DEBUG) << "The summary(" << name << ") is Tensor";
// process the tensor summary
// Now we can't get the real shape, so we keep the same shape as GE
return TransformUtil::ConvertGeTensor(ge_tensor_ptr);
return transform::ConvertGeTensor(ge_tensor_ptr);
}
if (tname == "[:Image]") {
MS_LOG(DEBUG) << "The summary(" << name << ") is Image";
// process the Image summary
// Image dim = 4, same as GE, so we keep the same shape as GE
return TransformUtil::ConvertGeTensor(ge_tensor_ptr);
return transform::ConvertGeTensor(ge_tensor_ptr);
}

MS_LOG(EXCEPTION) << "The summary name(" << name << ") is invalid.";

@@ -99,6 +99,12 @@ if(ENABLE_MINDDATA)
list(REMOVE_ITEM UT_SRCS ${PYTHON_RELATED_SRCS})
endif()

set(REPEATED_DEFINED_FILE
stub/fl/fl_stub.cc
stub/fl/server_stub.cc
stub/ps/ps_core_stub.cc)
list(REMOVE_ITEM UT_SRCS ${REPEATED_DEFINED_FILE})

if(NOT ENABLE_ACL)
set(ASCEND310_RELATED_SRCS
dataset/dvpp_decode_jpeg_test.cc

@@ -0,0 +1,48 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "distributed/cluster/cluster_context.h"

namespace mindspore {
namespace distributed {
namespace cluster {
ClusterContext::ClusterContext()
: inited_(false),
finalized_(true),
cluster_ready_(false),
node_num_each_role_({}),
scheduler_host_(kLocalHost),
scheduler_port_(kDefaultSchedPort),
node_(nullptr),
node_role_(""),
cluster_config_(nullptr) {}

ClusterContext::~ClusterContext() {
if (!finalized_) {
try {
(void)Finalize();
} catch (std::exception &) {
MS_LOG(ERROR) << "Failed to finalize cluster context.";
}
}
finalized_ = true;
node_ = nullptr;
}
bool ClusterContext::Initialize() { return true; }
bool ClusterContext::Finalize(uint32_t timeout) { return true; }
const std::shared_ptr<ps::core::Node> &ClusterContext::node() const { return node_; }
} // namespace cluster
} // namespace distributed
} // namespace mindspore

@@ -0,0 +1,29 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "fl/worker/fl_worker.h"

namespace mindspore {
namespace fl {
namespace worker {
FLWorker &FLWorker::GetInstance() {
static FLWorker instance;
return instance;
}
void FLWorker::Finalize() {}
void FLWorker::Run() {}
} // namespace worker
} // namespace fl
} // namespace mindspore

@@ -0,0 +1,32 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "fl/server/server.h"

namespace mindspore {
namespace fl {
namespace server {
Server &Server::GetInstance() {
static Server instance;
return instance;
}

void Server::Initialize(bool use_tcp, bool use_http, uint16_t http_port, const std::vector<RoundConfig> &rounds_config,
const CipherConfig &cipher_config, const FuncGraphPtr &func_graph, size_t executor_threshold) {}

void Server::Run() {}
} // namespace server
} // namespace fl
} // namespace mindspore

@@ -0,0 +1,50 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "graph/ascend_string.h"
#include "graph/operator.h"
#include "graph/operator_factory.h"
namespace ge {
AscendString::AscendString(char const *name) {}

Operator::Operator(const AscendString &name, const AscendString &type) {}
Operator::Operator(const char *name, const char *type) {}
Operator::Operator(const std::string &type) {}

std::string Operator::GetName() const { return ""; }

void Operator::InputRegister(const std::string &name) {}
void Operator::InputRegister(const char *name) {}

void Operator::OutputRegister(const std::string &name) {}
void Operator::OutputRegister(const char *name) {}

void Operator::OptionalInputRegister(const std::string &name) {}
void Operator::OptionalInputRegister(const char *name) {}

void Operator::DynamicInputRegister(const std::string &name, const uint32_t num, bool is_push_back) {}
void Operator::DynamicInputRegister(const char *name, const uint32_t num, bool is_push_back) {}
void Operator::DynamicOutputRegister(const std::string &name, const uint32_t num, bool is_push_back) {}
void Operator::DynamicOutputRegister(const char *name, const uint32_t num, bool is_push_back) {}

void Operator::AttrRegister(const std::string &name, int64_t attr_value) {}
void Operator::AttrRegister(const char *name, int64_t attr_value) {}
void Operator::RequiredAttrRegister(const std::string &name) {}
void Operator::RequiredAttrRegister(const char *name) {}

OperatorCreatorRegister::OperatorCreatorRegister(const std::string &operator_type, OpCreator const &op_creator) {}
OperatorCreatorRegister::OperatorCreatorRegister(char const *,
std::function<ge::Operator(ge::AscendString const &)> const &) {}
} // namespace ge

@@ -15,7 +15,7 @@
*/

#include "plugin/device/ascend/hal/device/distribute/ascend_collective.h"

#include "distributed/cluster/cluster_context.h"
namespace mindspore {
namespace device {
namespace ascend {

@@ -0,0 +1,24 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "pipeline/jit/action.h"
namespace mindspore {
namespace pipeline {
std::vector<ActionItem> PServerPipeline(const ResourcePtr &resource) { return {}; }
std::vector<ActionItem> PSchedulerPipeline(const ResourcePtr &resource) { return {}; }
std::vector<ActionItem> ServerPipeline(const ResourcePtr &resource) { return {}; }
} // namespace pipeline
} // namespace mindspore

@@ -0,0 +1,90 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "ps/core/follower_scaler.h"
#include "ps/core/abstract_node.h"
#include "ps/core/abstract_ps_node.h"
#include "ps/core/node.h"
#include "ps/core/scheduler_node.h"
#include "ps/core/file_configuration.h"
#include "ps/core/ps_scheduler_node.h"
#include "ps/core/ps_worker_node.h"
#include "ps/ps_cache/ps_data/ps_data_prefetch.h"
namespace mindspore {
namespace ps {
void PsDataPrefetch::CreateDataChannel(const std::string &channel_name, size_t step_num) {}
namespace core {
FollowerScaler::~FollowerScaler() {}

bool AbstractPSNode::InitClientToScheduler() { return true; }
void AbstractPSNode::RegisterInitCollectCommResphandler() {}
void AbstractPSNode::RegisterRecoveryRespHandler() {}

AbstractNode::~AbstractNode() {}
void AbstractNode::Register(const std::shared_ptr<TcpClient> &client) {}
bool AbstractNode::InitClientToScheduler() { return true; }
bool AbstractNode::SendMessageSync(const std::shared_ptr<TcpClient> &client, const std::shared_ptr<MessageMeta> &meta,
const Protos &protos, const void *data, size_t size, const uint32_t &timeout) {
return true;
}
bool AbstractNode::SendMessageSync(const std::shared_ptr<TcpClient> &client, const CommMessage &message,
const uint32_t &timeout) {
return true;
}
void AbstractNode::NotifyMessageArrival(const std::shared_ptr<MessageMeta> &meta) {}

void PSSchedulerNode::RunRecovery() {}
void PSSchedulerNode::RegisterInitCollectCommServiceHandler() {}
void PSSchedulerNode::RegisterRecoveryServiceHandler() {}
void PSSchedulerNode::HandleNodeTimeoutForRecovery(
const std::unordered_map<std::string, NodeInfo> &timeout_nodes_infos) {}
void PSSchedulerNode::HandleNodeRecoverByHeartBeat(uint32_t rank_id) {}
void PSSchedulerNode::RecoverFromPersistence() {}

SchedulerNode::~SchedulerNode() {}
bool SchedulerNode::Start(const uint32_t &timeout) { return true; }
bool SchedulerNode::Stop() { return true; }
bool SchedulerNode::Finish(const uint32_t &timeout) { return true; }
void SchedulerNode::RunRecovery() {}
bool SchedulerNode::SendPrepareBuildingNetwork(const std::unordered_map<std::string, NodeInfo> &node_infos) {
return true;
}
void SchedulerNode::RecordSchedulerRestartInfo() {}
void SchedulerNode::InitEventTxtFile() {}

void PSWorkerNode::Register(const std::shared_ptr<TcpClient> &client) {}
bool PSWorkerNode::Start(const uint32_t &timeout) { return true; }
bool PSWorkerNode::Stop() { return true; }
bool PSWorkerNode::Finish(const uint32_t &timeout) { return true; }

bool FileConfiguration::Initialize() { return true; }
bool FileConfiguration::IsInitialized() const { return true; }
std::string FileConfiguration::Get(const std::string &key, const std::string &defaultvalue) const { return ""; }
std::string FileConfiguration::GetString(const std::string &key, const std::string &defaultvalue) const { return ""; }
std::vector<nlohmann::json> FileConfiguration::GetVector(const std::string &key) const { return {}; }
int64_t FileConfiguration::GetInt(const std::string &key, int64_t default_value) const { return 0; }
void FileConfiguration::Put(const std::string &key, const std::string &value) {}
bool FileConfiguration::Exists(const std::string &key) const { return true; }

void FileConfiguration::PersistFile(const core::ClusterConfig &clusterConfig) const {}

void FileConfiguration::PersistNodes(const core::ClusterConfig &clusterConfig) const {}

std::string FileConfiguration::file_path() const { return ""; }
uint32_t Node::rank_id() const { return 0; }
} // namespace core
} // namespace ps
} // namespace mindspore

@@ -0,0 +1,59 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "ps/ps_cache/ps_cache_manager.h"
#include "ps/util.h"
#include "ps/worker.h"
#include "ps/scheduler.h"
#include "ps/parameter_server.h"

namespace mindspore {
namespace ps {
PsCacheManager &PsCacheManager::GetInstance() {
static PsCacheManager instance{};
return instance;
}

void PsCacheManager::Finalize() {}
int PsCacheManager::cache_indices_lower_bound() const { return 1; }

bool Util::IsRoleOfPServer() { return true; }
bool Util::IsRoleOfScheduler() { return true; }
bool Util::FuseServerCommOps(const pipeline::ResourcePtr &res) { return true; }

Worker &Worker::GetInstance() {
static Worker instance{};
return instance;
}

void Worker::Run() {}
void Worker::Finalize() {}

ParameterServer &ParameterServer::GetInstance() {
static ParameterServer instance{};
return instance;
}

void ParameterServer::Run(const FuncGraphPtr &func_graph) {}

Scheduler &Scheduler::GetInstance() {
static Scheduler instance{};
return instance;
}

void Scheduler::Run() {}
} // namespace ps
} // namespace mindspore

@@ -14,7 +14,7 @@
* limitations under the License.
*/

#include "include/transform/graph_ir/util.h"
#include "include/transform/graph_ir/utils.h"

#include <utility>
#include <map>

@@ -22,9 +22,16 @@
#include "securec/include/securec.h"
#include "include/common/utils/convert_utils.h"
#include "include/common/utils/utils.h"
#include "transform/graph_ir/df_graph_manager.h"
#include "transform/graph_ir/op_adapter_map.h"
#include "transform/graph_ir/op_adapter.h"
#include "transform/graph_ir/op_adapter_desc.h"
#include "transform/graph_ir/op_adapter_util.h"
#include "graph/operator.h"

namespace mindspore {
namespace transform {
namespace {
const size_t kErrorSize = 0;
static std::map<MeDataType, size_t> datatype_size_map = {
{MeDataType::kNumberTypeFloat16, sizeof(float) / 2}, {MeDataType::kNumberTypeFloat32, sizeof(float)}, // 1/2 of float

@@ -34,6 +41,10 @@ static std::map<MeDataType, size_t> datatype_size_map = {
{MeDataType::kNumberTypeUInt16, sizeof(uint16_t)}, {MeDataType::kNumberTypeUInt32, sizeof(uint32_t)},
{MeDataType::kNumberTypeUInt64, sizeof(uint64_t)}, {MeDataType::kNumberTypeBool, sizeof(bool)}};

mindspore::HashMap<std::string, OpAdapterDescPtr> adpt_map_ = {
{kNameCustomOp, std::make_shared<OpAdapterDesc>(std::make_shared<OpAdapter<Operator>>())}};
} // namespace

size_t TransformUtil::GetDataTypeSize(const MeDataType &type) {
if (datatype_size_map.find(type) != datatype_size_map.end()) {
return datatype_size_map[type];

@@ -42,5 +53,40 @@ size_t TransformUtil::GetDataTypeSize(const MeDataType &type) {
return kErrorSize;
}
}

AnfGraphPtr GetAnfGraph(uint32_t graph_id) { return nullptr; }
MeTensorPtr ConvertGeTensor(const GeTensorPtr ge_tensor, const ShapeVector &request_dims) { return nullptr; }
MeTensorPtr ConvertGeTensor(const GeTensorPtr &ge_tensor) { return nullptr; }
MeTensorPtr ConvertGeTensor(const GeTensorPtr &tensor, const TypeId &me_type) { return nullptr; }
OpAdapterPtr FindAdapter(const std::string &op_name, bool train) { return nullptr; }

OperatorPtr OpAdapterImpl::GenerateCustomOp(const AnfNodePtr anf) { return nullptr; }
int OpAdapterImpl::setAttr(const OperatorPtr &op, const std::string &attr_key, const ValuePtr &attr_value) { return 0; }
int OpAdapterImpl::setAttr(const OperatorPtr &op, const PrimitivePtr &prim) { return 0; }
int OpAdapterImpl::setAttr(const OperatorPtr &op, const AnfNodePtr &node) { return 0; }
int OpAdapterImpl::setInput(const OperatorPtr &op, int index, const OutHandler &handle) { return 0; }
int OpAdapterImpl::setInput(const OperatorPtr &op, int index, const OperatorPtr &input) { return 0; }
int OpAdapterImpl::setInput(const OperatorPtr &op, int index,
const std::shared_ptr<std::vector<OutHandler>> &handler_vec) {
return 0;
}
void OpAdapterImpl::updateOutputDesc(const OperatorPtr &op, const abstract::BaseShapePtr &shp, const TypePtr &type,
const AnfNodePtr &node) {}
OutHandler OpAdapterImpl::getOutput(const OperatorPtr &op, int index) {
OutHandler handler;
return handler;
}

Status OpAdapterImpl::SetOpSubgraphFunc(const OperatorPtr &op, int index,
const std::shared_ptr<std::vector<DfGraph>> &branches) {
return SUCCESS;
}

Status OpAdapterImpl::SetOpSubgraphFunc(const OperatorPtr &op, const std::shared_ptr<std::vector<DfGraph>> &subgraphs) {
return SUCCESS;
}

bool IsCustomCNode(const mindspore::AnfNodePtr &node) { return true; }
std::string TransformUtil::NormOpName(const std::string &anf_name) { return ""; }
} // namespace transform
} // namespace mindspore

@@ -20,7 +20,7 @@
#include "common/common_test.h"
#include "pipeline/jit/pipeline.h"
#include "include/common/utils/python_adapter.h"
#include "include/transform/graph_ir/df_graph_manager.h"
#include "mindspore/ccsrc/transform/graph_ir/df_graph_manager.h"
#include "include/common/debug/draw.h"
#ifdef ENABLE_D
#include "include/common/utils/callbacks_ge.h"