forked from mindspore-Ecosystem/mindspore
Optimize the executor routines.
- Fix the key generation. - Distinguish the executors.
This commit is contained in:
parent ba46e7cd56
commit a137fa1d0b
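In brief, the compile key is no longer a (name, counter) tuple returned by generate_key; instead generate_arguments_key maps the converted argument specs to a plain integer, and that integer is appended to the phase string, which then serves directly as the compile-cache key. A minimal Python sketch of the idea (the function name mirrors generate_arguments_key from the diff; the body is a simplified stand-in for illustration, not the real implementation, which builds abstract argument specs on the C++ side):

# Simplified stand-in: the real GenerateArgumentsKey converts each argument to
# an abstract spec and caches one increasing integer key per distinct spec list.
_args_cache = {}

def generate_arguments_key(args):
    """Return a stable integer key for a dict of argument name -> value."""
    spec = tuple(sorted((name, type(value).__name__) for name, value in args.items()))
    if spec not in _args_cache:
        _args_cache[spec] = len(_args_cache)  # first time this argument set is seen
    return _args_cache[spec]

def build_phase(generate_name, args):
    # The phase now embeds the key as a suffix and is itself the cache key.
    return generate_name + '.' + str(generate_arguments_key(args))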
@@ -30,7 +30,7 @@ from mindspore import context
from mindspore import log as logger
from mindspore import nn
from mindspore import ops
from mindspore.common.api import _MindSporeFunction
from mindspore.common.api import _MindsporeFunctionExecutor
from mindspore.common.dtype import pytype_to_dtype
from .namespace import CellNamespace, ClosureNamespace, ClassMemberNamespace
from .resources import parse_object_map, convert_object_map, trope_ns, SYMBOL_UNDEFINE, NO_IMPLEMENT
@@ -177,8 +177,8 @@ def resolve_symbol(namespace, symbol):
logger.debug("resolve exception occurred, value = %r", e)
logger.debug("resolve type is invalid, namespace = %s, symbol = %s",
namespace.__str__(), symbol)
if isinstance(resolve_, _MindSporeFunction):
logger.debug("resolve class _MindSporeFunction, resolve fn instead.")
if isinstance(resolve_, _MindsporeFunctionExecutor):
logger.debug("resolve class _MindsporeFunctionExecutor, resolve fn instead.")
resolve_ = resolve_.fn
return resolve_

@@ -342,7 +342,7 @@ void Debugger::PreExecute(const KernelGraphPtr &graph_ptr) {
// Multiple graph, and not the initial step,
// stop only when receive the first sub run graph for each step
// if we have stopped for the last kernel before, no need to stop again
if (pipeline::ExecutorPy::GetDebugTerminate()) {
if (pipeline::GraphExecutorPy::GetDebugTerminate()) {
return;
}
if (!(run_level_ == "node" && suspended_at_last_kernel_)) {

@@ -449,7 +449,7 @@ void Debugger::PostExecuteGraphDebugger() {
void Debugger::PostExecute() {
// access lock for public method
std::lock_guard<std::mutex> a_lock(access_lock_);
if (pipeline::ExecutorPy::GetDebugTerminate()) {
if (pipeline::GraphExecutorPy::GetDebugTerminate()) {
return;
}
if (debugger_->DebuggerBackendEnabled()) {

@@ -486,7 +486,7 @@ bool Debugger::ReadNodeDataRequired(const CNodePtr &kernel) const {
void Debugger::PostExecuteNode(const CNodePtr &kernel, bool last_kernel) {
// access lock for public method
std::lock_guard<std::mutex> a_lock(access_lock_);
if (pipeline::ExecutorPy::GetDebugTerminate()) {
if (pipeline::GraphExecutorPy::GetDebugTerminate()) {
return;
}
if (debugger_enabled_ && !is_dataset_graph_) {

@@ -1074,7 +1074,7 @@ void Debugger::Exit() {
// debugger will notify main thread to exit because main thread can only exit at step boundary.
MS_LOG(INFO) << "Exit Debugger";
SetEnableHeartbeat(false);
pipeline::ExecutorPy::DebugTerminate(true);
pipeline::GraphExecutorPy::DebugTerminate(true);
}

std::list<WatchpointHit> Debugger::CheckWatchpoints(const std::string &watchnode, const CNodePtr &kernel,
@@ -201,7 +201,7 @@ void ReflectParamBackToPython(const AnfNodePtr &param, const string &param_name,
// 1. Get current cell object
auto ppm = opt::python_pass::PyPassManager::GetInstance();
auto resource = ppm->GetResource();
py::object top_cell = resource->input();
py::object top_cell = resource->source_input();
if (py::isinstance<py::none>(top_cell)) {
MS_LOG(EXCEPTION) << "Failed to get top cell from resource.";
}

@@ -70,7 +70,7 @@ bool GetLoopIndexFromCNode(const CNodePtr &cnode, size_t *loop_index) {
}

void SetOpsNumToExecutor(size_t num_ops) {
auto executor = pipeline::ExecutorPy::GetInstance();
auto executor = pipeline::GraphExecutorPy::GetInstance();
executor->SetNumOpsInfo(num_ops);
}
} // namespace parallel

@@ -2952,7 +2952,7 @@ static AnfNodePtr FindGrad(const CNodePtr &cnode, size_t curr_depth) {
void HandleRootReshapeAndSaveStrategy(const std::vector<AnfNodePtr> &all_nodes) {
// If root graph has reshape op. Find the corresponding parameter.
// Reshape's shape is the shape of the parameter.
auto executor = pipeline::ExecutorPy::GetInstance();
auto executor = pipeline::GraphExecutorPy::GetInstance();
for (auto &node : all_nodes) {
if (!node->isa<CNode>()) {
continue;
@@ -319,11 +319,11 @@ void CheckRootInputShapeAndType(const ResourcePtr &res, const FuncGraphPtr &load
bool ParseAction(const ResourcePtr &res) {
MS_EXCEPTION_IF_NULL(res);
if (!res->input()) {
if (!res->source_input()) {
MS_LOG(EXCEPTION) << "Parse error";
}

py::object input = res->input();
py::object input = res->source_input();
parse::Parser::InitParserEnvironment(input);
py::module path = py::module::import("os.path");
std::string dir = path.attr("dirname")(py::globals()["__file__"]).cast<std::string>();

@@ -642,7 +642,7 @@ bool EliminateForwardCNode(const ResourcePtr &res) {
return true;
}

auto graph_executor = pipeline::ExecutorPy::GetInstance();
auto graph_executor = pipeline::GraphExecutorPy::GetInstance();
MS_EXCEPTION_IF_NULL(graph_executor);
auto phase = graph_executor->phase();
MS_LOG(DEBUG) << "The phase of current pipeline graph is: " << phase;

@@ -913,7 +913,7 @@ bool ValidateAction(const ResourcePtr &res) { return ValidatePass(res); }
bool SetMindIRGraphAction(const ResourcePtr &res) {
MS_EXCEPTION_IF_NULL(res);
res->set_is_load(true);
auto cell = py::cast<CellPtr>(res->input());
auto cell = py::cast<CellPtr>(res->source_input());
if (cell == nullptr) {
MS_LOG(EXCEPTION) << "The graph loaded from mindir is null.";
}
@@ -44,7 +44,7 @@
namespace py = pybind11;

using EnvInstance = mindspore::EnvInstance;
using ExecutorPy = mindspore::pipeline::ExecutorPy;
using GraphExecutorPy = mindspore::pipeline::GraphExecutorPy;
using Pipeline = mindspore::pipeline::Pipeline;
using PrimitivePy = mindspore::PrimitivePy;
using MetaFuncGraph = mindspore::MetaFuncGraph;
@@ -70,40 +70,41 @@ PYBIND11_MODULE(_c_expression, m) {
mindspore::ScopedLongRunning::SetHook(std::make_unique<mindspore::GilScopedLongRunningHook>());

// Class Pipeline interface
(void)py::class_<ExecutorPy, std::shared_ptr<ExecutorPy>>(m, "Executor_")
.def_static("get_instance", &ExecutorPy::GetInstance, "Executor get_instance.")
.def("__call__", &ExecutorPy::Run, py::arg("args"), py::arg("phase") = py::str(""), "Executor run function.")
.def("del_net_res", &ExecutorPy::DelNetRes, py::arg("network_id") = py::str(""), "Delete network resource.")
.def("get_func_graph", &ExecutorPy::GetFuncGraph, py::arg("phase") = py::str(""), "Get graph pointer.")
.def("get_func_graph_proto", &ExecutorPy::GetFuncGraphProto, py::arg("phase") = py::str(""),
(void)py::class_<GraphExecutorPy, std::shared_ptr<GraphExecutorPy>>(m, "GraphExecutor_")
.def_static("get_instance", &GraphExecutorPy::GetInstance, "Executor get_instance.")
.def("__call__", &GraphExecutorPy::Run, py::arg("args"), py::arg("phase") = py::str(""), "Executor run function.")
.def("del_net_res", &GraphExecutorPy::DelNetRes, py::arg("network_id") = py::str(""), "Delete network resource.")
.def("get_func_graph", &GraphExecutorPy::GetFuncGraph, py::arg("phase") = py::str(""), "Get graph pointer.")
.def("get_func_graph_proto", &GraphExecutorPy::GetFuncGraphProto, py::arg("phase") = py::str(""),
py::arg("type") = py::str("onnx_ir"), "Get graph proto string by specifying ir type.")
.def("compile", &ExecutorPy::Compile, py::arg("obj"), py::arg("args"), py::arg("phase") = py::str(""),
.def("compile", &GraphExecutorPy::Compile, py::arg("obj"), py::arg("args"), py::arg("phase") = py::str(""),
py::arg("use_vm") = py::bool_(false), py::arg("queue_name"), "Compile obj by executor.")
.def("updata_param_node_default_input", &ExecutorPy::UpdataParamNodeDefaultInput, py::arg("phase"),
.def("updata_param_node_default_input", &GraphExecutorPy::UpdataParamNodeDefaultInput, py::arg("phase"),
py::arg("params"), "Fetch the inputs of Conv or Matmul for quant export.")
.def("get_parameter_layout", &ExecutorPy::GetParameterLayout, py::arg("phase") = py::str("train"),
.def("get_parameter_layout", &GraphExecutorPy::GetParameterLayout, py::arg("phase") = py::str("train"),
"Get Parameter Tensor Layout Dictionary.")
.def("get_parallel_parameter_name_list", &ExecutorPy::GetParallelParameterNameList,
.def("get_parallel_parameter_name_list", &GraphExecutorPy::GetParallelParameterNameList,
py::arg("phase") = py::str("train"), "Get Parallel Parameter Name List.")
.def("get_strategy", &ExecutorPy::GetCNodeStrategy, py::arg("phase") = py::str("train"),
.def("get_strategy", &GraphExecutorPy::GetCNodeStrategy, py::arg("phase") = py::str("train"),
"Get CNode Strategy Dictionary.")
.def("get_num_parallel_ops", &ExecutorPy::GetNumOpsInfo, py::arg("phase") = py::str("train"),
.def("get_num_parallel_ops", &GraphExecutorPy::GetNumOpsInfo, py::arg("phase") = py::str("train"),
"Get the number of parallel operators.")
.def("get_allreduce_fusion", &ExecutorPy::GetAllreduceFusion, py::arg("phase") = py::str("train"),
.def("get_allreduce_fusion", &GraphExecutorPy::GetAllreduceFusion, py::arg("phase") = py::str("train"),
"Get Allreduce Fusion Dictionary.")
.def("fetch_info_for_quant_export", &ExecutorPy::FetchInfoForQuantExport, py::arg("phase") = py::str("train"),
.def("fetch_info_for_quant_export", &GraphExecutorPy::FetchInfoForQuantExport, py::arg("phase") = py::str("train"),
"Fetch the inputs of Conv or Matmul for quant export.")
.def("build_data_graph", &ExecutorPy::BuildGraph, py::arg("build_params"), py::arg("phase") = py::str("train"),
.def("build_data_graph", &GraphExecutorPy::BuildGraph, py::arg("build_params"), py::arg("phase") = py::str("train"),
py::arg("broadcast_params") = py::dict(), "Build data graph.")
.def("has_compiled", &ExecutorPy::HasCompiled, py::arg("phase") = py::str(""), "get if cell compiled.")
.def("run_init_graph", &ExecutorPy::RunInitGraph, "Run init Graph.")
.def("set_py_exe_path", &ExecutorPy::PyExePath, py::arg("py_exe_path") = py::str(""), "set python executable path.")
.def("set_kernel_build_server_dir", &ExecutorPy::KernelBuildServerDir,
py::arg("kernel_build_server_dir") = py::str(""), "set kernel build server directory path.");
.def("has_compiled", &GraphExecutorPy::HasCompiled, py::arg("phase") = py::str(""), "Get if cell compiled.")
.def("run_init_graph", &GraphExecutorPy::RunInitGraph, "Run init Graph.")
.def("set_py_exe_path", &GraphExecutorPy::PyExePath, py::arg("py_exe_path") = py::str(""),
"Set python executable path.")
.def("set_kernel_build_server_dir", &GraphExecutorPy::KernelBuildServerDir,
py::arg("kernel_build_server_dir") = py::str(""), "Set kernel build server directory path.");

(void)py::class_<EnvInstance, std::shared_ptr<EnvInstance>>(m, "EnvInstance_").def(py::init());

(void)m.def("generate_key", &mindspore::pipeline::GenerateKey, "Generate the function graph key.");
(void)m.def("generate_arguments_key", &mindspore::pipeline::GenerateArgumentsKey, "Generate unique key of argument.");
(void)m.def("real_run_op", &mindspore::pynative::RealRunOp, "Run op pynatively.");
(void)m.def("reset_op_id", &mindspore::pipeline::ResetOpId, "Reset Operator Id");
(void)m.def("init_hccl", &mindspore::pipeline::InitHccl, "Init Hccl");
@@ -99,11 +99,11 @@ const char IR_TYPE_ANF[] = "anf_ir";
const char IR_TYPE_ONNX[] = "onnx_ir";
const char IR_TYPE_MINDIR[] = "mind_ir";

ExecutorPyPtr ExecutorPy::executor_ = nullptr;
std::mutex ExecutorPy::instance_lock_;
bool ExecutorPy::debugger_terminate_ = false;
GraphExecutorPyPtr GraphExecutorPy::executor_ = nullptr;
std::mutex GraphExecutorPy::instance_lock_;
bool GraphExecutorPy::debugger_terminate_ = false;

std::unordered_map<abstract::AbstractBasePtrList, int64_t, abstract::AbstractBasePtrListHasher,
std::unordered_map<abstract::AbstractBasePtrList, uint64_t, abstract::AbstractBasePtrListHasher,
abstract::AbstractBasePtrListEqual>
g_args_cache;
@@ -236,30 +236,33 @@ void CheckArgsValid(const py::tuple &args) {
}
}

py::tuple GenerateKey(const std::string &name, const std::unordered_map<std::string, py::object> &defaults) {
MS_LOG(DEBUG) << "GenerateKey args size:" << defaults.size();
py::object GenerateArgumentsKey(const std::unordered_map<std::string, py::object> &args) {
MS_LOG(DEBUG) << "GenerateArgumentsKey args size:" << args.size();
abstract::AbstractBasePtrList args_spec;

for (const auto &arg : defaults) {
for (const auto &arg : args) {
if (py::isinstance<py::module>(arg.second)) {
MS_LOG(EXCEPTION) << "GenerateKey failed, argument input should not be py::module";
MS_LOG(EXCEPTION) << "GenerateArgumentsKey failed, argument input should not be py::module";
}
ValuePtr converted = nullptr;
if (!parse::ConvertData(arg.second, &converted)) {
MS_LOG(EXCEPTION) << "GenerateKey convert arg failed";
MS_LOG(EXCEPTION) << "GenerateArgumentsKey convert arg failed";
}
args_spec.push_back(ArgsToAbstract(converted));
}
if (g_args_cache.count(args_spec) == 0) {
static int64_t key = 0;
MS_LOG(INFO) << "Start new args and compile key:" << key;
g_args_cache[args_spec] = key++;

uint64_t key;
auto iter = g_args_cache.find(args_spec);
if (iter == g_args_cache.end()) {
static uint64_t key_counter = 0;
key = key_counter;
++key_counter;
g_args_cache[args_spec] = key;
MS_LOG(INFO) << "Generate a new compile key for new args, key: " << key;
} else {
key = iter->second;
}
constexpr size_t arg_size = 2;
auto argSpec = py::tuple(arg_size);
argSpec[0] = name;
argSpec[1] = g_args_cache[args_spec];
return argSpec;
return py::int_(key);
}

py::bool_ VerifyInputSignature(const py::list &input_signature, const py::tuple &inputs) {
@@ -300,9 +303,9 @@ py::bool_ VerifyInputSignature(const py::list &input_signature, const py::tuple
return true;
}

ExecutorPy::ExecutorPy() {}
GraphExecutorPy::GraphExecutorPy() {}

ResourcePtr ExecutorPy::GetResource(const std::string &phase) {
ResourcePtr GraphExecutorPy::GetResource(const std::string &phase) {
MS_LOG(DEBUG) << "Phase size:" << info_.size();
if (info_.count(phase) == 0) {
return nullptr;

@@ -310,14 +313,14 @@ ResourcePtr ExecutorPy::GetResource(const std::string &phase) {
return info_[phase]->resource;
}

FuncGraphPtr ExecutorPy::GetFuncGraph(const std::string &phase) {
FuncGraphPtr GraphExecutorPy::GetFuncGraph(const std::string &phase) {
if (info_.count(phase) == 0) {
MS_LOG(EXCEPTION) << "No phase in executor:" << GetPhasePrefix(phase);
MS_LOG(EXCEPTION) << "No executor info. found for phase: " << phase;
}
return info_[phase]->func_graph;
}

FuncGraphPtr ExecutorPy::GetGradGraph(const std::string &phase) {
FuncGraphPtr GraphExecutorPy::GetGradGraph(const std::string &phase) {
if (phase.empty()) {
MS_LOG(EXCEPTION) << "The input phase is empty.";
}

@@ -332,7 +335,7 @@ FuncGraphPtr ExecutorPy::GetGradGraph(const std::string &phase) {
return grad_graph;
}

void ExecutorPy::SetGradGraph(const FuncGraphPtr &grad_graph, const std::string &phase) {
void GraphExecutorPy::SetGradGraph(const FuncGraphPtr &grad_graph, const std::string &phase) {
if (phase.empty()) {
MS_LOG(EXCEPTION) << "The input phase is empty.";
}

@@ -349,7 +352,7 @@ void ExecutorPy::SetGradGraph(const FuncGraphPtr &grad_graph, const std::string
execute_info->grad_graph = grad_graph;
}

compile::VmEvalFuncPtr ExecutorPy::GetVmEvalFunc(const std::string &phase) {
compile::VmEvalFuncPtr GraphExecutorPy::GetVmEvalFunc(const std::string &phase) {
ResourcePtr res = GetResource(phase);
MS_EXCEPTION_IF_NULL(res);
if (res->results().find(kOutput) != res->results().end() && res->results()[kOutput].is<compile::VmEvalFuncPtr>()) {

@@ -359,14 +362,14 @@ compile::VmEvalFuncPtr ExecutorPy::GetVmEvalFunc(const std::string &phase) {
return nullptr;
}

bool ExecutorPy::HasCompiled(const std::string &phase) const {
bool GraphExecutorPy::HasCompiled(const std::string &phase) const {
if (info_.count(phase) == 0) {
return false;
}
return true;
}

py::bytes ExecutorPy::GetFuncGraphProto(const std::string &phase, const std::string &ir_type) {
py::bytes GraphExecutorPy::GetFuncGraphProto(const std::string &phase, const std::string &ir_type) {
FuncGraphPtr fg_ptr = GetFuncGraph(phase);
if (fg_ptr == nullptr) {
for (auto &item : info_) {
@@ -402,40 +405,40 @@ py::bytes ExecutorPy::GetFuncGraphProto(const std::string &phase, const std::str
MS_LOG(EXCEPTION) << "Unknown ir type: " << ir_type;
}

py::dict ExecutorPy::GetParameterLayout(const std::string &phase) {
py::dict GraphExecutorPy::GetParameterLayout(const std::string &phase) {
MS_LOG(DEBUG) << "GetParameterLayout!";
std::string layout_graph = phase + kStepParallelGraph;
auto graph = GetFuncGraph(layout_graph);
return mindspore::parallel::GetParameterLayout(graph);
}

py::dict ExecutorPy::GetCNodeStrategy(const std::string &phase) {
py::dict GraphExecutorPy::GetCNodeStrategy(const std::string &phase) {
MS_LOG(DEBUG) << "GetCNodeStrategy!";
return stra_dict_[phase];
}

py::list ExecutorPy::GetParallelParameterNameList(const std::string &phase) {
py::list GraphExecutorPy::GetParallelParameterNameList(const std::string &phase) {
std::string param_graph = phase + kStepParallelGraph;
auto graph = GetFuncGraph(param_graph);
return mindspore::parallel::GetParallelParameterNameList(graph);
}

void ExecutorPy::SetCNodeStrategy(const std::string &name, const parallel::Strategys &strategy) {
void GraphExecutorPy::SetCNodeStrategy(const std::string &name, const parallel::Strategys &strategy) {
MS_LOG(DEBUG) << "SetCNodeStrategy!";
stra_dict_[phase_][py::str(name)] = strategy;
}

size_t ExecutorPy::GetNumOpsInfo(const std::string &phase) {
size_t GraphExecutorPy::GetNumOpsInfo(const std::string &phase) {
MS_LOG(DEBUG) << "GetNumOpsInfo!";
return phase_to_num_op_info_[phase];
}

void ExecutorPy::SetNumOpsInfo(size_t num_ops) {
void GraphExecutorPy::SetNumOpsInfo(size_t num_ops) {
MS_LOG(DEBUG) << "SetNumOpsInfo!";
phase_to_num_op_info_[phase_] = num_ops;
}

py::dict ExecutorPy::GetAllreduceFusion(const std::string &phase) {
py::dict GraphExecutorPy::GetAllreduceFusion(const std::string &phase) {
MS_LOG(INFO) << "GetAllreduceFusion!";
auto graph = GetFuncGraph(phase);
return mindspore::parallel::GetAllreduceFusion(graph);
@@ -443,7 +446,7 @@ py::dict ExecutorPy::GetAllreduceFusion(const std::string &phase) {
// Not support multi thread, not support nested call too.
// Here using nested_called flg to avoid nested call.
void ExecutorPy::DelNetRes(const std::string &id) {
void GraphExecutorPy::DelNetRes(const std::string &id) {
static bool nested_called = false;
if (nested_called) {
return;

@@ -479,17 +482,18 @@ void ExecutorPy::DelNetRes(const std::string &id) {
nested_called = false;
}

void ExecutorPy::ClearRes() {
void GraphExecutorPy::ClearRes() {
MS_LOG(INFO) << "Clean executor resource!";
executor_ = nullptr;
}

ExecutorPy::~ExecutorPy() {
GraphExecutorPy::~GraphExecutorPy() {
MS_LOG(INFO) << "Release Executor!";
ConfigManager::GetInstance().ResetConfig();
}

void ExecutorPy::GetWeightInfo(const CNodePtr &root_node, const AnfNodePtr &weight_node,
void GraphExecutorPy::GetWeightInfo(
const CNodePtr &root_node, const AnfNodePtr &weight_node,
std::map<std::string, std::pair<PrimitivePyAdapterPtr, std::string>> *fake_quant_table) {
MS_EXCEPTION_IF_NULL(root_node);
MS_EXCEPTION_IF_NULL(fake_quant_table);

@@ -557,11 +561,11 @@ void ExecutorPy::GetWeightInfo(const CNodePtr &root_node, const AnfNodePtr &weig
(*fake_quant_table)[weight_name] = std::make_pair(quant_op->adapter(), fakequant_min_node_name);
}

std::map<std::string, std::pair<PrimitivePyAdapterPtr, std::string>> ExecutorPy::FetchInfoForQuantExport(
const std::string &phase_s) {
FuncGraphPtr func_graph = info_[phase_s]->resource->func_graph();
std::map<std::string, std::pair<PrimitivePyAdapterPtr, std::string>> GraphExecutorPy::FetchInfoForQuantExport(
const std::string &phase) {
FuncGraphPtr func_graph = info_[phase]->resource->func_graph();
MS_EXCEPTION_IF_NULL(func_graph);
MS_LOG(DEBUG) << "FetchInfoForQuantExport func graph(" << func_graph->ToString() << ") phase(" << phase_s << ")!";
MS_LOG(DEBUG) << "FetchInfoForQuantExport func graph(" << func_graph->ToString() << ") phase(" << phase << ")!";
std::map<std::string, std::pair<PrimitivePyAdapterPtr, std::string>> fake_quant_table;
auto filter = [](const AnfNodePtr &node) {
return !(IsPrimitiveCNode(node, prim::kPrimConv2D) || IsPrimitiveCNode(node, prim::kPrimMatMul) ||
@@ -605,21 +609,21 @@ std::map<std::string, std::pair<PrimitivePyAdapterPtr, std::string>> ExecutorPy:
return fake_quant_table;
}

void ExecutorPy::SaveCompiledGraph(const std::string &phase_s) {
// save the graph to ExecutorPy
FuncGraphPtr func_graph = info_[phase_s]->resource->func_graph();
void GraphExecutorPy::SaveCompiledGraph(const std::string &phase) {
// save the graph to GraphExecutorPy
FuncGraphPtr func_graph = info_[phase]->resource->func_graph();
MS_EXCEPTION_IF_NULL(func_graph);
MS_EXCEPTION_IF_NULL(parallel::ParallelContext::GetInstance());
std::string parallel_mode = parallel::ParallelContext::GetInstance()->parallel_mode();

MS_LOG(INFO) << "Save compiled func graph(" << func_graph->ToString() << ") phase(" << phase_s << ")!";
info_[phase_s]->func_graph = func_graph;
MS_LOG(INFO) << "Save compiled func graph(" << func_graph->ToString() << ") phase(" << phase << ")!";
info_[phase]->func_graph = func_graph;
if ((func_graph != nullptr) && func_graph->has_flag(parallel::AUTO_PARALLEL) &&
((parallel_mode == parallel::AUTO_PARALLEL) || (parallel_mode == parallel::SEMI_AUTO_PARALLEL))) {
MS_LOG(DEBUG) << "Save model parallel parameter layout graph!";
func_graph = info_[phase_s]->resource->results()[kStepParallelGraph].cast<FuncGraphPtr>();
func_graph = info_[phase]->resource->results()[kStepParallelGraph].cast<FuncGraphPtr>();
ExecutorInfoPtr executor_info = std::make_shared<ExecutorInfo>();
std::string layout_graph = phase_s + kStepParallelGraph;
std::string layout_graph = phase + kStepParallelGraph;
executor_info->func_graph = func_graph;
info_[layout_graph] = executor_info;
} else {

@@ -628,7 +632,7 @@ void ExecutorPy::SaveCompiledGraph(const std::string &phase_s) {
MS_LOG(INFO) << "End save compiled func graph!";
}

void ExecutorPy::GetGeBackendPolicy() const {
void GraphExecutorPy::GetGeBackendPolicy() const {
auto ms_context = MsContext::GetInstance();
MS_EXCEPTION_IF_NULL(ms_context);
std::string backend = ms_context->backend_policy();
@@ -637,24 +641,24 @@ void ExecutorPy::GetGeBackendPolicy() const {
}
}

bool IsPhaseExportAir(const std::string &phase_s) {
bool IsPhaseExportAir(const std::string &phase) {
auto phase_to_export = "export.air";
return phase_s.rfind(phase_to_export) != std::string::npos;
return phase.rfind(phase_to_export) != std::string::npos;
}

bool IsPhaseTrain(const std::string &phase_s) {
bool IsPhaseTrain(const std::string &phase) {
const std::string phase_to_train = "train";
return phase_s.rfind(phase_to_train) != std::string::npos;
return phase.rfind(phase_to_train) != std::string::npos;
}

bool IsPhaseLoadFromMindIR(const std::string &phase_s) {
bool IsPhaseLoadFromMindIR(const std::string &phase) {
const std::string mindir_graph = "graph_load_from_mindir";
return phase_s.rfind(mindir_graph) != std::string::npos;
return phase.rfind(mindir_graph) != std::string::npos;
}

std::vector<ActionItem> GetPipeline(const ResourcePtr &resource, const std::string &phase_s, bool use_vm) {
std::vector<ActionItem> GetPipeline(const ResourcePtr &resource, const std::string &phase, bool use_vm) {
MS_EXCEPTION_IF_NULL(resource);
bool is_air = IsPhaseExportAir(phase_s);
bool is_air = IsPhaseExportAir(phase);

std::string backend = MsContext::GetInstance()->backend_policy();

@@ -682,11 +686,11 @@ std::vector<ActionItem> GetPipeline(const ResourcePtr &resource, const std::stri
resource->results()[kBackend] = backend_ptr;
// If the 'use_frontend_compile_cache' context has been set true and the cache is read successfully,
// do the backend actions only.
if (IsPhaseTrain(phase_s) && MsContext::GetInstance()->get_param<bool>(MS_CTX_LOAD_COMPILE_CACHE) &&
if (IsPhaseTrain(phase) && MsContext::GetInstance()->get_param<bool>(MS_CTX_LOAD_COMPILE_CACHE) &&
resource->func_graph() != nullptr) {
return BackendPipeline();
}
if (IsPhaseLoadFromMindIR(phase_s)) {
if (IsPhaseLoadFromMindIR(phase)) {
return MindIRPipeline();
}
return VmPipeline();
@@ -694,28 +698,30 @@ std::vector<ActionItem> GetPipeline(const ResourcePtr &resource, const std::stri
return GePipeline();
}

bool ExecutorPy::CompileInner(const py::object &obj, const py::tuple &args, const py::object &phase, bool use_vm,
const std::string &queue_name) {
MS_LOG(DEBUG) << "Start ExecutorPy compile!";
if ((!py::isinstance<py::str>(phase))) {
MS_LOG(ERROR) << "Arg phase must be string.";
bool GraphExecutorPy::CompileInner(const py::object &source_obj, const py::tuple &args, const py::object &phase_obj,
bool use_vm, const std::string &queue_name) {
// Check if the phase is valid.
if ((!py::isinstance<py::str>(phase_obj))) {
MS_LOG(ERROR) << "The `phase` must be string.";
return false;
}
// check the function or net is valid
if (py::isinstance<py::none>(obj)) {
MS_LOG(ERROR) << "Find error: parse obj is None.";
// Check if the function or net is valid.
if (py::isinstance<py::none>(source_obj)) {
MS_LOG(ERROR) << "The source object to compile should not be None.";
return false;
}
// check the args of function or net is valid
// Check if the args of function or net is valid.
CheckArgsValid(args);

auto phase = py::cast<std::string>(phase_obj);
MS_LOG(INFO) << "Start compiling, phase: " << phase << ", source: {" << py::str(source_obj) << "}";
MS_LOG(DEBUG) << "args: " << py::str(const_cast<py::tuple &>(args));

#ifdef ENABLE_GE
GetGeBackendPolicy();
#endif
ExecutorInfoPtr executor_info = std::make_shared<ExecutorInfo>();
auto phase_s = py::cast<std::string>(phase);
phase_ = phase_s;
MS_LOG(INFO) << "ExecutorPy compile phase:" << phase_s << "!";
ResourcePtr resource = std::make_shared<Resource>(obj);
ResourcePtr resource = std::make_shared<Resource>(source_obj);

if (MsContext::GetInstance()->get_param<bool>(MS_CTX_LOAD_COMPILE_CACHE)) {
#ifdef ENABLE_PROFILE
@@ -728,41 +734,42 @@ bool ExecutorPy::CompileInner(const py::object &obj, const py::tuple &args, cons
#endif
}

auto p_actions = GetPipeline(resource, phase_s, use_vm);
std::shared_ptr<Pipeline> pip = std::make_shared<Pipeline>(resource, FilterActions(p_actions, phase_s));
phase_ = phase;
auto actions = GetPipeline(resource, phase, use_vm);
std::shared_ptr<Pipeline> pip = std::make_shared<Pipeline>(resource, FilterActions(actions, phase));

// get the parameters items and add the value to args_spec
// Get the parameters items and add the value to args_spec.
abstract::AbstractBasePtrList args_spec;
std::size_t size = args.size();
for (std::size_t i = 0; i < size; i++) {
ValuePtr converted = nullptr;
bool succ = parse::ConvertData(args[i], &converted);
if (!succ) {
MS_LOG(EXCEPTION) << "Args convert error";
MS_LOG(EXCEPTION) << "Fail to convert the " << i << "th argument, args[" << i << "]: " << py::str(args[i]);
}
args_spec.push_back(ArgsToAbstract(converted));
}

resource->set_args_spec(args_spec);
executor_info->arg_list_size = size;
executor_info->resource = resource;
info_[phase_s] = executor_info;
pip->Run(phase_s);
info_[phase] = executor_info;
pip->Run(phase);

// save the run graph func to MsPipeLine
SaveCompiledGraph(phase_s);
// Save the compiled graph to MsPipeLine.
SaveCompiledGraph(phase);

opt::python_pass::PyPassManager::GetInstance()->ClearPipelineRes();
abstract::AnalysisContext::ClearContext();
// Reclaim all resource used by optimizer;
// Reclaim all resource used by optimizer.
ReclaimOptimizer();
resource->Clean();

MS_LOG(INFO) << "End ExecutorPy compile!";
MS_LOG(INFO) << "Finish compiling.";
return true;
}

std::vector<ActionItem> ExecutorPy::FilterActions(const std::vector<ActionItem> &actions, const std::string &phase) {
std::vector<ActionItem> GraphExecutorPy::FilterActions(const std::vector<ActionItem> &actions,
const std::string &phase) {
// filter action after validate when 'export'.
if (GetPhasePrefix(phase).rfind("export", 0) == std::string::npos) {
return actions;
@@ -778,7 +785,7 @@ std::vector<ActionItem> ExecutorPy::FilterActions(const std::vector<ActionItem>
return filtered_actions;
}

void ExecutorPy::ReleaseResource(const py::object &phase) {
void GraphExecutorPy::ReleaseResource(const py::object &phase) {
ResourcePtr res = GetResource(py::cast<std::string>(phase));
if (res != nullptr) {
res->Clean();

@@ -787,17 +794,11 @@ void ExecutorPy::ReleaseResource(const py::object &phase) {
ReclaimOptimizer();
}

static std::string PrintArgs(const py::tuple &args) {
py::print(args);
return "";
}

bool ExecutorPy::Compile(const py::object &obj, const py::tuple &args, const py::object &phase, bool use_vm,
bool GraphExecutorPy::Compile(const py::object &source_obj, const py::tuple &args, const py::object &phase, bool use_vm,
const std::string &queue_name) {
bool ret_value = false;
try {
MS_LOG(DEBUG) << PrintArgs(args);
ret_value = CompileInner(obj, args, phase, use_vm, queue_name);
ret_value = CompileInner(source_obj, args, phase, use_vm, queue_name);
} catch (const py::error_already_set &ex) {
if (!StaticAnalysisException::Instance().HasException()) {
// print function call stack info before release

@@ -840,8 +841,8 @@ bool ExecutorPy::Compile(const py::object &obj, const py::tuple &args, const py:
return ret_value;
}

void CacheValidateFuncGraph(const std::string &phase_s, const ResourcePtr &resource) {
if (IsPhaseTrain(phase_s) && MsContext::GetInstance()->get_param<bool>(MS_CTX_SAVE_COMPILE_CACHE)) {
void CacheValidateFuncGraph(const std::string &phase, const ResourcePtr &resource) {
if (IsPhaseTrain(phase) && MsContext::GetInstance()->get_param<bool>(MS_CTX_SAVE_COMPILE_CACHE)) {
#ifdef ENABLE_PROFILE
double t1 = GetTime();
#endif

@@ -853,12 +854,12 @@ void CacheValidateFuncGraph(const std::string &phase_s, const ResourcePtr &resou
}
}

void Pipeline::Run(const std::string &phase_s) {
void Pipeline::Run(const std::string &phase) {
MS_LOG(INFO) << "Pipeline run";
MS_EXCEPTION_IF_NULL(resource_);
FuncGraphPtr user_graph = nullptr;

WITH(MsProfile::GetProfile())[&user_graph, &phase_s, this]() {
WITH(MsProfile::GetProfile())[&user_graph, &phase, this]() {
size_t i = 0;
for (auto &action : actions_) {
#ifdef ENABLE_TIMELINE
@@ -874,7 +875,7 @@ void Pipeline::Run(const std::string &phase_s) {
if (action.first == "task_emit") {
SetLoopCount(resource_);
} else if (action.first == "validate") {
CacheValidateFuncGraph(phase_s, resource_);
CacheValidateFuncGraph(phase, resource_);
}
if (!result) {
MS_LOG(EXCEPTION) << "Pipeline running to end, failed in step:" << action.first;

@@ -983,11 +984,11 @@ void ProcessVmArgInner(const py::tuple &args, const ResourcePtr &res, VectorRef
}
}

void ExecutorPy::ProcessVmArg(const py::tuple &args, const std::string &phase, VectorRef *const arg_list) {
void GraphExecutorPy::ProcessVmArg(const py::tuple &args, const std::string &phase, VectorRef *const arg_list) {
ProcessVmArgInner(args, GetResource(phase), arg_list);
}

void ExecutorPy::TerminateDebugger() {
void GraphExecutorPy::TerminateDebugger() {
if (debugger_terminate_) {
MS_LOG(INFO) << "Terminate debugger and clear resources!";
ClearResAtexit();

@@ -995,23 +996,23 @@ void ExecutorPy::TerminateDebugger() {
}
}

py::object ExecutorPy::Run(const py::tuple &args, const py::object &phase) {
py::object GraphExecutorPy::Run(const py::tuple &args, const py::object &phase_obj) {
// Mindspore debugger notify main thread to exit after one step, and will not run next step
TerminateDebugger();
std::size_t size = args.size();
if (!py::isinstance<py::str>(phase)) {
if (!py::isinstance<py::str>(phase_obj)) {
MS_LOG(EXCEPTION) << "Run failed, phase input is not a str";
}
auto phase_s = py::cast<std::string>(phase);
auto phase = py::cast<std::string>(phase_obj);
std::string backend = MsContext::GetInstance()->backend_policy();
#ifdef ENABLE_GE
if (backend == "ge") {
return ExecDFGraph(info_, args, phase_s);
return ExecDFGraph(info_, args, phase);
}
#else
auto ret_val = std::make_shared<py::object>();
if (info_.count(phase_s) != 0 && info_[phase_s]->func_graph != nullptr) {
if (IsGraphOutputValueNodeOrParameter(info_[phase_s]->func_graph->output(), args, ret_val)) {
if (info_.count(phase) != 0 && info_[phase]->func_graph != nullptr) {
if (IsGraphOutputValueNodeOrParameter(info_[phase]->func_graph->output(), args, ret_val)) {
// Check the input arg must be Tensor when backend is "ms".
if (MsContext::GetInstance()->backend_policy() == kMsConvert) {
for (std::size_t i = 0; i < size; i++) {
@@ -1032,24 +1033,24 @@ py::object ExecutorPy::Run(const py::tuple &args, const py::object &phase) {
return args;
}
#endif
auto iter = info_.find(phase_s);
auto iter = info_.find(phase);
if (iter == info_.end()) {
MS_LOG(EXCEPTION) << "No phase in executor:" << GetPhasePrefix(phase_s);
MS_LOG(EXCEPTION) << "No executor info. found for phase: " << phase;
}
auto &execute_info = iter->second;
MS_EXCEPTION_IF_NULL(execute_info);
if (size > execute_info->arg_list_size) {
MS_LOG(WARNING) << "The arg num : size = " << size << ". full_arg_size = " << execute_info->arg_list_size;
}
ProcessVmArg(args, phase_s, &execute_info->arg_list);
ProcessVmArg(args, phase, &execute_info->arg_list);
// Start to run phase.
compile::VmEvalFuncPtr run = GetVmEvalFunc(phase_s);
compile::VmEvalFuncPtr run = GetVmEvalFunc(phase);
if (run == nullptr) {
MS_LOG(EXCEPTION) << "Can't find run graph func for " << phase_s;
MS_LOG(EXCEPTION) << "Can't find run graph func for " << phase;
}
// Set loopsink size for each phase.
bool vm_loop_flag = info_[phase_s]->resource->vm_loop_flag();
int64_t loop_size = info_[phase_s]->resource->loop_size();
bool vm_loop_flag = info_[phase]->resource->vm_loop_flag();
int64_t loop_size = info_[phase]->resource->loop_size();
int64_t vm_loop = 1;
if (vm_loop_flag) {
vm_loop = loop_size;

@@ -1069,7 +1070,7 @@ py::object ExecutorPy::Run(const py::tuple &args, const py::object &phase) {
return ret;
}

FuncGraphPtr ExecutorPy::BuildGraph(const py::dict &init_params, const std::string &phase,
FuncGraphPtr GraphExecutorPy::BuildGraph(const py::dict &init_params, const std::string &phase,
const py::object &broadcast_params) {
#if ((defined ENABLE_GE) || (defined ENABLE_D))
return BuildDFGraph(info_, init_params, phase, broadcast_params);
@@ -1078,8 +1079,8 @@ FuncGraphPtr ExecutorPy::BuildGraph(const py::dict &init_params, const std::stri
#endif
}

void ExecutorPy::UpdataParamNodeDefaultInput(const std::string &phase,
const std::unordered_map<std::string, tensor::TensorPtr> &params_value) {
void GraphExecutorPy::UpdataParamNodeDefaultInput(
const std::string &phase, const std::unordered_map<std::string, tensor::TensorPtr> &params_value) {
FuncGraphPtr func_graph = info_[phase]->resource->func_graph();
MS_EXCEPTION_IF_NULL(func_graph);
MS_LOG(DEBUG) << "UpdataParamNodeDefaultInput for func graph(" << func_graph->ToString() << ") phase(" << phase

@@ -1096,13 +1097,13 @@ void ExecutorPy::UpdataParamNodeDefaultInput(const std::string &phase,
}
}

void ExecutorPy::RunInitGraph(const py::dict &init_params, const std::string &phase) const {
void GraphExecutorPy::RunInitGraph(const py::dict &init_params, const std::string &phase) const {
#ifdef ENABLE_GE
RunGEInitGraph(init_params, phase);
#endif
}

void ExecutorPy::PyExePath(const py::object &py_exe_path) {
void GraphExecutorPy::PyExePath(const py::object &py_exe_path) {
if (!py::isinstance<py::str>(py_exe_path)) {
MS_LOG(EXCEPTION) << "Failed, py_exe_path input is not a str";
}

@@ -1111,7 +1112,7 @@ void ExecutorPy::PyExePath(const py::object &py_exe_path) {
ms_context->set_param<std::string>(MS_CTX_PYTHON_EXE_PATH, py_exe_path_s);
}

void ExecutorPy::KernelBuildServerDir(const py::object &kernel_build_server_dir) {
void GraphExecutorPy::KernelBuildServerDir(const py::object &kernel_build_server_dir) {
if (!py::isinstance<py::str>(kernel_build_server_dir)) {
MS_LOG(EXCEPTION) << "Failed, kernel_build_server_dir input is not a str";
}

@@ -1423,7 +1424,7 @@ void ClearResAtexit() {
abstract::ClearPrimEvaluatorMap();
pipeline::GetMethodMap().clear();
pipeline::GetAttrMap().clear();
pipeline::ExecutorPy::ClearRes();
pipeline::GraphExecutorPy::ClearRes();
pipeline::ReclaimOptimizer();
pynative::PynativeExecutor::GetInstance()->ClearRes();
opt::python_pass::PyPassManager::GetInstance()->ClearRes();
@@ -49,7 +49,7 @@ class Pipeline {
~Pipeline() = default;

void Run(const std::string &phase_s);
void Run(const std::string &phase);

ResourcePtr resource() { return resource_; }

@@ -59,29 +59,29 @@ class Pipeline {
};

// A function pipeline.
class ExecutorPy : public std::enable_shared_from_this<ExecutorPy> {
class GraphExecutorPy : public std::enable_shared_from_this<GraphExecutorPy> {
public:
static std::shared_ptr<ExecutorPy> GetInstance() {
static std::shared_ptr<GraphExecutorPy> GetInstance() {
std::lock_guard<std::mutex> i_lock(instance_lock_);
if (executor_ == nullptr) {
executor_ = std::shared_ptr<ExecutorPy>(new (std::nothrow) ExecutorPy());
executor_ = std::shared_ptr<GraphExecutorPy>(new (std::nothrow) GraphExecutorPy());
}
return executor_;
}

~ExecutorPy();
~GraphExecutorPy();

const std::string &phase() const { return phase_; }
void SaveCompiledGraph(const std::string &phase_s);
bool CompileInner(const py::object &obj, const py::tuple &args, const py::object &phase, bool use_vm,
void SaveCompiledGraph(const std::string &phase);
bool CompileInner(const py::object &source_obj, const py::tuple &args, const py::object &phase_obj, bool use_vm,
const std::string &queue_name);
bool Compile(const py::object &obj, const py::tuple &args, const py::object &phase, bool use_vm,
bool Compile(const py::object &source_obj, const py::tuple &args, const py::object &phase_obj, bool use_vm,
const std::string &queue_name);

void ProcessVmArg(const py::tuple &args, const std::string &phase, VectorRef *arg_list);

// for pynative mode when use_vm is on
py::object Run(const py::tuple &args, const py::object &phase);
py::object Run(const py::tuple &args, const py::object &phase_obj);
ResourcePtr GetResource(const std::string &phase);
FuncGraphPtr GetFuncGraph(const std::string &phase);
FuncGraphPtr GetGradGraph(const std::string &phase);
@@ -105,17 +105,17 @@ class ExecutorPy : public std::enable_shared_from_this<ExecutorPy> {
void SetNumOpsInfo(size_t);
py::dict GetAllreduceFusion(const std::string &phase);
void DelNetRes(const std::string &id);
void ReleaseResource(const py::object &phase);
void ReleaseResource(const py::object &phase_obj);
static void ClearRes();
static bool GetDebugTerminate() { return debugger_terminate_; }
static void DebugTerminate(bool val) { debugger_terminate_ = val; }
void TerminateDebugger();

std::map<std::string, std::pair<PrimitivePyAdapterPtr, std::string>> FetchInfoForQuantExport(
const std::string &phase_s);
const std::string &phase);

private:
ExecutorPy();
GraphExecutorPy();
void ConvertObjectToTensors(const py::dict &dict, std::map<std::string, tensor::TensorPtr> *tensors);
void GetWeightInfo(const CNodePtr &root_node, const AnfNodePtr &weight_node,
std::map<std::string, std::pair<PrimitivePyAdapterPtr, std::string>> *fake_quant_table);

@@ -125,18 +125,18 @@ class ExecutorPy : public std::enable_shared_from_this<ExecutorPy> {
static std::vector<ActionItem> FilterActions(const std::vector<ActionItem> &actions, const std::string &phase);

std::map<std::string, ExecutorInfoPtr> info_;
static std::shared_ptr<ExecutorPy> executor_;
static std::shared_ptr<GraphExecutorPy> executor_;
static std::mutex instance_lock_;
static bool debugger_terminate_;
std::map<std::string, py::dict> stra_dict_;
std::string phase_ = "";
std::map<std::string, size_t> phase_to_num_op_info_;
};
using ExecutorPyPtr = std::shared_ptr<ExecutorPy>;
using GraphExecutorPyPtr = std::shared_ptr<GraphExecutorPy>;

void CheckArgsValid(const py::tuple &args);
// Generate a key for mapping function graph
py::tuple GenerateKey(const std::string &name, const std::unordered_map<std::string, py::object> &defaults);
py::object GenerateArgumentsKey(const std::unordered_map<std::string, py::object> &args);
py::bool_ VerifyInputSignature(const py::list &input_signature, const py::tuple &inputs);

bool InitDistribute(const std::map<std::string, std::string> &options);
@@ -442,7 +442,7 @@ std::shared_ptr<py::object> DoExecGraph(const FuncGraphPtr &graph, const std::ve
void ProcessGeArg(const std::map<std::string, ExecutorInfoPtr> &info, const py::tuple &args, const std::string &phase,
std::vector<tensor::TensorPtr> *inputs) {
// check the arg and use the ExecutorPy args
// check the arg and use the GraphExecutorPy args
std::size_t size = args.size();

if (info.count(phase) == 0) {

@@ -246,7 +246,7 @@ BuiltInTypeMap &GetAttrMap() {
Resource::Resource(const py::object &obj)
: engine_(std::make_shared<abstract::AnalysisEngine>(abstract::GetPrimEvaluatorConstructors(), manager_)),
input_(obj),
source_input_(obj),
is_cleaned_(false) {}

Resource::~Resource() {

@@ -313,7 +313,7 @@ Any Resource::GetAttrPtr(const TypeId &type, const std::string &name) {
void Resource::Clean() {
// AbstractTensor->elements() will be saved in AbstractBasePtrList
args_spec_.clear();
input_ = py::none();
source_input_ = py::none();
// Context with AbstractBasePtrList may be saved in GraphEvaluator
// some Evaluator like ResolveEvaluator may save Python object in cache,
// it should be cleaned before Python Interpreter destructed.
@@ -67,7 +67,7 @@ class Resource : public ResourceBase {
static Any GetAttrPtr(const TypeId &type, const std::string &name);

const py::object &input() const { return input_; }
const py::object &source_input() const { return source_input_; }

FuncGraphPtr func_graph() const { return func_graph_; }
void set_func_graph(const FuncGraphPtr &func_graph) { func_graph_ = func_graph; }

@@ -84,7 +84,7 @@ class Resource : public ResourceBase {
bool vm_loop_flag() { return vm_loop_flag_; }
int64_t loop_size() { return loop_size_; }
// Reclaim resource and clear the cache.
// ExecutorPy::Compile() can be called multiple times, so cache
// GraphExecutorPy::Compile() can be called multiple times, so cache
// should be cleared.
void Clean();

@@ -92,7 +92,8 @@ class Resource : public ResourceBase {
abstract::AnalysisEnginePtr engine_;
FuncGraphPtr func_graph_;
abstract::AbstractBasePtrList args_spec_;
py::object input_;
// The source obj to compile, usually a `Cell` or `ms_function` decorated function.
py::object source_input_;
bool is_cleaned_;
// The func_graph_ is loaded from mindir
bool is_load_{false};
@@ -2942,7 +2942,7 @@ void GradExecutor::GradMsFunction(const py::object &out, const py::args &args) {
// Get ms_function func graph and grad graph.
const auto &phase = graph_phase();
MS_LOG(DEBUG) << "ms_function func graph phase: " << phase;
auto executor = pipeline::ExecutorPy::GetInstance();
auto executor = pipeline::GraphExecutorPy::GetInstance();
FuncGraphPtr ms_func_graph = executor->GetFuncGraph(phase);
MS_EXCEPTION_IF_NULL(ms_func_graph);
FuncGraphPtr grad_graph = executor->GetGradGraph(phase);
@@ -25,7 +25,7 @@ from mindspore import context
from mindspore import log as logger
from mindspore._extends.remote import kernel_build_server
from .tensor import Tensor as MsTensor
from .._c_expression import generate_key, Executor_, Tensor, MetaTensor, PynativeExecutor_
from .._c_expression import generate_arguments_key, GraphExecutor_, Tensor, MetaTensor, PynativeExecutor_
from .._c_expression import verify_inputs_signature, init_exec_dataset, _set_dataset_mode_config, init_pipeline
from ..parallel._ps_context import _is_role_pserver
from ..parallel._utils import _get_device_num, _get_global_rank, _need_to_full, _check_full_batch, _to_full_tensor, \

@@ -92,7 +92,7 @@ def _wrap_func(fn):
def _exec_init_graph(obj, init_phase):
"""Execute the parameter initializer graph."""
inst_executor = Executor_.get_instance()
inst_executor = GraphExecutor_.get_instance()
param_dict = OrderedDict()
for name, param in obj.parameters_dict().items():
if not param.is_init:

@@ -104,11 +104,11 @@ def _exec_init_graph(obj, init_phase):
inst_executor.run_init_graph(param_dict, init_phase)

class _MindSporeFunction:
class _MindsporeFunctionExecutor:
"""
Represents a function compiled by mind expression.
Represents a function compiled by graph compiler.

_MindSporeFunction will compile the original function for every combination
_MindsporeFunctionExecutor will compile the original function for every combination
of argument types and shapes it is given (as well as their values, optionally).

Args:
@@ -127,22 +127,23 @@ class _MindSporeFunction:
self.obj = None
if hasattr(obj, fn.__name__):
self.obj = obj
self._executor = Executor_.get_instance()
self._graph_executor = GraphExecutor_.get_instance()

def build_data_init_graph(self, graph_name):
"""Build GE data graph and init graph for the given graph name."""
if self.obj is None:
logger.warning("Make sure parameter should not be used in function")
para_dict = OrderedDict()
self._executor.build_data_graph(para_dict, graph_name)
self._graph_executor.build_data_graph(para_dict, graph_name)
return
self._executor.build_data_graph(self.obj.parameters_dict(), graph_name, self.obj.parameters_broadcast_dict())
self._graph_executor.build_data_graph(self.obj.parameters_dict(), graph_name,
self.obj.parameters_broadcast_dict())
init_phase = "init_subgraph" + graph_name[graph_name.find("."):]
_exec_init_graph(self.obj, init_phase)

def compile(self, args_list, arg_names, method_name):
"""Returns pipeline for the given args."""
# verify the signature for both function and method
# Verify the signature for both function and method
if self.input_signature is not None:
signatures = []
for sig_spec in self.input_signature:
@@ -155,35 +156,32 @@ class _MindSporeFunction:
dic = dict(zip(arg_names, args_list))
generate_name = self.fn.__module__ + "." + self.fn.__name__ + "." + self.fn.__code__.co_filename + "." + \
str(self.fn.__code__.co_firstlineno)
str(self.fn.__code__.co_firstlineno) + '.' + str(id(self.fn))
self.fn.__parse_method__ = method_name

# add key with obj
identify = ""
if self.obj is None:
identify = str(id(self.fn))
else:
# Add key with obj
if self.obj is not None:
if self.obj.__module__ != self.fn.__module__:
logger.error(f'`obj` module not equal to `fn` module: {self.obj.__module__}, {self.fn.__module__}')
self.obj.__parse_method__ = method_name
generate_name = self.obj.__module__ + "." + generate_name
identify = str(self.obj.create_time) + "_" + str(id(self.obj)) + '_' + str(id(self.fn))
generate_name = generate_name + '.' + str(self.obj.create_time) + '.' + str(id(self.obj))

generate_name = generate_name + "." + identify
key = generate_key(generate_name, dic)
phase = str(key[1]) + generate_name
if key not in ms_compile_cache.keys():
key = generate_arguments_key(dic)
phase = generate_name + '.' + str(key)
if phase not in ms_compile_cache.keys():
is_compile = False
if self.obj is None:
is_compile = self._executor.compile(self.fn, args_list, phase, True, "")
is_compile = self._graph_executor.compile(self.fn, args_list, phase, True, "")
else:
is_compile = self._executor.compile(self.obj, args_list, phase, True, "")
is_compile = self._graph_executor.compile(self.obj, args_list, phase, True, "")
if not is_compile:
raise RuntimeError("Executor compile failed.")
if context.get_context("enable_ge"):
self.build_data_init_graph(phase)
ms_compile_cache[key] = phase
ms_compile_cache[phase] = phase
return phase

return ms_compile_cache[key]
return phase

@_wrap_func
def __call__(self, *args):
@@ -208,12 +206,13 @@ class _MindSporeFunction:
new_inputs.append(i)
elif context.get_context("grad_for_scalar") and isinstance(i, (int, float)):
new_inputs.append(i)
output = self._executor(tuple(new_inputs), phase)
output = self._graph_executor(tuple(new_inputs), phase)

if context.get_context("mode") == context.PYNATIVE_MODE:
_pynative_exec.set_graph_phase(phase)
_pynative_exec.grad_ms_function(output, *new_inputs)
_pynative_executor.set_graph_phase(phase)
_pynative_executor.grad_ms_function(output, *new_inputs)
output = output[0]

return output

@@ -283,7 +282,7 @@ def ms_function(fn=None, obj=None, input_signature=None):
process_obj = None
if args and not isinstance(args[0], MsTensor) and hasattr(args[0], func.__name__):
process_obj = args[0]
out = _MindSporeFunction(func, input_signature, process_obj)(*args)
out = _MindsporeFunctionExecutor(func, input_signature, process_obj)(*args)
return out

return staging_specialize
@@ -420,9 +419,9 @@ class _PynativeExecutor:
return self._executor(obj, args)

class _Executor:
class _CellGraphExecutor:
"""
An executor used to compile/manage/run graph.
An executor used to compile/manage/run graph for a Cell.

Including data_graph, train_graph, eval_graph and predict graph.

@@ -437,10 +436,10 @@ class _Executor:
def __init__(self):
# create needed graph by lazy mode
self.is_init = False
self._executor = Executor_.get_instance()
self._graph_executor = GraphExecutor_.get_instance()
self.compile_cache = {}
self._executor.set_py_exe_path(sys.executable)
self._executor.set_kernel_build_server_dir(os.path.split(kernel_build_server.__file__)[0] + os.sep)
self._graph_executor.set_py_exe_path(sys.executable)
self._graph_executor.set_kernel_build_server_dir(os.path.split(kernel_build_server.__file__)[0] + os.sep)
self.queue_name = ""

def init_dataset(self, queue_name, dataset_size, batch_size, dataset_types, dataset_shapes,
@@ -472,7 +471,7 @@ class _Executor:
return True

def _build_data_graph(self, obj, phase):
self._executor.build_data_graph(obj.parameters_dict(), phase, obj.parameters_broadcast_dict())
self._graph_executor.build_data_graph(obj.parameters_dict(), phase, obj.parameters_broadcast_dict())

def _set_dataset_mode(self, args_list):
"""set dataset mode."""

@@ -501,12 +500,9 @@ class _Executor:

args_names, args_list = _generate_pip_args(obj, *args)
dic = dict(zip(args_names, args_list))
key = generate_key(phase, dic)
obj.phase_prefix = str(key[1])
if 'export' in phase:
phase = phase + '.' + obj.phase_prefix + '.' + str(obj.create_time) + '.' + str(id(obj))
else:
phase = obj.phase_prefix + phase + '.' + str(obj.create_time) + '.' + str(id(obj))
key = generate_arguments_key(dic)
obj.arguments_key = str(key)
phase = phase + '.' + str(obj.create_time) + '.' + str(id(obj)) + '.' + obj.arguments_key

if phase in self.compile_cache.keys():
logger.debug("%r graph has existed.", phase)
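The resulting phase string drops the old phase_prefix and appends the argument key instead; roughly (values invented for illustration):

    # old: <phase_prefix>train.1618033988000000000.140230000000000
    # new: train.1618033988000000000.140230000000000.1234567890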
@@ -524,11 +520,11 @@ class _Executor:
enable_debug_runtime = context.get_context("enable_debug_runtime")
enable_ge = context.get_context("enable_ge")
use_vm = not enable_ge or (enable_debug_runtime and context.get_context("mode") == context.PYNATIVE_MODE)
result = self._executor.compile(obj, args_list, phase, use_vm, self.queue_name)
result = self._graph_executor.compile(obj, args_list, phase, use_vm, self.queue_name)
self.compile_cache[phase] = phase
if not result:
raise RuntimeError("Executor compile failed.")
graph = self._executor.get_func_graph(phase)
graph = self._graph_executor.get_func_graph(phase)

if graph is None:
raise RuntimeError("Compile graph failed for phase {}.".format(phase))

@@ -541,7 +537,6 @@ class _Executor:
# the following GE init process is not needed when use vm or ms backend
if enable_ge:
self._build_data_graph(obj, phase)

if "export" not in phase:
init_phase = "init_subgraph." + str(obj.create_time) + "." + str(id(obj))
_exec_init_graph(obj, init_phase)

@@ -559,8 +554,8 @@ class _Executor:
self._updata_param_node_default_input(phase, replace)
return

obj.parameter_layout_dict = self._executor.get_parameter_layout(phase)
obj.parallel_parameter_name_list = self._executor.get_parallel_parameter_name_list(phase)
obj.parameter_layout_dict = self._graph_executor.get_parameter_layout(phase)
obj.parallel_parameter_name_list = self._graph_executor.get_parallel_parameter_name_list(phase)
replace = obj.init_parameters_data(auto_parallel_mode=True)
if _get_pipeline_stages() > 1 and (not hasattr(obj, "is_first_iteration") or not obj.is_first_iteration):
obj.remove_redundant_parameters()

@@ -575,19 +570,19 @@ class _Executor:

def _updata_param_node_default_input(self, phase, replace):
new_param = {x.name: replace[x] for x in replace if id(x) != id(replace[x])}
return self._executor.updata_param_node_default_input(phase, new_param)
return self._graph_executor.updata_param_node_default_input(phase, new_param)

def _get_shard_strategy(self, obj):
real_phase = obj.phase_prefix + obj.phase + '.' + str(obj.create_time) + '.' + str(id(obj))
return self._executor.get_strategy(real_phase)
real_phase = obj.phase + '.' + str(obj.create_time) + '.' + str(id(obj)) + '.' + obj.arguments_key
return self._graph_executor.get_strategy(real_phase)

def _get_num_parallel_ops(self, obj):
real_phase = obj.phase_prefix + obj.phase + '.' + str(obj.create_time) + '.' + str(id(obj))
return self._executor.get_num_parallel_ops(real_phase)
real_phase = obj.phase + '.' + str(obj.create_time) + '.' + str(id(obj)) + '.' + obj.arguments_key
return self._graph_executor.get_num_parallel_ops(real_phase)

def _get_allreduce_fusion(self, obj):
real_phase = obj.phase_prefix + obj.phase + '.' + str(obj.create_time) + '.' + str(id(obj))
return self._executor.get_allreduce_fusion(real_phase)
real_phase = obj.phase + '.' + str(obj.create_time) + '.' + str(id(obj)) + '.' + obj.arguments_key
return self._graph_executor.get_allreduce_fusion(real_phase)

def has_compiled(self, phase='predict'):
"""

@@ -599,7 +594,7 @@ class _Executor:
Returns:
bool, specifies whether the specific graph has been compiled.
"""
return self._executor.has_compiled(phase)
return self._graph_executor.has_compiled(phase)

def __call__(self, obj, *args, phase='predict'):
if context.get_context("precompile_only") or _is_role_pserver():

@@ -615,7 +610,7 @@ class _Executor:
raise RuntimeError('Process method parameter is failure')
args_list = tuple(arguments_dict.values())
obj.__parse_method__ = parse_method
return self._executor(args_list, phase)
return self._graph_executor(args_list, phase)

def run(self, obj, *args, phase='predict'):
"""

@@ -628,23 +623,23 @@ class _Executor:
Tensor/Tuple, return execute result.
"""
if phase == 'save':
return self._executor((), phase + '.' + str(obj.create_time) + '.' + str(id(obj)))
return self._graph_executor((), phase + '.' + str(obj.create_time) + '.' + str(id(obj)))

phase_real = obj.phase_prefix + phase + '.' + str(obj.create_time) + '.' + str(id(obj))
phase_real = phase + '.' + str(obj.create_time) + '.' + str(id(obj)) + '.' + obj.arguments_key
if self.has_compiled(phase_real):
return self._exec_pip(obj, *args, phase=phase_real)
raise KeyError('{} graph is not exist.'.format(phase_real))

def del_net_res(self, net_id):
self._executor.del_net_res(net_id)
self._graph_executor.del_net_res(net_id)

def _get_func_graph_proto(self, obj, exec_id, ir_type="onnx_ir", use_prefix=False):
"""Get graph proto from pipeline."""
if use_prefix:
exec_id = obj.phase_prefix + exec_id
if self._executor.has_compiled(exec_id) is False:
exec_id = exec_id + '.' + obj.arguments_key
if self._graph_executor.has_compiled(exec_id) is False:
return None
return self._executor.get_func_graph_proto(exec_id, ir_type)
return self._graph_executor.get_func_graph_proto(exec_id, ir_type)

def export(self, file_name, graph_id):
"""

@@ -659,12 +654,12 @@ class _Executor:

def fetch_info_for_quant_export(self, exec_id):
"""Get graph proto from pipeline."""
if self._executor.has_compiled(exec_id) is False:
if self._graph_executor.has_compiled(exec_id) is False:
return None
return self._executor.fetch_info_for_quant_export(exec_id)
return self._graph_executor.fetch_info_for_quant_export(exec_id)


_executor = _Executor()
_pynative_exec = _PynativeExecutor()
_cell_graph_executor = _CellGraphExecutor()
_pynative_executor = _PynativeExecutor()

__all__ = ['ms_function']
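Downstream modules pick up the rename in one of two ways, both of which appear in the hunks below: switch to the new name outright, or alias it back to the old local name so the rest of the file stays untouched.

    # style 1: adopt the new names directly
    from mindspore.common.api import _cell_graph_executor, _pynative_executor
    # style 2: keep the old local name through an alias
    from mindspore.common.api import _cell_graph_executor as _executor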
@@ -22,7 +22,7 @@ from ... import nn, ops
from ..._checkparam import Validator
from ...common import Tensor
from ...common import dtype as mstype
from ...common.api import _executor
from ...common.api import _cell_graph_executor as _executor
from ...common.parameter import Parameter
from ...nn import Cell
from ...nn.layer import quant
@@ -30,7 +30,7 @@ from .. import context
from .._c_expression import init_pipeline, Cell_, FuncGraph
from .._checkparam import Validator
from ..common import dtype as mstype
from ..common.api import _executor, _pynative_exec
from ..common.api import _cell_graph_executor, _pynative_executor
from ..common.parameter import Parameter, ParameterTuple
from ..common.tensor import Tensor
from ..ops.operations import HookBackward, Cast

@@ -98,7 +98,7 @@ class Cell(Cell_):
self._parallel_parameter_name_list = ()
self._parallel_parameter_merge_net_dict = {}
self._create_time = int(time.time() * 1e9)
self.phase_prefix = ""
self.arguments_key = ""
self.parameter_broadcast_done = False
init_pipeline()

@@ -264,8 +264,8 @@ class Cell(Cell_):

def get_func_graph_proto(self):
"""Return graph binary proto."""
return _executor._get_func_graph_proto(self, self.phase + "." + str(self.create_time) + '.' + str(id(self)),
"anf_ir", True)
exec_id = self.phase + "." + str(self.create_time) + '.' + str(id(self))
return _cell_graph_executor._get_func_graph_proto(self, exec_id, "anf_ir", True)

def __getattr__(self, name):
if '_params' in self.__dict__:

@@ -295,9 +295,9 @@ class Cell(Cell_):

def __del__(self):
if context.get_context is not None and context.get_context("mode") == context.PYNATIVE_MODE:
_pynative_exec.del_cell(str(id(self)))
_pynative_executor.del_cell(str(id(self)))
if hasattr(self, "_create_time"):
_executor.del_net_res(str(self._create_time))
_cell_graph_executor.del_net_res(str(self._create_time))

def __delattr__(self, name):
if name in self._params:

@@ -338,7 +338,7 @@ class Cell(Cell_):
def do_parameter_broadcast(self):
if context.get_auto_parallel_context("parallel_mode") == ParallelMode.DATA_PARALLEL:
if not self.parameter_broadcast_done:
_pynative_exec.parameter_broadcast(self, self.phase, self._auto_parallel_mode)
_pynative_executor.parameter_broadcast(self, self.phase, self._auto_parallel_mode)
self.parameter_broadcast_done = True

def run_construct(self, cast_inputs, kwargs):

@@ -381,6 +381,8 @@ class Cell(Cell_):
bound_args = inspect.signature(self.construct).bind(*inputs, **kwargs)
inputs = bound_args.args
kwargs = bound_args.kwargs

# Run in Graph mode.
if context.get_context("mode") == context.GRAPH_MODE:
self._check_construct_args(*inputs, **kwargs)
if self.enable_hook:

@@ -388,13 +390,14 @@ class Cell(Cell_):
out = self.compile_and_run(*inputs)
return out

# Run in PyNative mode.
self.do_parameter_broadcast()
for item in inputs:
if isinstance(item, numpy.ndarray):
raise TypeError("The cell inputs should not be numpy arrays.")
if self.requires_grad is True:
_pynative_exec.set_grad_flag(True)
_pynative_exec.new_graph(self, *inputs, **kwargs)
_pynative_executor.set_grad_flag(True)
_pynative_executor.new_graph(self, *inputs, **kwargs)
cast_inputs = list()
if hasattr(self, "_mindspore_flags"):
if self._mindspore_flags.get('fp16'):

@@ -406,7 +409,7 @@ class Cell(Cell_):
output = self.run_construct(cast_inputs, kwargs)
if isinstance(output, Parameter):
output = output.data
_pynative_exec.end_graph(self, output, *inputs, **kwargs)
_pynative_executor.end_graph(self, output, *inputs, **kwargs)
return output

def _add_attr(self, name, value):

@@ -551,7 +554,7 @@ class Cell(Cell_):
"""
Replace parameters with sliced tensors by parallel strategies.

Please refer to the usage in source code of `mindspore.common._Executor.compile`.
Please refer to the usage in source code of `mindspore.common._CellGraphExecutor.compile`.

Args:
params (dict): The parameters dictionary used for initializing the data graph.

@@ -635,7 +638,7 @@ class Cell(Cell_):
Args:
inputs (tuple): Inputs of the Cell object.
"""
_executor.compile(self, *inputs, phase=self.phase, auto_parallel_mode=self._auto_parallel_mode)
_cell_graph_executor.compile(self, *inputs, phase=self.phase, auto_parallel_mode=self._auto_parallel_mode)

def compile_and_run(self, *inputs):
"""

@@ -659,19 +662,19 @@ class Cell(Cell_):

if self._auto_parallel_mode:
if new_inputs and isinstance(new_inputs[0], Tensor) and inputs[0].virtual_flag:
# get parallel inputs in sink mode, parallel inputs set in _executor.compile
# get parallel inputs in sink mode, parallel inputs set in _cell_graph_executor.compile
parallel_inputs_run = self._parallel_inputs_run
else:
parallel_inputs_run = new_inputs
return _executor(self, *parallel_inputs_run, phase=self.phase)
return _executor(self, *new_inputs, phase=self.phase)
return _cell_graph_executor(self, *parallel_inputs_run, phase=self.phase)
return _cell_graph_executor(self, *new_inputs, phase=self.phase)

def auto_parallel_compile_and_run(self):
return self._auto_parallel_compile_and_run

def exec_checkpoint_graph(self):
"""Executes saving checkpoint graph operation."""
_executor(self, phase='save')
_cell_graph_executor(self, phase='save')

def insert_param_to_cell(self, param_name, param, check_name=True):
"""
@@ -23,7 +23,7 @@ from mindspore import context
from ..._c_expression import EnvInstance_, GradOperation_, HyperMap_, Map_, MultitypeFuncGraph_, Tail_, \
TupleAdd_, TupleSlice_, UnpackCall_, ZipOperation_, ListAppend_, TupleGetItemTensor_
from ...common import dtype as mstype
from ...common.api import ms_function, _pynative_exec, _wrap_func
from ...common.api import ms_function, _pynative_executor, _wrap_func
from ..primitive import Primitive
from ..operations import _grad_ops
from .. import operations as P

@@ -341,14 +341,14 @@ class GradOperation(GradOperation_):
new_kwargs = kwargs.copy()
new_kwargs.pop('sens')
if isinstance(fn, FunctionType):
if not _pynative_exec.check_run(fn, *args, **new_kwargs):
_pynative_exec.set_grad_flag(True)
_pynative_exec.new_graph(fn, *args, **new_kwargs)
if not _pynative_executor.check_run(fn, *args, **new_kwargs):
_pynative_executor.set_grad_flag(True)
_pynative_executor.new_graph(fn, *args, **new_kwargs)
output = fn(*args, **new_kwargs)
_pynative_exec.end_graph(fn, output, *args, **new_kwargs)
_pynative_executor.end_graph(fn, output, *args, **new_kwargs)
else:
# Check if fn have run already
if not _pynative_exec.check_run(fn, *args, **new_kwargs):
if not _pynative_executor.check_run(fn, *args, **new_kwargs):
fn.set_grad()
fn(*args, **new_kwargs)

@@ -368,12 +368,12 @@ class GradOperation(GradOperation_):
else:
@_wrap_func
def after_grad(*args, **kwargs):
if _pynative_exec.check_graph(fn, *args, **kwargs):
if _pynative_executor.check_graph(fn, *args, **kwargs):
print("Another grad step is running")
self._pynative_forward_run(args, kwargs, fn)
_pynative_exec.grad(grad_, fn, weights, *args, **kwargs)
out = _pynative_exec(fn, *args, **kwargs)
_pynative_exec.clear_grad(fn, *args, **kwargs)
_pynative_executor.grad(grad_, fn, weights, *args, **kwargs)
out = _pynative_executor(fn, *args, **kwargs)
_pynative_executor.clear_grad(fn, *args, **kwargs)
return out
self.grad_fn = after_grad
self.fn = fn
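At the user level this PyNative flow is reached through GradOperation; a small sketch (values arbitrary):

    import numpy as np
    import mindspore.ops.composite as C
    from mindspore import Tensor, context

    context.set_context(mode=context.PYNATIVE_MODE)
    grad_all = C.GradOperation(get_all=True)

    def mul(x, y):
        return x * y

    x = Tensor(np.array([2.0], np.float32))
    y = Tensor(np.array([3.0], np.float32))
    dx, dy = grad_all(mul)(x, y)   # forward run, then gradients via _pynative_executor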
@@ -22,7 +22,7 @@ from mindspore.common.tensor import Tensor
from mindspore.common.dtype import dtype_to_nptype, pytype_to_dtype
from mindspore.common import dtype as mstype
from mindspore import log as logger
from mindspore.common.api import _executor
from mindspore.common.api import _cell_graph_executor
from mindspore.train.mind_ir_pb2 import ModelProto as mindir_model
from mindspore.train.checkpoint_pb2 import Checkpoint
from mindspore.train.node_strategy_pb2 import ParallelStrategyMap as ckpt_strategy

@@ -64,7 +64,7 @@ def _exec_datagraph(exec_dataset, dataset_size, phase='dataset', create_data_inf
send_epoch_end = bool(dataset_size == -1)
exec_dataset = exec_dataset.device_que(send_epoch_end=send_epoch_end, create_data_info_queue=create_data_info_queue)

_executor.init_dataset(exec_dataset.queue_name,
_cell_graph_executor.init_dataset(exec_dataset.queue_name,
dataset_size,
batch_size,
dataset_types,

@@ -37,7 +37,7 @@ from ..context import ParallelMode
from ..parallel._cost_model_context import _set_multi_subgraphs
from .dataset_helper import DatasetHelper, connect_network_with_dataset
from . import amp
from ..common.api import _pynative_exec
from ..common.api import _pynative_executor


def _transfer_tensor_to_tuple(inputs):

@@ -53,7 +53,7 @@ def _transfer_tensor_to_tuple(inputs):
class _StepSync(Callback):
@staticmethod
def step_end(run_context):
_pynative_exec.sync()
_pynative_executor.sync()


class Model:

@@ -37,7 +37,7 @@ from mindspore.train.mind_ir_pb2 import GraphProto as graph_proto
from mindspore.common.tensor import Tensor
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.common.api import _executor
from mindspore.common.api import _cell_graph_executor as _executor
from mindspore.common import dtype as mstype
from mindspore._checkparam import check_input_data, Validator
from mindspore.compression.export import quant_export

@@ -20,7 +20,7 @@ from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.ops.composite import GradOperation
from mindspore.nn import Cell
from mindspore.common.api import _pynative_exec
from mindspore.common.api import _pynative_executor

from . import jacobian as jb
from . import listvec as lv

@@ -231,7 +231,7 @@ class LMSolver:
return mem

def timing(self):
_pynative_exec.sync()
_pynative_executor.sync()
return time()

def initialize_variables(self):
@@ -21,7 +21,7 @@ import numpy as np

from mindspore import ParameterTuple
from mindspore import nn, context
from mindspore.common.api import _executor, ms_function
from mindspore.common.api import _cell_graph_executor, ms_function
from mindspore.common.tensor import Tensor
from mindspore.ops import functional as F
from mindspore.ops import operations as P

@@ -45,7 +45,7 @@ def set_block_param_with_rand(net, rand_func=None):
def compile_block(net, *inputs, rand_func=None, training=True):
set_block_training(net, training)
set_block_param_with_rand(net, rand_func)
return _executor.compile(net, *inputs)
return _cell_graph_executor.compile(net, *inputs)


def run_block(net, *inputs, rand_func=None, training=True):

@@ -20,7 +20,7 @@ import mindspore.ops.composite as C
import mindspore.ops.functional as F
import mindspore.ops.operations as P
from mindspore import Tensor
from mindspore.common.api import _executor
from mindspore.common.api import _cell_graph_executor


grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True)

@@ -252,18 +252,18 @@ def get_loss_fun(construct_net, num_input, output_index):

def build_construct_graph(net, *inputs, execute=True):
net.set_train()
_executor.compile(net, *inputs)
_cell_graph_executor.compile(net, *inputs)
if execute:
_executor(net, inputs)
_cell_graph_executor(net, inputs)


def build_backward_graph(net, output_shapes, inputs, execute=True):
inputs = append_sens_to_inputs(output_shapes, inputs)
net = gen_backward_net(net, len(inputs) - 1)
net.set_train()
_executor.compile(net, inputs)
_cell_graph_executor.compile(net, inputs)
if execute:
_executor(net, inputs)
_cell_graph_executor(net, inputs)


def convert(shp, dtype=np.float32, scale=6):

@@ -22,7 +22,7 @@ import mindspore.nn as nn
import mindspore.ops.composite as C
from mindspore import Tensor
from mindspore import context
from mindspore.common.api import _executor
from mindspore.common.api import _cell_graph_executor

context.set_context(mode=context.GRAPH_MODE)


@@ -59,7 +59,7 @@ def test_compile():
height,
weight) * 3, np.float32))

_executor.compile(net, inp)
_cell_graph_executor.compile(net, inp)


def test_compile_grad():

@@ -72,4 +72,4 @@ def test_compile_grad():
sens = Tensor(np.ones([batch_size, num_class]).astype(np.float32))
grad_op = LeNetGrad(net)

_executor.compile(grad_op, inp, sens)
_cell_graph_executor.compile(grad_op, inp, sens)

@@ -18,11 +18,11 @@
import numpy as np

from mindspore import Tensor
from mindspore.common.api import _executor
from mindspore.common.api import _cell_graph_executor
from .resnet_example import resnet50


def test_compile():
net = resnet50()
inp = Tensor(np.ones([1, 3, 224, 224]).astype(np.float32))
_executor.compile(net, inp)
_cell_graph_executor.compile(net, inp)

@@ -19,7 +19,7 @@ import numpy as np

import mindspore.context as context
from mindspore import Tensor
from mindspore.common.api import _executor
from mindspore.common.api import _cell_graph_executor
from .resnet_example import resnet50
from ..train_step_wrap import train_step_with_loss_warp


@@ -31,4 +31,4 @@ def test_train_step():
net.set_train()
inp = Tensor(np.ones([1, 3, 224, 224], np.float32))
label = Tensor(np.zeros([1, 10], np.float32))
_executor.compile(net, inp, label)
_cell_graph_executor.compile(net, inp, label)

@@ -17,7 +17,7 @@ import numpy as np
import pytest
from mindspore import context, nn, Tensor
from mindspore import log as logger
from mindspore.common.api import _executor
from mindspore.common.api import _cell_graph_executor
from mindspore.common import dtype as mstype
from mindspore.ops import operations as P
import mindspore.dataset as de

@@ -121,7 +121,7 @@ def op_network_with_step_num(dataset, step_num):
net = SingleOpNetwork(dataset_shapes)
net_with_dataset = NetWithTDT(net, dataset_types, dataset_shapes, queue_name)
# when device type is Davinci, net should has get_next operation before call init_dataset
_executor.init_dataset(dataset.queue_name, 1, batch_size, dataset_types, dataset_shapes, (), "")
_cell_graph_executor.init_dataset(dataset.queue_name, 1, batch_size, dataset_types, dataset_shapes, (), "")
dataset_send_tdt(dataset)
return op_network_with_epoch(net_with_dataset, step_num)
@@ -18,7 +18,7 @@ import numpy as np
import mindspore.nn as nn
import mindspore.context as context
from mindspore import Tensor
from mindspore.common.api import _executor
from mindspore.common.api import _cell_graph_executor
from gat import GAT

context.set_context(mode=context.GRAPH_MODE)

@@ -44,4 +44,4 @@ def test_GAT():
ftr_drop=0.6,
activation=activation,
residual=residual)
_executor.compile(net, input_data, biases)
_cell_graph_executor.compile(net, input_data, biases)

@@ -20,7 +20,7 @@ import mindspore.context as context
import mindspore.nn as nn
import mindspore.ops.composite as C
from mindspore import Tensor
from mindspore.common.api import _executor
from mindspore.common.api import _cell_graph_executor

context.set_context(mode=context.GRAPH_MODE)


@@ -45,7 +45,7 @@ def test_MeanAggregator():
"""Compile MeanAggregator forward graph"""
aggregator = MeanAggregator(32, 64, activation="relu", dropout_ratio=0.5)
input_data = Tensor(np.array(np.random.rand(32, 3, 32), dtype=np.float32))
_executor.compile(aggregator, input_data)
_cell_graph_executor.compile(aggregator, input_data)


def test_MeanAggregator_grad():

@@ -54,7 +54,7 @@ def test_MeanAggregator_grad():
input_data = Tensor(np.array(np.random.rand(32, 3, 32), dtype=np.float32))
sens = Tensor(np.ones([32, 64]).astype(np.float32))
grad_op = MeanAggregatorGrad(aggregator)
_executor.compile(grad_op, input_data, sens)
_cell_graph_executor.compile(grad_op, input_data, sens)


def test_AttentionHead():

@@ -66,11 +66,11 @@ def test_AttentionHead():
residual=False)
input_data = Tensor(np.array(np.random.rand(1, 2708, 1433), dtype=np.float32))
biases = Tensor(np.array(np.random.rand(1, 2708, 2708), dtype=np.float32))
_executor.compile(head, input_data, biases)
_cell_graph_executor.compile(head, input_data, biases)


def test_AttentionAggregator():
input_data = Tensor(np.array(np.random.rand(1, 2708, 1433), dtype=np.float32))
biases = Tensor(np.array(np.random.rand(1, 2708, 2708), dtype=np.float32))
net = AttentionAggregator(1433, 8, 8)
_executor.compile(net, input_data, biases)
_cell_graph_executor.compile(net, input_data, biases)

@@ -19,7 +19,7 @@ import mindspore.context as context
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as vision
import mindspore.nn as nn
from mindspore.common.api import _executor
from mindspore.common.api import _cell_graph_executor
from mindspore.common.tensor import Tensor
from mindspore.dataset.vision import Inter
from mindspore.ops import operations as P

@@ -92,7 +92,7 @@ if __name__ == '__main__':
net = dataiter()
net.set_train()

_executor.init_dataset(ds1.queue_name, 39, batch_size,
_cell_graph_executor.init_dataset(ds1.queue_name, 39, batch_size,
dataset_types, dataset_shapes, (), 'dataset')
ds1.send()

@@ -22,7 +22,7 @@ import logging
import numpy as np

import mindspore.nn as nn
from mindspore.common.api import _executor
from mindspore.common.api import _cell_graph_executor
from mindspore.common.parameter import Parameter
from mindspore.common.tensor import Tensor


@@ -82,4 +82,4 @@ def test_get_object_graph():
Y = Tensor(np.ones([2, 2, 2]).astype(np.float32))
network = SimpleNet(ResNet(X), Y, True)
print(network.parameters_dict())
return _executor.compile(network, X, Y)
return _cell_graph_executor.compile(network, X, Y)

@@ -19,7 +19,7 @@ import numpy as np
import mindspore._c_expression as me
import mindspore.nn as nn
from mindspore.common import dtype
from mindspore.common.api import ms_function, _executor
from mindspore.common.api import ms_function, _cell_graph_executor
from mindspore.common.parameter import Parameter
from mindspore.common.tensor import Tensor
from mindspore.ops import functional as F

@@ -153,7 +153,7 @@ class TestNet(nn.Cell):
def test_compile_conv2d():
net = Net()
inputs = Tensor(np.ones([1, 3, 16, 50]).astype(np.float32))
_executor.compile(net, inputs)
_cell_graph_executor.compile(net, inputs)


def test_none(x, y):
@@ -38,7 +38,7 @@ class TestCallback : public UT::Common {
* # ut and python static info not share
TEST_F(TestCallback, test_get_anf_tensor_shape) {
py::object obj = python_adapter::CallPyFn("gtest_input.pipeline.parse.parse_class", "test_get_object_graph");
FuncGraphPtr func_graph = pipeline::ExecutorPy::GetInstance()->GetFuncGraphPy(obj);
FuncGraphPtr func_graph = pipeline::GraphExecutorPy::GetInstance()->GetFuncGraphPy(obj);
transform::DfGraphManager::GetInstance().SetAnfGraph(func_graph);
std::shared_ptr<std::vector<int64_t>> param_shape_ptr = std::make_shared<std::vector<int64_t>>();
bool get_shape = callbacks::GetParameterShape(func_graph, "weight", param_shape_ptr);

@@ -47,7 +47,7 @@ TEST_F(TestCallback, test_get_anf_tensor_shape) {

TEST_F(TestCallback, test_checkpoint_save_op) {
py::object obj = python_adapter::CallPyFn("gtest_input.pipeline.parse.parse_class", "test_get_object_graph");
FuncGraphPtr func_graph = pipeline::ExecutorPy::GetInstance()->GetFuncGraphPy(obj);
FuncGraphPtr func_graph = pipeline::GraphExecutorPy::GetInstance()->GetFuncGraphPy(obj);
transform::DfGraphManager::GetInstance().SetAnfGraph(func_graph);

#define DTYPE float

@@ -18,7 +18,7 @@ import numpy as np
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import _executor
from mindspore.common.api import _cell_graph_executor
from mindspore.communication._comm_helper import Backend
from mindspore.communication.management import HCCL_WORLD_COMM_GROUP, NCCL_WORLD_COMM_GROUP, GlobalComm, init
from mindspore.nn import Dense

@@ -154,7 +154,7 @@ def run_allreduce(op):
momentum=0.9)
network = WithLossCell(network, loss_fn)
network = TrainOneStepCell(network, optimizer)
_executor.compile(network, input_tensor, label_tensor)
_cell_graph_executor.compile(network, input_tensor, label_tensor)


def test_allreduce():

@@ -178,7 +178,7 @@ def test_allgather():
momentum=0.9)
network = WithLossCell(network, loss_fn)
network = TrainOneStepCell(network, optimizer)
_executor.compile(network, input_tensor, label_tensor)
_cell_graph_executor.compile(network, input_tensor, label_tensor)

def test_allswap():
"""run_allswap"""

@@ -192,7 +192,7 @@ def test_allswap():
momentum=0.9)
network = WithLossCell(network, loss_fn)
network = TrainOneStepCell(network, optimizer)
_executor.compile(network, input_tensor, label_tensor)
_cell_graph_executor.compile(network, input_tensor, label_tensor)


def run_reducescatter(op):

@@ -207,7 +207,7 @@ def run_reducescatter(op):
momentum=0.9)
network = WithLossCell(network, loss_fn)
network = TrainOneStepCell(network, optimizer)
_executor.compile(network, input_tensor, label_tensor)
_cell_graph_executor.compile(network, input_tensor, label_tensor)


def test_reducescatter():

@@ -228,7 +228,7 @@ def test_broadcast():
momentum=0.9)
network = WithLossCell(network, loss_fn)
network = TrainOneStepCell(network, optimizer)
_executor.compile(network, input_tensor_1, label_tensor)
_cell_graph_executor.compile(network, input_tensor_1, label_tensor)


def test_alltoall():

@@ -243,4 +243,4 @@ def test_alltoall():
momentum=0.9)
network = WithLossCell(network, loss_fn)
network = TrainOneStepCell(network, optimizer)
_executor.compile(network, input_tensor, label_tensor)
_cell_graph_executor.compile(network, input_tensor, label_tensor)

@@ -21,7 +21,7 @@ import numpy as np
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import _executor
from mindspore.common.api import _cell_graph_executor
from mindspore.nn import Momentum
from mindspore.nn import TrainOneStepCell, WithLossCell
from mindspore.ops import operations as P

@@ -70,5 +70,5 @@ def test_data_parallel_dense():
net = WithLossCell(net, loss_fn)
net = TrainOneStepCell(net, optimizer)

_executor.compile(net, inp, label)
_cell_graph_executor.compile(net, inp, label)
context.reset_auto_parallel_context()
@@ -19,7 +19,7 @@ import mindspore as ms
import mindspore.nn as nn
from mindspore import Tensor
from mindspore import context
from mindspore.common.api import _executor
from mindspore.common.api import _cell_graph_executor
from ..ut_filter import non_graph_engine


@@ -63,7 +63,7 @@ def test_compile_train_eval():
train_input_data = Tensor(np.ones([1, 3, 32, 32]).astype(np.float32) * 0.01)
context.set_context(mode=context.GRAPH_MODE)

ms_executor = _executor
ms_executor = _cell_graph_executor

ms_executor.init_dataset("train", 1, 1, [ms.float32], [[1, 3, 32, 32]], (), 'dataset')


@@ -286,7 +286,7 @@ def test_return_tensor():
net = Net(0)
input_data = ms.Tensor(np.array([[1.2, 2.1], [2.2, 3.2]]).astype('float32'))
input_data.set_dtype(ms.float32)
exe = me._executor
exe = me._cell_graph_executor
exe.compile(net, input_data)
tensor_ = exe(net, input_data)

@@ -17,7 +17,7 @@ import numpy as np

import mindspore as ms
import mindspore.common.initializer as init
from mindspore.common.api import _executor
from mindspore.common.api import _cell_graph_executor
from mindspore.nn import Cell
from mindspore.ops import operations as P
from ..ut_filter import non_graph_engine

@@ -133,7 +133,7 @@ def test_tensor_method_sub():

x = ms.Tensor(np.ones([5, 3], np.float32))
y = ms.Tensor(np.ones([8, 5, 3], np.float32))
_executor.compile(net, x, y)
_cell_graph_executor.compile(net, x, y)


def test_tensor_method_mul():

@@ -152,7 +152,7 @@ def test_tensor_method_mul():

x = ms.Tensor(np.ones([5, 3], np.float32))
y = ms.Tensor(np.ones([8, 5, 3], np.float32))
_executor.compile(net, x, y)
_cell_graph_executor.compile(net, x, y)


def test_tensor_method_div():

@@ -171,4 +171,4 @@ def test_tensor_method_div():

x = ms.Tensor(np.ones([5, 3], np.float32))
y = ms.Tensor(np.ones([8, 5, 3], np.float32))
_executor.compile(net, x, y)
_cell_graph_executor.compile(net, x, y)

@@ -19,7 +19,7 @@ import numpy as np

import mindspore.nn as nn # pylint: disable=C0414
from mindspore import Tensor
from mindspore.common.api import _executor
from mindspore.common.api import _cell_graph_executor
from mindspore.ops.operations import Add
from ...train_step_wrap import train_step_with_loss_warp


@@ -244,14 +244,14 @@ def resnet9():
def test_compile():
net = resnet18()
input_data = Tensor(np.ones([1, 3, 224, 224]))
_executor.compile(net, input_data)
_cell_graph_executor.compile(net, input_data)


def test_train_step():
net = train_step_with_loss_warp(resnet9())
input_data = Tensor(np.ones([1, 3, 224, 224]))
label = Tensor(np.zeros([1, 10]))
_executor.compile(net, input_data, label)
_cell_graph_executor.compile(net, input_data, label)


def test_train_step_training():

@@ -259,4 +259,4 @@ def test_train_step_training():
input_data = Tensor(np.ones([1, 3, 224, 224]))
label = Tensor(np.zeros([1, 10]))
net.set_train()
_executor.compile(net, input_data, label)
_cell_graph_executor.compile(net, input_data, label)
@@ -18,7 +18,7 @@ import numpy as np
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import _executor
from mindspore.common.api import _cell_graph_executor
from mindspore.ops import operations as P
from ....train_step_wrap import train_step_with_loss_warp, train_step_with_sens


@@ -53,14 +53,14 @@ def test_lenet5_train_step():
predict = Tensor(np.ones([1, 1, 32, 32]).astype(np.float32) * 0.01)
label = Tensor(np.zeros([1, 10]).astype(np.float32))
net = train_step_with_loss_warp(LeNet5())
_executor.compile(net, predict, label)
_cell_graph_executor.compile(net, predict, label)


def test_lenet5_train_sens():
predict = Tensor(np.ones([1, 1, 32, 32]).astype(np.float32) * 0.01)
sens = Tensor(np.ones([1, 10]).astype(np.float32))
net = train_step_with_sens(LeNet5(), sens)
_executor.compile(net, predict)
_cell_graph_executor.compile(net, predict)


def test_lenet5_train_step_training():

@@ -68,4 +68,4 @@ def test_lenet5_train_step_training():
label = Tensor(np.zeros([1, 10]).astype(np.float32))
net = train_step_with_loss_warp(LeNet5())
net.set_train()
_executor.compile(net, predict, label)
_cell_graph_executor.compile(net, predict, label)

@@ -17,7 +17,7 @@ import numpy as np
import pytest

import mindspore.nn as nn
from mindspore.common.api import _executor
from mindspore.common.api import _cell_graph_executor
from mindspore.common.tensor import Tensor
from mindspore.ops import operations as P
from ....train_step_wrap import train_step_with_loss_warp

@@ -54,5 +54,5 @@ def test_lenet5_exception():
label = Tensor(in2)
net = train_step_with_loss_warp(LeNet5())
with pytest.raises(RuntimeError) as info:
_executor.compile(net, predict, label)
_cell_graph_executor.compile(net, predict, label)
assert "x_shape[C_in] / group must equal to w_shape[C_in] = " in str(info.value)

@@ -19,7 +19,7 @@ import mindspore.common.dtype as mstype
import mindspore.nn as nn
from mindspore import Tensor, context
from mindspore.common import ParameterTuple
from mindspore.common.api import _executor
from mindspore.common.api import _cell_graph_executor
from mindspore.common.parameter import Parameter
from mindspore.nn import Momentum
from mindspore.nn import TrainOneStepCell, WithLossCell

@@ -111,7 +111,7 @@ def test_data_parallel_with_cast():
net = WithLossCell(net, loss_fn)
net = TrainOneStepCell(net, optimizer)

_executor.compile(net, predict, label)
_cell_graph_executor.compile(net, predict, label)
context.reset_auto_parallel_context()


@@ -128,7 +128,7 @@ def test_nn_prelu():
x = Tensor(np.ones([1, 16, 10, 10]).astype(np.float32) * 0.01)
net = NetForPReLU().set_train()
net.add_flags_recursive(fp16=True)
_executor.compile(net, x)
_cell_graph_executor.compile(net, x)


class NetForCast(nn.Cell):

@@ -19,7 +19,7 @@ import numpy as np

import mindspore.nn as nn
from mindspore import Tensor, Parameter, context
from mindspore.common.api import _executor
from mindspore.common.api import _cell_graph_executor
from mindspore.nn import TrainOneStepCell, WithLossCell
from mindspore.nn.optim import Adagrad
from mindspore.ops import operations as P

@@ -55,4 +55,4 @@ def test_ada_grad():
optimizer = Adagrad(net.trainable_params(), weight_decay=0.9, loss_scale=1024.0)
net_with_loss = WithLossCell(net, loss)
train_network = TrainOneStepCell(net_with_loss, optimizer)
_executor.compile(train_network, inputs, label)
_cell_graph_executor.compile(train_network, inputs, label)
@@ -18,7 +18,7 @@ import pytest

import mindspore.nn as nn
from mindspore import Tensor, Parameter, context
from mindspore.common.api import _executor
from mindspore.common.api import _cell_graph_executor
from mindspore.nn import TrainOneStepCell, WithLossCell
from mindspore.nn.optim.adafactor import AdaFactor
from mindspore.ops import operations as P

@@ -82,7 +82,7 @@ def test_adafactor_compile1():

net_with_loss = WithLossCell(net, loss)
train_network = TrainOneStepCell(net_with_loss, optimizer)
_executor.compile(train_network, inputs, label)
_cell_graph_executor.compile(train_network, inputs, label)


def test_adafactor_compile2():

@@ -97,7 +97,7 @@ def test_adafactor_compile2():

net_with_loss = WithLossCell(net, loss)
train_network = TrainOneStepCell(net_with_loss, optimizer)
_executor.compile(train_network, inputs, label)
_cell_graph_executor.compile(train_network, inputs, label)


def test_adafactor_compile3():

@@ -113,7 +113,7 @@ def test_adafactor_compile3():
warmup_init=False, compression=False)
net_with_loss = WithLossCell(net, loss)
train_network = TrainOneStepCell(net_with_loss, optimizer)
_executor.compile(train_network, inputs, label)
_cell_graph_executor.compile(train_network, inputs, label)


def test_adafactor_compile4():

@@ -133,7 +133,7 @@ def test_adafactor_compile4():
warmup_init=warmup_init, compression=compression)
net_with_loss = WithLossCell(net, loss)
train_network = TrainOneStepCell(net_with_loss, optimizer)
_executor.compile(train_network, inputs, label)
_cell_graph_executor.compile(train_network, inputs, label)


def test_adafactor_compile5():

@@ -153,7 +153,7 @@ def test_adafactor_compile5():
warmup_init=warmup_init, compression=compression)
net_with_loss = WithLossCell(net, loss)
train_network = TrainOneStepCell(net_with_loss, optimizer)
_executor.compile(train_network, inputs, label)
_cell_graph_executor.compile(train_network, inputs, label)


def test_adafactor_compile6():

@@ -173,7 +173,7 @@ def test_adafactor_compile6():
warmup_init=warmup_init, compression=compression)
net_with_loss = WithLossCell(net, loss)
train_network = TrainOneStepCell(net_with_loss, optimizer)
_executor.compile(train_network, inputs, label)
_cell_graph_executor.compile(train_network, inputs, label)


def test_adafactor_group1():

@@ -192,7 +192,7 @@ def test_adafactor_group1():
optimizer = AdaFactor(group_params, learning_rate=poly_decay_lr, relative_step=False)

train_network = TrainOneStepCell(net_with_loss, optimizer)
_executor.compile(train_network, inputs, label)
_cell_graph_executor.compile(train_network, inputs, label)


def test_adafactor_group2():

@@ -210,7 +210,7 @@ def test_adafactor_group2():
{'params': [all_params[1]]}]
optimizer = AdaFactor(group_params, learning_rate=schedule_lr, relative_step=False)
train_network = TrainOneStepCell(net_with_loss, optimizer)
_executor.compile(train_network, inputs, label)
_cell_graph_executor.compile(train_network, inputs, label)


def test_adafactor_group3():

@@ -227,7 +227,7 @@ def test_adafactor_group3():
optimizer = AdaFactor(group_params, learning_rate=None)

train_network = TrainOneStepCell(net_with_loss, optimizer)
_executor.compile(train_network, inputs, label)
_cell_graph_executor.compile(train_network, inputs, label)


def test_adafactor_group4():

@@ -244,7 +244,7 @@ def test_adafactor_group4():
{'params': [all_params[1]]}]
optimizer = AdaFactor(group_params, learning_rate=None)
train_network = TrainOneStepCell(net_with_loss, optimizer)
_executor.compile(train_network, inputs, label)
_cell_graph_executor.compile(train_network, inputs, label)


def test_adafactor_group5():

@@ -261,7 +261,7 @@ def test_adafactor_group5():
{'params': [all_params[1]]}]
optimizer = AdaFactor(group_params, learning_rate=None, beta1=0.1)
train_network = TrainOneStepCell(net_with_loss, optimizer)
_executor.compile(train_network, inputs, label)
_cell_graph_executor.compile(train_network, inputs, label)


def test_adafactor_group6():

@@ -278,4 +278,4 @@ def test_adafactor_group6():
{'params': [all_params[1]]}]
optimizer = AdaFactor(group_params, learning_rate=None, beta1=0.2)
train_network = TrainOneStepCell(net_with_loss, optimizer)
_executor.compile(train_network, inputs, label)
_cell_graph_executor.compile(train_network, inputs, label)

@@ -18,7 +18,7 @@ import pytest

import mindspore.nn as nn
from mindspore import Tensor, Parameter, context
from mindspore.common.api import _executor
from mindspore.common.api import _cell_graph_executor
from mindspore.nn import TrainOneStepCell, WithLossCell
from mindspore.nn.optim import Adam, AdamWeightDecay
from mindspore.ops import operations as P

@@ -86,7 +86,7 @@ def test_adamw_compile():

net_with_loss = WithLossCell(net, loss)
train_network = TrainOneStepCell(net_with_loss, optimizer)
_executor.compile(train_network, inputs, label)
_cell_graph_executor.compile(train_network, inputs, label)


def test_adam_compile():

@@ -101,7 +101,7 @@ def test_adam_compile():

net_with_loss = WithLossCell(net, loss)
train_network = TrainOneStepCell(net_with_loss, optimizer)
_executor.compile(train_network, inputs, label)
_cell_graph_executor.compile(train_network, inputs, label)


def test_sparse_adam_compile():

@@ -114,7 +114,7 @@ def test_sparse_adam_compile():
optimizer = Adam(net.trainable_params(), learning_rate=0.1, loss_scale=1024.0, weight_decay=0.9)
optimizer.target = 'CPU'
train_network = TrainOneStepCell(net, optimizer)
_executor.compile(train_network, indices, label)
_cell_graph_executor.compile(train_network, indices, label)


def test_sparse_adam():

@@ -126,7 +126,7 @@ def test_sparse_adam():

optimizer = Adam(net.trainable_params(), learning_rate=0.1, loss_scale=1024.0, weight_decay=0.9)
train_network = TrainOneStepCell(net, optimizer)
_executor.compile(train_network, indices, label)
_cell_graph_executor.compile(train_network, indices, label)


def test_adam_group1():

@@ -146,7 +146,7 @@ def test_adam_group1():
optimizer = nn.Adam(group_params, learning_rate=0.1)

train_network = TrainOneStepCell(net_with_loss, optimizer)
_executor.compile(train_network, inputs, label)
_cell_graph_executor.compile(train_network, inputs, label)


def test_adam_group2():

@@ -164,7 +164,7 @@ def test_adam_group2():
{'params': [all_params[1]]}]
optimizer = nn.Adam(group_params, learning_rate=schedule_lr)
train_network = TrainOneStepCell(net_with_loss, optimizer)
_executor.compile(train_network, inputs, label)
_cell_graph_executor.compile(train_network, inputs, label)


def test_adamweightdecay_group():

@@ -182,7 +182,7 @@ def test_adamweightdecay_group():
{'params': [all_params[1]]}]
optimizer = nn.AdamWeightDecay(group_params, learning_rate=schedule_lr)
train_network = TrainOneStepCell(net_with_loss, optimizer)
_executor.compile(train_network, inputs, label)
_cell_graph_executor.compile(train_network, inputs, label)


def test_adamoffload_group():

@@ -200,7 +200,7 @@ def test_adamoffload_group():
{'params': [all_params[1]]}]
optimizer = nn.AdamOffload(group_params, learning_rate=schedule_lr)
train_network = TrainOneStepCell(net_with_loss, optimizer)
_executor.compile(train_network, inputs, label)
_cell_graph_executor.compile(train_network, inputs, label)


def test_AdamWeightDecay_beta1():
@@ -18,7 +18,7 @@ import numpy as np

import mindspore.nn as nn
from mindspore import Tensor, Parameter, context
from mindspore.common.api import _executor
from mindspore.common.api import _cell_graph_executor
from mindspore.nn import TrainOneStepCell, WithLossCell
from mindspore.nn.optim import FTRL
from mindspore.ops import operations as P

@@ -66,7 +66,7 @@ def test_ftrl():
optimizer = FTRL(net.trainable_params(), weight_decay=0.9, loss_scale=2.0)
net_with_loss = WithLossCell(net, loss)
train_network = TrainOneStepCell(net_with_loss, optimizer)
_executor.compile(train_network, inputs, label)
_cell_graph_executor.compile(train_network, inputs, label)


def test_spares_ftrl_compile():

@@ -79,7 +79,7 @@ def test_spares_ftrl_compile():
optimizer = FTRL(net.trainable_params(), weight_decay=0.9, loss_scale=2.0)
optimizer.target = 'CPU'
train_network = TrainOneStepCell(net, optimizer)
_executor.compile(train_network, indices, label)
_cell_graph_executor.compile(train_network, indices, label)


def test_spares_ftrl():

@@ -92,4 +92,4 @@ def test_spares_ftrl():
optimizer = FTRL(net.trainable_params(), weight_decay=0.9, loss_scale=2.0)
optimizer.target = 'Ascend'
train_network = TrainOneStepCell(net, optimizer)
_executor.compile(train_network, indices, label)
_cell_graph_executor.compile(train_network, indices, label)

@@ -17,7 +17,7 @@ import numpy as np

import mindspore.nn as nn
from mindspore import Tensor, Parameter
from mindspore.common.api import _executor
from mindspore.common.api import _cell_graph_executor
from mindspore.nn import TrainOneStepCell, WithLossCell
from mindspore.nn.optim import Lamb
from mindspore.ops import operations as P

@@ -83,7 +83,7 @@ def test_lamb_compile_dynamic_lr():

net_with_loss = WithLossCell(net, loss)
train_network = TrainOneStepCell(net_with_loss, optimizer)
_executor.compile(train_network, inputs, label)
_cell_graph_executor.compile(train_network, inputs, label)


def test_lamb_compile():

@@ -98,7 +98,7 @@ def test_lamb_compile():

net_with_loss = WithLossCell(net, loss)
train_network = TrainOneStepCell(net_with_loss, optimizer)
_executor.compile(train_network, inputs, label)
_cell_graph_executor.compile(train_network, inputs, label)


def test_lamb_group():

@@ -116,4 +116,4 @@ def test_lamb_group():

net_with_loss = WithLossCell(net, loss)
train_network = TrainOneStepCell(net_with_loss, optimizer)
_executor.compile(train_network, inputs, label)
_cell_graph_executor.compile(train_network, inputs, label)

@@ -18,7 +18,7 @@ import numpy as np
import mindspore.nn as nn
from mindspore import Tensor, Parameter
from mindspore.common import dtype as mstype
from mindspore.common.api import _executor
from mindspore.common.api import _cell_graph_executor
from mindspore.nn import TrainOneStepCell, WithLossCell
from mindspore.nn.optim import LARS, Momentum
from mindspore.ops import operations as P

@@ -61,7 +61,7 @@ def test_lars_multi_step_lr():

net_with_loss = WithLossCell(net, loss)
train_network = TrainOneStepCell(net_with_loss, optimizer)
_executor.compile(train_network, inputs, label)
_cell_graph_executor.compile(train_network, inputs, label)


def test_lars_float_lr():

@@ -78,4 +78,4 @@ def test_lars_float_lr():

net_with_loss = WithLossCell(net, loss)
train_network = TrainOneStepCell(net_with_loss, optimizer)
_executor.compile(train_network, inputs, label)
_cell_graph_executor.compile(train_network, inputs, label)

@@ -18,7 +18,7 @@ import pytest

import mindspore.nn as nn
from mindspore import Tensor, Parameter, context
from mindspore.common.api import _executor
from mindspore.common.api import _cell_graph_executor
from mindspore.nn import TrainOneStepCell, WithLossCell
from mindspore.nn.optim import LazyAdam
from mindspore.ops import operations as P

@@ -70,7 +70,7 @@ def test_lazy_adam_compile():

net_with_loss = WithLossCell(net, loss)
train_network = TrainOneStepCell(net_with_loss, optimizer)
_executor.compile(train_network, inputs, label)
_cell_graph_executor.compile(train_network, inputs, label)


def test_spares_lazy_adam_compile():

@@ -83,7 +83,7 @@ def test_spares_lazy_adam_compile():
optimizer = LazyAdam(net.trainable_params(), learning_rate=0.1, weight_decay=0.9, loss_scale=2.0)
optimizer.target = 'CPU'
train_network = TrainOneStepCell(net, optimizer)
_executor.compile(train_network, indices, label)
_cell_graph_executor.compile(train_network, indices, label)


def test_spares_lazy_adam():

@@ -96,7 +96,7 @@ def test_spares_lazy_adam():
optimizer = LazyAdam(net.trainable_params(), learning_rate=0.1, weight_decay=0.9, loss_scale=2.0)
optimizer.target = 'Ascend'
train_network = TrainOneStepCell(net, optimizer)
_executor.compile(train_network, indices, label)
_cell_graph_executor.compile(train_network, indices, label)


def test_lazy_adam_error():
@ -17,7 +17,7 @@ import numpy as np
|
|||
|
||||
import mindspore.nn as nn
|
||||
from mindspore import Tensor, Parameter
|
||||
from mindspore.common.api import _executor
|
||||
from mindspore.common.api import _cell_graph_executor
|
||||
from mindspore.nn import TrainOneStepCell, WithLossCell
|
||||
from mindspore.nn.optim import Momentum
|
||||
from mindspore.ops import operations as P
|
||||
|
@ -50,4 +50,4 @@ def test_momentum_compile():
|
|||
|
||||
net_with_loss = WithLossCell(net, loss)
|
||||
train_network = TrainOneStepCell(net_with_loss, optimizer)
|
||||
_executor.compile(train_network, inputs, label)
|
||||
_cell_graph_executor.compile(train_network, inputs, label)
|
||||
|
|
|
@ -18,7 +18,7 @@ import numpy as np
|
|||
|
||||
import mindspore.nn as nn
|
||||
from mindspore import Tensor, Parameter, context
|
||||
from mindspore.common.api import _executor
|
||||
from mindspore.common.api import _cell_graph_executor
|
||||
from mindspore.nn import TrainOneStepCell, WithLossCell
|
||||
from mindspore.nn.optim import ProximalAdagrad
|
||||
from mindspore.ops import operations as P
|
||||
|
@ -66,7 +66,7 @@ def test_proximal_ada_grad():
|
|||
optimizer = ProximalAdagrad(net.trainable_params(), weight_decay=0.9, loss_scale=1024.0)
|
||||
net_with_loss = WithLossCell(net, loss)
|
||||
train_network = TrainOneStepCell(net_with_loss, optimizer)
|
||||
_executor.compile(train_network, inputs, label)
|
||||
_cell_graph_executor.compile(train_network, inputs, label)
|
||||
|
||||
|
||||
def test_spares_proximal_ada_grad_compile():
|
||||
|
@ -79,7 +79,7 @@ def test_spares_proximal_ada_grad_compile():
|
|||
optimizer = ProximalAdagrad(net.trainable_params(), weight_decay=0.9, loss_scale=1024.0)
|
||||
optimizer.target = 'CPU'
|
||||
train_network = TrainOneStepCell(net, optimizer)
|
||||
_executor.compile(train_network, indices, label)
|
||||
_cell_graph_executor.compile(train_network, indices, label)
|
||||
|
||||
|
||||
def test_spares_proximal_ada_grad():
|
||||
|
@ -91,4 +91,4 @@ def test_spares_proximal_ada_grad():
|
|||
|
||||
optimizer = ProximalAdagrad(net.trainable_params(), weight_decay=0.9, loss_scale=1024.0)
|
||||
train_network = TrainOneStepCell(net, optimizer)
|
||||
_executor.compile(train_network, indices, label)
|
||||
_cell_graph_executor.compile(train_network, indices, label)
|
||||
|
|
|
@@ -18,7 +18,7 @@ import pytest

 import mindspore.nn as nn
 from mindspore import Tensor, Parameter
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.nn import TrainOneStepCell, WithLossCell
 from mindspore.nn.optim import RMSProp
 from mindspore.ops import operations as P
@@ -51,7 +51,7 @@ def test_rmsprop_compile():

     net_with_loss = WithLossCell(net, loss)
     train_network = TrainOneStepCell(net_with_loss, optimizer)
-    _executor.compile(train_network, inputs, label)
+    _cell_graph_executor.compile(train_network, inputs, label)


 def test_rmsprop_e():
@@ -18,7 +18,7 @@ import numpy as np
 import mindspore.common.dtype as mstype
 import mindspore.nn as nn
 from mindspore import Tensor
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.nn.probability.dpn import VAE


@@ -54,4 +54,4 @@ def test_vae():
     decoder = Decoder()
     net = VAE(encoder, decoder, hidden_size=3, latent_size=2)
     input_data = Tensor(np.random.rand(32, 6), dtype=mstype.float32)
-    _executor.compile(net, input_data)
+    _cell_graph_executor.compile(net, input_data)
@@ -17,7 +17,7 @@ import numpy as np

 import mindspore.nn as nn
 from mindspore import Tensor
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from ..ut_filter import non_graph_engine


@@ -74,7 +74,7 @@ class Net1(nn.Cell):
 def test_compile_relu():
     net = Net1()
     input_data = Tensor(np.array([[1.2, 2.1], [2.2, 3.2]], dtype=np.float32))
-    _executor.compile(net, input_data)
+    _cell_graph_executor.compile(net, input_data)


 class Net_gelu(nn.Cell):
@@ -89,7 +89,7 @@ class Net_gelu(nn.Cell):
 def test_compile_gelu():
     net = Net_gelu()
     input_data = Tensor(np.array([[1.2, 2.1], [2.2, 3.2]], dtype=np.float32))
-    _executor.compile(net, input_data)
+    _cell_graph_executor.compile(net, input_data)


 class NetLeakyReLU(nn.Cell):
@@ -104,4 +104,4 @@ class NetLeakyReLU(nn.Cell):
 def test_compile_leaky_relu():
     net = NetLeakyReLU(alpha=0.1)
     input_data = Tensor(np.array([[1.6, 0, 0.6], [6, 0, -6]], dtype=np.float32))
-    _executor.compile(net, input_data)
+    _cell_graph_executor.compile(net, input_data)
@@ -18,7 +18,7 @@ import pytest

 import mindspore.nn as nn
 from mindspore import Tensor, Parameter
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor


 def test_bn_pars_valid1():
@@ -55,7 +55,7 @@ class Net(nn.Cell):
 def test_compile():
     net = Net()
     input_data = Tensor(np.random.randint(0, 255, [1, 3, 224, 224]).astype(np.float32))
-    _executor.compile(net, input_data)
+    _cell_graph_executor.compile(net, input_data)


 class GroupNet(nn.Cell):
@@ -70,4 +70,4 @@ class GroupNet(nn.Cell):
 def test_compile_groupnorm():
     net = nn.GroupNorm(16, 64)
     input_data = Tensor(np.random.rand(1, 64, 256, 256).astype(np.float32))
-    _executor.compile(net, input_data)
+    _cell_graph_executor.compile(net, input_data)
@@ -19,7 +19,7 @@ import pytest

 import mindspore.nn as nn
 from mindspore import Tensor, Parameter
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor


 class ModA(nn.Cell):
@@ -299,4 +299,4 @@ def test_cell_names():
     ta = Tensor(np.ones([2, 3]))
     mn = ModelName(ta)
     with pytest.raises(ValueError):
-        _executor.compile(mn)
+        _cell_graph_executor.compile(mn)
@@ -18,7 +18,7 @@ import pytest
 import mindspore.nn as nn
 from mindspore import Tensor, Parameter
 from mindspore.common import dtype as mstype
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.nn import TrainOneStepCell, WithLossCell, ParameterUpdate
 from mindspore.nn.optim import Momentum
 from mindspore.ops import operations as P
@@ -50,7 +50,7 @@ def test_parameter_update_int32_and_tensor():
     train_network.set_train()
     inputs = Tensor(np.ones([1, 64]).astype(np.float32))
     label = Tensor(np.zeros([1, 10]).astype(np.float32))
-    _executor.compile(train_network, inputs, label)
+    _cell_graph_executor.compile(train_network, inputs, label)

     # test tensor
     param_lr = train_network.parameters_dict()['learning_rate']
@@ -58,14 +58,14 @@ def test_parameter_update_int32_and_tensor():
     update_network.phase = 'update_param'

     input_lr = Tensor(np.array([0.2, 0.02, 0.002]), mstype.float32)
-    _executor.compile(update_network, input_lr)
+    _cell_graph_executor.compile(update_network, input_lr)

     # test int32
     param_step = train_network.parameters_dict()['global_step']
     update_global_step = ParameterUpdate(param_step)

     input_step = Tensor(np.array([1000]), mstype.int32)
-    _executor.compile(update_global_step, input_step)
+    _cell_graph_executor.compile(update_global_step, input_step)


 def test_parameter_update_float32():
@@ -81,7 +81,7 @@ def test_parameter_update_float32():
     train_network.set_train()
     inputs = Tensor(np.ones([1, 64]).astype(np.float32))
     label = Tensor(np.zeros([1, 10]).astype(np.float32))
-    _executor.compile(train_network, inputs, label)
+    _cell_graph_executor.compile(train_network, inputs, label)

     # construct and compile update graph
     param_lr = train_network.parameters_dict()['learning_rate']
@@ -89,7 +89,7 @@ def test_parameter_update_float32():
     update_network.phase = 'update_param'

     input_lr = Tensor(0.0001, mstype.float32)
-    _executor.compile(update_network, input_lr)
+    _cell_graph_executor.compile(update_network, input_lr)


 def test_parameter_update_error():
@@ -21,7 +21,7 @@ import pytest
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.common import dtype as mstype
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor


 class CentralCropNet(nn.Cell):
@@ -37,14 +37,14 @@ def test_compile_3d_central_crop():
     central_fraction = 0.2
     net = CentralCropNet(central_fraction)
     image = Tensor(np.random.random((3, 16, 16)), mstype.float32)
-    _executor.compile(net, image)
+    _cell_graph_executor.compile(net, image)


 def test_compile_4d_central_crop():
     central_fraction = 0.5
     net = CentralCropNet(central_fraction)
     image = Tensor(np.random.random((8, 3, 16, 16)), mstype.float32)
-    _executor.compile(net, image)
+    _cell_graph_executor.compile(net, image)


 def test_central_fraction_bool():
@@ -71,4 +71,4 @@ def test_central_crop_invalid_5d_input():

     net = CentralCropNet(central_fraction=0.5)
     with pytest.raises(ValueError):
-        _executor.compile(net, invalid_image)
+        _cell_graph_executor.compile(net, invalid_image)
@@ -20,7 +20,7 @@ import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.ops import operations as P
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from ..ut_filter import non_graph_engine


@@ -115,12 +115,12 @@ def test_compile():
     bias = Tensor(np.random.randint(0, 255, [8]).astype(np.float32))
     net = Net(64, 8, weight=weight, bias=bias)
     input_data = Tensor(np.random.randint(0, 255, [128, 64]).astype(np.float32))
-    _executor.compile(net, input_data)
+    _cell_graph_executor.compile(net, input_data)

     # training
     net_train = Net(64, 8, weight=weight, bias=bias)
     net_train.set_train()
-    _executor.compile(net_train, input_data)
+    _cell_graph_executor.compile(net_train, input_data)


 def test_compile_2():
@@ -129,12 +129,12 @@ def test_compile_2():
     weight = Tensor(np.random.randint(0, 255, [8, 64]).astype(np.float32))
     net = Net(64, 8, weight=weight, has_bias=False)
     input_data = Tensor(np.random.randint(0, 255, [128, 64]).astype(np.float32))
-    _executor.compile(net, input_data)
+    _cell_graph_executor.compile(net, input_data)

     # training
     net_train = Net(64, 8, weight=weight, has_bias=False)
     net_train.set_train()
-    _executor.compile(net_train, input_data)
+    _cell_graph_executor.compile(net_train, input_data)


 def test_compile_3():
@@ -144,12 +144,12 @@ def test_compile_3():
     context.set_context(mode=context.GRAPH_MODE)
     net = Net(128, 10)
     input_data = Tensor(np.random.randint(0, 255, [128, 128]).astype(np.float32))
-    _executor.compile(net, input_data)
+    _cell_graph_executor.compile(net, input_data)

     # training
     net_train = Net(128, 10)
     net_train.set_train()
-    _executor.compile(net_train, input_data)
+    _cell_graph_executor.compile(net_train, input_data)


 def test_compile_4():
@@ -159,9 +159,9 @@ def test_compile_4():
     context.set_context(mode=context.GRAPH_MODE)
     net = Net(128, 10, has_bias=False)
     input_data = Tensor(np.random.randint(0, 255, [128, 128]).astype(np.float32))
-    _executor.compile(net, input_data)
+    _cell_graph_executor.compile(net, input_data)

     # training
     net_train = Net(128, 10, has_bias=False)
     net_train.set_train()
-    _executor.compile(net_train, input_data)
+    _cell_graph_executor.compile(net_train, input_data)
@@ -19,7 +19,7 @@ import numpy as np

 import mindspore.nn as nn
 from mindspore import Tensor
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor


 class Net(nn.Cell):
@@ -34,4 +34,4 @@ class Net(nn.Cell):
 def test_compile():
     net = Net()
     input_data = Tensor(np.array([[1.2, 2.1], [2.2, 3.2]]).astype(np.float32))
-    _executor.compile(net, input_data)
+    _cell_graph_executor.compile(net, input_data)
@@ -20,7 +20,7 @@ import mindspore.common.dtype as mstype
 import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.common.api import ms_function

 context.set_context(device_target="Ascend")
@@ -40,7 +40,7 @@ def test_compile():
     # input shape 1 x 1 x 2 x 2
     image = Tensor(np.array([[[[1, 2], [3, 4]]]]), dtype=mstype.int32)
     net = Net()
-    _executor.compile(net, image)
+    _cell_graph_executor.compile(net, image)


 def test_compile_multi_channel():
@@ -51,7 +51,7 @@ def test_compile_multi_channel():
                              [[[5, 10], [15, 20]], [[25, 30], [35, 40]]],
                              [[[10, 20], [30, 40]], [[50, 60], [70, 80]]]]), dtype=dtype)
     net = Net()
-    _executor.compile(net, image)
+    _cell_graph_executor.compile(net, image)


 def test_invalid_5d_input():
@@ -59,4 +59,4 @@ def test_invalid_5d_input():
     image = Tensor(np.random.random([4, 1, 16, 16, 1]), dtype=dtype)
     net = Net()
     with pytest.raises(ValueError):
-        _executor.compile(net, image)
+        _cell_graph_executor.compile(net, image)
@@ -17,7 +17,7 @@ import pytest

 from mindspore import Tensor
 from mindspore.nn import learning_rate_schedule as lr_schedules
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 import mindspore.common.dtype as mstype


@@ -124,34 +124,34 @@ class TestInit:

 def test_exponential_decay():
     lr_schedule = lr_schedules.ExponentialDecayLR(learning_rate, decay_rate, decay_steps, True)
-    _executor.compile(lr_schedule, global_step)
+    _cell_graph_executor.compile(lr_schedule, global_step)


 def test_enatural_exp_decay():
     lr_schedule = lr_schedules.NaturalExpDecayLR(learning_rate, decay_rate, decay_steps, True)
-    _executor.compile(lr_schedule, global_step)
+    _cell_graph_executor.compile(lr_schedule, global_step)


 def test_inverse_decay():
     lr_schedule = lr_schedules.InverseDecayLR(learning_rate, decay_rate, decay_steps, True)
-    _executor.compile(lr_schedule, global_step)
+    _cell_graph_executor.compile(lr_schedule, global_step)


 def test_cosine_decay():
     lr_schedule = lr_schedules.CosineDecayLR(min_lr, max_lr, decay_steps)
-    _executor.compile(lr_schedule, global_step)
+    _cell_graph_executor.compile(lr_schedule, global_step)


 def test_polynomial_decay():
     lr_schedule = lr_schedules.PolynomialDecayLR(learning_rate, end_learning_rate, decay_steps, power)
-    _executor.compile(lr_schedule, global_step)
+    _cell_graph_executor.compile(lr_schedule, global_step)


 def test_polynomial_decay2():
     lr_schedule = lr_schedules.PolynomialDecayLR(learning_rate, end_learning_rate, decay_steps, power, True)
-    _executor.compile(lr_schedule, global_step)
+    _cell_graph_executor.compile(lr_schedule, global_step)


 def test_warmup():
     lr_schedule = lr_schedules.WarmUpLR(learning_rate, warmup_steps)
-    _executor.compile(lr_schedule, global_step)
+    _cell_graph_executor.compile(lr_schedule, global_step)
@ -21,7 +21,7 @@ import pytest
|
|||
import mindspore.common.dtype as mstype
|
||||
import mindspore.nn as nn
|
||||
from mindspore import Tensor
|
||||
from mindspore.common.api import _executor
|
||||
from mindspore.common.api import _cell_graph_executor
|
||||
|
||||
_MSSSIM_WEIGHTS = (0.0448, 0.2856, 0.3001, 0.2363, 0.1333)
|
||||
|
||||
|
@ -39,7 +39,7 @@ def test_compile():
|
|||
net = MSSSIMNet(power_factors=factors)
|
||||
img1 = Tensor(np.random.random((8, 3, 128, 128)))
|
||||
img2 = Tensor(np.random.random((8, 3, 128, 128)))
|
||||
_executor.compile(net, img1, img2)
|
||||
_cell_graph_executor.compile(net, img1, img2)
|
||||
|
||||
|
||||
def test_compile_grayscale():
|
||||
|
@ -48,7 +48,7 @@ def test_compile_grayscale():
|
|||
net = MSSSIMNet(max_val=max_val, power_factors=factors)
|
||||
img1 = Tensor(np.random.randint(0, 256, (8, 3, 128, 128), np.uint8))
|
||||
img2 = Tensor(np.random.randint(0, 256, (8, 3, 128, 128), np.uint8))
|
||||
_executor.compile(net, img1, img2)
|
||||
_cell_graph_executor.compile(net, img1, img2)
|
||||
|
||||
|
||||
def test_msssim_max_val_negative():
|
||||
|
@ -102,7 +102,7 @@ def test_msssim_different_shape():
|
|||
img2 = Tensor(np.random.random(shape_2))
|
||||
net = MSSSIMNet(power_factors=factors)
|
||||
with pytest.raises(ValueError):
|
||||
_executor.compile(net, img1, img2)
|
||||
_cell_graph_executor.compile(net, img1, img2)
|
||||
|
||||
|
||||
def test_msssim_different_dtype():
|
||||
|
@ -113,7 +113,7 @@ def test_msssim_different_dtype():
|
|||
img2 = Tensor(np.random.random((8, 3, 128, 128)), dtype=dtype_2)
|
||||
net = MSSSIMNet(power_factors=factors)
|
||||
with pytest.raises(TypeError):
|
||||
_executor.compile(net, img1, img2)
|
||||
_cell_graph_executor.compile(net, img1, img2)
|
||||
|
||||
|
||||
def test_msssim_invalid_5d_input():
|
||||
|
@ -128,8 +128,8 @@ def test_msssim_invalid_5d_input():
|
|||
|
||||
net = MSSSIMNet(power_factors=factors)
|
||||
with pytest.raises(ValueError):
|
||||
_executor.compile(net, invalid_img1, img2)
|
||||
_cell_graph_executor.compile(net, invalid_img1, img2)
|
||||
with pytest.raises(ValueError):
|
||||
_executor.compile(net, img1, invalid_img2)
|
||||
_cell_graph_executor.compile(net, img1, invalid_img2)
|
||||
with pytest.raises(ValueError):
|
||||
_executor.compile(net, invalid_img1, invalid_img2)
|
||||
_cell_graph_executor.compile(net, invalid_img1, invalid_img2)
|
||||
|
|
|
@@ -18,7 +18,7 @@ import pytest

 from mindspore import Tensor
 from mindspore.common import dtype
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.nn import Embedding, MultiFieldEmbeddingLookup
 from ..ut_filter import non_graph_engine

@@ -27,21 +27,21 @@ from ..ut_filter import non_graph_engine
 def test_check_embedding_1():
     net = Embedding(20000, 768, False)
     input_data = Tensor(np.ones([8, 128]), dtype.int32)
-    _executor.compile(net, input_data)
+    _cell_graph_executor.compile(net, input_data)


 @non_graph_engine
 def test_check_embedding_2():
     net = Embedding(20000, 768, True)
     input_data = Tensor(np.ones([8, 128]), dtype.int32)
-    _executor.compile(net, input_data)
+    _cell_graph_executor.compile(net, input_data)


 @non_graph_engine
 def test_check_embedding_3():
     net = Embedding(20000, 768, True, "zeros")
     input_data = Tensor(np.ones([8, 128]), dtype.int32)
-    _executor.compile(net, input_data)
+    _cell_graph_executor.compile(net, input_data)


 def compile_multi_field_embedding(shape_id, shape_value, shape_field,
@@ -50,7 +50,7 @@ def compile_multi_field_embedding(shape_id, shape_value, shape_field,
     input_data = Tensor(np.ones(shape_id), type_id)
     input_value = Tensor(np.ones(shape_value), type_value)
     input_field = Tensor(np.ones(shape_field), type_field)
-    _executor.compile(net, input_data, input_value, input_field)
+    _cell_graph_executor.compile(net, input_data, input_value, input_field)


 @non_graph_engine
@@ -17,7 +17,7 @@ import numpy as np

 import mindspore.nn as nn
 from mindspore import Tensor
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from ..ut_filter import non_graph_engine


@@ -34,4 +34,4 @@ class NormNet(nn.Cell):
 def test_compile_norm():
     net = NormNet()
     x = Tensor(np.array([2.0, 1.0]))
-    _executor.compile(net, x)
+    _cell_graph_executor.compile(net, x)
@@ -19,7 +19,7 @@ import numpy as np

 import mindspore.nn as nn
 from mindspore import Tensor
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor


 class AvgNet(nn.Cell):
@@ -36,7 +36,7 @@ class AvgNet(nn.Cell):
 def test_compile_avg():
     net = AvgNet(3, 1)
     x = Tensor(np.ones([1, 3, 16, 50]).astype(np.float32))
-    _executor.compile(net, x)
+    _cell_graph_executor.compile(net, x)


 class MaxNet(nn.Cell):
@@ -58,7 +58,7 @@ class MaxNet(nn.Cell):
 def test_compile_max():
     net = MaxNet(3, stride=1, padding=0)
     x = Tensor(np.random.randint(0, 255, [1, 3, 6, 6]).astype(np.float32))
-    _executor.compile(net, x)
+    _cell_graph_executor.compile(net, x)


 class Avg1dNet(nn.Cell):
@@ -75,4 +75,4 @@ class Avg1dNet(nn.Cell):
 def test_avg1d():
     net = Avg1dNet(6, 1)
     input_ = Tensor(np.random.randint(0, 255, [1, 3, 6]).astype(np.float32))
-    _executor.compile(net, input_)
+    _cell_graph_executor.compile(net, input_)
@ -21,7 +21,7 @@ import pytest
|
|||
import mindspore.nn as nn
|
||||
from mindspore import Tensor
|
||||
from mindspore.common import dtype as mstype
|
||||
from mindspore.common.api import _executor
|
||||
from mindspore.common.api import _cell_graph_executor
|
||||
|
||||
|
||||
class PSNRNet(nn.Cell):
|
||||
|
@ -38,7 +38,7 @@ def test_compile_psnr():
|
|||
net = PSNRNet(max_val)
|
||||
img1 = Tensor(np.random.random((8, 3, 16, 16)))
|
||||
img2 = Tensor(np.random.random((8, 3, 16, 16)))
|
||||
_executor.compile(net, img1, img2)
|
||||
_cell_graph_executor.compile(net, img1, img2)
|
||||
|
||||
|
||||
def test_compile_psnr_grayscale():
|
||||
|
@ -46,7 +46,7 @@ def test_compile_psnr_grayscale():
|
|||
net = PSNRNet(max_val)
|
||||
img1 = Tensor(np.random.randint(0, 256, (8, 1, 16, 16), np.uint8))
|
||||
img2 = Tensor(np.random.randint(0, 256, (8, 1, 16, 16), np.uint8))
|
||||
_executor.compile(net, img1, img2)
|
||||
_cell_graph_executor.compile(net, img1, img2)
|
||||
|
||||
|
||||
def test_psnr_max_val_negative():
|
||||
|
@ -74,7 +74,7 @@ def test_psnr_different_shape():
|
|||
img2 = Tensor(np.random.random(shape_2))
|
||||
net = PSNRNet()
|
||||
with pytest.raises(ValueError):
|
||||
_executor.compile(net, img1, img2)
|
||||
_cell_graph_executor.compile(net, img1, img2)
|
||||
|
||||
|
||||
def test_psnr_different_dtype():
|
||||
|
@ -84,7 +84,7 @@ def test_psnr_different_dtype():
|
|||
img2 = Tensor(np.random.random((8, 3, 16, 16)), dtype=dtype_2)
|
||||
net = PSNRNet()
|
||||
with pytest.raises(TypeError):
|
||||
_executor.compile(net, img1, img2)
|
||||
_cell_graph_executor.compile(net, img1, img2)
|
||||
|
||||
|
||||
def test_psnr_invalid_5d_input():
|
||||
|
@ -98,8 +98,8 @@ def test_psnr_invalid_5d_input():
|
|||
|
||||
net = PSNRNet()
|
||||
with pytest.raises(ValueError):
|
||||
_executor.compile(net, invalid_img1, img2)
|
||||
_cell_graph_executor.compile(net, invalid_img1, img2)
|
||||
with pytest.raises(ValueError):
|
||||
_executor.compile(net, img1, invalid_img2)
|
||||
_cell_graph_executor.compile(net, img1, invalid_img2)
|
||||
with pytest.raises(ValueError):
|
||||
_executor.compile(net, invalid_img1, invalid_img2)
|
||||
_cell_graph_executor.compile(net, invalid_img1, invalid_img2)
|
||||
|
|
|
@ -21,7 +21,7 @@ import pytest
|
|||
import mindspore.common.dtype as mstype
|
||||
import mindspore.nn as nn
|
||||
from mindspore import Tensor
|
||||
from mindspore.common.api import _executor
|
||||
from mindspore.common.api import _cell_graph_executor
|
||||
|
||||
|
||||
class SSIMNet(nn.Cell):
|
||||
|
@ -37,7 +37,7 @@ def test_compile():
|
|||
net = SSIMNet()
|
||||
img1 = Tensor(np.random.random((8, 3, 16, 16)), mstype.float32)
|
||||
img2 = Tensor(np.random.random((8, 3, 16, 16)), mstype.float32)
|
||||
_executor.compile(net, img1, img2)
|
||||
_cell_graph_executor.compile(net, img1, img2)
|
||||
|
||||
|
||||
def test_ssim_max_val_negative():
|
||||
|
@ -85,7 +85,7 @@ def test_ssim_different_shape():
|
|||
img2 = Tensor(np.random.random(shape_2))
|
||||
net = SSIMNet()
|
||||
with pytest.raises(TypeError):
|
||||
_executor.compile(net, img1, img2)
|
||||
_cell_graph_executor.compile(net, img1, img2)
|
||||
|
||||
|
||||
def test_ssim_different_dtype():
|
||||
|
@ -95,7 +95,7 @@ def test_ssim_different_dtype():
|
|||
img2 = Tensor(np.random.random((8, 3, 16, 16)), dtype=dtype_2)
|
||||
net = SSIMNet()
|
||||
with pytest.raises(TypeError):
|
||||
_executor.compile(net, img1, img2)
|
||||
_cell_graph_executor.compile(net, img1, img2)
|
||||
|
||||
|
||||
def test_ssim_invalid_5d_input():
|
||||
|
@ -109,8 +109,8 @@ def test_ssim_invalid_5d_input():
|
|||
|
||||
net = SSIMNet()
|
||||
with pytest.raises(TypeError):
|
||||
_executor.compile(net, invalid_img1, img2)
|
||||
_cell_graph_executor.compile(net, invalid_img1, img2)
|
||||
with pytest.raises(TypeError):
|
||||
_executor.compile(net, img1, invalid_img2)
|
||||
_cell_graph_executor.compile(net, img1, invalid_img2)
|
||||
with pytest.raises(TypeError):
|
||||
_executor.compile(net, invalid_img1, invalid_img2)
|
||||
_cell_graph_executor.compile(net, invalid_img1, invalid_img2)
|
||||
|
|
|
@ -19,7 +19,7 @@ from mindspore import Tensor
|
|||
from mindspore.common import dtype
|
||||
from mindspore.parallel.nn import MultiHeadAttention, FeedForward, TransformerEncoderLayer, TransformerEncoder, \
|
||||
TransformerDecoder, TransformerDecoderLayer, Transformer, CrossEntropyLoss, AttentionMask, FixedSparseAttention
|
||||
from mindspore.common.api import _executor
|
||||
from mindspore.common.api import _cell_graph_executor
|
||||
|
||||
|
||||
def test_transformer_encoder_only():
|
||||
|
@ -34,7 +34,7 @@ def test_transformer_encoder_only():
|
|||
encoder_input_value = Tensor(np.ones((2, 20, 64)), dtype.float32)
|
||||
encoder_input_mask = Tensor(np.ones((2, 20, 20)), dtype.float16)
|
||||
|
||||
_executor.compile(model, encoder_input_value, encoder_input_mask)
|
||||
_cell_graph_executor.compile(model, encoder_input_value, encoder_input_mask)
|
||||
|
||||
|
||||
def test_transformer_encoder_log_softmax():
|
||||
|
@ -51,7 +51,7 @@ def test_transformer_encoder_log_softmax():
|
|||
encoder_input_value = Tensor(np.ones((2, 20, 64)), dtype.float32)
|
||||
encoder_input_mask = Tensor(np.ones((2, 20, 20)), dtype.float16)
|
||||
|
||||
_executor.compile(model, encoder_input_value, encoder_input_mask)
|
||||
_cell_graph_executor.compile(model, encoder_input_value, encoder_input_mask)
|
||||
|
||||
|
||||
def test_transformer_encoder_leakyrelu():
|
||||
|
@ -67,7 +67,7 @@ def test_transformer_encoder_leakyrelu():
|
|||
encoder_input_value = Tensor(np.ones((2, 20, 64)), dtype.float32)
|
||||
encoder_input_mask = Tensor(np.ones((2, 20, 20)), dtype.float16)
|
||||
|
||||
_executor.compile(model, encoder_input_value, encoder_input_mask)
|
||||
_cell_graph_executor.compile(model, encoder_input_value, encoder_input_mask)
|
||||
|
||||
|
||||
def test_transformer_encoder_logsigmoid():
|
||||
|
@ -83,7 +83,7 @@ def test_transformer_encoder_logsigmoid():
|
|||
encoder_input_value = Tensor(np.ones((2, 20, 64)), dtype.float32)
|
||||
encoder_input_mask = Tensor(np.ones((2, 20, 20)), dtype.float16)
|
||||
|
||||
_executor.compile(model, encoder_input_value, encoder_input_mask)
|
||||
_cell_graph_executor.compile(model, encoder_input_value, encoder_input_mask)
|
||||
|
||||
|
||||
def test_encoder_and_decoder():
|
||||
|
@ -102,7 +102,7 @@ def test_encoder_and_decoder():
|
|||
decoder_input_mask = Tensor(np.ones((2, 10, 10)), dtype.float16)
|
||||
memory_mask = Tensor(np.ones((2, 10, 20)), dtype.float16)
|
||||
|
||||
_executor.compile(model, encoder_input_value, encoder_input_mask,
|
||||
_cell_graph_executor.compile(model, encoder_input_value, encoder_input_mask,
|
||||
decoder_input_value,
|
||||
decoder_input_mask,
|
||||
memory_mask)
|
||||
|
@ -119,7 +119,7 @@ def test_transformer_encoder():
|
|||
encoder_input_value = Tensor(np.ones((2, 16, 8)), dtype.float32)
|
||||
encoder_input_mask = Tensor(np.ones((2, 16, 16)), dtype.float16)
|
||||
|
||||
_executor.compile(model,
|
||||
_cell_graph_executor.compile(model,
|
||||
encoder_input_value,
|
||||
encoder_input_mask)
|
||||
|
||||
|
@ -131,7 +131,7 @@ def test_transformer_encoder_layer():
|
|||
encoder_input_value = Tensor(np.ones((2, 16, 8)), dtype.float32)
|
||||
encoder_input_mask = Tensor(np.ones((2, 16, 16)), dtype.float16)
|
||||
|
||||
_executor.compile(model,
|
||||
_cell_graph_executor.compile(model,
|
||||
encoder_input_value,
|
||||
encoder_input_mask)
|
||||
|
||||
|
@ -145,7 +145,7 @@ def test_transformer_encoder_layer_post_ture():
|
|||
encoder_input_value = Tensor(np.ones((2, 16, 8)), dtype.float32)
|
||||
encoder_input_mask = Tensor(np.ones((2, 16, 16)), dtype.float16)
|
||||
|
||||
_executor.compile(model,
|
||||
_cell_graph_executor.compile(model,
|
||||
encoder_input_value,
|
||||
encoder_input_mask)
|
||||
|
||||
|
@ -165,7 +165,7 @@ def test_transformer_decoder():
|
|||
decoder_input_mask = Tensor(np.ones((2, 10, 10)), dtype.float16)
|
||||
memory_mask = Tensor(np.ones((2, 10, 20)), dtype.float16)
|
||||
|
||||
_executor.compile(model, decoder_input_value, decoder_input_mask,
|
||||
_cell_graph_executor.compile(model, decoder_input_value, decoder_input_mask,
|
||||
encoder_input_value,
|
||||
memory_mask)
|
||||
|
||||
|
@ -185,7 +185,7 @@ def test_transformer_decoder_layer():
|
|||
decoder_input_mask = Tensor(np.ones((2, 10, 10)), dtype.float16)
|
||||
memory_mask = Tensor(np.ones((2, 10, 20)), dtype.float16)
|
||||
|
||||
_executor.compile(model, decoder_input_value, decoder_input_mask,
|
||||
_cell_graph_executor.compile(model, decoder_input_value, decoder_input_mask,
|
||||
encoder_input_value,
|
||||
memory_mask)
|
||||
|
||||
|
@ -200,7 +200,7 @@ def test_multihead_attention():
|
|||
to_tensor = Tensor(np.ones((2, 20, 15)), dtype.float16)
|
||||
attention_mask = Tensor(np.ones((2, 20, 20)), dtype.float16)
|
||||
|
||||
_executor.compile(model, from_tensor, to_tensor, to_tensor, attention_mask)
|
||||
_cell_graph_executor.compile(model, from_tensor, to_tensor, to_tensor, attention_mask)
|
||||
|
||||
|
||||
def test_multihead_attention_wrong_batch():
|
||||
|
@ -214,7 +214,7 @@ def test_multihead_attention_wrong_batch():
|
|||
attention_mask = Tensor(np.ones((3, 20, 20)), dtype.float16)
|
||||
|
||||
with pytest.raises(ValueError):
|
||||
_executor.compile(model, from_tensor, to_tensor, to_tensor, attention_mask)
|
||||
_cell_graph_executor.compile(model, from_tensor, to_tensor, to_tensor, attention_mask)
|
||||
|
||||
|
||||
def test_feedforward_layer():
|
||||
|
@ -224,7 +224,7 @@ def test_feedforward_layer():
|
|||
hidden_act='relu')
|
||||
tensor = Tensor(np.ones((2, 20, 15)), dtype.float32)
|
||||
|
||||
_executor.compile(model, tensor)
|
||||
_cell_graph_executor.compile(model, tensor)
|
||||
|
||||
|
||||
def test_cross_entroy():
|
||||
|
@ -233,13 +233,13 @@ def test_cross_entroy():
|
|||
labels_np = np.array([1]).astype(np.int32)
|
||||
input_mask = Tensor(np.ones(1).astype(np.float32))
|
||||
labels = Tensor(labels_np)
|
||||
_executor.compile(model, logits, labels, input_mask)
|
||||
_cell_graph_executor.compile(model, logits, labels, input_mask)
|
||||
|
||||
|
||||
def test_attention_mask():
|
||||
model = AttentionMask(seq_length=19)
|
||||
inputs = Tensor(np.ones((2, 19)), dtype.float32)
|
||||
_executor.compile(model, inputs)
|
||||
_cell_graph_executor.compile(model, inputs)
|
||||
|
||||
|
||||
def test_sparse_attention():
|
||||
|
@ -252,4 +252,4 @@ def test_sparse_attention():
|
|||
k = Tensor(np.ones((2, 1024, 512)), dtype.float16)
|
||||
v = Tensor(np.ones((2, 1024, 512)), dtype.float16)
|
||||
mask = Tensor(np.ones((2, 1024)), dtype.float32)
|
||||
_executor.compile(model, q, k, v, mask)
|
||||
_cell_graph_executor.compile(model, q, k, v, mask)
|
||||
|
|
|
@@ -21,7 +21,7 @@ import pytest
 import mindspore.context as context
 from mindspore import Tensor
 from mindspore import nn
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import operations as P
 from ..ut_filter import non_graph_engine
 from ....mindspore_test_framework.mindspore_test import mindspore_test
@@ -63,7 +63,7 @@ def test_net_without_construct():
     """ test_net_without_construct """
     net = NetMissConstruct()
     inp = Tensor(np.ones([1, 1, 32, 32]).astype(np.float32))
-    _executor.compile(net, inp)
+    _cell_graph_executor.compile(net, inp)


 class NetWithRaise(nn.Cell):
@@ -83,7 +83,7 @@ def test_net_with_raise():
     net = NetWithRaise()
     inp = Tensor(np.ones([1, 1, 32, 32]).astype(np.float32))
     with pytest.raises(RuntimeError) as err:
-        _executor.compile(net, inp)
+        _cell_graph_executor.compile(net, inp)
     assert "Unsupported syntax 'Raise'." in str(err.value)

@@ -18,7 +18,7 @@ import pytest
 import mindspore.common.dtype as mstype
 import mindspore.nn as nn
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.common.tensor import Tensor
 from mindspore.nn import TrainOneStepCell, WithLossCell
 from mindspore.nn.optim import Momentum, SGD, RMSProp, Adam
@@ -80,7 +80,7 @@ def test_group_lr():

     net_with_loss = WithLossCell(net, loss)
     train_network = TrainOneStepCell(net_with_loss, opt)
-    _executor.compile(train_network, inputs, label)
+    _cell_graph_executor.compile(train_network, inputs, label)


 def test_group_dynamic_1():
@@ -114,7 +114,7 @@ def test_group_dynamic_1():

     net_with_loss = WithLossCell(net, loss)
     train_network = TrainOneStepCell(net_with_loss, opt)
-    _executor.compile(train_network, inputs, label)
+    _cell_graph_executor.compile(train_network, inputs, label)


 def test_group_dynamic_2():
@@ -144,7 +144,7 @@ def test_group_dynamic_2():

     net_with_loss = WithLossCell(net, loss)
     train_network = TrainOneStepCell(net_with_loss, opt)
-    _executor.compile(train_network, inputs, label)
+    _cell_graph_executor.compile(train_network, inputs, label)


 def test_group_dynamic_no_same_size():
@@ -214,7 +214,7 @@ def test_weight_decay():

     net_with_loss = WithLossCell(net, loss)
     train_network = TrainOneStepCell(net_with_loss, opt)
-    _executor.compile(train_network, inputs, label)
+    _cell_graph_executor.compile(train_network, inputs, label)


 def test_group_repeat_param():
@@ -23,7 +23,7 @@ from mindspore.ops import _constants as Constants
 from mindspore.graph_utils.python_pass import register_pass, unregister_pass, set_renorm, gen_new_parameter,\
     cancel_new_parameter, set_reopt
 from mindspore.common.api import _generate_pip_args
-from mindspore._c_expression import generate_key, Executor_
+from mindspore._c_expression import generate_arguments_key, GraphExecutor_
 from mindspore.graph_utils.graph_pattern import OneOf, Prim, Call, NoneOf, Any, NewTensor, NewParameter, Imm

 context.set_context(mode=context.GRAPH_MODE)
@@ -31,13 +31,10 @@ context.set_context(mode=context.GRAPH_MODE)
 def get_func_graph(obj, *args, phase="validate"):
     args_names, args_list = _generate_pip_args(obj, *args)
     dic = dict(zip(args_names, args_list))
-    key = generate_key(phase, dic)
-    phase_prefix = str(key[1])
-    if phase == 'export':
-        phase = phase + '.' + phase_prefix + '.' + str(obj.create_time)
-    else:
-        phase = phase_prefix + phase + '.' + str(obj.create_time)
-    _executor = Executor_.get_instance()
+    key = generate_arguments_key(dic)
+    obj.arguments_key = str(key)
+    phase = phase + '.' + str(obj.create_time) + '.' + str(id(obj)) + '.' + obj.arguments_key
+    _executor = GraphExecutor_.get_instance()
     _executor.compile(obj, args_list, phase, False, "")
     return _executor.get_func_graph(phase)

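This hunk carries the substance of the commit: compile phases are now keyed by an arguments key from `generate_arguments_key` together with the cell's creation time and identity, instead of the old `generate_key` prefix scheme. A rough pure-Python sketch of the string composition, with the C++ key generation stubbed out as a plain argument, looks like this:

from types import SimpleNamespace


def build_phase_key(obj, arguments_key, phase="validate"):
    # In get_func_graph above, arguments_key comes from generate_arguments_key(dic);
    # here it is passed in so the sketch stays pure Python.
    obj.arguments_key = str(arguments_key)
    return phase + '.' + str(obj.create_time) + '.' + str(id(obj)) + '.' + obj.arguments_key


cell = SimpleNamespace(create_time=0)  # hypothetical cell-like object
print(build_phase_key(cell, 12345))    # e.g. "validate.0.140245770.12345" (the id() part varies)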
@@ -16,7 +16,7 @@ import numpy as np

 import mindspore as ms
 from mindspore import context, Tensor, Parameter
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.nn import Cell, TrainOneStepCell
 from mindspore.nn.optim.adafactor import AdaFactor
 from mindspore.ops import operations as P
@@ -53,7 +53,7 @@ def compile_net(net):
     train_net = TrainOneStepCell(net, optimizer)
     train_net.set_auto_parallel()
     train_net.set_train()
-    _executor.compile(train_net, _x, _b)
+    _cell_graph_executor.compile(train_net, _x, _b)
     context.reset_auto_parallel_context()

@@ -17,7 +17,7 @@ import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss
@@ -60,7 +60,7 @@ class Grad(nn.Cell):
 def compile_net(net, x, y):
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x, y)
+    _cell_graph_executor.compile(net, x, y)


 def test_add_relu_stride_slice():
@@ -18,7 +18,7 @@ import pytest
 import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor, context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
 from mindspore.nn.optim.momentum import Momentum
 from mindspore.parallel import _cost_model_context as cost_model_context
@@ -120,7 +120,7 @@ def train_common(net):
     model = Model(net, loss, opt)

     model.train(epoch_size, dataset, dataset_sink_mode=False)
-    allreduce_fusion_dict = _executor._get_allreduce_fusion(model._train_network)
+    allreduce_fusion_dict = _cell_graph_executor._get_allreduce_fusion(model._train_network)

     print(allreduce_fusion_dict)
     return allreduce_fusion_dict
@ -20,7 +20,7 @@ import mindspore as ms
|
|||
import mindspore.nn as nn
|
||||
from mindspore import Tensor
|
||||
from mindspore import context
|
||||
from mindspore.common.api import _executor
|
||||
from mindspore.common.api import _cell_graph_executor
|
||||
from mindspore.common.parameter import Parameter
|
||||
from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
|
||||
from mindspore.nn.optim.momentum import Momentum
|
||||
|
@ -97,7 +97,7 @@ def all_to_all_common(strategy1):
|
|||
model = Model(net, loss, opt)
|
||||
|
||||
model.train(epoch_size, dataset, dataset_sink_mode=False)
|
||||
strategys = _executor._get_shard_strategy(model._train_network)
|
||||
strategys = _cell_graph_executor._get_shard_strategy(model._train_network)
|
||||
return strategys
|
||||
|
||||
|
||||
|
@ -137,7 +137,7 @@ def test_all_to_all_success():
|
|||
return out
|
||||
|
||||
net = Net()
|
||||
_executor.compile(net, _x1)
|
||||
_cell_graph_executor.compile(net, _x1)
|
||||
|
||||
|
||||
def test_all_to_all_invalid_split_count_value_failed():
|
||||
|
@ -159,7 +159,7 @@ def test_all_to_all_invalid_split_count_value_failed():
|
|||
|
||||
with pytest.raises(ValueError):
|
||||
net = Net()
|
||||
_executor.compile(net, _x1)
|
||||
_cell_graph_executor.compile(net, _x1)
|
||||
|
||||
|
||||
def test_all_to_all_invalid_split_count_type_failed():
|
||||
|
@ -181,7 +181,7 @@ def test_all_to_all_invalid_split_count_type_failed():
|
|||
|
||||
with pytest.raises(TypeError):
|
||||
net = Net()
|
||||
_executor.compile(net, _x1)
|
||||
_cell_graph_executor.compile(net, _x1)
|
||||
|
||||
|
||||
def test_all_to_all_invalid_split_dim_value_failed():
|
||||
|
@ -203,7 +203,7 @@ def test_all_to_all_invalid_split_dim_value_failed():
|
|||
|
||||
with pytest.raises(IndexError):
|
||||
net = Net()
|
||||
_executor.compile(net, _x1)
|
||||
_cell_graph_executor.compile(net, _x1)
|
||||
|
||||
|
||||
def test_all_to_all_invalid_split_dim_type_failed():
|
||||
|
@ -225,7 +225,7 @@ def test_all_to_all_invalid_split_dim_type_failed():
|
|||
|
||||
with pytest.raises(TypeError):
|
||||
net = Net()
|
||||
_executor.compile(net, _x1)
|
||||
_cell_graph_executor.compile(net, _x1)
|
||||
|
||||
|
||||
def test_all_to_all_invalid_concat_dim_value_failed():
|
||||
|
@ -247,7 +247,7 @@ def test_all_to_all_invalid_concat_dim_value_failed():
|
|||
|
||||
with pytest.raises(IndexError):
|
||||
net = Net()
|
||||
_executor.compile(net, _x1)
|
||||
_cell_graph_executor.compile(net, _x1)
|
||||
|
||||
|
||||
def test_all_to_all_invalid_concat_dim_type_failed():
|
||||
|
@ -269,7 +269,7 @@ def test_all_to_all_invalid_concat_dim_type_failed():
|
|||
|
||||
with pytest.raises(TypeError):
|
||||
net = Net()
|
||||
_executor.compile(net, _x1)
|
||||
_cell_graph_executor.compile(net, _x1)
|
||||
|
||||
|
||||
def test_all_to_all_invalid_split_count_cannot_be_divisible_failed():
|
||||
|
@ -291,7 +291,7 @@ def test_all_to_all_invalid_split_count_cannot_be_divisible_failed():
|
|||
|
||||
with pytest.raises(ValueError):
|
||||
net = Net()
|
||||
_executor.compile(net, _x1)
|
||||
_cell_graph_executor.compile(net, _x1)
|
||||
|
||||
|
||||
def test_all_to_all_invalid_group_type_failed():
|
||||
|
@ -313,7 +313,7 @@ def test_all_to_all_invalid_group_type_failed():
|
|||
|
||||
with pytest.raises(TypeError):
|
||||
net = Net()
|
||||
_executor.compile(net, _x1)
|
||||
_cell_graph_executor.compile(net, _x1)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
|
|
@ -17,7 +17,7 @@ import numpy as np
|
|||
import mindspore as ms
|
||||
import mindspore.nn as nn
|
||||
from mindspore import Parameter, Tensor, context
|
||||
from mindspore.common.api import _executor
|
||||
from mindspore.common.api import _cell_graph_executor
|
||||
from mindspore.ops import composite as C
|
||||
from mindspore.ops import operations as P
|
||||
from tests.ut.python.ops.test_math_ops import VirtualLoss
|
||||
|
@ -49,7 +49,7 @@ class GradWrap(nn.Cell):
|
|||
def compile_net(net, x, y, b):
|
||||
net.set_auto_parallel()
|
||||
net.set_train()
|
||||
_executor.compile(net, x, y, b)
|
||||
_cell_graph_executor.compile(net, x, y, b)
|
||||
|
||||
|
||||
def test_matmul_sub():
|
||||
|
@ -651,7 +651,7 @@ def test_assign_sub():
|
|||
def compile_sub_net(net, x):
|
||||
net.set_auto_parallel()
|
||||
net.set_train()
|
||||
_executor.compile(net, x)
|
||||
_cell_graph_executor.compile(net, x)
|
||||
|
||||
context.set_auto_parallel_context(device_num=64, global_rank=15)
|
||||
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
|
||||
|
@ -699,7 +699,7 @@ def test_assign_add():
|
|||
def compile_sub_net(net, x):
|
||||
net.set_auto_parallel()
|
||||
net.set_train()
|
||||
_executor.compile(net, x)
|
||||
_cell_graph_executor.compile(net, x)
|
||||
|
||||
context.set_auto_parallel_context(device_num=64, global_rank=15)
|
||||
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
|
||||
|
@ -747,7 +747,7 @@ def test_assign():
|
|||
def compile_sub_net(net, x):
|
||||
net.set_auto_parallel()
|
||||
net.set_train()
|
||||
_executor.compile(net, x)
|
||||
_cell_graph_executor.compile(net, x)
|
||||
|
||||
context.set_auto_parallel_context(device_num=64, global_rank=15)
|
||||
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
|
||||
|
|
|
@ -18,7 +18,7 @@ import mindspore as ms
|
|||
import mindspore.nn as nn
|
||||
from mindspore import Tensor
|
||||
from mindspore import context
|
||||
from mindspore.common.api import _executor
|
||||
from mindspore.common.api import _cell_graph_executor
|
||||
from mindspore.ops import composite as C
|
||||
from tests.ut.python.ops.test_math_ops import VirtualLoss
|
||||
|
||||
|
@ -74,4 +74,4 @@ def test_auto_parallel_bn_with_prelu():
|
|||
net = GradWrap(NetWithLoss(Net()))
|
||||
net.set_auto_parallel()
|
||||
net.set_train()
|
||||
_executor.compile(net, x)
|
||||
_cell_graph_executor.compile(net, x)
|
||||
|
|
|
@ -16,7 +16,7 @@ import numpy as np
|
|||
|
||||
import mindspore as ms
|
||||
from mindspore import context, Tensor, Parameter
|
||||
from mindspore.common.api import _executor
|
||||
from mindspore.common.api import _cell_graph_executor
|
||||
from mindspore.nn import Cell, TrainOneStepCell, Momentum
|
||||
from mindspore.ops import operations as P
|
||||
|
||||
|
@ -44,7 +44,7 @@ def compile_net(net):
|
|||
train_net = TrainOneStepCell(net, optimizer)
|
||||
train_net.set_auto_parallel()
|
||||
train_net.set_train()
|
||||
_executor.compile(train_net, _x, _b)
|
||||
_cell_graph_executor.compile(train_net, _x, _b)
|
||||
context.reset_auto_parallel_context()
|
||||
|
||||
|
||||
|
|
|
@@ -19,7 +19,7 @@ import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
 from mindspore.parallel._utils import _reset_op_id as reset_op_id
@@ -54,7 +54,7 @@ class GradWrap(nn.Cell):
 def compile_net(net, x, y, b, phase):
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x, y, b, phase=phase)
+    _cell_graph_executor.compile(net, x, y, b, phase=phase)


 def test_auto_parallel_arithmetic():
@@ -78,7 +78,7 @@ def test_auto_parallel_arithmetic():
     y = Tensor(np.ones([32, 128]), dtype=ms.float32)
     b = Tensor(np.ones([64, 128]), dtype=ms.float32)
     compile_net(net, x, y, b, phase='train')
-    strategies = _executor._get_shard_strategy(net)
+    strategies = _cell_graph_executor._get_shard_strategy(net)
     for (k, v) in strategies.items():
         if re.search('FloorDiv-op', k) is not None:
             assert v == [[2, 4], [2, 4]]
@@ -107,7 +107,7 @@ def test_auto_parallel_arithmetic_broadcast_both():
     y = Tensor(np.ones([32, 1]), dtype=ms.float32)
     b = Tensor(np.ones([1, 64]), dtype=ms.float32)
     compile_net(net, x, y, b, phase='train')
-    strategies = _executor._get_shard_strategy(net)
+    strategies = _cell_graph_executor._get_shard_strategy(net)
     for (k, v) in strategies.items():
         if re.search('FloorDiv-op', k) is not None:
             assert v == [[8, 1], [1, 1]]
@@ -136,7 +136,7 @@ def test_auto_parallel_arithmetic_broadcast_right():
     y = Tensor(np.ones([32, 32]), dtype=ms.float32)
     b = Tensor(np.ones([32]), dtype=ms.float32)
     compile_net(net, x, y, b, phase='train')
-    strategies = _executor._get_shard_strategy(net)
+    strategies = _cell_graph_executor._get_shard_strategy(net)
     for (k, v) in strategies.items():
         if re.search('FloorDiv-op', k) is not None:
             assert v == [[4, 2], [2]]
@@ -165,7 +165,7 @@ def test_auto_parallel_arithmetic_broadcast_left():
     y = Tensor(np.ones([32, 32]), dtype=ms.float32)
     b = Tensor(np.ones([128, 64, 32]), dtype=ms.float32)
     compile_net(net, x, y, b, phase="train")
-    strategies = _executor._get_shard_strategy(net)
+    strategies = _cell_graph_executor._get_shard_strategy(net)
     for (k, v) in strategies.items():
         if re.search('FloorDiv-op', k) is not None:
             assert v == [[4, 2], [1, 4, 2]]
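The auto-parallel tests keep the same structure after the rename: compile the cell, then read the shard strategies back from the executor and match operator names. A small helper in that style might look like the sketch below; the helper name and the `op_pattern`/`expected` parameters are ours, only `_get_shard_strategy` and the key-matching idiom come from the tests above.

import re

from mindspore.common.api import _cell_graph_executor


def assert_strategy(net, op_pattern, expected):
    # Assumes net was already compiled via _cell_graph_executor.compile(...);
    # mirrors the FloorDiv-op checks in the tests above.
    strategies = _cell_graph_executor._get_shard_strategy(net)
    for name, strategy in strategies.items():
        if re.search(op_pattern, name) is not None:
            assert strategy == expected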
@@ -19,7 +19,7 @@ import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.parallel._utils import _reset_op_id as reset_op_id
 from tests.ut.python.ops.test_math_ops import VirtualLoss
@@ -62,8 +62,8 @@ def test_auto_parallel_assign_sub_with_ref_key():
     reset_op_id()

     net.set_train()
-    _executor.compile(net, x, phase="train")
-    strategies = _executor._get_shard_strategy(net)
+    _cell_graph_executor.compile(net, x, phase="train")
+    strategies = _cell_graph_executor._get_shard_strategy(net)
     for (k, v) in strategies.items():
         if re.search('PReLU-op', k) is not None:
             assert v == [[1, 1, 1, 8], [1]]
@ -19,7 +19,7 @@ import mindspore.nn as nn
|
|||
from mindspore import Tensor
|
||||
from mindspore import context
|
||||
from mindspore.common import dtype as mstype
|
||||
from mindspore.common.api import _executor
|
||||
from mindspore.common.api import _cell_graph_executor
|
||||
from mindspore.ops import composite as C
|
||||
from mindspore.ops import operations as P
|
||||
from mindspore.parallel._utils import _reset_op_id as reset_op_id
|
||||
|
@ -82,8 +82,8 @@ def test_double_star_graph():
|
|||
reset_op_id()
|
||||
|
||||
net.set_train()
|
||||
_executor.compile(net, x, y, z, w, phase='train')
|
||||
strategies = _executor._get_shard_strategy(net)
|
||||
_cell_graph_executor.compile(net, x, y, z, w, phase='train')
|
||||
strategies = _cell_graph_executor._get_shard_strategy(net)
|
||||
expected_strategies = {'Default/network-Net/Cast-op1': [[8, 1]],
|
||||
'Default/network-Net/Cast-op3': [[1, 8]],
|
||||
'Default/network-Net/MatMul-op2': [[8, 1], [1, 1]],
|
||||
|
|
|
@ -18,7 +18,7 @@ import mindspore as ms
|
|||
import mindspore.nn as nn
|
||||
from mindspore import Tensor
|
||||
from mindspore import context
|
||||
from mindspore.common.api import _executor
|
||||
from mindspore.common.api import _cell_graph_executor
|
||||
from mindspore.ops import composite as C
|
||||
from mindspore.ops import operations as P
|
||||
from tests.ut.python.ops.test_math_ops import VirtualLoss
|
||||
|
@ -73,4 +73,4 @@ def test_common_parameter():
|
|||
context.set_auto_parallel_context(parallel_mode="auto_parallel")
|
||||
net.set_auto_parallel()
|
||||
net.set_train()
|
||||
_executor.compile(net, x, y, z)
|
||||
_cell_graph_executor.compile(net, x, y, z)
|
||||
|
|
|
@ -18,7 +18,7 @@ import mindspore as ms
|
|||
import mindspore.nn as nn
|
||||
from mindspore import Tensor
|
||||
from mindspore import context
|
||||
from mindspore.common.api import _executor
|
||||
from mindspore.common.api import _cell_graph_executor
|
||||
from mindspore.ops import composite as C
|
||||
from mindspore.ops import operations as P
|
||||
from tests.ut.python.ops.test_math_ops import VirtualLoss
|
||||
|
@ -80,7 +80,7 @@ def test_double_source_graph():
|
|||
context.set_auto_parallel_context(parallel_mode="auto_parallel")
|
||||
net.set_auto_parallel()
|
||||
net.set_train()
|
||||
_executor.compile(net, x, y, z, w, a)
|
||||
_cell_graph_executor.compile(net, x, y, z, w, a)
|
||||
|
||||
|
||||
def test_double_source_complex_graph():
|
||||
|
@ -116,4 +116,4 @@ def test_double_source_complex_graph():
|
|||
context.set_auto_parallel_context(parallel_mode="auto_parallel")
|
||||
net.set_auto_parallel()
|
||||
net.set_train()
|
||||
_executor.compile(net, x, y, z, w, a)
|
||||
_cell_graph_executor.compile(net, x, y, z, w, a)
|
||||
|
|
|
@ -18,7 +18,7 @@ import mindspore as ms
|
|||
import mindspore.nn as nn
|
||||
from mindspore import Tensor
|
||||
from mindspore import context
|
||||
from mindspore.common.api import _executor
|
||||
from mindspore.common.api import _cell_graph_executor
|
||||
from mindspore.ops import composite as C
|
||||
from mindspore.ops import operations as P
|
||||
from tests.ut.python.ops.test_math_ops import VirtualLoss
|
||||
|
@ -84,4 +84,4 @@ def test_double_star_graph():
|
|||
context.set_auto_parallel_context(parallel_mode="auto_parallel")
|
||||
net.set_auto_parallel()
|
||||
net.set_train()
|
||||
_executor.compile(net, x, y, z, w, a, b, c)
|
||||
_cell_graph_executor.compile(net, x, y, z, w, a, b, c)
|
||||
|
|
|
@@ -19,7 +19,7 @@ import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor, Parameter, ParameterTuple
 from mindspore import context, Model
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.nn.optim import Adam, FTRL
 from mindspore.ops import composite as C
 from mindspore.ops import functional as F
@@ -114,8 +114,8 @@ def test_double_subgraphs():
     x = Tensor(np.ones([8, 8, 8, 8]), dtype=ms.float32)
     reset_op_id()
     net.set_train()
-    _executor.compile(net, x, phase='train')
-    strategies = _executor._get_shard_strategy(net)
+    _cell_graph_executor.compile(net, x, phase='train')
+    strategies = _cell_graph_executor._get_shard_strategy(net)
     for (k, v) in strategies.items():
         if re.search('ReduceMean-op', k) is not None:
             assert v == [[8, 1, 1, 1]]
@@ -165,7 +165,7 @@ def test_double_subgraphs_train():
     ds_train = DatasetLenet(Tensor(batch_ids), None)
     model = Model(net)
     model.train(1, ds_train, dataset_sink_mode=False)
-    strategies = _executor._get_shard_strategy(net)
+    strategies = _cell_graph_executor._get_shard_strategy(net)
     for (k, v) in strategies.items():
         if re.search('ReduceMean-op', k) is not None:
             assert v == [[1, 1, 1, 1]]
@ -18,7 +18,7 @@ import mindspore as ms
|
|||
import mindspore.nn as nn
|
||||
from mindspore import Tensor
|
||||
from mindspore import context
|
||||
from mindspore.common.api import _executor
|
||||
from mindspore.common.api import _cell_graph_executor
|
||||
from mindspore.ops import composite as C
|
||||
from mindspore.ops import operations as P
|
||||
from tests.ut.python.ops.test_math_ops import VirtualLoss
|
||||
|
@ -71,4 +71,4 @@ def test_two_matmul():
|
|||
context.set_auto_parallel_context(parallel_mode="auto_parallel")
|
||||
net.set_auto_parallel()
|
||||
net.set_train()
|
||||
_executor.compile(net, x, y, b)
|
||||
_cell_graph_executor.compile(net, x, y, b)
|
||||
|
|
|
@ -21,7 +21,7 @@ import mindspore.nn as nn
|
|||
from mindspore.ops import operations as P, functional as F
|
||||
from mindspore.common.initializer import initializer
|
||||
import mindspore.common.dtype as mstype
|
||||
from mindspore.common.api import _executor
|
||||
from mindspore.common.api import _cell_graph_executor
|
||||
from tests.dataset_mock import MindData
|
||||
|
||||
|
||||
|
@ -123,7 +123,7 @@ def test_auto_parallel():
|
|||
net = Full(_w1, 3)
|
||||
net.set_auto_parallel()
|
||||
net.set_train()
|
||||
_executor.compile(net, _x, phase='train')
|
||||
num_ops = _executor._get_num_parallel_ops(net)
|
||||
_cell_graph_executor.compile(net, _x, phase='train')
|
||||
num_ops = _cell_graph_executor._get_num_parallel_ops(net)
|
||||
expected_num = 16
|
||||
assert num_ops == expected_num
|
||||
|
|
|
@ -18,7 +18,7 @@ import mindspore as ms
|
|||
import mindspore.nn as nn
|
||||
from mindspore import Tensor, Parameter, ParameterTuple
|
||||
from mindspore import context
|
||||
from mindspore.common.api import _executor
|
||||
from mindspore.common.api import _cell_graph_executor
|
||||
from mindspore.nn.optim import Adam, FTRL
|
||||
from mindspore.ops import composite as C
|
||||
from mindspore.ops import functional as F
|
||||
|
@ -130,7 +130,7 @@ def test_double_subgraphs():
|
|||
x = Tensor(np.ones([8, 8, 8, 8]), dtype=ms.float32)
|
||||
reset_op_id()
|
||||
net.set_train()
|
||||
_executor.compile(net, x, phase='train')
|
||||
num_ops = _executor._get_num_parallel_ops(net)
|
||||
_cell_graph_executor.compile(net, x, phase='train')
|
||||
num_ops = _cell_graph_executor._get_num_parallel_ops(net)
|
||||
expected_num = 7
|
||||
assert expected_num == num_ops
|
||||
|
|
|
@ -21,7 +21,7 @@ import mindspore.nn as nn
|
|||
from mindspore.ops import operations as P, functional as F
|
||||
from mindspore.common.initializer import initializer
|
||||
import mindspore.common.dtype as mstype
|
||||
from mindspore.common.api import _executor
|
||||
from mindspore.common.api import _cell_graph_executor
|
||||
from tests.dataset_mock import MindData
|
||||
|
||||
|
||||
|
@ -130,7 +130,7 @@ def test_auto_parallel():
|
|||
net = Full(_w1, 3)
|
||||
net.set_auto_parallel()
|
||||
net.set_train()
|
||||
_executor.compile(net, _x, phase='train')
|
||||
num_ops = _executor._get_num_parallel_ops(net)
|
||||
_cell_graph_executor.compile(net, _x, phase='train')
|
||||
num_ops = _cell_graph_executor._get_num_parallel_ops(net)
|
||||
expected_num = 16
|
||||
assert num_ops == expected_num
|
||||
|
|
|
@ -18,7 +18,7 @@ import mindspore as ms
|
|||
import mindspore.nn as nn
|
||||
from mindspore import Tensor
|
||||
from mindspore import context
|
||||
from mindspore.common.api import _executor
|
||||
from mindspore.common.api import _cell_graph_executor
|
||||
from mindspore.ops import composite as C
|
||||
from mindspore.ops import operations as P
|
||||
from tests.ut.python.ops.test_math_ops import VirtualLoss
|
||||
|
@ -50,7 +50,7 @@ class GradWrap(nn.Cell):
|
|||
def compile_net(net, x, y, z, w, b):
|
||||
net.set_auto_parallel()
|
||||
net.set_train()
|
||||
_executor.compile(net, x, y, z, w, b)
|
||||
_cell_graph_executor.compile(net, x, y, z, w, b)
|
||||
|
||||
# model_parallel test
|
||||
|
||||
|
|
|
@@ -18,7 +18,7 @@ import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
 from mindspore.parallel._utils import _reset_op_id as reset_op_id

@@ -74,4 +74,4 @@ def test_auto_parallel_l2normalize():
     y = Tensor(np.ones([128, 64, 64]), dtype=ms.float32)
     b = Tensor(np.ones([128, 64, 64]), dtype=ms.float32)
     net.set_train()
-    _executor.compile(net, x, y, b, phase='train')
+    _cell_graph_executor.compile(net, x, y, b, phase='train')

@@ -18,7 +18,7 @@ import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

@@ -71,4 +71,4 @@ def test_two_matmul_dropout():
     y = Tensor(np.ones([32, 64]), dtype=ms.float32)
     b = Tensor(np.ones([64, 64]), dtype=ms.float32)
     net.set_train()
-    _executor.compile(net, x, y, b)
+    _cell_graph_executor.compile(net, x, y, b)

@@ -19,7 +19,7 @@ import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
 from mindspore.parallel._utils import _reset_op_id as reset_op_id

@@ -75,8 +75,8 @@ def test_matmul_prelu():
     reset_op_id()
 
     net.set_train()
-    _executor.compile(net, x, y, b, phase='train')
-    strategies = _executor._get_shard_strategy(net)
+    _cell_graph_executor.compile(net, x, y, b, phase='train')
+    strategies = _cell_graph_executor._get_shard_strategy(net)
     for (k, v) in strategies.items():
         if re.search('PReLU-op', k) is not None:
             assert v == [[16, 1, 1, 1], [1]]

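Strategy-checking tests such as `test_matmul_prelu` compile first and then read the chosen shard strategies back from the executor; `_get_shard_strategy` returns a dict keyed by operator instance name, so the test selects the operator it cares about with a regex on the key. The sketch below reuses the values visible in the hunk above; the helper name is made up, and the comment on the strategy is my reading of it rather than something stated in the diff.

import re

def assert_prelu_strategy(net, x, y, b):
    net.set_train()
    _cell_graph_executor.compile(net, x, y, b, phase='train')
    strategies = _cell_graph_executor._get_shard_strategy(net)
    for key, strategy in strategies.items():
        if re.search('PReLU-op', key) is not None:
            # First sub-list: the 4-D input is split 16 ways on its first axis;
            # second sub-list: the PReLU weight is left unsplit.
            assert strategy == [[16, 1, 1, 1], [1]]
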
@@ -15,7 +15,7 @@
 import numpy as np
 import mindspore as ms
 import mindspore.context as context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore import Tensor, Parameter
 from mindspore.nn import Cell, TrainOneStepCell, Momentum
 from mindspore.ops import operations as P

@@ -59,7 +59,7 @@ def compile_net(net):
     train_net = TrainOneStepCell(net, optimizer)
     train_net.set_auto_parallel()
     train_net.set_train()
-    _executor.compile(train_net, inputs_, label_)
+    _cell_graph_executor.compile(train_net, inputs_, label_)
     context.reset_auto_parallel_context()
 
 

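`compile_net` in this file builds optimizer state into the compiled graph by wrapping the network in `TrainOneStepCell` before handing it to the executor. The following is a self-contained sketch of that wrapping; the `DenseWithLoss` cell, the shapes, and the optimizer hyper-parameters are placeholders chosen for illustration, not values from the real test.

import numpy as np
import mindspore as ms
import mindspore.nn as nn
from mindspore import Tensor, context
from mindspore.common.api import _cell_graph_executor
from mindspore.nn import TrainOneStepCell, Momentum


class DenseWithLoss(nn.Cell):
    """Hypothetical net returning a scalar loss, for illustration only."""
    def __init__(self):
        super().__init__()
        self.dense = nn.Dense(16, 8)
        self.loss = nn.SoftmaxCrossEntropyWithLogits(sparse=False, reduction='mean')

    def construct(self, x, label):
        return self.loss(self.dense(x), label)


def compile_train_net(net, inputs_, label_):
    # Wrap net + optimizer, then compile the training graph without running it.
    optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
    train_net = TrainOneStepCell(net, optimizer)
    train_net.set_auto_parallel()
    train_net.set_train()
    _cell_graph_executor.compile(train_net, inputs_, label_)
    context.reset_auto_parallel_context()


if __name__ == "__main__":
    context.set_auto_parallel_context(device_num=8, global_rank=0,
                                      parallel_mode="semi_auto_parallel")
    inputs_ = Tensor(np.ones([8, 16]), dtype=ms.float32)
    label_ = Tensor(np.ones([8, 8]), dtype=ms.float32)
    compile_train_net(DenseWithLoss(), inputs_, label_)
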
@@ -18,7 +18,7 @@ import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.common.parameter import Parameter
 from mindspore.nn.optim.momentum import Momentum
 from mindspore.ops import composite as C

@@ -100,7 +100,7 @@ def test_auto_parallel_arithmetic():
     y = Tensor(np.ones([32, 64]), dtype=ms.float32)
     b = Tensor(np.ones([64]), dtype=ms.int32)
     net.set_train()
-    _executor.compile(net, x, y, b)
+    _cell_graph_executor.compile(net, x, y, b)
 
 
 def test_auto_parallel_arithmetic_model():

@@ -20,7 +20,7 @@ import mindspore.nn as nn
 from mindspore import Tensor, Parameter
 from mindspore import context
 from mindspore.common import dtype as mstype
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import operations as P
 from mindspore.parallel import set_algo_parameters
 from mindspore.parallel._utils import _reset_op_id as reset_op_id

@@ -69,8 +69,8 @@ def test_common_parameter():
     reset_op_id()
 
     net.set_train()
-    _executor.compile(net, x, y, phase='train')
-    strategies = _executor._get_shard_strategy(net)
+    _cell_graph_executor.compile(net, x, y, phase='train')
+    strategies = _cell_graph_executor._get_shard_strategy(net)
     for (k, v) in strategies.items():
         if re.search('MatMul-op', k) is not None:
             assert v == [[8, 1], [1, 1]]

@@ -18,7 +18,7 @@ import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

@@ -78,4 +78,4 @@ def test_four_matmul_linear():
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x, y, z, w, b)
+    _cell_graph_executor.compile(net, x, y, z, w, b)

@@ -18,7 +18,7 @@ import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

@@ -50,7 +50,7 @@ class GradWrap(nn.Cell):
 def compile_net(net, x, y, b):
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x, y, b)
+    _cell_graph_executor.compile(net, x, y, b)
 
 
 # model_parallel test

@@ -18,7 +18,7 @@ import mindspore as ms
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore import context
-from mindspore.common.api import _executor
+from mindspore.common.api import _cell_graph_executor
 from mindspore.common.parameter import Parameter
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P

@@ -69,7 +69,7 @@ def test_reshape_matmul():
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x)
+    _cell_graph_executor.compile(net, x)
 
 def test_reshape_reshape():
     class Net(nn.Cell):

@@ -92,7 +92,7 @@ def test_reshape_reshape():
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x)
+    _cell_graph_executor.compile(net, x)
 
 
 def test_reshape_auto_1():

@@ -118,7 +118,7 @@ def test_reshape_auto_1():
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x)
+    _cell_graph_executor.compile(net, x)
 
 
 def test_reshape_auto_2():

@@ -147,7 +147,7 @@ def test_reshape_auto_2():
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x)
+    _cell_graph_executor.compile(net, x)
 
 
 def test_reshape_auto_3():

@@ -173,7 +173,7 @@ def test_reshape_auto_3():
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x)
+    _cell_graph_executor.compile(net, x)
 
 
 def test_reshape_auto_4():

@@ -200,7 +200,7 @@ def test_reshape_auto_4():
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x)
+    _cell_graph_executor.compile(net, x)
 
 
 def test_reshape_auto_5():

@@ -251,7 +251,7 @@ def test_reshape_auto_5():
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x, y)
+    _cell_graph_executor.compile(net, x, y)
 
 def test_reshape_auto_6():
     class NetWithLoss6(nn.Cell):

@@ -299,7 +299,7 @@ def test_reshape_auto_6():
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x, y)
+    _cell_graph_executor.compile(net, x, y)
 
 def test_reshape_auto_7():
     class Net(nn.Cell):

@@ -322,7 +322,7 @@ def test_reshape_auto_7():
     context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x)
+    _cell_graph_executor.compile(net, x)
 
 def test_reshape_depend_reshape():
     class Net(nn.Cell):

@@ -371,9 +371,9 @@ def test_reshape_depend_reshape():
     context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
     net.set_auto_parallel()
     net.set_train()
-    _executor.compile(net, x, y)
+    _cell_graph_executor.compile(net, x, y)
     net_auto = GradWrap1(NetWithLoss1(Net()))
     context.set_auto_parallel_context(parallel_mode="auto_parallel")
     net_auto.set_auto_parallel()
     net_auto.set_train()
-    _executor.compile(net_auto, x, y)
+    _cell_graph_executor.compile(net_auto, x, y)

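The final hunk compiles the same graph twice, first under `semi_auto_parallel` and then under `auto_parallel`, so both strategy paths are exercised with the renamed executor. Reduced to a reusable helper it looks like the sketch below; `build_net` is a hypothetical factory standing in for the `GradWrap1(NetWithLoss1(Net()))` wrapping done in the test, and the `context` and `_cell_graph_executor` imports are the ones already shown in this file's hunks.

def compile_in_both_modes(build_net, x, y):
    # build_net() is assumed to return a freshly wrapped network on each call.
    net = build_net()
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
    net.set_auto_parallel()
    net.set_train()
    _cell_graph_executor.compile(net, x, y)

    net_auto = build_net()
    context.set_auto_parallel_context(parallel_mode="auto_parallel")
    net_auto.set_auto_parallel()
    net_auto.set_train()
    _cell_graph_executor.compile(net_auto, x, y)
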
Some files were not shown because too many files have changed in this diff.