Unify API

Signed-off-by: zjun <zhangjun0@huawei.com>
zjun 2022-09-28 10:39:14 +08:00
parent d81588afbb
commit 432641d19f
10 changed files with 86 additions and 56 deletions

View File

@@ -41,7 +41,7 @@ void RegRandomNormal(py::module *m);
 }
 namespace pynative {
-void RegPynativeExecutor(const py::module *m);
+void RegPyNativeExecutor(const py::module *m);
 }
 namespace tensor {

View File

@@ -109,7 +109,7 @@ void RegModule(py::module *m) {
 mindspore::initializer::RegRandomNormal(m);
 RegMsContext(m);
 RegSecurity(m);
-mindspore::pynative::RegPynativeExecutor(m);
+mindspore::pynative::RegPyNativeExecutor(m);
 mindspore::opt::python_pass::RegPattern(m);
 mindspore::opt::python_pass::RegPyPassManager(m);
 mindspore::prim::RegCompositeOpsGroup(m);
@@ -187,7 +187,6 @@ PYBIND11_MODULE(_c_expression, m) {
 .def("set_jit_config", &GraphExecutorPy::SetJitConfig, py::arg("jit_config") = py::dict(), "Set the jit config.")
 .def("generate_arguments_key", &GraphExecutorPy::GenerateArgumentsKey, "Generate unique key of argument.");
-(void)m.def("real_run_op", &mindspore::pynative::RealRunOp, "Run op pynatively.");
 (void)m.def("reset_op_id", &mindspore::pipeline::ResetOpId, "Reset Operator Id");
 (void)m.def("reset_op_id_with_offset", &mindspore::pipeline::ResetOpIdWithOffset, "Reset Operator Id With Offset");
 (void)m.def("init_hccl", &mindspore::pipeline::InitHccl, "Init Hccl");
@@ -208,8 +207,6 @@ PYBIND11_MODULE(_c_expression, m) {
 (void)m.def("init_cluster", &mindspore::distributed::Initialize, "Init Cluster");
 (void)m.def("set_cluster_exit_with_exception", &mindspore::distributed::set_cluster_exit_with_exception,
 "Set this process exits with exception.");
-(void)m.def("get_dyn_shape", &mindspore::pynative::GetDynShape, "Get Dynamic Shape of Tensor");
-(void)m.def("call_constant_folding", &mindspore::pynative::CallConstantFolding, "Call Constant Folding Primitive");
 (void)py::class_<mindspore::MpiConfig, std::shared_ptr<mindspore::MpiConfig>>(m, "MpiConfig")
 .def_static("get_instance", &mindspore::MpiConfig::GetInstance, "Get mpi config instance.")

View File

@@ -323,7 +323,7 @@ void InferOperation::SetNodeAbsCacheById(const std::string &id, const abstract::
 node_abs_cache_[id] = abs;
 }
-py::object InferOperation::CallConstantFolding(const py::args &args) {
+py::object InferOperation::CallConstantFolding(const py::args &args) const {
 const auto &op_run_info = std::make_shared<FrontendOpRunInfo>();
 PyNativeAlgo::PyParser::SetPrim(op_run_info, args[0]);
 const auto &v = PyNativeAlgo::DataConvert::PyObjToValue(args[1]);

View File

@@ -41,7 +41,7 @@ class InferOperation {
 void ClearPrimAbsList() { prim_abs_list_.clear(); }
 // Manage constant flag primitive cache.
 void ClearConstFlagPrimCache() { no_const_flag_prims_.clear(); }
-py::object CallConstantFolding(const py::args &args);
+py::object CallConstantFolding(const py::args &args) const;
 private:
 // Set abstract for each input value.

View File

@@ -81,25 +81,19 @@ void PyNativeExecutorTry(const std::function<void(T *ret, const Args &...)> &met
 }
 } // namespace
-py::object RealRunOp(const py::args &args) {
-const auto &executor = PyNativeExecutor::GetInstance();
-MS_EXCEPTION_IF_NULL(executor);
-FrontendOpRunInfoPtr op_run_info = executor->forward_executor()->GenerateOpRunInfo(args);
+py::object PyNativeExecutor::RealRunOp(const py::args &args) const {
+FrontendOpRunInfoPtr op_run_info = forward_executor()->GenerateOpRunInfo(args);
 py::object ret = py::none();
-PyNativeExecutorTry(executor->forward_executor()->RunOpS, &ret, op_run_info);
+PyNativeExecutorTry(forward_executor()->RunOpS, &ret, op_run_info);
 return ret;
 }
-py::object GetDynShape(const py::args &args) {
-const auto &executor = PyNativeExecutor::GetInstance();
-MS_EXCEPTION_IF_NULL(executor);
-return executor->forward_executor()->dynamic_shape()->GetDynShape(args);
+py::object PyNativeExecutor::GetShape(const py::args &args) const {
+return forward_executor()->dynamic_shape()->GetDynShape(args);
 }
-py::object CallConstantFolding(const py::args &args) {
-const auto &executor = PyNativeExecutor::GetInstance();
-MS_EXCEPTION_IF_NULL(executor);
-return executor->forward_executor()->infer_operation()->CallConstantFolding(args);
+py::object PyNativeExecutor::CallConstantFolding(const py::args &args) const {
+return forward_executor()->infer_operation()->CallConstantFolding(args);
 }
 void PyNativeExecutor::set_py_exe_path(const py::object &py_exe_path) const {
@@ -231,12 +225,12 @@ void PyNativeExecutor::SetLazyBuild(bool enable) const { forward_executor()->set
 bool PyNativeExecutor::IsFirstCell() const { return forward_executor()->IsFirstCell(); }
-void PyNativeExecutor::SetMsFunctionCompileStatus(bool is_compiling) {
+void PyNativeExecutor::SetMsFunctionCompileStatus(bool is_compiling) const {
 forward_executor()->set_is_ms_function_compiling(is_compiling);
 }
-void RegPynativeExecutor(const py::module *m) {
-(void)py::class_<PyNativeExecutor, std::shared_ptr<PyNativeExecutor>>(*m, "PynativeExecutor_")
+void RegPyNativeExecutor(const py::module *m) {
+(void)py::class_<PyNativeExecutor, std::shared_ptr<PyNativeExecutor>>(*m, "PyNativeExecutor_")
 .def_static("get_instance", &PyNativeExecutor::GetInstance, "PyNativeExecutor get_instance.")
 .def("is_first_cell", &PyNativeExecutor::IsFirstCell, "check if the first cell.")
 .def("new_graph", &PyNativeExecutor::NewGraph, "pynative new a graph.")
@@ -262,6 +256,9 @@ void RegPynativeExecutor(const py::module *m) {
 .def("set_kernel_build_server_dir", &PyNativeExecutor::set_kernel_build_server_dir,
 py::arg("kernel_build_server_dir") = py::str(""), "set kernel build server directory path.")
 .def("set_ms_function_compile_status", &PyNativeExecutor::SetMsFunctionCompileStatus,
-"set ms_funciton compile status.");
+"set ms_funciton compile status.")
+.def("real_run_op", &PyNativeExecutor::RealRunOp, "Run op pynatively.")
+.def("get_shape", &PyNativeExecutor::GetShape, "Get Dynamic Shape of Tensor")
+.def("constant_folding", &PyNativeExecutor::CallConstantFolding, "Call Constant Folding Primitive");
 }
 } // namespace mindspore::pynative
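
With the registration above, the renamed pybind class `PyNativeExecutor_` now carries `real_run_op`, `get_shape`, and `constant_folding` as instance methods. A hedged usage sketch (internal API; in practice these are reached through the `_PyNativeExecutor` wrapper in mindspore.common.api rather than called directly):

    from mindspore._c_expression import PyNativeExecutor_  # was PynativeExecutor_

    executor = PyNativeExecutor_.get_instance()
    # The three unified entry points, bound by the .def calls above:
    run_op = executor.real_run_op          # executes a single op eagerly
    shape_query = executor.get_shape       # dynamic shape query
    fold = executor.constant_folding       # value-by-infer for constant inputs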

View File

@@ -28,10 +28,6 @@
 namespace mindspore::pynative {
 namespace py = pybind11;
-py::object RealRunOp(const py::args &args);
-py::object GetDynShape(const py::args &args);
-py::object CallConstantFolding(const py::args &args);
 class PyNativeExecutor : public std::enable_shared_from_this<PyNativeExecutor> {
 public:
 static std::shared_ptr<PyNativeExecutor> GetInstance() {
@@ -55,6 +51,9 @@ class PyNativeExecutor : public std::enable_shared_from_this<PyNativeExecutor> {
 return forward_executor_;
 }
+py::object RealRunOp(const py::args &args) const;
+py::object GetShape(const py::args &args) const;
+py::object CallConstantFolding(const py::args &args) const;
 bool grad_flag() const;
 void set_grad_flag(bool flag) const;
 void SetDynamicInput(const py::object &cell, const py::args &args) const;
@@ -82,7 +81,7 @@ class PyNativeExecutor : public std::enable_shared_from_this<PyNativeExecutor> {
 void Sync() const;
 void SetLazyBuild(bool enable) const;
 bool IsFirstCell() const;
-void SetMsFunctionCompileStatus(bool is_compiling);
+void SetMsFunctionCompileStatus(bool is_compiling) const;
 private:
 PyNativeExecutor() = default;

View File

@@ -36,7 +36,7 @@ from mindspore.common.tensor import COOTensor as PythonCOOTensor
 from mindspore.common.tensor import RowTensor as PythonRowTensor
 from mindspore.common.initializer import initializer
 from mindspore._c_expression import GraphExecutor_, Tensor, MetaTensor, CSRTensor, RowTensor, COOTensor, \
-    PynativeExecutor_, verify_inputs_signature, init_exec_dataset, _set_dataset_mode_config, init_pipeline, \
+    PyNativeExecutor_, verify_inputs_signature, init_exec_dataset, _set_dataset_mode_config, init_pipeline, \
     _ms_memory_recycle
 from mindspore.parallel._tensor import _load_tensor_by_layout
 from mindspore.parallel._ps_context import _is_role_pserver, _is_role_sched, _enable_distributed_mindrt
@@ -730,7 +730,7 @@ def _parameter_broadcast(obj, auto_parallel_mode):
     _build_broadcast_graph(broadcast_params_dict, broadcast_phase)
-class _PynativeExecutor:
+class _PyNativeExecutor:
     """
     A pynative executor used to compile/manage/run single op.
@@ -748,12 +748,27 @@ class _PynativeExecutor:
     """
     def __init__(self):
-        self._executor = PynativeExecutor_.get_instance()
+        self._executor = PyNativeExecutor_.get_instance()
         self._executor.set_py_exe_path(sys.executable)
         self._executor.set_kernel_build_server_dir(os.path.split(kernel_build_server.__file__)[0] + os.sep)
         self._optimizer = None
         self._top_cell = None
+    def __call__(self, sens_param, obj, *args, **kwargs):
+        """
+        PyNative executor run grad graph.
+        Args:
+            obj (Function/Cell): The function or cell instance.
+            args (tuple): Function or cell input arguments.
+            kwargs (dict): keyword arguments.
+        Return:
+            The return object after running grad graph.
+        """
+        args = args + tuple(kwargs.values())
+        return self._executor(sens_param, obj, args)
     @staticmethod
     def parameter_broadcast(obj, phase, auto_parallel_mode):
         """
@@ -770,6 +785,18 @@ class _PynativeExecutor:
         if BROADCAST_PHASE not in phase and _get_parameter_broadcast():
             _parameter_broadcast(obj, auto_parallel_mode)
+    def real_run_op(self, *args):
+        """
+        Run single op.
+        Args:
+            args (tuple): Op prim and input arguments.
+        Return:
+            Tensor, result of run op.
+        """
+        return self._executor.real_run_op(*args)
     def new_graph(self, obj, *args, **kwargs):
         """
         Initialize resources for building forward and backward graph.
@@ -847,7 +874,7 @@ class _PynativeExecutor:
     def clear_res(self):
         """
-        Clean resource for _PynativeExecutor.
+        Clean resource for _PyNativeExecutor.
         Return:
             None.
@@ -1011,20 +1038,29 @@ class _PynativeExecutor:
         """
        return self._top_cell
-    def __call__(self, sens_param, obj, *args, **kwargs):
+    def get_shape(self, *args):
        """
-        PyNative executor run grad graph.
+        Get shape of input arguments.
        Args:
-            obj (Function/Cell): The function or cell instance.
-            args (tuple): Function or cell input arguments.
-            kwargs (dict): keyword arguments.
+            args (Tensor/tuple(Tensor)): Input arguments.
        Return:
-            The return object after running grad graph.
+            tuple(int), the shape of input arguments.
        """
-        args = args + tuple(kwargs.values())
-        return self._executor(sens_param, obj, args)
+        return self._executor.get_shape(*args)
+    def constant_folding(self, *args):
+        """
+        Get value by infer value.
+        Args:
+            args (tuple): Op prim and input arguments.
+        Return:
+            Tensor, the value get by op infer.
+        """
+        return self._executor.constant_folding(*args)
 class _CellGraphExecutor:
@@ -1314,6 +1350,6 @@ def ms_memory_recycle():
 _cell_graph_executor = _CellGraphExecutor()
-_pynative_executor = _PynativeExecutor()
+_pynative_executor = _PyNativeExecutor()
 __all__ = ['ms_function', 'ms_memory_recycle', 'ms_class']
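
Taken together, the wrapper now exposes the whole eager-execution surface through one object. A usage sketch of the unified internal API (argument conventions follow the docstrings above; `TopTypeof` is borrowed from the indexing code below as an example of a foldable primitive):

    import numpy as np
    from mindspore import Tensor
    from mindspore.ops import Primitive
    from mindspore.common.api import _pynative_executor

    x = Tensor(np.ones([4, 5], dtype=np.float32))
    # Shape query, routed to ForwardExecutor::dynamic_shape() in C++:
    print(_pynative_executor.get_shape(x))  # expected: (4, 5)
    # Value-by-infer for a primitive applied to constant inputs:
    print(_pynative_executor.constant_folding(Primitive('TopTypeof'), x))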

View File

@@ -30,7 +30,7 @@ from mindspore._checkparam import Validator as validator
 from mindspore.common import dtype as mstype
 from mindspore.common.parameter import Parameter
 from mindspore.communication.management import GlobalComm
-from mindspore._c_expression import call_constant_folding
+from mindspore.common.api import _pynative_executor
 # Bit operation
@@ -2114,14 +2114,14 @@ class TopTypeof(Primitive):
     def __init__(self):
         self.prim = Primitive('TopTypeof')
         self.typeof_cache = {
-            'slice': call_constant_folding(self.prim, slice(None, None, None)),
-            'list': call_constant_folding(self.prim, []),
-            'tuple': call_constant_folding(self.prim, ()),
-            'Tensor': call_constant_folding(self.prim, Tensor(np.ones([1], dtype=np.float32))),
-            'NoneType': call_constant_folding(self.prim, None),
-            'int': call_constant_folding(self.prim, 0),
-            'bool': call_constant_folding(self.prim, False),
-            'ellipsis': call_constant_folding(self.prim, ...)
+            'slice': _pynative_executor.constant_folding(self.prim, slice(None, None, None)),
+            'list': _pynative_executor.constant_folding(self.prim, []),
+            'tuple': _pynative_executor.constant_folding(self.prim, ()),
+            'Tensor': _pynative_executor.constant_folding(self.prim, Tensor(np.ones([1], dtype=np.float32))),
+            'NoneType': _pynative_executor.constant_folding(self.prim, None),
+            'int': _pynative_executor.constant_folding(self.prim, 0),
+            'bool': _pynative_executor.constant_folding(self.prim, False),
+            'ellipsis': _pynative_executor.constant_folding(self.prim, ...)
         }
     def __call__(self, x):
@@ -2130,7 +2130,7 @@ class TopTypeof(Primitive):
             index_type = 'Tensor'
         if index_type in ('slice', 'list', 'tuple', 'Tensor', 'NoneType', 'int', 'bool', 'ellipsis'):
             return self.typeof_cache.get(index_type)
-        return call_constant_folding(self.prim, x)
+        return _pynative_executor.constant_folding(self.prim, x)
 class MixedPrecisionCast(Primitive):
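
The cache above exists because every `constant_folding` call crosses the pybind boundary into C++ inference; precomputing the results for the common index types keeps repeated tensor indexing cheap. A sketch of the cached and uncached paths (`TopTypeof` is an internal primitive; the import path is assumed):

    from mindspore.ops.operations._inner_ops import TopTypeof  # internal API, path assumed

    top_typeof = TopTypeof()
    t_slice = top_typeof(slice(None, None, None))  # hit: served from typeof_cache
    t_float = top_typeof(1.0)                      # miss: 'float' is not cached, so this
                                                   # falls through to constant_folding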

View File

@@ -35,10 +35,10 @@ from mindspore.common import dtype as mstype
 from mindspore.common._decorator import deprecated
 from mindspore.common.parameter import Parameter
 from mindspore.common.tensor import Tensor, CSRTensor, COOTensor
+from mindspore.common.api import _pynative_executor
 from mindspore._c_expression import Tensor as Tensor_
 from mindspore._c_expression import CSRTensor as CSRTensor_
 from mindspore._c_expression import COOTensor as COOTensor_
-from mindspore._c_expression import get_dyn_shape
 class _ScatterOp(PrimitiveWithInfer):
@@ -877,7 +877,7 @@ class Shape(Primitive):
         """Initialize Shape"""
     def __call__(self, x):
-        return get_dyn_shape(x)
+        return _pynative_executor.get_shape(x)
 class TensorShape(Primitive):
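
In PyNative mode, `Shape` now answers through the executor singleton instead of the removed `get_dyn_shape` binding; graph mode is unaffected because `__call__` is bypassed during compilation. Quick usage sketch:

    import numpy as np
    from mindspore import Tensor, ops

    shape_op = ops.Shape()
    print(shape_op(Tensor(np.ones([2, 3], dtype=np.float32))))  # expected: (2, 3)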

View File

@@ -22,7 +22,8 @@ from mindspore.log import _LogActionOnce
 from mindspore import context, log as logger
 from mindspore.parallel._utils import _is_in_auto_parallel_mode
 from mindspore.common.parameter import Parameter
-from mindspore._c_expression import Primitive_, real_run_op, prim_type
+from mindspore.common.api import _pynative_executor
+from mindspore._c_expression import Primitive_, prim_type
 from mindspore._checkparam import Validator
 from mindspore.ops import signature as sig
@@ -749,5 +750,5 @@ def constexpr(fn=None, get_instance=True, name=None, reuse_result=True, check=Tr
 @_wrap_func
 def _run_op(obj, op_name, args):
     """Single op execution function supported by ge in PyNative mode."""
-    output = real_run_op(obj, op_name, args)
+    output = _pynative_executor.real_run_op(obj, op_name, args)
     return output
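
`_run_op` is the single funnel for eager primitive execution, so this one-line change reroutes all PyNative op dispatch through the executor singleton. An end-to-end sketch (assumes a working backend):

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops

    ms.set_context(mode=ms.PYNATIVE_MODE)
    # Primitive.__call__ -> _run_op -> _pynative_executor.real_run_op
    out = ops.Add()(Tensor(np.ones([2])), Tensor(np.ones([2])))
    print(out)  # expected: [2. 2.]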