delete useless backend policy

chujinjin 2022-01-25 15:32:17 +08:00
parent 6b8908c36d
commit 818c542e56
7 changed files with 26 additions and 449 deletions

View File

@@ -114,6 +114,10 @@ if(ENABLE_LOAD_ANF_IR)
add_compile_definitions(ENABLE_LOAD_ANF_IR)
endif()
if(ENABLE_TESTCASES)
add_compile_definitions(ENABLE_TEST)
endif()
if(ENABLE_TESTCASES OR (NOT ENABLE_D))
add_compile_definitions(NO_DLIB=1)
endif()
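
The ENABLE_TEST definition added here is what the reworked dispatch in pynative_execute.cc keys on (see the #ifndef ENABLE_TEST hunk further down): test builds route every op through the Python VM, while normal builds send only the ops whitelisted in kVmOperators there. A minimal sketch of that gating, assuming hypothetical stand-ins for the real MindSpore types and whitelist contents:

// Sketch only: how the ENABLE_TEST compile definition gates op dispatch.
// MsBackendPolicy and kVmOperators mirror names from the diff; the values
// below are hypothetical stand-ins, not the real MindSpore definitions.
#include <iostream>
#include <set>
#include <string>

enum MsBackendPolicy { kMsBackendVmOnly, kMsBackendMsPrior };
static const std::set<std::string> kVmOperators = {"HookBackward"};  // hypothetical contents

std::string DispatchTarget(MsBackendPolicy policy, const std::string &op_name) {
  if (policy == kMsBackendVmOnly) {
#ifndef ENABLE_TEST
    // Normal build: only whitelisted ops run in the Python VM.
    return kVmOperators.count(op_name) != 0 ? "RunOpInVM" : "RunOpInMs";
#else
    // ENABLE_TESTCASES build: every op runs in the VM.
    return "RunOpInVM";
#endif
  }
  return "RunOpInMs";
}

int main() {
  std::cout << DispatchTarget(kMsBackendVmOnly, "Add") << std::endl;  // prints RunOpInMs
}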

View File

@@ -1,9 +1,4 @@
file(GLOB_RECURSE _PYNATIVE_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "pynative_execute.cc")
if(ENABLE_D)
file(GLOB_RECURSE _GE_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "pynative_execute_ge.cc")
list(APPEND _PYNATIVE_SRC_LIST ${_GE_SRC_LIST})
endif()
set_property(SOURCE ${_PYNATIVE_SRC_LIST} PROPERTY COMPILE_DEFINITIONS SUBMODULE_ID=mindspore::SubModuleId::SM_PYNATIVE)
add_library(_mindspore_pipeline_pynative_obj OBJECT ${_PYNATIVE_SRC_LIST})

View File

@@ -35,18 +35,6 @@ namespace mindspore {
namespace pynative {
namespace py = pybind11;
enum PynativeStatusCode {
PYNATIVE_SUCCESS = 0,
PYNATIVE_OP_NOT_IMPLEMENTED_ERR = 1,
PYNATIVE_OP_INPUTS_ERR = 2,
PYNATIVE_OP_PARAMS_ERR = 3,
PYNATIVE_OP_ATTRS_ERR = 4,
PYNATIVE_GRAPH_MANAGER_ERR = 5,
PYNATIVE_GRAPH_GE_BUILD_ERR = 6,
PYNATIVE_GRAPH_GE_RUN_ERR = 7,
PYNATIVE_UNKNOWN_STATE = 0XFF
};
enum RunOpArgsEnum { PY_PRIM = 0, PY_NAME, PY_INPUTS, PY_ARGS_NUM };
struct OpExecInfo {

View File

@@ -61,10 +61,6 @@
#include "runtime/hardware/device_context_manager.h"
#include "vm/transform.h"
#ifdef ENABLE_D
#include "pipeline/pynative/pynative_execute_ge.h"
#endif
using mindspore::tensor::TensorPy;
namespace mindspore::pynative {
@@ -2003,88 +1999,53 @@ void GradExecutor::SaveForwardTensorInfoInBpropGraph(const pipeline::ResourcePtr
py::tuple ForwardExecutor::RunOpWithInitBackendPolicy(const OpExecInfoPtr &op_exec_info) {
MS_EXCEPTION_IF_NULL(op_exec_info);
auto backend_policy = InitEnv(op_exec_info);
PynativeStatusCode status = PYNATIVE_UNKNOWN_STATE;
auto backend_policy = GetBackendPolicy(op_exec_info);
// returns a null py::tuple on error
py::object result = RunOpWithBackendPolicy(backend_policy, op_exec_info, &status);
if (status != PYNATIVE_SUCCESS) {
MS_LOG(EXCEPTION) << "Failed to run " << op_exec_info->op_name;
}
py::object result = RunOpWithBackendPolicy(backend_policy, op_exec_info);
MS_LOG(DEBUG) << "RunOp end";
return result;
}
MsBackendPolicy ForwardExecutor::InitEnv(const OpExecInfoPtr &op_exec_info) {
MsBackendPolicy ForwardExecutor::GetBackendPolicy(const OpExecInfoPtr &op_exec_info) {
MS_EXCEPTION_IF_NULL(op_exec_info);
MS_LOG(DEBUG) << "RunOp start, op name is: " << op_exec_info->op_name;
parse::python_adapter::set_python_env_flag(true);
auto ms_context = MsContext::GetInstance();
MS_EXCEPTION_IF_NULL(ms_context);
MsBackendPolicy backend_policy = kMsBackendMsPrior;
MsBackendPolicy backend_policy = kMsBackendVmOnly;
#ifdef ENABLE_D
if (ms_context->backend_policy() == "ge") {
context::PynativeInitGe(ms_context);
backend_policy = kMsBackendGeOnly;
MS_LOG(EXCEPTION) << "In PyNative mode, not support ge backend!";
}
#else
if (!context::IsTsdOpened(ms_context)) {
if (!context::OpenTsd(ms_context)) {
MS_LOG(EXCEPTION) << "Open tsd failed";
}
}
if (ms_context->backend_policy() == "ms") {
backend_policy = kMsBackendMsPrior;
} else {
backend_policy = kMsBackendVmOnly;
}
#endif
if (kVmOperators.find(op_exec_info->op_name) != kVmOperators.end()) {
backend_policy = kMsBackendVmOnly;
}
return backend_policy;
}
py::object ForwardExecutor::RunOpWithBackendPolicy(MsBackendPolicy backend_policy, const OpExecInfoPtr &op_exec_info,
PynativeStatusCode *status) {
MS_EXCEPTION_IF_NULL(status);
py::object ForwardExecutor::RunOpWithBackendPolicy(MsBackendPolicy backend_policy, const OpExecInfoPtr &op_exec_info) {
py::object result;
switch (backend_policy) {
case kMsBackendVmOnly: {
// use vm only
MS_LOG(DEBUG) << "RunOp use VM only backend";
result = RunOpInVM(op_exec_info, status);
break;
if (backend_policy == kMsBackendVmOnly) {
#ifndef ENABLE_TEST
if (kVmOperators.find(op_exec_info->op_name) != kVmOperators.end()) {
result = RunOpInVM(op_exec_info);
} else {
result = RunOpInMs(op_exec_info);
}
case kMsBackendGePrior: {
#ifdef ENABLE_D
// use GE first, use vm when GE fails
MS_LOG(DEBUG) << "RunOp use GE first backend";
result = RunOpInGE(op_exec_info, status);
if (*status != PYNATIVE_SUCCESS) {
result = RunOpInVM(op_exec_info, status);
}
#else
result = RunOpInVM(op_exec_info);
#endif
break;
}
case kMsBackendMsPrior: {
// use Ms first, fall back to others when Ms fails
MS_LOG(DEBUG) << "RunOp use Ms first backend";
result = RunOpInMs(op_exec_info, status);
if (*status != PYNATIVE_SUCCESS) {
MS_LOG(ERROR) << "RunOp use Ms backend failed!!!";
}
break;
}
default:
MS_LOG(ERROR) << "No backend configured for run op";
}
return result;
}
py::object ForwardExecutor::RunOpInVM(const OpExecInfoPtr &op_exec_info, PynativeStatusCode *status) {
py::object ForwardExecutor::RunOpInVM(const OpExecInfoPtr &op_exec_info) {
MS_LOG(DEBUG) << "RunOpInVM start";
MS_EXCEPTION_IF_NULL(status);
MS_EXCEPTION_IF_NULL(op_exec_info);
MS_EXCEPTION_IF_NULL(op_exec_info->py_primitive);
@@ -2107,7 +2068,6 @@ py::object ForwardExecutor::RunOpInVM(const OpExecInfoPtr &op_exec_info, Pynativ
result[i] = new_tensor;
}
}
*status = PYNATIVE_SUCCESS;
MS_LOG(DEBUG) << "RunOpInVM end";
return std::move(result);
}
@@ -2117,12 +2077,10 @@ py::object ForwardExecutor::RunOpInVM(const OpExecInfoPtr &op_exec_info, Pynativ
auto result = primitive->RunPyComputeFunction(op_inputs);
MS_LOG(DEBUG) << "RunOpInVM end";
if (py::isinstance<py::none>(result)) {
MS_LOG(ERROR) << "VM got the result none, please check whether it is failed to get func";
*status = PYNATIVE_OP_NOT_IMPLEMENTED_ERR;
MS_LOG(EXCEPTION) << "VM op " << op_exec_info->op_name << " run failed!";
py::tuple err_ret(0);
return std::move(err_ret);
}
*status = PYNATIVE_SUCCESS;
if (py::isinstance<py::tuple>(result)) {
return result;
}
@@ -2138,9 +2096,8 @@ void ForwardExecutor::CheckIfNeedSyncForHeterogeneous(const std::string &cur_tar
last_target_ = cur_target;
}
py::object ForwardExecutor::RunOpInMs(const OpExecInfoPtr &op_exec_info, PynativeStatusCode *status) {
py::object ForwardExecutor::RunOpInMs(const OpExecInfoPtr &op_exec_info) {
MS_EXCEPTION_IF_NULL(op_exec_info);
MS_EXCEPTION_IF_NULL(status);
compile::SetMindRTEnable();
MS_LOG(DEBUG) << "Start run op [" << op_exec_info->op_name << "] with backend policy ms";
auto ms_context = MsContext::GetInstance();
@@ -2207,7 +2164,6 @@ py::object ForwardExecutor::RunOpInMs(const OpExecInfoPtr &op_exec_info, Pynativ
}
auto result = BaseRefToPyData(outputs);
ms_context->set_param<bool>(MS_CTX_ENABLE_PYNATIVE_INFER, false);
*status = PYNATIVE_SUCCESS;
MS_LOG(DEBUG) << "End run op [" << op_exec_info->op_name << "] with backend policy ms";
return result;
}
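
Taken together, these hunks drop the PynativeStatusCode out-parameter plumbing: RunOpInVM and RunOpInMs no longer report through a status pointer, and RunOpWithInitBackendPolicy no longer checks one, because MS_LOG(EXCEPTION) throws at the failure site. A minimal before/after sketch of that pattern, with simplified stand-in types (std::runtime_error stands in for the exception MS_LOG(EXCEPTION) raises):

// Before (old shape): py::object RunOpInVM(const OpExecInfoPtr &, PynativeStatusCode *status);
// and every call site had to check *status afterwards.
// After: the function either succeeds or throws, so call sites collapse to one call.
#include <iostream>
#include <stdexcept>
#include <string>

struct OpExecInfo { std::string op_name; };  // simplified stand-in

std::string RunOpInVM(const OpExecInfo &op) {
  bool ok = !op.op_name.empty();  // stand-in for the real VM run
  if (!ok) {
    throw std::runtime_error("VM op " + op.op_name + " run failed!");  // was: *status = ...; return err_ret;
  }
  return "result";
}

int main() {
  try {
    std::cout << RunOpInVM({"ReLU"}) << std::endl;
  } catch (const std::runtime_error &e) {
    std::cerr << e.what() << std::endl;
  }
}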

View File

@@ -339,13 +339,12 @@ class ForwardExecutor {
private:
GradExecutorPtr grad() const;
MsBackendPolicy InitEnv(const OpExecInfoPtr &op_exec_info);
MsBackendPolicy GetBackendPolicy(const OpExecInfoPtr &op_exec_info);
py::tuple RunOpWithInitBackendPolicy(const OpExecInfoPtr &op_exec_info);
void RunMixedPrecisionCastOp(const OpExecInfoPtr &op_exec_info, py::object *ret);
py::object RunOpInVM(const OpExecInfoPtr &op_exec_info, PynativeStatusCode *status);
py::object RunOpInMs(const OpExecInfoPtr &op_exec_info, PynativeStatusCode *status);
py::object RunOpWithBackendPolicy(MsBackendPolicy backend_policy, const OpExecInfoPtr &op_exec_info,
PynativeStatusCode *status);
py::object RunOpInVM(const OpExecInfoPtr &op_exec_info);
py::object RunOpInMs(const OpExecInfoPtr &op_exec_info);
py::object RunOpWithBackendPolicy(MsBackendPolicy backend_policy, const OpExecInfoPtr &op_exec_info);
void SetNonCostantValueAbs(const AbstractBasePtr &abs, size_t i, const std::string &id);
void GetInputsArgsSpec(const OpExecInfoPtr &op_exec_info, abstract::AbstractBasePtrList *args_spec_list);
void GetOpOutputAbstract(const OpExecInfoPtr &op_exec_info, const abstract::AbstractBasePtrList &args_spec_list,

View File

@@ -1,319 +0,0 @@
/**
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "pipeline/pynative/pynative_execute_ge.h"
#include <typeinfo>
#include <map>
#include <set>
#include "utils/hash_set.h"
#include "utils/any.h"
#include "utils/utils.h"
#include "utils/ms_context.h"
#include "frontend/operator/ops.h"
#include "pipeline/jit/parse/data_converter.h"
#include "pipeline/jit/static_analysis/prim.h"
#include "backend/session/session_factory.h"
#include "pybind_api/ir/tensor_py.h"
#include "transform/graph_ir/op_declare/array_ops_declare.h"
using mindspore::tensor::TensorPy;
namespace mindspore {
namespace pynative {
const char SINGLE_OP_GRAPH[] = "single_op_graph";
using MeTensor = mindspore::tensor::Tensor;
using MeTensorPtr = mindspore::tensor::TensorPtr;
using GeOperator = ge::Operator;
using GeOperatorPtr = std::shared_ptr<GeOperator>;
using transform::GraphRunner;
using transform::GraphRunnerOptions;
using transform::OperatorPtr;
static std::shared_ptr<session::SessionBasic> session = nullptr;
inline ValuePtr PyAttrValue(const py::object &obj) {
ValuePtr converted_ret = nullptr;
bool converted = parse::ConvertData(obj, &converted_ret);
if (!converted) {
MS_LOG(EXCEPTION) << "Attribute convert error with type:" << std::string(py::str(obj));
}
return converted_ret;
}
MeTensorPtr ConvertPyObjToTensor(const py::object &obj) {
MeTensorPtr me_tensor_ptr = nullptr;
if (py::isinstance<MeTensor>(obj)) {
me_tensor_ptr = py::cast<MeTensorPtr>(obj);
} else if (py::isinstance<py::tuple>(obj)) {
me_tensor_ptr = TensorPy::MakeTensor(py::array(py::cast<py::tuple>(obj)), nullptr);
} else if (py::isinstance<py::float_>(obj)) {
me_tensor_ptr = TensorPy::MakeTensor(py::array(py::cast<py::float_>(obj)), nullptr);
} else if (py::isinstance<py::int_>(obj)) {
me_tensor_ptr = TensorPy::MakeTensor(py::array(py::cast<py::int_>(obj)), nullptr);
} else if (py::isinstance<py::list>(obj)) {
me_tensor_ptr = TensorPy::MakeTensor(py::array(py::cast<py::list>(obj)), nullptr);
} else if (py::isinstance<py::array>(obj)) {
me_tensor_ptr = TensorPy::MakeTensor(py::cast<py::array>(obj), nullptr);
} else {
MS_LOG(EXCEPTION) << "Run op inputs type is invalid!";
}
return me_tensor_ptr;
}
bool SetInputsForSingleOpGraph(const OpExecInfoPtr &op_exec_info, const std::vector<GeTensorPtr> &inputs,
const OperatorPtr &op, std::vector<GeOperator> *graph_input_nodes) {
MS_EXCEPTION_IF_NULL(op_exec_info);
MS_EXCEPTION_IF_NULL(graph_input_nodes);
auto op_inputs = op_exec_info->op_inputs;
std::string op_name = op_exec_info->op_name;
transform::OpAdapterPtr adapter = transform::DfGraphConvertor::FindAdapter(op_name, true);
if (adapter == nullptr) {
return false;
}
int64_t op_input_idx = 1;
size_t size = inputs.size();
for (size_t i = 0; i < size; i++) {
if (inputs[i] == nullptr) {
continue;
}
auto const_op = std::make_shared<transform::Constant>();
MS_EXCEPTION_IF_NULL(const_op);
(void)const_op->set_attr_value(*inputs[i]);
MeTensorPtr me_tensor_ptr = ConvertPyObjToTensor(op_inputs[i]);
MS_EXCEPTION_IF_NULL(me_tensor_ptr);
auto const_op_desc =
transform::TransformUtil::GetGeTensorDesc(me_tensor_ptr->shape_c(), me_tensor_ptr->data_type(), kOpFormat_NCHW);
if (const_op_desc == nullptr) {
MS_LOG(ERROR) << "Create variable " << op_name << " output descriptor failed!";
return false;
}
auto pointer_cast_const_op = std::static_pointer_cast<transform::Constant>(const_op);
MS_EXCEPTION_IF_NULL(pointer_cast_const_op);
(void)pointer_cast_const_op->update_output_desc_y(*const_op_desc);
auto &input_map = adapter->getInputMap();
if (input_map.find(op_input_idx) == input_map.end()) {
continue;
}
if (adapter->setInput(op, op_input_idx++, const_op)) {
MS_LOG(ERROR) << "Failed to set params, index is " << op_input_idx;
return false;
}
graph_input_nodes->push_back(*const_op);
}
return true;
}
bool BuildSingleOpGraph(const OpExecInfoPtr &op_exec_info, const std::vector<GeTensorPtr> &inputs,
const mindspore::HashMap<std::string, ValuePtr> &attrs, const GeGraphPtr &graph) {
MS_EXCEPTION_IF_NULL(op_exec_info);
std::string op_name = op_exec_info->op_name;
auto op_inputs = op_exec_info->op_inputs;
transform::OpAdapterPtr adapter = transform::DfGraphConvertor::FindAdapter(op_name, true);
if (adapter == nullptr) {
MS_LOG(ERROR) << "Unable to find Adapter for " << ((std::string)py::str(op_name));
return false;
}
OperatorPtr op = adapter->generate(op_name);
MS_EXCEPTION_IF_NULL(op);
std::vector<GeOperator> graph_input_nodes;
// hold param nodes after setting input and output for the graph
// set input
if (!SetInputsForSingleOpGraph(op_exec_info, inputs, op, &graph_input_nodes)) {
return false;
}
// set attributes
for (auto attr : attrs) {
(void)adapter->setAttr(op, attr.first, attr.second);
}
// set default attributes
auto extra_attrs = adapter->GetExtraAttr();
for (auto attr : extra_attrs) {
(void)adapter->setAttr(op, attr.first, attr.second);
}
// set input attributes
auto &input_attr_map = adapter->getInputAttrMap();
for (auto &it : input_attr_map) {
if (op_inputs.size() < it.first) {
continue;
}
auto const_value = PyAttrValue(op_inputs[it.first - 1]);
if (const_value->isa<None>()) {
continue;
}
it.second.set_attr(op, const_value);
}
// construct output data nodes
std::vector<GeOperator> graph_outputs{*op};
// set input and output nodes for the graph
MS_EXCEPTION_IF_NULL(graph);
(void)graph->SetInputs(graph_input_nodes).SetOutputs(graph_outputs);
MS_LOG(INFO) << "BuildSingleOpGraph done";
return true;
}
void ToTensorPtr(const OpExecInfoPtr op_exec_info, std::vector<GeTensorPtr> *const inputs) {
MS_EXCEPTION_IF_NULL(inputs);
MS_EXCEPTION_IF_NULL(op_exec_info);
auto op_inputs = op_exec_info->op_inputs;
size_t size = op_inputs.size();
for (size_t i = 0; i < size; i++) {
if (py::isinstance<py::none>(op_inputs[i])) {
inputs->emplace_back(nullptr);
continue;
}
MeTensorPtr me_tensor_ptr = ConvertPyObjToTensor(op_inputs[i]);
auto ge_tensor_ptr = transform::TransformUtil::ConvertTensor(me_tensor_ptr, kOpFormat_NCHW);
if (ge_tensor_ptr == nullptr) {
MS_LOG(EXCEPTION) << "Convert inputs to GE tensor failed in op " << op_exec_info->op_name << ".";
}
// set inputs for operator to build single node graph
inputs->push_back(ge_tensor_ptr);
}
}
PynativeStatusCode ConvertAttributes(const OpExecInfoPtr &op_exec_info, const std::vector<GeTensorPtr> &inputs) {
MS_EXCEPTION_IF_NULL(op_exec_info);
auto op_attrs = op_exec_info->op_attrs;
mindspore::HashMap<std::string, ValuePtr> attrs{};
for (auto &item : op_attrs) {
if (!py::isinstance<py::str>(item.first)) {
MS_LOG(ERROR) << "Type error in py dict convert";
return PYNATIVE_OP_ATTRS_ERR;
}
std::string name = py::cast<std::string>(item.first);
auto attr_value = PyAttrValue(py::cast<py::object>(item.second));
(void)attrs.emplace(name, attr_value);
}
// build graph
GeGraphPtr graph = std::make_shared<GeGraph>(op_exec_info->op_name);
if (BuildSingleOpGraph(op_exec_info, inputs, attrs, graph) == false) {
MS_LOG(ERROR) << "Failed to BuildSingleOpGraph";
return PYNATIVE_GRAPH_GE_BUILD_ERR;
}
// add the single op graph into the graph manager, which will be iterated by session.
transform::Status ret =
transform::DfGraphManager::GetInstance().AddGraph(SINGLE_OP_GRAPH, std::shared_ptr<transform::DfGraph>(graph));
if (ret != transform::SUCCESS) {
MS_LOG(ERROR) << "Failed to AddGraph into graph manager";
return PYNATIVE_GRAPH_MANAGER_ERR;
}
return PYNATIVE_SUCCESS;
}
std::vector<MeTensorPtr> ConvertOutputTensors(const OpExecInfoPtr &op_exec_info,
const std::vector<GeTensorPtr> &ge_tensors) {
std::vector<MeTensorPtr> outputs;
MS_EXCEPTION_IF_NULL(op_exec_info);
AbstractBasePtr abs_base = op_exec_info->abstract;
std::vector<std::vector<int64_t>> shapes;
if (abs_base != nullptr && abs_base->isa<abstract::AbstractTensor>()) {
auto arg_tensor = dyn_cast<abstract::AbstractTensor>(abs_base);
MS_EXCEPTION_IF_NULL(arg_tensor);
auto shape = arg_tensor->shape();
MS_EXCEPTION_IF_NULL(shape);
shapes.emplace_back(shape->shape());
outputs = transform::TransformUtil::ConvertGeTensors(ge_tensors, shapes);
return outputs;
}
if (abs_base != nullptr && abs_base->isa<abstract::AbstractTuple>()) {
auto arg_tuple = dyn_cast<abstract::AbstractTuple>(abs_base);
MS_EXCEPTION_IF_NULL(arg_tuple);
size_t len = arg_tuple->size();
for (size_t i = 0; i < len; i++) {
if (arg_tuple->elements()[i]->isa<abstract::AbstractTensor>()) {
auto tensor = dyn_cast<abstract::AbstractTensor>(arg_tuple->elements()[i]);
MS_EXCEPTION_IF_NULL(tensor);
auto shape = tensor->shape();
MS_EXCEPTION_IF_NULL(shape);
shapes.emplace_back(shape->shape());
}
}
outputs = transform::TransformUtil::ConvertGeTensors(ge_tensors, shapes);
return outputs;
}
for (auto &it : ge_tensors) {
auto tensor = transform::TransformUtil::ConvertGeTensor(it);
if (tensor != nullptr) {
outputs.emplace_back(tensor);
}
}
return outputs;
}
py::object RunOpInGE(const OpExecInfoPtr &op_exec_info, PynativeStatusCode *status) {
MS_LOG(INFO) << "RunOpInGe start";
MS_EXCEPTION_IF_NULL(op_exec_info);
MS_EXCEPTION_IF_NULL(status);
// returns a null py::tuple on error
py::tuple err_ret(0);
auto op_name = op_exec_info->op_name;
transform::OpAdapterPtr adapter = transform::DfGraphConvertor::FindAdapter(op_name, true);
if (adapter == nullptr) {
MS_LOG(ERROR) << "Unable to find GE Adapter for " << ((std::string)py::str(op_name));
*status = PYNATIVE_OP_NOT_IMPLEMENTED_ERR;
return std::move(err_ret);
}
std::vector<GeTensorPtr> inputs{};
ToTensorPtr(op_exec_info, &inputs);
// convert me attr to ge AttrValue
PynativeStatusCode ret = ConvertAttributes(op_exec_info, inputs);
if (ret != PYNATIVE_SUCCESS) {
*status = ret;
return std::move(err_ret);
}
// run graph
transform::RunOptions run_options;
run_options.name = SINGLE_OP_GRAPH;
std::vector<GeTensorPtr> ge_inputs;
std::vector<GeTensorPtr> ge_outputs;
transform::GraphRunnerOptions graph_runner_options;
graph_runner_options.options["ge.trainFlag"] = "1";
auto graph_runner = std::make_shared<transform::GraphRunner>(graph_runner_options);
transform::Status run_ret;
{
// Release GIL before calling into (potentially long-running) C++ code
py::gil_scoped_release release;
run_ret = graph_runner->RunGraph(run_options, ge_inputs, &ge_outputs);
}
if (run_ret != transform::Status::SUCCESS) {
MS_LOG(ERROR) << "GraphRunner fails to run graph";
*status = PYNATIVE_GRAPH_GE_RUN_ERR;
return std::move(err_ret);
}
std::vector<MeTensorPtr> graph_outputs = ConvertOutputTensors(op_exec_info, ge_outputs);
size_t output_size = graph_outputs.size();
py::tuple result(output_size);
for (size_t i = 0; i < output_size; i++) {
MS_EXCEPTION_IF_NULL(graph_outputs[i]);
result[i] = *graph_outputs[i];
}
*status = PYNATIVE_SUCCESS;
MS_LOG(INFO) << "RunOpInGe end";
return std::move(result);
}
} // namespace pynative
} // namespace mindspore

View File

@@ -1,46 +0,0 @@
/**
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_PIPELINE_PYNATIVE_PYNATIVE_EXECUTE_GE_H_
#define MINDSPORE_CCSRC_PIPELINE_PYNATIVE_PYNATIVE_EXECUTE_GE_H_
#include <vector>
#include <utility>
#include <string>
#include <memory>
#include "utils/hash_map.h"
#include "pipeline/pynative/base.h"
#include "transform/graph_ir/convert.h"
#include "transform/graph_ir/graph_runner.h"
#include "transform/graph_ir/types.h"
#include "utils/ms_context.h"
using GeTensor = ge::Tensor;
using GeTensorPtr = std::shared_ptr<GeTensor>;
using GeGraph = ge::Graph;
using GeGraphPtr = std::shared_ptr<GeGraph>;
namespace mindspore {
namespace pynative {
bool BuildSingleOpGraph(const OpExecInfoPtr &op_exec_info, const std::vector<GeTensorPtr> &inputs,
const mindspore::HashMap<std::string, ValuePtr> &attrs, const GeGraphPtr &graph);
py::object RunOpInGE(const OpExecInfoPtr &op_exec_info, PynativeStatusCode *status);
} // namespace pynative
} // namespace mindspore
#endif // MINDSPORE_CCSRC_PIPELINE_PYNATIVE_PYNATIVE_EXECUTE_GE_H_