forked from OSSInnovation/mindspore

fix yolov3 multi getitem bug and tensor ptr not unique bug

parent 2d50a43be9
commit b79523a994
@@ -29,6 +29,7 @@
 namespace mindspore {
 namespace tensor {
+static uint64_t count = 0;
 void DataBuf2Contiguous(const py::array &src, py::array *const dest) {
   if (dest == nullptr) {
     MS_LOG(EXCEPTION) << "Failed to copy data to a contiguous buffer as dest is nullptr!";
@@ -208,7 +209,7 @@ void Tensor::init(const py::array &input, const TypeId &data_type) {
     data_ = input;
   }
   dirty_ = true;
-  id_ = std::to_string((uintptr_t)(this));
+  id_ = std::to_string((uintptr_t)(this)) + std::to_string(count++);
 }

 void Tensor::init(TypeId data_type, const std::vector<int> &shape, py::array *const data) {
@@ -255,7 +256,7 @@ void Tensor::init(TypeId data_type, const std::vector<int> &shape, py::array *co
       MS_LOG(EXCEPTION) << "Cannot construct Tensor because of unsupported data type: " << data_type << ".";
       break;
   }
-  id_ = std::to_string((uintptr_t)(this));
+  id_ = std::to_string((uintptr_t)(this)) + std::to_string(count++);
 }

 TypePtr Tensor::SetDtype(const TypePtr type_ptr) {
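Why the counter: an id built from `(uintptr_t)this` alone is only unique among simultaneously live tensors. Once a tensor is destroyed, the allocator can hand the same address to a new one, and the stale id recorded under that address then aliases the new tensor. A standalone sketch of the failure mode and the counter fix (the `Toy` type and helper names are invented for illustration, this is not MindSpore code):

```cpp
// Sketch only: shows why "address as id" can collide and how a process-wide
// counter suffix restores uniqueness.
#include <cstdint>
#include <iostream>
#include <memory>
#include <string>

static uint64_t count = 0;  // same idea as the counter added in the patch

struct Toy {};

std::string old_id(const Toy *p) { return std::to_string((uintptr_t)p); }
std::string new_id(const Toy *p) { return std::to_string((uintptr_t)p) + std::to_string(count++); }

int main() {
  auto a = std::make_unique<Toy>();
  std::string first = old_id(a.get());
  a.reset();  // the allocator may now hand this address out again
  auto b = std::make_unique<Toy>();
  // With old_id the two logically distinct objects can share an id;
  // with new_id the counter suffix differs even if the address repeats.
  std::cout << (first == old_id(b.get()) ? "collision" : "distinct") << "\n";
  std::cout << new_id(b.get()) << "\n";
}
```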
@@ -57,7 +57,7 @@ struct OpExecInfo {
   py::dict op_attrs;
 };
 using OpExecInfoPtr = std::shared_ptr<OpExecInfo>;
-OpExecInfoPtr GenerateOpExecInfo(const py::args &args);
+OpExecInfoPtr GenerateOpExecInfo(const py::args &args, py::list *const out_args);

 const std::set<std::string> ignore_infer_prim = {"make_ref"};
 }  // namespace pynative
@@ -53,7 +53,7 @@

 const char SINGLE_OP_GRAPH[] = "single_op_graph";
 // primitive unable to infer value for constant input in PyNative mode
-const std::set<std::string> vm_operators = {"make_ref", "HookBackward"};
+const std::set<std::string> vm_operators = {"make_ref", "HookBackward", "stop_gradient"};

 namespace mindspore {
 namespace pynative {
@@ -79,15 +79,12 @@ std::string GetId(const py::object &obj) {
     if (p_list.size() == 0) {
       return "empty";
     }
-    to_process = p_list[0];
     prefix = "tuple:";
-    if (!py::isinstance<tensor::Tensor>(to_process)) {
-      std::string key = "";
-      for (size_t i = 0; i < p_list.size(); ++i) {
-        key += std::string(py::str(p_list[i])) + ":";
-      }
-      return prefix + key;
-    }
+    std::string key = "";
+    for (size_t i = 0; i < p_list.size(); ++i) {
+      key += std::string(py::str(GetId(p_list[i]))) + ":";
+    }
+    return prefix + key;
   }
   if (py::isinstance<py::int_>(to_process)) {
     return prefix + std::string(py::str(to_process));
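The tuple branch now builds the key from `GetId` of each element instead of `py::str` of the element itself, so tensors contribute their unique ids and nested tuples flatten recursively into one composite key. A rough standalone analogue of that recursion (the `Node` type is invented; leaf strings stand in for per-tensor ids):

```cpp
// Sketch: recursive id over a nested structure, mirroring the new GetId.
#include <iostream>
#include <string>
#include <vector>

struct Node {
  bool is_tuple = false;
  std::string leaf_id;      // used when !is_tuple
  std::vector<Node> items;  // used when is_tuple
};

std::string GetId(const Node &obj) {
  if (obj.is_tuple) {
    if (obj.items.empty()) return "empty";
    std::string key;
    for (const auto &item : obj.items) {
      key += GetId(item) + ":";  // recurse, as the patched branch does
    }
    return "tuple:" + key;
  }
  return obj.leaf_id;
}

int main() {
  Node t1{false, "0x7f_0", {}}, t2{false, "0x80_1", {}};
  Node nested{true, "", {t1, Node{true, "", {t2}}}};
  std::cout << GetId(nested) << "\n";  // tuple:0x7f_0:tuple:0x80_1::
}
```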
@@ -143,7 +140,8 @@ std::map<SignatureEnumDType, size_t> GetDstType(const py::tuple &py_args,
   return dst_type;
 }

-py::tuple ConvertInputs(const PrimitivePyPtr &prim, const py::list &args, py::tuple *const out_args) {
+py::tuple ConvertInputs(const PrimitivePyPtr &prim, const py::list &args, py::tuple *const out_args,
+                        py::list *out_args_list) {
   auto &py_args = *out_args;
   py::tuple input_mask(args.size());
   for (size_t i = 0; i < args.size(); ++i) {
@@ -171,8 +169,10 @@ py::tuple ConvertInputs(const PrimitivePyPtr &prim, const py::list &args, py::tu
       auto tensor_ptr = py::cast<tensor::TensorPtr>(py_args[it->second]);
       if (py::isinstance<py::int_>(py_args[i])) {
         py_args[i] = std::make_shared<tensor::Tensor>(py::cast<py::int_>(py_args[i]), tensor_ptr->Dtype());
+        (*out_args_list)[i] = py_args[i];
       } else {
         py_args[i] = std::make_shared<tensor::Tensor>(py::cast<py::float_>(py_args[i]), tensor_ptr->Dtype());
+        (*out_args_list)[i] = py_args[i];
       }
       continue;
     }
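The two `(*out_args_list)[i] = py_args[i];` lines mirror each scalar-to-Tensor promotion back into the caller's argument list. Later, `MakeCNode` walks that same list and looks nodes up by object id, so if the converted tensor lived only in the working tuple, the lookup would key on the original scalar and miss. A minimal sketch of that aliasing problem, with invented names (this is not the pybind11 API, just the identity-lookup idea):

```cpp
// Sketch: why the converted value must be written back to the caller's list.
#include <iostream>
#include <map>
#include <memory>
#include <vector>

struct Value { double v; };
using ValuePtr = std::shared_ptr<Value>;

int main() {
  // One underlying argument list, plus the working copy an op uses.
  std::vector<ValuePtr> caller_args = {std::make_shared<Value>(1.0)};
  std::vector<ValuePtr> working = caller_args;

  // "Convert" the scalar: replace it with a new object in the working copy.
  working[0] = std::make_shared<Value>(working[0]->v);

  // A node registry keyed by object identity (as obj_node_map keys on ids).
  std::map<const Value *, int> registry;
  registry[working[0].get()] = 42;

  // Without write-back, a lookup via the caller's list misses the entry.
  std::cout << registry.count(caller_args[0].get()) << "\n";  // 0: miss

  // The fix mirrors the conversion into the caller's list, so ids agree.
  caller_args[0] = working[0];
  std::cout << registry.count(caller_args[0].get()) << "\n";  // 1: hit
}
```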
@@ -195,7 +195,7 @@ void PynativeInfer(const PrimitivePyPtr &prim, const py::list &py_args, OpExecIn
   op_exec_info->abstract = infer_res;
 }

-OpExecInfoPtr GenerateOpExecInfo(const py::args &args) {
+OpExecInfoPtr GenerateOpExecInfo(const py::args &args, py::list *const out_args) {
   if (args.size() != PY_ARGS_NUM) {
     MS_LOG(ERROR) << "Three args are needed by RunOp";
     return nullptr;
@@ -213,7 +213,7 @@ OpExecInfoPtr GenerateOpExecInfo(const py::args &args) {
   size_t input_num = a.size();
   op_exec_info->op_inputs = py::tuple(input_num);

-  op_exec_info->inputs_mask = ConvertInputs(prim, args[PY_INPUTS], &op_exec_info->op_inputs);
+  op_exec_info->inputs_mask = ConvertInputs(prim, args[PY_INPUTS], &op_exec_info->op_inputs, out_args);
   // use python infer method
   if (ignore_infer_prim.find(op_exec_info->op_name) == ignore_infer_prim.end()) {
     PynativeInfer(prim, op_exec_info->op_inputs, op_exec_info.get());
@@ -513,16 +513,15 @@ AnfNodePtr PynativeExecutor::MakeCNode(const OpExecInfoPtr &op_exec_info, const
   auto prim = op_exec_info->py_primitive;
   inputs.push_back(NewValueNode(prim));
   py::tuple op_masks = op_exec_info->inputs_mask;
-  py::list op_args = args[PY_INPUTS];
   AbstractBasePtrList args_spec_list;
-  for (size_t i = 0; i < op_args.size(); i++) {
-    auto node = GetInput(op_args[i], op_masks[i]);
+  for (size_t i = 0; i < args.size(); i++) {
+    auto node = GetInput(args[i], op_masks[i]);
     args_spec_list.push_back(node->abstract());
     inputs.push_back(node);
   }

   auto cnode = curr_g_->NewCNode(inputs);
-  MS_LOG(DEBUG) << "MakeCnode set node " << cnode->DebugString();
+  MS_LOG(DEBUG) << "MakeCnode set node " << cnode->DebugString(4);
   py::object out_real = out;
   if (out.size() == 1) {
     MS_LOG(DEBUG) << "MakeCnode out size is one.";
@@ -534,10 +533,12 @@ AnfNodePtr PynativeExecutor::MakeCNode(const OpExecInfoPtr &op_exec_info, const
     if (value.size() > 1) {
       for (int i = 0; i < static_cast<int>(value.size()); i++) {
         auto value_id = GetId(value[i]);
         MS_LOG(DEBUG) << "MakeCnode set node id " << value_id;
         set_obj_node_map(curr_g_, value_id, cnode, i);
       }
     }
   }
   MS_LOG(DEBUG) << "MakeCnode set node id " << obj_id;
   set_obj_node_map(curr_g_, obj_id, cnode);
   set_pyobj(curr_g_, obj_id);
   return cnode;
@@ -545,12 +546,17 @@ AnfNodePtr PynativeExecutor::MakeCNode(const OpExecInfoPtr &op_exec_info, const

 AnfNodePtr PynativeExecutor::GetObjNode(const py::object &obj) {
   auto &out = graph_info_map_[curr_g_].obj_node_map[GetId(obj)];
-  if (out.second == -1) {
+  if (out.second.size() == 1 && out.second[0] == -1) {
     return out.first;
   }
-  std::vector<AnfNodePtr> tuple_get_item_inputs{NewValueNode(prim::kPrimTupleGetItem), out.first,
-                                                NewValueNode(out.second)};
-  return curr_g_->NewCNode(tuple_get_item_inputs);
+  auto node = out.first;
+  MS_LOG(DEBUG) << "output size " << out.second.size() << node->DebugString();
+  for (auto &idx : out.second) {
+    std::vector<AnfNodePtr> tuple_get_item_inputs{NewValueNode(prim::kPrimTupleGetItem), node, NewValueNode(idx)};
+    node = curr_g_->NewCNode(tuple_get_item_inputs);
+  }
+  MS_LOG(DEBUG) << "GetObjNode output" << node->DebugString(6);
+  return node;
 }

 py::tuple RunOp(const OpExecInfoPtr &op_exec_info, const py::args &args) {
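With index paths stored as `std::vector<int>`, `GetObjNode` now replays the whole path, emitting one `TupleGetItem` per index, which is what makes `output[0][1]`-style access work. A standalone sketch of the replay loop (plain nested structs stand in for the graph's tuple values):

```cpp
// Sketch: chasing an index path with one "getitem" per element, as the
// rewritten GetObjNode chains one TupleGetItem per recorded index.
#include <iostream>
#include <vector>

struct Tree {
  int leaf = 0;
  std::vector<Tree> items;
};

// One step of "tuple_getitem".
const Tree &GetItem(const Tree &node, int idx) { return node.items[idx]; }

int main() {
  // A value shaped like ((10, 20), (30, 40)).
  Tree out{0, {Tree{0, {Tree{10, {}}, Tree{20, {}}}},
               Tree{0, {Tree{30, {}}, Tree{40, {}}}}}};

  std::vector<int> path = {0, 1};  // the recorded path for out[0][1]
  const Tree *node = &out;
  for (int idx : path) {  // chain one getitem per index
    node = &GetItem(*node, idx);
  }
  std::cout << node->leaf << "\n";  // 20
}
```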
@@ -594,8 +600,11 @@ py::tuple RunOp(const OpExecInfoPtr &op_exec_info, const py::args &args) {

 py::tuple RunOp(const py::args &args) {
   MS_LOG(DEBUG) << "RunOp start" << args.size();
-  OpExecInfoPtr op_exec_info = GenerateOpExecInfo(args);
+  py::list args_input = args[PY_INPUTS];
+
+  OpExecInfoPtr op_exec_info = GenerateOpExecInfo(args, &args_input);
   MS_EXCEPTION_IF_NULL(op_exec_info);

   if (op_exec_info->abstract != nullptr) {
     py::dict output = abstract::ConvertAbstractToPython(op_exec_info->abstract);
     if (!output["value"].is_none()) {
@@ -609,7 +618,7 @@ py::tuple RunOp(const py::args &args) {
       return value_ret;
     }
   }
-  return RunOp(op_exec_info, args);
+  return RunOp(op_exec_info, args_input);
 }

 void ClearPyNativeSession() { session = nullptr; }
@@ -646,6 +655,14 @@ void PynativeExecutor::NewGraph(const py::object &cell, const py::args &args) {
   }
 }

+AnfNodePtr PynativeExecutor::MakeValueNode(const py::object &obj, const std::string &obj_id) {
+  ValuePtr converted_ret = nullptr;
+  parse::ConvertData(obj, &converted_ret);
+  auto node = NewValueNode(converted_ret);
+  set_obj_node_map(curr_g_, obj_id, node);
+  return node;
+}
+
 AnfNodePtr PynativeExecutor::GetInput(const py::object &obj, const py::object &op_mask) {
   AnfNodePtr node = nullptr;
   std::string obj_id = GetId(obj);
@@ -683,10 +700,16 @@ AnfNodePtr PynativeExecutor::GetInput(const py::object &obj, const py::object &o
   } else if (py::isinstance<py::tuple>(obj)) {
     // out = op((x, y))
     // out = cell((x, y))
+    auto tuple = obj.cast<py::tuple>();
+
+    // cell((1,2)): support not mix (scalar, tensor)
+    if (tuple.size() > 0 && !py::isinstance<tensor::Tensor>(tuple[0])) {
+      return MakeValueNode(obj, obj_id);
+    }
+
     std::vector<AnfNodePtr> args;
     args.push_back(NewValueNode(prim::kPrimMakeTuple));
-    auto tuple = obj.cast<py::tuple>();
     auto tuple_size = static_cast<int>(tuple.size());
     for (int i = 0; i < tuple_size; i++) {
       args.push_back(GetInput(tuple[i], py::object()));
@@ -695,17 +718,26 @@ AnfNodePtr PynativeExecutor::GetInput(const py::object &obj, const py::object &o
     set_obj_node_map(curr_g_, GetId(obj), cnode);
     node = cnode;
   } else {
     // out = op(x, 1)
-    ValuePtr converted_ret = nullptr;
-    parse::ConvertData(obj, &converted_ret);
-    node = NewValueNode(converted_ret);
-    set_obj_node_map(curr_g_, obj_id, node);
+    node = MakeValueNode(obj, obj_id);
   }

-  MS_LOG(DEBUG) << "Now getinput " << py::str(obj) << " node " << node->ToString();
+  MS_LOG(DEBUG) << "Now getinput node " << node->ToString() << obj_id;
   return node;
 }

+// for output[0][1] need getitem multi
+void PynativeExecutor::SetTupleOutput(const py::object &obj, const AnfNodePtr &cnode, std::vector<int> idx) {
+  if (py::isinstance<py::tuple>(obj)) {
+    auto tuple = obj.cast<py::tuple>();
+    for (int i = 0; i < static_cast<int>(tuple.size()); i++) {
+      std::vector<int> tmp = idx;
+      tmp.push_back(i);
+      set_obj_node_map(curr_g_, GetId(tuple[i]), cnode, tmp);
+      SetTupleOutput(tuple[i], cnode, tmp);
+    }
+  }
+}
+
 void PynativeExecutor::Pushp() { graph_p_.push(curr_g_); }

 void PynativeExecutor::Popp() {
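`SetTupleOutput` is the producing half of the same mechanism: it recurses through a nested tuple output and records, for every element at every depth, the full index path from the root cnode that `GetObjNode` later replays. A sketch of the path recording over an invented `Tree` type (string ids stand in for `GetId(obj)`):

```cpp
// Sketch: depth-first recording of index paths for every element of a
// nested output, mirroring SetTupleOutput's recursion.
#include <iostream>
#include <map>
#include <string>
#include <vector>

struct Tree {
  std::string id;           // stands in for GetId(obj)
  std::vector<Tree> items;  // non-empty means "tuple"
};

std::map<std::string, std::vector<int>> obj_path;  // id -> index path

void SetTupleOutput(const Tree &obj, std::vector<int> idx) {
  for (int i = 0; i < static_cast<int>(obj.items.size()); i++) {
    std::vector<int> tmp = idx;
    tmp.push_back(i);                   // extend the path, as the patch does
    obj_path[obj.items[i].id] = tmp;    // record element -> path
    SetTupleOutput(obj.items[i], tmp);  // recurse into nested tuples
  }
}

int main() {
  Tree out{"out", {Tree{"a", {}}, Tree{"pair", {Tree{"b", {}}, Tree{"c", {}}}}}};
  SetTupleOutput(out, {});
  for (int idx : obj_path["c"]) std::cout << idx << " ";  // 1 1
  std::cout << "\n";
}
```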
@@ -737,6 +769,7 @@ void PynativeExecutor::EndGraph(const py::object &cell, const py::object &out, c
     for (int i = 0; i < tuple_size; i++) {
       args.push_back(GetInput(tuple[i], py::object()));
       set_obj_node_map(curr_g_, GetId(tuple[i]), cnode, i);
+      SetTupleOutput(tuple[i], cnode, std::vector<int>{i});
     }
     cnode->set_inputs(args);
     set_obj_node_map(curr_g_, out_id, cnode);
@@ -784,6 +817,7 @@ void PynativeExecutor::EndGraphByOutId(const std::string &out_id, const py::obje
     auto out_size = static_cast<int>(out_list.size());
     for (int i = 0; i < out_size; i++) {
       set_obj_node_map(curr_g_, GetId(out_list[i]), out_cnode, i);
+      SetTupleOutput(out_list[i], out_cnode, std::vector<int>{i});
     }
   }
   set_obj_node_map(curr_g_, GetId(out), out_cnode);
@@ -878,6 +912,7 @@ void PynativeExecutor::GradNet(const GradOperationPtr &grad, const py::object &c
   MS_EXCEPTION_IF_NULL(resource_->func_graph());
   auto g = GradGraph(resource_->func_graph(), grad, w_args, size);
   resource_->set_func_graph(g);
+  resource_->manager()->KeepRoots({g});

   // get the parameters items and add the value to args_spec
   abstract::AbstractBasePtrList args_spec = GetArgsSpec(args);
@@ -44,13 +44,14 @@ py::object RunOpInVM(const OpExecInfoPtr &op_exec_info, PynativeStatusCode *stat

 py::tuple RunOp(const py::args &args);

-py::tuple ConvertInputs(const PrimitivePyPtr &prim, const py::list &py_args, py::tuple *const out_args);
+py::tuple ConvertInputs(const PrimitivePyPtr &prim, const py::list &py_args, py::tuple *const out_args,
+                        py::list *out_args_list);

 void ClearPyNativeSession();

 struct GraphInfo {
   std::unordered_map<std::string, AnfNodePtr> param_map;
-  std::unordered_map<std::string, std::pair<AnfNodePtr, int>> obj_node_map;
+  std::unordered_map<std::string, std::pair<AnfNodePtr, std::vector<int>>> obj_node_map;
   AnfNodePtr output;
   std::vector<std::string> objects;
 };
@@ -81,9 +82,12 @@ class PynativeExecutor : public std::enable_shared_from_this<PynativeExecutor> {
   FuncGraphPtr curr_g() { return curr_g_; }
   void set_pyobj(FuncGraphPtr g, const std::string obj) { graph_info_map_[g].objects.push_back(obj); }
   void set_obj_node_map(FuncGraphPtr g, const std::string obj, AnfNodePtr node) {
-    graph_info_map_[g].obj_node_map[obj] = std::make_pair(node, -1);
+    graph_info_map_[g].obj_node_map[obj] = std::make_pair(node, std::vector<int>{-1});
   }
   void set_obj_node_map(FuncGraphPtr g, const std::string obj, AnfNodePtr node, int index) {
-    graph_info_map_[g].obj_node_map[obj] = std::make_pair(node, index);
+    graph_info_map_[g].obj_node_map[obj] = std::make_pair(node, std::vector<int>{index});
   }
+  void set_obj_node_map(FuncGraphPtr g, const std::string obj, AnfNodePtr node, std::vector<int> index) {
+    graph_info_map_[g].obj_node_map[obj] = std::make_pair(node, index);
+  }
   AnfNodePtr MakeCNode(const OpExecInfoPtr &op_exec_info, const py::args &args, const py::tuple &out);
@@ -93,6 +97,8 @@ class PynativeExecutor : public std::enable_shared_from_this<PynativeExecutor> {
   void Popp();
   FuncGraphPtr GradGraph(FuncGraphPtr g, const GradOperationPtr &grad_op, const std::vector<AnfNodePtr> &weights,
                          size_t arg_size);
+  void SetTupleOutput(const py::object &obj, const AnfNodePtr &cnode, std::vector<int> idx);
+  AnfNodePtr MakeValueNode(const py::object &obj, const std::string &obj_id);

   ~PynativeExecutor();
@@ -35,7 +35,7 @@ class TestPynativeExecute : public UT::Common {
   TestPynativeExecute() {}
 };

-inline ValuePtr PyAttrValue(const py::object& obj) {
+inline ValuePtr PyAttrValue(const py::object &obj) {
   ValuePtr converted_ret;
   bool converted = parse::ConvertData(obj, &converted_ret);
   if (!converted) {
@@ -63,7 +63,9 @@ OpExecInfoPtr ConstructOpExecInfo() {

   auto conv_obj = prim::GetPythonOps("conv2d_prim", "gtest_input.pynative");
   py::none py_none;
-  return GenerateOpExecInfo(py::make_tuple(conv_obj, op_name, op_inputs));
+  py::args args = py::make_tuple(conv_obj, op_name, op_inputs);
+  py::list args_input = args[PY_INPUTS];
+  return GenerateOpExecInfo(args, &args_input);
 }

 TEST_F(TestPynativeExecute, TestRunOpInVM) {
@@ -77,8 +79,8 @@ TEST_F(TestPynativeExecute, TestRunOp) {
   py::none py_none;
   auto op_exec_info_ptr = ConstructOpExecInfo();
-  py::tuple outputs = pynative::RunOp(py::make_tuple(op_exec_info_ptr->py_primitive, op_exec_info_ptr->op_name,
-                                                     op_exec_info_ptr->op_inputs));
+  py::tuple outputs = pynative::RunOp(
+    py::make_tuple(op_exec_info_ptr->py_primitive, op_exec_info_ptr->op_name, op_exec_info_ptr->op_inputs));
   if (outputs.size() == 0) {
     FAIL();
   } else {