forked from mindspore-Ecosystem/mindspore
!619 Clean code style check warning in pynative codes
Merge pull request !619 from JoyLvliang/pynative-clean-reviewbot-warning
Commit 82cb39274b

@@ -31,7 +31,6 @@
 namespace mindspore {
 namespace pynative {

 namespace py = pybind11;

 enum PynativeStatusCode {

@@ -61,7 +60,6 @@ using OpExecInfoPtr = std::shared_ptr<OpExecInfo>;
 OpExecInfoPtr GenerateOpExecInfo(const py::args &args);

 const std::set<std::string> ignore_infer_prim = {"partial", "make_ref"};

 }  // namespace pynative
 }  // namespace mindspore

@@ -33,7 +33,6 @@ const char SINGLE_OP_GRAPH[] = "single_op_graph";
 namespace mindspore {
 namespace pynative {

 using MeTensor = mindspore::tensor::Tensor;
 using MeTensorPtr = mindspore::tensor::TensorPtr;
 using GeOperator = ge::Operator;

@@ -307,5 +306,4 @@ py::object RunOpInGE(const OpExecInfoPtr &op_exec_info, PynativeStatusCode *stat
   return std::move(result);
 }
 }  // namespace pynative
-
 }  // namespace mindspore

@@ -226,8 +226,7 @@ void ConvertValueTupleToTensor(const py::object &input_object, std::vector<tenso
   }
   auto value_tuple = input_value->cast<ValueTuplePtr>();
   MS_EXCEPTION_IF_NULL(value_tuple);
-  tensor::TensorPtr tensor_ptr = nullptr;
-  tensor_ptr = opt::CreateTupleTensor(value_tuple);
+  tensor::TensorPtr tensor_ptr = opt::CreateTupleTensor(value_tuple);
   MS_EXCEPTION_IF_NULL(tensor_ptr);
   input_tensor->push_back(tensor_ptr);
 }

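Note on the hunk above: the cleanup collapses a declare-then-assign pair into a single initialization. Below is a minimal, self-contained sketch of that pattern; Tensor, TensorPtr and CreateTupleTensor are hypothetical stand-ins here, not the MindSpore definitions.

#include <memory>
#include <vector>

struct Tensor {
  explicit Tensor(int v) : value(v) {}
  int value;
};
using TensorPtr = std::shared_ptr<Tensor>;

// Stand-in for a factory such as opt::CreateTupleTensor.
TensorPtr CreateTupleTensor(int v) { return std::make_shared<Tensor>(v); }

int main() {
  std::vector<TensorPtr> input_tensors;

  // Before: declare as nullptr, then assign on the next line.
  //   TensorPtr tensor_ptr = nullptr;
  //   tensor_ptr = CreateTupleTensor(42);

  // After: declare and initialize in one statement, so the pointer is never
  // observable in a "declared but not yet assigned" null state.
  TensorPtr tensor_ptr = CreateTupleTensor(42);
  if (tensor_ptr == nullptr) {  // stand-in for MS_EXCEPTION_IF_NULL
    return 1;
  }
  input_tensors.push_back(tensor_ptr);
  return 0;
}
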
@@ -583,12 +582,9 @@ void SessionBasic::LoadInputData(const std::shared_ptr<KernelGraph> &kernel_grap
   MS_EXCEPTION_IF_NULL(kernel_graph);
   auto input_nodes = kernel_graph->inputs();
   if ((inputs.size() + input_ctrl_size) - 1 != input_nodes.size()) {
-    MS_LOG(EXCEPTION) << "tensor input size:" << inputs.size()
-                      << " is not equal graph inputs size:" << input_nodes.size()
+    MS_LOG(EXCEPTION) << "tensor input:" << inputs.size() << " is not equal graph inputs:" << input_nodes.size()
                       << ", input_ctrl_size:" << input_ctrl_size;
   }
-  auto ms_context = MsContext::GetInstance();
-  MS_EXCEPTION_IF_NULL(ms_context);
   for (size_t i = 0; i < inputs.size(); ++i) {
     auto tensor = inputs[i];
     MS_EXCEPTION_IF_NULL(tensor);

@@ -598,7 +594,8 @@
     auto pk_node = input_node->cast<ParameterPtr>();
     auto device_address = AnfAlgo::GetMutableOutputAddr(pk_node, 0);
     bool need_sync = false;
-    if (ms_context->enable_pynative_infer()) {
+    MS_EXCEPTION_IF_NULL(MsContext::GetInstance());
+    if (MsContext::GetInstance()->enable_pynative_infer()) {
       if (tensor->device_address().get() == nullptr || tensor->device_address() != device_address) {
         need_sync = true;
       }

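Note on the two hunks above: the local ms_context alias of the context singleton is dropped in favor of null-checking MsContext::GetInstance() and calling through it directly, and the MS_LOG stream is folded onto one line. Below is a minimal sketch of the singleton-guard part of that pattern; Context, GetInstance and enable_pynative_infer are hypothetical stand-ins, not the MsContext API.

#include <iostream>
#include <memory>

class Context {
 public:
  // Stand-in singleton accessor, analogous in shape to MsContext::GetInstance().
  static const std::shared_ptr<Context> &GetInstance() {
    static const auto instance = std::make_shared<Context>();
    return instance;
  }
  bool enable_pynative_infer() const { return true; }
};

int main() {
  // Before: cache the singleton in a local and dereference the local.
  //   auto ctx = Context::GetInstance();
  //   if (ctx->enable_pynative_infer()) { ... }

  // After: guard the accessor once, then call through it directly.
  if (Context::GetInstance() == nullptr) {  // stand-in for MS_EXCEPTION_IF_NULL
    std::cerr << "context instance is null" << std::endl;
    return 1;
  }
  if (Context::GetInstance()->enable_pynative_infer()) {
    std::cout << "pynative infer enabled, input tensor may need a device sync" << std::endl;
  }
  return 0;
}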