!42474 Fix PyNative coredump
Merge pull request !42474 from caifubi/master-pynative-coredump
commit ca3dfc8d15
@@ -244,22 +244,13 @@ bool NeedDiscardTensorProperties(const std::string &op_device_target,
   return true;
 }
 
-TensorPtr TensorDataDeepCopy(const TensorPtr &tensor) {
-  MS_EXCEPTION_IF_NULL(tensor);
-  auto data = tensor->data_ptr();
-  if (data == nullptr) {
-    return tensor;
-  }
-  return std::make_shared<Tensor>(*tensor, tensor->data_type());
-}
-
 ParameterPtr ConstructRunOpParameter(const std::shared_ptr<KernelGraph> &graph, const tensor::TensorPtr &input_tensor,
                                      const BackendOpRunInfoPtr &op_run_info, int64_t tensor_mask) {
   MS_EXCEPTION_IF_NULL(graph);
   auto param = graph->NewParameter();
   MS_EXCEPTION_IF_NULL(param);
   if (tensor_mask == kParameterWeightTensorMask) {
-    param->set_default_param(TensorDataDeepCopy(input_tensor));
+    param->set_default_param(input_tensor);
   }
 
   // set the kernel info of parameter
@@ -784,18 +784,6 @@ void MindRTBackend::DispatchOpTask(bool single_op_cache_hit, VectorRef *outputs,
   MS_EXCEPTION_IF_NULL(graph);
   const auto &output_nodes = op_compiler_info->graph_output_nodes_;
 
-  auto input_tensors = op_run_info->base_op_run_info.input_tensor;
-  for (auto &input_tensor : input_tensors) {
-    MS_EXCEPTION_IF_NULL(input_tensor);
-    auto data = input_tensor->data_ptr();
-    if (data != nullptr && data->is_from_numpy()) {
-      // Convert python tensor to cpp tensor
-      py::gil_scoped_acquire gil;
-      input_tensor->AssignValue(tensor::Tensor(*input_tensor, input_tensor->data_type()));
-      data = nullptr;
-    }
-  }
-
   runtime::UpdateDeviceAddress(graph, GetTensorWithoutValueMask(op_run_info), op_compiler_info->device_context_);
   // Create output tensor
   UpdateOutput(output_nodes, outputs);
@@ -108,6 +108,8 @@ OpCompilerInfoPtr OpCompiler::Compile(const session::BackendOpRunInfoPtr &op_run
 
   auto op_compiler_info =
     std::make_shared<OpCompilerInfo>(graph_info, graph->graph_id(), graph, outputs_with_index, device_context, false);
+
+  py::gil_scoped_acquire acquire_gil;
   op_compiler_infos_[graph_info] = op_compiler_info;
   return op_compiler_info;
 }
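All three hunks deal with the same constraint: a tensor whose buffer comes from numpy still holds a reference to a Python object, so copying, reassigning, or destroying it from a C++ worker thread must happen under the GIL. Below is a minimal, self-contained pybind11 sketch of that pattern, not MindSpore code; the NumpyBackedHolder type and the worker thread are illustrative assumptions, while py::gil_scoped_acquire and py::gil_scoped_release are the real pybind11 APIs.

// Minimal sketch (illustrative, not MindSpore code): releasing a
// numpy-backed buffer from a worker thread requires holding the GIL.
#include <pybind11/embed.h>
#include <pybind11/numpy.h>
#include <memory>
#include <thread>

namespace py = pybind11;

// Illustrative holder: keeps a Python reference alive, the way a
// "from_numpy" tensor keeps its numpy buffer alive.
struct NumpyBackedHolder {
  py::array data;  // destruction DECREFs a Python object
};

int main() {
  py::scoped_interpreter interp;  // embedded interpreter; main thread holds the GIL

  auto holder = std::make_shared<NumpyBackedHolder>();
  holder->data = py::array_t<float>(16);  // buffer allocated by numpy (requires numpy installed)

  // Move the last reference into a worker, mimicking an async op dispatch.
  std::thread worker([holder = std::move(holder)]() mutable {
    // Dropping the last reference touches the interpreter, so take the GIL first.
    py::gil_scoped_acquire gil;
    holder.reset();
  });

  {
    // Let the worker acquire the GIL while the main thread waits.
    py::gil_scoped_release release;
    worker.join();
  }
  return 0;
}

Releasing the GIL on the waiting thread while the worker acquires it is what keeps the DECREF of the Python buffer safe without deadlocking the two threads.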