!18575 [LITE] fix memory leak
Merge pull request !18575 from yefeng/125-fix_memory_leak
commit 205e368b73
@@ -40,7 +40,9 @@ void FreeTensors(std::vector<Tensor *> *input_tensors, std::vector<Tensor *> *ou
     return;
   }
   for (auto &tensor : *input_tensors) {
-    tensor->set_data(nullptr);
+    if (tensor->data_type() != kObjectTypeString && tensor->data_type() != kObjectTypeTensorType) {
+      tensor->set_data(nullptr);
+    }
     delete tensor;
     tensor = nullptr;
   }
@@ -48,7 +50,9 @@ void FreeTensors(std::vector<Tensor *> *input_tensors, std::vector<Tensor *> *ou
     return;
   }
   for (auto &tensor : *output_tensors) {
-    tensor->set_data(nullptr);
+    if (tensor->data_type() != kObjectTypeString && tensor->data_type() != kObjectTypeTensorType) {
+      tensor->set_data(nullptr);
+    }
     delete tensor;
     tensor = nullptr;
   }
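
Why the added guard matters: set_data(nullptr) detaches the buffer so the Tensor destructor will not free it, and for string and tensor-list tensors the tensor presumably owns type-specific packed storage that only its destructor can release, so detaching it leaked that storage. A minimal, self-contained sketch of that ownership rule, using a hypothetical FakeTensor (not the lite API):

// Minimal sketch of the ownership rule (hypothetical FakeTensor, not the lite API).
#include <cstdlib>
#include <initializer_list>

enum TypeId { kNumberTypeFloat32, kObjectTypeString, kObjectTypeTensorType };

struct FakeTensor {
  TypeId type_;
  void *data_ = nullptr;
  explicit FakeTensor(TypeId t, void *d) : type_(t), data_(d) {}
  void set_data(void *d) { data_ = d; }  // detach: destructor won't see the old buffer
  ~FakeTensor() { free(data_); }         // frees whatever it still points at
};

int main() {
  void *external = malloc(16);  // owned by the caller, like flatbuffer-backed data
  auto *num = new FakeTensor(kNumberTypeFloat32, external);
  auto *str = new FakeTensor(kObjectTypeString, malloc(16));  // owns its packed storage

  for (FakeTensor *tensor : {num, str}) {
    // Detach only when the buffer belongs to someone else; string/tensor-list
    // storage must be freed by the destructor or it leaks.
    if (tensor->type_ != kObjectTypeString && tensor->type_ != kObjectTypeTensorType) {
      tensor->set_data(nullptr);
    }
    delete tensor;
  }
  free(external);  // the real owner releases the shared buffer
  return 0;
}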
@@ -130,12 +134,6 @@ void ConvertOtherTensor(MetaGraphT *graph, uint32_t index, bool *convert_succ, s
     lite_tensors->emplace_back(lite_tensor.release());
     return;
   }
-  auto ret = lite_tensor->MallocData();
-  if (ret != RET_OK) {
-    MS_LOG(ERROR) << "Malloc tensor data failed";
-    *convert_succ = false;
-    return;
-  }
   if (lite_tensor->root_tensor() != nullptr) {
     lite_tensor->root_tensor()->set_data(tensorT->data.data());
   } else {
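
The deleted MallocData() call allocated a buffer that the very next set_data(tensorT->data.data()) repointed away from, orphaning the fresh allocation on every call; dropping the allocation removes the leak. A sketch of the removed pattern, with a hypothetical Holder type standing in for the lite tensor:

// Sketch of the removed leak pattern (hypothetical Holder, not the lite API).
#include <cstdlib>

struct Holder {
  void *data_ = nullptr;
  int MallocData() { data_ = malloc(64); return data_ == nullptr ? -1 : 0; }
  void set_data(void *d) { data_ = d; }  // repoints without freeing the old buffer
};

int main() {
  static char flatbuffer_bytes[64] = {0};  // stand-in for tensorT->data
  Holder h;
  // Before the fix: h.MallocData() ran here, then set_data() immediately
  // repointed data_ at the flatbuffer, orphaning the fresh allocation.
  h.set_data(flatbuffer_bytes);
  return 0;
}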
@@ -209,6 +209,7 @@ void ConvTransformFusion::GenNewConvTensor(const FuncGraphPtr &func_graph, const
   CalNewBiasTensor(bias_data, kernel_num, bias_flag, trans_scale, trans_bias);
   if (!bias_flag) {
     auto bias_node = AddNewBiasNode(bias_data, func_graph, kernel_num, weight_tensor);
+    delete[] bias_data;
     bias_node->set_name(conv_node->fullname_with_scope() + "_bias");
     conv_node->add_input(bias_node);
   }
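
The added delete[] assumes AddNewBiasNode deep-copies bias_data into the new parameter node, leaving the scratch buffer unreferenced; without it, each fused convolution leaked one bias-sized allocation. A sketch of that copy-then-free contract, where MakeBiasParam is a hypothetical stand-in for AddNewBiasNode:

// Sketch of the copy-then-free contract (MakeBiasParam is a hypothetical
// stand-in for AddNewBiasNode, assumed to deep-copy the buffer).
#include <cstddef>
#include <cstring>
#include <new>
#include <vector>

std::vector<float> MakeBiasParam(const float *src, size_t n) {
  std::vector<float> param(n);
  std::memcpy(param.data(), src, n * sizeof(float));  // graph keeps its own copy
  return param;
}

int main() {
  const size_t kernel_num = 8;
  auto *bias_data = new (std::nothrow) float[kernel_num]();
  if (bias_data == nullptr) {
    return 1;
  }
  auto bias_param = MakeBiasParam(bias_data, kernel_num);
  delete[] bias_data;  // the temporary is no longer referenced; this mirrors
                       // the line added after AddNewBiasNode
  return static_cast<int>(bias_param.size() != kernel_num);
}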
@@ -452,7 +452,7 @@ STATUS NodeInferShape::ConvertToLiteTensor(const std::vector<lite::DataInfo> &da
         return RET_ERROR;
       }
     } else {
-      auto tensor_data = new (std::nothrow) char[tensor_size];
+      auto tensor_data = reinterpret_cast<char *>(malloc(tensor_size));
       if (tensor_data == nullptr) {
         MS_LOG(ERROR) << "tensor_data is nullptr";
         delete tensor;
@@ -460,7 +460,7 @@ STATUS NodeInferShape::ConvertToLiteTensor(const std::vector<lite::DataInfo> &da
       }
       if (memcpy_s(tensor_data, tensor_size, data_info.data_.data(), tensor_size) != EOK) {
        delete tensor;
-        delete[](tensor_data);
+        free(tensor_data);
        MS_LOG(ERROR) << "memcpy error: ";
        return lite::RET_ERROR;
       }
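
These two hunks switch the buffer from new[]/delete[] to malloc/free, presumably so the allocation matches the deallocator the owning tensor uses internally: pairing new[] with free(), or malloc() with delete[], is undefined behavior. A self-contained sketch, with a hypothetical Owner standing in for a tensor that releases its data with free():

// Sketch: the allocation must match the deallocator the owner calls
// (hypothetical Owner, assumed here to release its buffer with free()).
#include <cstddef>
#include <cstdlib>
#include <cstring>

struct Owner {
  void *data_ = nullptr;
  void set_data(void *d) { data_ = d; }
  ~Owner() { free(data_); }  // deallocates with free(): the buffer must be malloc'd
};

int main() {
  const size_t tensor_size = 128;
  auto *tensor_data = static_cast<char *>(malloc(tensor_size));  // not new[]
  if (tensor_data == nullptr) {
    return 1;
  }
  memset(tensor_data, 0, tensor_size);
  Owner tensor;
  tensor.set_data(tensor_data);  // ownership transfers; ~Owner calls free()
  return 0;
}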