Fix memory leaks: delete kernels, tensors, and parameters on error paths in OpenCL kernel creators and tests

This commit is contained in:
liuzhongkai 2020-08-19 04:13:45 -07:00
parent b4b76b61e4
commit 2cedb2caee
4 changed files with 81 additions and 16 deletions

View File

@ -201,9 +201,9 @@ kernel::LiteKernel *OpenCLDepthwiseConv2dKernelCreator(const std::vector<lite::t
return nullptr;
}
auto ret = kernel->Init();
if (0 != ret) {
MS_LOG(ERROR) << "Init DepthwiseConv2dOpenCLKernel failed!";
if (ret != RET_OK) {
delete kernel;
MS_LOG(ERROR) << "Init DepthwiseConv2dOpenCLKernel failed!";
return nullptr;
}
return kernel;

View File

@ -175,6 +175,8 @@ kernel::LiteKernel *OpenCLSoftMaxKernelCreator(const std::vector<lite::tensor::T
}
if (inputs[0]->shape()[0] > 1) {
MS_LOG(ERROR) << "Init `Softmax` kernel failed: Unsupported multi-batch.";
delete kernel;
return nullptr;
}
auto ret = kernel->Init();
if (0 != ret) {

View File

@ -88,11 +88,14 @@ kernel::ActivationOpenClKernel *create_kernel(lite::opencl::OpenCLAllocator *all
auto *kernel =
new (std::nothrow) kernel::ActivationOpenClKernel(reinterpret_cast<OpParameter *>(param), inputs, outputs);
if (kernel == nullptr) {
delete param;
MS_LOG(ERROR) << "Kernel:" << test_name << " create fail.";
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
delete param;
delete kernel;
MS_LOG(ERROR) << "Init " << test_name << " fail.";
return nullptr;
}
@ -110,18 +113,22 @@ int RunSubGraphOpenCLKernel(const std::vector<lite::tensor::Tensor *> &inputs,
std::vector<kernel::LiteKernel *> kernels{kernel};
auto *sub_graph = new (std::nothrow) kernel::SubGraphOpenCLKernel(inputs, outputs, kernels, kernels, kernels);
if (sub_graph == nullptr) {
delete kernel;
MS_LOG(ERROR) << "Kernel SubGraphOpenCLKernel create fail.";
return RET_ERROR;
}
MS_LOG(INFO) << "Initialize sub_graph.";
auto ret = sub_graph->Init();
if (ret != RET_OK) {
delete kernel;
delete sub_graph;
MS_LOG(ERROR) << "Init sub_graph error.";
return RET_ERROR;
}
MS_LOG(INFO) << "Run SubGraphOpenCLKernel.";
ret = sub_graph->Run();
if (ret != RET_OK) {
delete sub_graph;
MS_LOG(ERROR) << "Run SubGraphOpenCLKernel error.";
return RET_ERROR;
}
@ -130,7 +137,7 @@ int RunSubGraphOpenCLKernel(const std::vector<lite::tensor::Tensor *> &inputs,
}
TEST_F(TestActivationOpenCL, ActivationFp32_dim4) {
MS_LOG(INFO) << "Begin test:";
MS_LOG(INFO) << "Begin test!";
auto ocl_runtime = lite::opencl::OpenCLRuntime::GetInstance();
ocl_runtime->Init();
auto allocator = ocl_runtime->GetAllocator();
@ -140,11 +147,21 @@ TEST_F(TestActivationOpenCL, ActivationFp32_dim4) {
auto data_type = kNumberTypeFloat32;
auto tensor_type = schema::NodeType_ValueNode;
auto *input_tensor = new lite::tensor::Tensor(data_type, input_shape, schema::Format_NHWC4, tensor_type);
auto *output_tensor = new lite::tensor::Tensor(data_type, input_shape, schema::Format_NHWC4, tensor_type);
auto *input_tensor =
new (std::nothrow) lite::tensor::Tensor(data_type, input_shape, schema::Format_NHWC4, tensor_type);
if (input_tensor == nullptr) {
MS_LOG(ERROR) << "new input tensor error!";
return;
}
auto *output_tensor =
new (std::nothrow) lite::tensor::Tensor(data_type, input_shape, schema::Format_NHWC4, tensor_type);
if (output_tensor == nullptr) {
MS_LOG(ERROR) << "new output tensor error!";
delete input_tensor;
return;
}
std::vector<lite::tensor::Tensor *> inputs{input_tensor};
std::vector<lite::tensor::Tensor *> outputs{output_tensor};
// framework TODO: allocate memory by hand
inputs[0]->MallocData(allocator);
std::map<std::string, int> Test_Activation_Type;
@ -175,13 +192,11 @@ TEST_F(TestActivationOpenCL, ActivationFp32_dim4) {
MS_LOG(INFO) << "==================output data================";
printf_tensor(outputs[0]);
CompareRes(output_tensor, Test_Res_File[it->first]);
delete kernel;
it++;
}
delete input_tensor;
delete output_tensor;
lite::opencl::OpenCLRuntime::DeleteInstance();
return;
}
} // namespace mindspore

View File

@ -93,15 +93,29 @@ TEST_F(TestCaffePReluOpenCL, CaffePReluFp32_dim4) {
std::vector<int> output_shape = {1, 4, 3, 9};
auto data_type = kNumberTypeFloat32;
auto tensor_type = schema::NodeType_ValueNode;
auto *input_tensor = new lite::tensor::Tensor(data_type, input_shape, schema::Format_NHWC, tensor_type);
auto *input_tensor =
new (std::nothrow) lite::tensor::Tensor(data_type, input_shape, schema::Format_NHWC, tensor_type);
if (input_tensor == nullptr) {
MS_LOG(ERROR) << "new input tensor error";
return;
}
auto *output_tensor = new lite::tensor::Tensor(data_type, output_shape, schema::Format_NHWC4, tensor_type);
auto *weight_tensor =
new lite::tensor::Tensor(data_type, std::vector<int>{input_shape[3]}, schema::Format_NHWC, tensor_type);
if (output_tensor == nullptr) {
MS_LOG(ERROR) << "new output_tensor error";
delete input_tensor;
return;
}
auto *weight_tensor = new (std::nothrow)
lite::tensor::Tensor(data_type, std::vector<int>{input_shape[3]}, schema::Format_NHWC, tensor_type);
if (weight_tensor == nullptr) {
MS_LOG(ERROR) << "new weight_tensor error";
delete input_tensor;
delete output_tensor;
return;
}
std::vector<lite::tensor::Tensor *> inputs{input_tensor, weight_tensor};
std::vector<lite::tensor::Tensor *> outputs{output_tensor};
std::cout << input_tensor->ElementsNum() << std::endl;
std::cout << input_tensor->ElementsC4Num() << std::endl;
// framework TODO: allocate memory by hand
inputs[0]->MallocData(allocator);
inputs[1]->MallocData(allocator);
std::cout << input_tensor->Size() << std::endl;
@ -113,17 +127,33 @@ TEST_F(TestCaffePReluOpenCL, CaffePReluFp32_dim4) {
MS_LOG(INFO) << "CaffePRelu==================weight data================";
printf_tensor_caffeprelu(inputs[1], weight_tensor->ElementsNum());
auto param = new CaffePReluParameter();
auto param = new (std::nothrow) CaffePReluParameter();
if (param == nullptr) {
MS_LOG(ERROR) << "new param error!";
delete input_tensor;
delete output_tensor;
delete weight_tensor;
return;
}
param->channel_num_ = input_shape[3];
auto *caffeprelu_kernel =
new (std::nothrow) kernel::CaffePReluOpenCLKernel(reinterpret_cast<OpParameter *>(param), inputs, outputs);
if (caffeprelu_kernel == nullptr) {
delete param;
delete input_tensor;
delete output_tensor;
delete weight_tensor;
MS_LOG(ERROR) << "Create caffe prelu kernel error.";
return;
}
auto ret = caffeprelu_kernel->Init();
if (ret != RET_OK) {
delete param;
delete input_tensor;
delete output_tensor;
delete weight_tensor;
delete caffeprelu_kernel;
MS_LOG(ERROR) << "caffeprelu_kernel init error.";
return;
}
@ -132,24 +162,42 @@ TEST_F(TestCaffePReluOpenCL, CaffePReluFp32_dim4) {
std::vector<kernel::LiteKernel *> kernels{caffeprelu_kernel};
auto *sub_graph = new (std::nothrow) kernel::SubGraphOpenCLKernel({input_tensor}, outputs, kernels, kernels, kernels);
if (sub_graph == nullptr) {
delete param;
delete input_tensor;
delete output_tensor;
delete weight_tensor;
delete caffeprelu_kernel;
MS_LOG(ERROR) << "Create sub_graph kernel error.";
return;
}
ret = sub_graph->Init();
if (ret != RET_OK) {
delete param;
delete input_tensor;
delete output_tensor;
delete weight_tensor;
delete caffeprelu_kernel;
delete sub_graph;
MS_LOG(ERROR) << "sub_graph init error.";
return;
}
MS_LOG(INFO) << "Sub graph begin running!";
ret = sub_graph->Run();
if (ret != RET_OK) {
delete input_tensor;
delete output_tensor;
delete weight_tensor;
delete sub_graph;
MS_LOG(ERROR) << "sub_graph run error.";
return;
}
MS_LOG(INFO) << "CaffePRelu==================output data================";
printf_tensor_caffeprelu(outputs[0], output_tensor->ElementsC4Num());
std::cout << "output date size:" << output_tensor->Size() << std::endl;
CompareOutCaffePRelu(output_tensor, standard_answer_file);
delete input_tensor;
delete output_tensor;
delete weight_tensor;
delete sub_graph;
}
} // namespace mindspore