modifyMemoryLeak

Pengyongrong 2020-08-19 06:03:01 -07:00
parent e5ed0105b7
commit e3435183c5
4 changed files with 62 additions and 3 deletions
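
Summary of the change: the two kernel sources drop the misplaced "// namespace mindspore::kernel" comment from the closing brace of Run() (concat additionally rejects calls with fewer than two inputs), and every early-return error path in the batchnorm and concat OpenCL unit tests now deletes the tensors, parameter structs, and kernels allocated before the failure instead of leaking them. The sketch below illustrates that cleanup-on-error pattern in isolation; Tensor, Param, Kernel, and RunCase are stand-in names for this example only, not MindSpore Lite classes.

// Illustrative sketch of the cleanup-on-error pattern the tests adopt.
#include <new>      // std::nothrow
#include <vector>

struct Tensor {};
struct Param {};
struct Kernel {};

void RunCase() {
  std::vector<Tensor *> inputs = {new (std::nothrow) Tensor, new (std::nothrow) Tensor};
  auto *param = new (std::nothrow) Param;
  if (param == nullptr) {
    // Free everything allocated so far before the early return.
    for (auto *tensor : inputs) {
      delete tensor;
    }
    return;
  }
  auto *kernel = new (std::nothrow) Kernel;
  if (kernel == nullptr) {
    for (auto *tensor : inputs) {
      delete tensor;
    }
    delete param;
    return;
  }
  // ... init the kernel, run the sub-graph, compare outputs ...
  // The normal exit releases every allocation as well.
  for (auto *tensor : inputs) {
    delete tensor;
  }
  delete param;
  delete kernel;
}

int main() {
  RunCase();
  return 0;
}

The tests keep this raw-pointer style rather than switching to smart pointers, so each failure branch has to repeat the full set of deletes accumulated up to that point.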


@@ -124,7 +124,7 @@ int BatchNormOpenCLKernel::Run() {
ocl_runtime->RunKernel(kernel_, global, local, nullptr);
return RET_OK;
} // namespace mindspore::kernel
}
kernel::LiteKernel *OpenCLBatchnormKernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs,


@@ -191,6 +191,9 @@ int ConcatOpenCLKernel::Run() {
ocl_runtime->SetKernelArg(kernel_, arg_cn++, out_tensors_[0]->Data());
ocl_runtime->SetKernelArg(kernel_, arg_cn++, input0_shape3_);
ocl_runtime->SetKernelArg(kernel_, arg_cn++, output_shape_);
} else if (in_tensors_.size() < 2) {
MS_LOG(ERROR) << " inputs must been >=2";
return RET_ERROR;
} else {
MS_LOG(ERROR) << "only support inputs<=3";
return RET_ERROR;
@@ -198,7 +201,7 @@ int ConcatOpenCLKernel::Run() {
ocl_runtime->RunKernel(kernel_, global, local, nullptr);
return RET_OK;
} // namespace mindspore::kernel
}
kernel::LiteKernel *OpenCLConcatKernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs,


@@ -80,6 +80,11 @@ TEST_F(TestBatchnormOpenCL, Batchnorminput_dim4) {
new (std::nothrow) lite::tensor::Tensor(data_type, output_shape, schema::Format_NHWC4, tensor_type);
if (output_tensor == nullptr) {
MS_LOG(INFO) << "init tensor failed";
delete tensor_data;
delete tensor_mean;
delete tensor_var;
delete tensor_scale;
delete tensor_offset;
return;
}
std::vector<lite::tensor::Tensor *> inputs = {tensor_data, tensor_scale, tensor_offset, tensor_mean, tensor_var};
@@ -89,6 +94,9 @@ TEST_F(TestBatchnormOpenCL, Batchnorminput_dim4) {
auto param = new (std::nothrow) BatchNormParameter();
if (param == nullptr) {
MS_LOG(INFO) << "new BatchNormParameter failed";
for (auto tensor : outputs) {
delete tensor;
}
return;
}
param->epsilon_ = pow(10, -5);
@@ -96,6 +104,10 @@ TEST_F(TestBatchnormOpenCL, Batchnorminput_dim4) {
new (std::nothrow) kernel::BatchNormOpenCLKernel(reinterpret_cast<OpParameter *>(param), inputs, outputs);
if (batchnorm_kernel == nullptr) {
MS_LOG(INFO) << "new kernel::BatchNorm_kernel failed";
for (auto tensor : outputs) {
delete tensor;
}
delete param;
return;
}
batchnorm_kernel->Init();
@@ -110,6 +122,11 @@ TEST_F(TestBatchnormOpenCL, Batchnorminput_dim4) {
auto *sub_graph = new (std::nothrow) kernel::SubGraphOpenCLKernel(inputs, outputs, kernels, kernels, kernels);
if (sub_graph == nullptr) {
MS_LOG(INFO) << "new kernel::SubGraphOpenCLKernel failed";
for (auto tensor : outputs) {
delete tensor;
}
delete param;
delete batchnorm_kernel;
return;
}
sub_graph->Init();
@@ -129,6 +146,15 @@ TEST_F(TestBatchnormOpenCL, Batchnorminput_dim4) {
auto *output_data_gpu = reinterpret_cast<float *>(output_tensor->Data());
CompareOutputData1(output_data_gpu, correct_data, output_tensor->ElementsNum(), 0.0001);
for (auto tensor : inputs) {
delete tensor;
}
for (auto tensor : outputs) {
delete tensor;
}
delete param;
delete batchnorm_kernel;
delete sub_graph;
lite::opencl::OpenCLRuntime::DeleteInstance();
}
} // namespace mindspore


@@ -110,12 +110,20 @@ TEST_F(TestConcatOpenCL, ConcatFp32_2input_dim4_axis3) {
auto tensor_type = schema::NodeType_ValueNode;
std::vector<lite::tensor::Tensor *> inputs;
for (auto &shape : input_shapes) {
inputs.push_back(new lite::tensor::Tensor(data_type, shape, schema::Format_NHWC4, tensor_type));
auto input_temp = new (std::nothrow) lite::tensor::Tensor(data_type, shape, schema::Format_NHWC4, tensor_type);
inputs.push_back(input_temp);
if (input_temp == nullptr) {
MS_LOG(INFO) << "new input_tensor failed";
return;
}
}
auto *output_tensor =
new (std::nothrow) lite::tensor::Tensor(data_type, output_shape, schema::Format_NHWC4, tensor_type);
if (output_tensor == nullptr) {
MS_LOG(INFO) << "new output_tensor failed";
for (auto tensor : inputs) {
delete tensor;
}
return;
}
std::vector<lite::tensor::Tensor *> outputs{output_tensor};
@@ -125,6 +133,12 @@ TEST_F(TestConcatOpenCL, ConcatFp32_2input_dim4_axis3) {
auto param = new (std::nothrow) ConcatParameter();
if (param == nullptr) {
MS_LOG(INFO) << "new ConcatParameter failed";
for (auto tensor : inputs) {
delete tensor;
}
for (auto tensor : outputs) {
delete tensor;
}
return;
}
param->axis_ = 3;
@@ -132,6 +146,13 @@ TEST_F(TestConcatOpenCL, ConcatFp32_2input_dim4_axis3) {
new (std::nothrow) kernel::ConcatOpenCLKernel(reinterpret_cast<OpParameter *>(param), inputs, outputs);
if (concat_kernel == nullptr) {
MS_LOG(INFO) << "new kernel::ConcatOpenCLKernel failed";
for (auto tensor : inputs) {
delete tensor;
}
for (auto tensor : outputs) {
delete tensor;
}
delete param;
return;
}
concat_kernel->Init();
@@ -145,6 +166,14 @@ TEST_F(TestConcatOpenCL, ConcatFp32_2input_dim4_axis3) {
auto *sub_graph = new (std::nothrow) kernel::SubGraphOpenCLKernel(inputs, outputs, kernels, kernels, kernels);
if (sub_graph == nullptr) {
MS_LOG(INFO) << "new kernel::SubGraphOpenCLKernel failed";
for (auto tensor : inputs) {
delete tensor;
}
for (auto tensor : outputs) {
delete tensor;
}
delete param;
delete concat_kernel;
return;
}
sub_graph->Init();
@@ -181,6 +210,7 @@ TEST_F(TestConcatOpenCL, ConcatFp32_2input_dim4_axis3) {
for (auto tensor : outputs) {
delete tensor;
}
delete param;
delete concat_kernel;
delete sub_graph;
lite::opencl::OpenCLRuntime::DeleteInstance();