Fix memory leak in OpenCL Conv2D kernels

This commit is contained in:
wandongdong 2021-01-05 02:50:41 -08:00
parent a531628dd5
commit f0a0268ece
3 changed files with 7 additions and 7 deletions

View File

@ -264,11 +264,11 @@ int Conv2DOpenCLKernel::InitFilter() {
ConvertConvWeight4DTo7D<float, float>(weight_tensor->data_c(), packed_weight_, CO_, KH_, KW_, CI_,
block_size_.C);
}
FreeDequantedWeight();
}
}
allocator->UnmapBuffer(packed_weight_);
FreeDequantedWeight();
return RET_OK;
}

View File

@ -125,6 +125,10 @@ void Conv2dTransposeOpenCLKernel::SetConstArgs() {
}
int Conv2dTransposeOpenCLKernel::InitWeights() {
auto ret = DequantWeight();
if (ret != RET_OK) {
return ret;
}
ConvParameter *param = reinterpret_cast<ConvParameter *>(op_parameter_);
int ci = in_tensors_[0]->shape()[3];
int co = out_tensors_[0]->shape()[3];
@ -180,6 +184,7 @@ int Conv2dTransposeOpenCLKernel::InitWeights() {
}
}
allocator->UnmapBuffer(padWeight_);
FreeDequantedWeight();
// init bias_(image2d mem)
size_t im_dst_x, im_dst_y;

View File

@ -93,10 +93,6 @@ int DepthwiseConv2dOpenCLKernel::Prepare() {
}
int DepthwiseConv2dOpenCLKernel::InitWeights() {
if (!in_tensors_.at(1)->IsConst()) {
MS_LOG(ERROR) << "DepthwiseConv2d don't support non-constant filter yet.";
return RET_ERROR;
}
auto ret = DequantWeight();
if (ret != RET_OK) {
return ret;
@ -124,7 +120,6 @@ int DepthwiseConv2dOpenCLKernel::InitWeights() {
} else { // int8 or int16
std::function<int16_t(int16_t)> to_dtype = [](int16_t x) -> int16_t { return x; };
PackNCHWToNC4HW4<int16_t, int16_t>(origin_weight, packed_weight_, 1, plane, out_info.C, to_dtype);
FreeDequantedWeight();
}
} else {
packed_weight_ = allocator->Malloc(pack_weight_size * sizeof(float));
@ -138,10 +133,10 @@ int DepthwiseConv2dOpenCLKernel::InitWeights() {
} else { // int8 or int16
std::function<float(float)> to_dtype = [](float x) -> float { return x; };
PackNCHWToNC4HW4<float, float>(origin_weight, packed_weight_, 1, plane, out_info.C, to_dtype);
FreeDequantedWeight();
}
}
allocator->UnmapBuffer(packed_weight_);
FreeDequantedWeight();
size_t dtype_size = sizeof(float);
if (is_fp16 && in_tensors_.at(kBiasIndex)->data_type() == kNumberTypeFloat16) {