fix memory leaks

xuanyue 2020-09-17 17:29:53 +08:00
parent ab75268187
commit 2ad3eb5024
7 changed files with 93 additions and 20 deletions

@@ -62,6 +62,7 @@ int ArgMax::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outp
MS_ASSERT(output != nullptr);
if (inputs_.size() != kSingleNum || outputs_.size() != kSingleNum) {
MS_LOG(ERROR) << "tensor number is error.";
return RET_ERROR;
}
output->SetFormat(input->GetFormat());

@@ -217,6 +217,7 @@ int DeConv2D::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::
output_w = (input_w - 1) * stride_w + kernel_w;
} else {
MS_LOG(ERROR) << "unsupported pad mode for deconv";
return RET_ERROR;
}
std::vector<int> out_shape = {output_n, output_h, output_w, output_c};
output->set_shape(out_shape);
@@ -230,6 +231,7 @@ int DeConv2D::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::
} else if (pad_mode == schema::PadMode_CAFFE) {
} else {
MS_LOG(ERROR) << "unsupported pad mode for deconv";
return RET_ERROR;
}
return 0;

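The ArgMax and DeConv2D hunks above are control-flow fixes that travel with the leak fixes: after logging the error, InferShape now returns RET_ERROR instead of falling through and shaping the output from inputs that failed validation. A minimal standalone sketch of the pattern (names and types here are illustrative, not the MindSpore Lite API):

#include <iostream>
#include <vector>

// Illustrative guard: validate first and bail out early, so the code below
// never runs against inputs that were already reported as bad.
int InferShapeSketch(const std::vector<int> &in, std::vector<int> *out) {
  if (out == nullptr || in.empty()) {
    std::cerr << "tensor number is error." << std::endl;
    return -1;  // the early return the commit adds; previously execution fell through
  }
  *out = in;  // only reached when validation passed
  return 0;
}
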
@@ -71,13 +71,7 @@ int ArgMinMaxBaseCPUKernel::Run() {
auto input_data = in_tensors_.at(0)->MutableData();
auto output_data = out_tensors_.at(0)->MutableData();
auto in_tensor = in_tensors_.at(0)->shape();
auto shape = reinterpret_cast<int *>(malloc(in_tensor.size() * sizeof(int)));
if (shape == nullptr) {
MS_LOG(ERROR) << "malloc shape failed.";
return RET_ERROR;
}
memcpy(shape, in_tensor.data(), in_tensor.size() * sizeof(int));
auto shape = in_tensors_.at(0)->shape();
auto param = reinterpret_cast<ArgMinMaxParameter *>(op_parameter_);
MS_ASSERT(context_->allocator != nullptr);
@@ -89,7 +83,7 @@ int ArgMinMaxBaseCPUKernel::Run() {
return RET_ERROR;
}
}
ArgMinMax(input_data, output_data, reinterpret_cast<const int *>(shape), param);
ArgMinMax(input_data, output_data, reinterpret_cast<const int *>(shape.data()), param);
context_->allocator->Free(param->arg_elements_);
param->arg_elements_ = nullptr;
return RET_OK;

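In ArgMinMaxBaseCPUKernel::Run the leak came from a heap copy of the input shape that was not released before Run returned; the rewrite reads the dimensions straight out of the tensor's std::vector and passes shape.data() to the kernel. A self-contained sketch of the before/after, with simplified signatures rather than the kernel's real ones:

#include <cstdlib>
#include <cstring>
#include <vector>

// Before (leak-prone): the heap copy must be freed on every path out of the
// function; an early return that forgets free(shape) leaks the copy.
int RunWithCopy(const std::vector<int> &in_shape, bool some_error) {
  int *shape = static_cast<int *>(malloc(in_shape.size() * sizeof(int)));
  if (shape == nullptr) return -1;
  memcpy(shape, in_shape.data(), in_shape.size() * sizeof(int));
  if (some_error) return -1;  // leaks `shape`, the bug class this commit removes
  free(shape);
  return 0;
}

// After: borrow the vector's own storage; there is nothing to free.
int RunWithoutCopy(const std::vector<int> &in_shape) {
  const int *shape = in_shape.data();
  (void)shape;  // ... pass to the C kernel that expects const int * ...
  return 0;
}
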
@@ -59,25 +59,25 @@ int ArgMinMaxInt8CPUKernel::Run() {
const int8_t *input_data = reinterpret_cast<const int8_t *>(in_tensors_.at(0)->MutableData());
int8_t *output_data = reinterpret_cast<int8_t *>(out_tensors_.at(0)->MutableData());
auto in_shape = input->shape().data();
auto in_shape = input->shape();
auto param = reinterpret_cast<ArgMinMaxParameter *>(op_parameter_);
if (param->topk_ == 1) {
Int8ArgMinMaxQuant(input_data, output_data, in_shape, param, &in_quant_arg_, &out_quant_arg_);
Int8ArgMinMaxQuant(input_data, output_data, in_shape.data(), param, &in_quant_arg_, &out_quant_arg_);
return RET_OK;
}
switch (param->axis_) {
case 0:
Int8ArgMinMaxDim0(input_data, output_data, in_shape, param, &in_quant_arg_, &out_quant_arg_);
Int8ArgMinMaxDim0(input_data, output_data, in_shape.data(), param, &in_quant_arg_, &out_quant_arg_);
break;
case 1:
Int8ArgMinMaxDim1(input_data, output_data, in_shape, param, &in_quant_arg_, &out_quant_arg_);
Int8ArgMinMaxDim1(input_data, output_data, in_shape.data(), param, &in_quant_arg_, &out_quant_arg_);
break;
case 2:
Int8ArgMinMaxDim2(input_data, output_data, in_shape, param, &in_quant_arg_, &out_quant_arg_);
Int8ArgMinMaxDim2(input_data, output_data, in_shape.data(), param, &in_quant_arg_, &out_quant_arg_);
break;
case 3:
Int8ArgMinMaxDim3(input_data, output_data, in_shape, param, &in_quant_arg_, &out_quant_arg_);
Int8ArgMinMaxDim3(input_data, output_data, in_shape.data(), param, &in_quant_arg_, &out_quant_arg_);
break;
}
return RET_OK;

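The int8 kernel had a lifetime bug rather than a leak: `input->shape().data()` takes a pointer into a temporary vector that is destroyed at the end of the statement, so `in_shape` dangled. The fix keeps the vector in a local and calls `.data()` at each call site. A small standalone illustration, assuming `shape()` returns its vector by value (which is what the fix implies):

#include <vector>

std::vector<int> ShapeByValue() { return {1, 32, 32, 3}; }

void LifetimeSketch() {
  // Dangling: the temporary returned by ShapeByValue() dies at the semicolon,
  // so `bad` would point at destroyed storage.
  // const int *bad = ShapeByValue().data();

  // Safe: the named vector lives until the end of the scope.
  std::vector<int> in_shape = ShapeByValue();
  const int *ok = in_shape.data();
  (void)ok;
}
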
@@ -22,6 +22,7 @@
using mindspore::kernel::KERNEL_ARCH::kCPU;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_MEMORY_FAILED;
using mindspore::lite::RET_OK;
namespace mindspore::kernel {
@@ -43,18 +44,58 @@ int CropInt8CPUKernel::Init() {
crop_para_->quant_arg.output_activation_max_ = std::numeric_limits<int8_t>::max();
crop_para_->quant_arg.output_activation_min_ = std::numeric_limits<int8_t>::min();
crop_para_->in_shape_ = reinterpret_cast<int *>(malloc(input_tensor->shape().size() * sizeof(int)));
if (crop_para_->in_shape_ == nullptr) {
MS_LOG(ERROR) << "malloc memory failed";
return RET_MEMORY_FAILED;
}
crop_para_->out_shape_ = reinterpret_cast<int *>(malloc(out_tensor->shape().size() * sizeof(int)));
if (crop_para_->out_shape_ == nullptr) {
MS_LOG(ERROR) << "malloc memory failed";
return RET_MEMORY_FAILED;
}
if (!InferShapeDone()) {
return RET_OK;
}
return ReSize();
}
CropInt8CPUKernel::~CropInt8CPUKernel() {
if (crop_para_->in_shape_ != nullptr) {
free(const_cast<int *>(crop_para_->in_shape_));
crop_para_->in_shape_ = nullptr;
}
if (crop_para_->out_shape_ != nullptr) {
free(const_cast<int *>(crop_para_->out_shape_));
crop_para_->out_shape_ = nullptr;
}
}
int CropInt8CPUKernel::ReSize() {
auto *input_tensor = in_tensors_.at(kInputIndex);
crop_para_->in_shape_ = input_tensor->shape().data();
auto input_shape = input_tensor->shape();
size_t input_dim = input_shape.size();
if (crop_para_->in_shape_ == nullptr) {
MS_LOG(ERROR) << "in_shape_ is nullptr";
return RET_ERROR;
} else {
memcpy(reinterpret_cast<void *>(const_cast<int *>(crop_para_->in_shape_)), input_shape.data(),
sizeof(int) * input_dim);
}
auto *out_tensor = out_tensors_.at(kOutputIndex);
crop_para_->out_shape_ = out_tensor->shape().data();
auto input_dim = input_tensor->shape().size();
auto output_shape = out_tensor->shape();
size_t output_dim = output_shape.size();
if (crop_para_->out_shape_ == nullptr) {
MS_LOG(ERROR) << "out_shape_ is nullptr";
return RET_ERROR;
} else {
memcpy(reinterpret_cast<void *>(const_cast<int *>(crop_para_->out_shape_)), output_shape.data(),
sizeof(int) * output_dim);
}
MS_ASSERT(input_dim <= CROP_OFFSET_MAX_SIZE);
crop_para_->input_dim_ = input_dim;
PadOffset(input_dim, crop_para_);

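Previously the crop kernel pointed `crop_para_->in_shape_` and `out_shape_` at `shape().data()` of the tensors, so the parameter struct referenced memory it did not own. The commit makes the kernel allocate its own buffers in Init, memcpy the current shapes into them in ReSize, and release them in the destructor that the header hunk below declares (replacing the defaulted one). A compact sketch of that ownership split, with illustrative names:

#include <cstdlib>
#include <cstring>
#include <vector>

// Illustrative owner of a raw int* shape buffer, mirroring the Init / ReSize /
// destructor split in the kernel. Assumes ReSize never sees more dimensions
// than were allocated in Init.
struct ShapeParamOwner {
  int *in_shape_ = nullptr;

  bool Init(size_t dims) {                      // allocate once
    in_shape_ = static_cast<int *>(malloc(dims * sizeof(int)));
    return in_shape_ != nullptr;
  }
  void ReSize(const std::vector<int> &shape) {  // refresh contents, not ownership
    memcpy(in_shape_, shape.data(), shape.size() * sizeof(int));
  }
  ~ShapeParamOwner() {                          // single release point
    free(in_shape_);
    in_shape_ = nullptr;
  }
};
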
@@ -35,7 +35,7 @@ class CropInt8CPUKernel : public CropBaseCPUKernel {
crop_para_ = reinterpret_cast<CropParameter *>(op_parameter_);
crop_para_->thread_count_ = op_parameter_->thread_num_;
}
~CropInt8CPUKernel() = default;
~CropInt8CPUKernel();
int Init() override;
int ReSize() override;

@@ -64,6 +64,17 @@ int LeakyReluInt8CPUKernel::Init() {
quant_prelu_parm_.quant_arg.output_activation_max_ = std::numeric_limits<int8_t>::max();
quant_prelu_parm_.quant_arg.output_activation_min_ = std::numeric_limits<int8_t>::min();
quant_prelu_parm_.in_shape_ = reinterpret_cast<int *>(malloc(input_tensor->shape().size() * sizeof(int)));
if (quant_prelu_parm_.in_shape_ == nullptr) {
MS_LOG(ERROR) << "malloc memory failed";
return RET_MEMORY_FAILED;
}
quant_prelu_parm_.out_shape_ = reinterpret_cast<int *>(malloc(out_tensor->shape().size() * sizeof(int)));
if (quant_prelu_parm_.out_shape_ == nullptr) {
MS_LOG(ERROR) << "malloc memory failed";
return RET_MEMORY_FAILED;
}
if (!InferShapeDone()) {
return RET_OK;
}
@@ -79,6 +90,14 @@ LeakyReluInt8CPUKernel::~LeakyReluInt8CPUKernel() {
free(input_quant_);
input_quant_ = nullptr;
}
if (quant_prelu_parm_.in_shape_ != nullptr) {
free(const_cast<int *>(quant_prelu_parm_.in_shape_));
quant_prelu_parm_.in_shape_ = nullptr;
}
if (quant_prelu_parm_.out_shape_ != nullptr) {
free(const_cast<int *>(quant_prelu_parm_.out_shape_));
quant_prelu_parm_.out_shape_ = nullptr;
}
}
int LeakyReluInt8CPUKernel::ReSize() {
@@ -92,10 +111,26 @@ int LeakyReluInt8CPUKernel::ReSize() {
}
quant_prelu_parm_.input_dim_ = input_dim;
quant_prelu_parm_.element_num = in_tensors_[0]->Size();
quant_prelu_parm_.in_shape_ = input_tensor->shape().data();
quant_prelu_parm_.out_shape_ = out_tensor->shape().data();
auto input_shape = input_tensor->shape();
if (quant_prelu_parm_.in_shape_ == nullptr) {
MS_LOG(ERROR) << "in_shape_ is nullptr";
return RET_ERROR;
} else {
memcpy(reinterpret_cast<void *>(const_cast<int *>(quant_prelu_parm_.in_shape_)), input_shape.data(),
sizeof(int) * input_dim);
}
auto output_shape = out_tensor->shape();
size_t output_dim = output_shape.size();
if (quant_prelu_parm_.out_shape_ == nullptr) {
MS_LOG(ERROR) << "out_shape_ is nullptr";
return RET_ERROR;
} else {
memcpy(reinterpret_cast<void *>(const_cast<int *>(quant_prelu_parm_.out_shape_)), output_shape.data(),
sizeof(int) * output_dim);
}
input_quant_ = static_cast<QuantArg *>(malloc(sizeof(QuantArg) * input_dim));
if (input_quant_ == nullptr) {
MS_LOG(ERROR) << "malloc memory failed";
return RET_MEMORY_FAILED;
}
return RET_OK;
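LeakyReluInt8CPUKernel gets the same treatment: `quant_prelu_parm_.in_shape_` and `out_shape_` become kernel-owned malloc'd buffers, refreshed by memcpy in ReSize and freed in the destructor next to the existing `input_quant_` cleanup. For comparison only, and not what the commit does (the parameter structs appear to be plain C structs shared with the C kernels), a C++-only struct could hold the shapes in std::vector members and drop the manual cleanup entirely:

#include <vector>

// Hypothetical alternative, purely illustrative: vector members own the shape
// storage, so no destructor bookkeeping is needed.
struct ShapeHolder {
  std::vector<int> in_shape_;
  std::vector<int> out_shape_;

  void ReSize(const std::vector<int> &in, const std::vector<int> &out) {
    in_shape_ = in;    // copies; released automatically when ShapeHolder dies
    out_shape_ = out;
  }
};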