!12264 fix memory not released

From: @xutianchun
Reviewed-by: @zhanghaibo5
Signed-off-by: @zhanghaibo5
mindspore-ci-bot 2021-02-18 14:17:07 +08:00 committed by Gitee
commit 9ea959d6d4
3 changed files with 10 additions and 11 deletions

@@ -551,8 +551,8 @@ STATUS WeightQuantizer::DoQuantSearch(const FuncGraphPtr &func_graph) {
       // copy origin data in case to recover
       auto *raw_data = static_cast<float *>(param_value->tensor_addr());
       auto elem_count = param_value->tensor_shape_size();
-      auto origin_data = malloc(sizeof(float) * elem_count);
-      auto ret = memcpy_s(origin_data, sizeof(float) * elem_count, raw_data, param_value->tensor_size());
+      std::unique_ptr<float[]> origin_data(new (std::nothrow) float[elem_count]);
+      auto ret = memcpy_s(origin_data.get(), sizeof(float) * elem_count, raw_data, param_value->tensor_size());
       if (ret != EOK) {
         MS_LOG(ERROR) << "memcpy fail: "
                       << " dst size: " << sizeof(float) * elem_count << " src size: " << param_value->tensor_size();
@@ -617,7 +617,7 @@ STATUS WeightQuantizer::DoQuantSearch(const FuncGraphPtr &func_graph) {
           MS_LOG(DEBUG) << "op: " << op_name << " intermediate bit: " << bit_num_t << " mean_error: " << mean_error
                         << " [recover]";
           // recover
-          status = UpdateTensorDataAndSize(param_value, origin_data, sizeof(float) * elem_count);
+          status = UpdateTensorDataAndSize(param_value, origin_data.get(), sizeof(float) * elem_count);
           if (status != RET_OK) {
             MS_LOG(ERROR) << "UpdateTensorDataAndSize fail";
             return RET_ERROR;
@@ -627,7 +627,6 @@ STATUS WeightQuantizer::DoQuantSearch(const FuncGraphPtr &func_graph) {
           opname_bit_[op_name] = bit_num_t;
         }
       }  // end bit loop
-      free(origin_data);
     }  // if: conv and matmul
   }  // end loop: all cnode
   return status;
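
The fix in WeightQuantizer::DoQuantSearch replaces a malloc'd scratch buffer with a std::unique_ptr<float[]>: the old code only called free(origin_data) at the end of the bit loop, so every early return on a memcpy_s or UpdateTensorDataAndSize failure leaked the backup copy. A minimal sketch of the pattern, with hypothetical stand-ins (quant_search, try_bit_width) rather than the actual quantizer code, and plain memcpy instead of memcpy_s only to stay self-contained:

#include <cstring>
#include <memory>
#include <vector>

// Hypothetical stand-in for quantizing the weights at one bit width and
// checking the resulting error.
static bool try_bit_width(std::vector<float> *weights, int bit_num) {
  (void)bit_num;
  return !weights->empty();
}

// Sketch of the fixed control flow: the backup of the original weights is
// owned by a unique_ptr, so the early returns that previously leaked the
// malloc'd buffer now release it automatically.
static bool quant_search(std::vector<float> *weights) {
  const size_t elem_count = weights->size();
  std::unique_ptr<float[]> origin(new (std::nothrow) float[elem_count]);
  if (origin == nullptr) {
    return false;
  }
  std::memcpy(origin.get(), weights->data(), sizeof(float) * elem_count);
  for (int bit_num = 2; bit_num <= 8; ++bit_num) {
    if (!try_bit_width(weights, bit_num)) {
      return false;  // before the fix, this path leaked the backup buffer
    }
    // recover the original data before trying the next bit width
    std::memcpy(weights->data(), origin.get(), sizeof(float) * elem_count);
  }
  return true;  // no free() needed at the end of the loop either
}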

@@ -246,7 +246,8 @@ STATUS TfLstmCellFusion::PopulateBiasNode(const EquivPtr &body_equiv, const Para
   default_param->set_tensor_shape(shape);
   default_param->set_tensor_type(kNumberTypeFloat32);
   default_param->set_format(schema::Format_NHWC);
-  auto tensor_data = new (std::nothrow) float[hidden_size * 8];
+  std::unique_ptr<float[]> tensor_data(new (std::nothrow) float[hidden_size * 8]);
   auto forget_bias_node = utils::cast<AnfNodePtr>((*body_equiv)[forget_bias_]);
   if (forget_bias_node == nullptr) {
@@ -271,13 +272,12 @@ STATUS TfLstmCellFusion::PopulateBiasNode(const EquivPtr &body_equiv, const Para
       }
     }
   }
-  default_param->SetTensorData(tensor_data, hidden_size * 8 * 4);
+  default_param->SetTensorData(tensor_data.release(), hidden_size * 8 * 4);
   new_bias->set_default_param(default_param);
   std::vector<int64_t> shape_vector_i(shape.begin(), shape.end());
   auto abstract_tensor_i = std::make_shared<abstract::AbstractTensor>(kFloat32, shape_vector_i);
   if (abstract_tensor_i == nullptr) {
     MS_LOG(ERROR) << "abstract_tensor is nullptr";
-    delete[] tensor_data;
     return RET_ERROR;
   }
   new_bias->set_abstract(abstract_tensor_i);
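
In TfLstmCellFusion::PopulateBiasNode the bias buffer changes from a raw new[] to a unique_ptr that is release()d exactly when SetTensorData takes ownership; that is also why the later delete[] in the abstract-tensor error path is removed, since by that point the parameter already owns the memory. A sketch under that ownership assumption, with a hypothetical TensorOwner standing in for the real parameter type:

#include <cstddef>
#include <memory>

// Hypothetical stand-in for the parameter object; like SetTensorData in the
// patch, it assumes ownership of the raw buffer passed to it.
struct TensorOwner {
  float *data = nullptr;
  ~TensorOwner() { delete[] data; }
  void SetTensorData(float *d, size_t /*bytes*/) {
    delete[] data;
    data = d;
  }
};

// Sketch of the fixed hand-off: the unique_ptr owns the buffer while it is
// being filled, and release() transfers ownership exactly once.
static bool populate_bias(TensorOwner *param, size_t hidden_size) {
  std::unique_ptr<float[]> tensor_data(new (std::nothrow) float[hidden_size * 8]);
  if (tensor_data == nullptr) {
    return false;  // nothing to clean up: the unique_ptr is empty
  }
  // ... fill tensor_data.get()[0 .. hidden_size * 8) from the fused gates ...
  param->SetTensorData(tensor_data.release(), hidden_size * 8 * sizeof(float));
  // From here on 'param' is the sole owner; failure paths after this point
  // must not delete[] the buffer, which is exactly what the patch removes.
  return true;
}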

@@ -31,7 +31,7 @@
 namespace mindspore::opt {
 ValueNodePtr IfPass::GetSwitchAnfPrim() {
-  auto switch_primitiveT = new (std::nothrow) schema::PrimitiveT;
+  std::unique_ptr<schema::PrimitiveT> switch_primitiveT(new (std::nothrow) schema::PrimitiveT);
   if (switch_primitiveT == nullptr) {
     MS_LOG(ERROR) << "new switch_primitiveT failed";
     return nullptr;
@@ -43,7 +43,7 @@ ValueNodePtr IfPass::GetSwitchAnfPrim() {
     return nullptr;
   }
-  auto partial_prim = std::make_shared<lite::Partial>(switch_primitiveT);
+  auto partial_prim = std::make_shared<lite::Partial>(switch_primitiveT.release());
   ValueNodePtr partial_anf_prim = NewValueNode(partial_prim);
   return partial_anf_prim;
 }
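
IfPass::GetSwitchAnfPrim gets the same treatment: the schema::PrimitiveT allocation is guarded by a unique_ptr across the early return-nullptr paths, and release() hands the raw pointer to lite::Partial, whose constructor is assumed (judging by the release() call in the patch) to take ownership. A sketch with hypothetical stand-ins for PrimitiveT and Partial:

#include <memory>

// Hypothetical stand-ins; the real types are schema::PrimitiveT and
// lite::Partial, whose constructor is assumed to take ownership.
struct PrimitiveT {};
struct Partial {
  explicit Partial(PrimitiveT *prim) : prim_(prim) {}
  ~Partial() { delete prim_; }
  PrimitiveT *prim_;
};

// Sketch of the fixed pattern: the unique_ptr frees the allocation on the
// early error returns; release() transfers it only once construction of the
// owning object is certain to happen.
static std::shared_ptr<Partial> get_switch_prim() {
  std::unique_ptr<PrimitiveT> switch_primitive(new (std::nothrow) PrimitiveT);
  if (switch_primitive == nullptr) {
    return nullptr;
  }
  // ... populate the primitive; any failure here just returns nullptr and
  //     the unique_ptr releases the allocation ...
  return std::make_shared<Partial>(switch_primitive.release());
}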