From 9a057d39f72a9af74afd62d32328c5b7dd0aa848 Mon Sep 17 00:00:00 2001
From: wangpingan2
Date: Tue, 7 Mar 2023 11:34:57 +0800
Subject: [PATCH] fix model delete segmentation fault and clean code.

---
 mindspore/lite/src/litert/lite_session.cc     |  6 +-
 .../src/litert/runtime_packed_node_pass.cc    | 59 +++++++++----------
 .../src/litert/runtime_packed_node_pass.h     |  2 +-
 3 files changed, 35 insertions(+), 32 deletions(-)

diff --git a/mindspore/lite/src/litert/lite_session.cc b/mindspore/lite/src/litert/lite_session.cc
index 16d9a5ef06e..2484bdc059c 100644
--- a/mindspore/lite/src/litert/lite_session.cc
+++ b/mindspore/lite/src/litert/lite_session.cc
@@ -1863,7 +1863,11 @@ const char *lite::LiteSession::LoadModelByPath(const std::string &file, mindspor
   char *lite_buf = nullptr;
   auto buf_model_type = LoadModelByBuff(model_buf, buf_size, &lite_buf, size, model_type);
   if (buf_model_type == mindspore::ModelType::kUnknownType || lite_buf == nullptr) {
-    delete[] model_buf;
+    if (use_mmap) {
+      lite::UnmapMmapBuffer(const_cast<void *>(static_cast<const void *>(model_buf)), buf_size);
+    } else {
+      delete[] model_buf;
+    }
     model_buf = nullptr;
     return nullptr;
   }
diff --git a/mindspore/lite/src/litert/runtime_packed_node_pass.cc b/mindspore/lite/src/litert/runtime_packed_node_pass.cc
index 03469282831..c5b601dd356 100644
--- a/mindspore/lite/src/litert/runtime_packed_node_pass.cc
+++ b/mindspore/lite/src/litert/runtime_packed_node_pass.cc
@@ -108,7 +108,13 @@ void PackedNodePass::Run(Model *model, const std::vector<Tensor *> &tensors) {
     pack_info->weight_sums_index_ = node->input_indices_.back();
     node->input_indices_.pop_back();
     if (!(reinterpret_cast<LiteModel *>(model)->keep_model_buf())) {
-      CopyWeightBiasSumsTensor(tensors);
+      auto index = static_cast<size_t>(pack_info->weight_sums_index_);
+      if (index > tensors.size()) {
+        MS_LOG(ERROR) << "weight sums tensor index is error.";
+        return;
+      }
+      auto tensor = tensors[index];
+      CopyWeightBiasSumsTensor(tensor);
     }
   }
 
@@ -116,37 +122,30 @@ void PackedNodePass::Run(Model *model, const std::vector<Tensor *> &tensors) {
   }
 }
 
-void PackedNodePass::CopyWeightBiasSumsTensor(const std::vector<Tensor *> &tensors) {
-  for (auto &pack_info : node_pack_info_map_) {
-    auto index = static_cast<size_t>(pack_info.second->weight_sums_index_);
-    if (index > tensors.size()) {
+void PackedNodePass::CopyWeightBiasSumsTensor(Tensor *tensor) {
+  if (!tensor->IsConst() && tensor->data() != nullptr) {
+    return;
+  }
+  if (!tensor->IsConst() || tensor->own_data()) {
+    return;
+  }
+  if (tensor->data_type() == kObjectTypeTensorType) {
+    MS_ASSERT(tensor->data() == nullptr);
+  } else {
+    auto copy_tensor = Tensor::CopyTensor(*tensor, true);
+    if (copy_tensor == nullptr) {
+      MS_LOG(ERROR) << "Copy tensor failed";
       return;
     }
-    auto tensor = tensors[index];
-    if (!tensor->IsConst() && tensor->data() != nullptr) {
-      return;
-    }
-    if (!tensor->IsConst() || tensor->own_data()) {
-      continue;
-    }
-    if (tensor->data_type() == kObjectTypeTensorType) {
-      MS_ASSERT(tensor->data() == nullptr);
-    } else {
-      auto copy_tensor = Tensor::CopyTensor(*tensor, true);
-      if (copy_tensor == nullptr) {
-        MS_LOG(ERROR) << "Copy tensor failed";
-        return;
-      }
-      tensor->FreeData();
-      tensor->set_data(copy_tensor->data());
-      tensor->set_own_data(true);
-      copy_tensor->set_data(nullptr);
-      delete copy_tensor;
-    }
+    tensor->FreeData();
+    tensor->set_data(copy_tensor->data());
+    tensor->set_own_data(true);
+    copy_tensor->set_data(nullptr);
+    delete copy_tensor;
   }
 }
 
-void MatmulDynamicSdotInt8Cpu(void *src, void *dst, int row, int col, bool transpose) {
+void MatmulDynamicSdotInt8Unpack(void *src, void *dst, int row, int col, bool transpose) {
   auto src_int8 = static_cast<int8_t *>(src);
   auto dst_int8 = static_cast<int8_t *>(dst);
   if (!transpose) {
@@ -182,7 +181,7 @@ void MatmulDynamicSdotInt8Cpu(void *src, void *dst, int row, int col, bool trans
   }
 }
 
-void MatmulFp32BaseCpu(void *src, void *dst, int row, int col, bool transpose) {
+void MatmulFp32BaseUnpack(void *src, void *dst, int row, int col, bool transpose) {
   if (!transpose) {
     // RowMajor2Row8MajorParallel
     auto src_r = static_cast<float *>(src);
@@ -238,12 +237,12 @@ RecoveryWeightFunc GetRecoveryWeightFunc(const int quant_type, const TypeId data
                                          const std::string &cpu_option) {
   if (cpu_option == kArm64SimdDot && node_type == schema::PrimitiveType_MatMulFusion &&
       quant_type == schema::QuantType_QUANT_DYNAMIC && data_type == kNumberTypeInt8) {
-    return MatmulDynamicSdotInt8Cpu;
+    return MatmulDynamicSdotInt8Unpack;
   }
   if (cpu_option == kArm64SimdDot && node_type == schema::PrimitiveType_MatMulFusion &&
       data_type == kNumberTypeFloat32) {
-    return MatmulFp32BaseCpu;
+    return MatmulFp32BaseUnpack;
   }
   return nullptr;
 }
diff --git a/mindspore/lite/src/litert/runtime_packed_node_pass.h b/mindspore/lite/src/litert/runtime_packed_node_pass.h
index b6169a2b554..8a571b824a0 100644
--- a/mindspore/lite/src/litert/runtime_packed_node_pass.h
+++ b/mindspore/lite/src/litert/runtime_packed_node_pass.h
@@ -52,7 +52,7 @@ class PackedNodePass {
     return this->node_pack_info_map_[node_name];
   }
   void Run(Model *model, const std::vector<Tensor *> &tensors);
-  void CopyWeightBiasSumsTensor(const std::vector<Tensor *> &tensors);
+  void CopyWeightBiasSumsTensor(Tensor *tensor);
 
 protected:
   void AddNodePackInfo(const std::string &node_name, PackInfo *pack_info) {