diff --git a/mindspore/lite/src/delegate/parameter_cache/load_host_cache_model.cc b/mindspore/lite/src/delegate/parameter_cache/load_host_cache_model.cc
index bd4e91cfc63..0b4dee695f8 100644
--- a/mindspore/lite/src/delegate/parameter_cache/load_host_cache_model.cc
+++ b/mindspore/lite/src/delegate/parameter_cache/load_host_cache_model.cc
@@ -29,6 +29,13 @@ constexpr size_t kGatherInputsSize = 3;
 }
 namespace mindspore {
 namespace cache {
+HostCacheModel::~HostCacheModel() {
+  if (cache_model_ != nullptr) {
+    delete cache_model_;
+    MS_LOG(ERROR) << "delete cache_model_";
+    cache_model_ = nullptr;
+  }
+}
 MSTensor *SchemaTensorToMSTensor(lite::SchemaTensorWrapper *schema_tensor_wrapper,
                                  mindspore::schema::Tensor *schema_tensor) {
   std::vector<int64_t> shape;
diff --git a/mindspore/lite/src/delegate/parameter_cache/load_host_cache_model.h b/mindspore/lite/src/delegate/parameter_cache/load_host_cache_model.h
index 91a23c2121e..cb414bcb702 100644
--- a/mindspore/lite/src/delegate/parameter_cache/load_host_cache_model.h
+++ b/mindspore/lite/src/delegate/parameter_cache/load_host_cache_model.h
@@ -30,7 +30,8 @@ namespace mindspore {
 namespace cache {
 class HostCacheModel {
  public:
-  HostCacheModel() {}
+  HostCacheModel() = default;
+  ~HostCacheModel();
   Status LoadCache(const std::string &model_path);
   Status LoadCache(DelegateModel<schema::Primitive> *model);
   bool CheckIsCacheKernel(kernel::Kernel *kernel);
diff --git a/mindspore/lite/src/delegate/tensorrt/op/matmul_tensorrt.cc b/mindspore/lite/src/delegate/tensorrt/op/matmul_tensorrt.cc
index 8348dfe5b56..96909450d5f 100644
--- a/mindspore/lite/src/delegate/tensorrt/op/matmul_tensorrt.cc
+++ b/mindspore/lite/src/delegate/tensorrt/op/matmul_tensorrt.cc
@@ -49,6 +49,12 @@ int MatMulTensorRT::AddInnerOp(nvinfer1::INetworkDefinition *network) {
     transpose_b_ = primitive->transpose_b() ? nvinfer1::MatrixOperation::kTRANSPOSE : nvinfer1::MatrixOperation::kNONE;
     activation_ = primitive->activation_type();
   } else if (type_ == schema::PrimitiveType_FullConnection) {
+    auto primitive = this->GetPrimitive()->value_as_FullConnection();
+    if (primitive == nullptr) {
+      MS_LOG(ERROR) << "convert to primitive FullConnection failed for " << op_name_;
+      return RET_ERROR;
+    }
+    activation_ = primitive->activation_type();
     transpose_a_ = nvinfer1::MatrixOperation::kNONE;
     transpose_b_ = nvinfer1::MatrixOperation::kTRANSPOSE;
   }
diff --git a/mindspore/lite/src/delegate/tensorrt/tensorrt_serializer.cc b/mindspore/lite/src/delegate/tensorrt/tensorrt_serializer.cc
index 88138b8f99d..6da86f48785 100644
--- a/mindspore/lite/src/delegate/tensorrt/tensorrt_serializer.cc
+++ b/mindspore/lite/src/delegate/tensorrt/tensorrt_serializer.cc
@@ -36,6 +36,7 @@ nvinfer1::ICudaEngine *TensorRTSerializer::GetSerializedEngine() {
     return nullptr;
   }
   nvinfer1::ICudaEngine *engine = runtime->deserializeCudaEngine(trt_model_stream, size, nullptr);
+  delete[] trt_model_stream;
   runtime->destroy();
   return engine;
 }
diff --git a/mindspore/lite/src/lite_session.cc b/mindspore/lite/src/lite_session.cc
index 5f4e0ee7c54..3f97d508405 100644
--- a/mindspore/lite/src/lite_session.cc
+++ b/mindspore/lite/src/lite_session.cc
@@ -940,6 +940,8 @@ int LiteSession::DelegateInit() {
 int LiteSession::Init(InnerContext *context) {
   bool expected = false;
   if (!is_running_.compare_exchange_strong(expected, true)) {
+    delete context;
+    context = nullptr;
     MS_LOG(ERROR) << "Not support multi-threading";
     return RET_ERROR;
   }
@@ -1784,6 +1786,7 @@ int lite::LiteSession::LoadModelAndCompileByPath(const std::string &model_path,
   (reinterpret_cast<lite::LiteModel *>(model))->set_keep_model_buf(true);
   auto ret = CompileGraph(model);
   if (ret != lite::RET_OK) {
+    delete model;
     MS_LOG(ERROR) << "Compile model failed";
     return RET_ERROR;
   }
@@ -1808,6 +1811,7 @@ int lite::LiteSession::LoadModelAndCompileByPath(const std::string &model_path,
   (reinterpret_cast<lite::LiteModel *>(model))->set_keep_model_buf(true);
   auto ret = CompileGraph(model);
   if (ret != lite::RET_OK) {
+    delete model;
     MS_LOG(ERROR) << "Compile model failed";
     return RET_ERROR;
   }
diff --git a/mindspore/lite/src/scheduler.cc b/mindspore/lite/src/scheduler.cc
index 1dc92dea21e..06f0195726e 100644
--- a/mindspore/lite/src/scheduler.cc
+++ b/mindspore/lite/src/scheduler.cc
@@ -515,7 +515,12 @@ int Scheduler::InitDelegateKernels(std::vector<kernel::LiteKernel *> *dst_kernel
     }
     auto ret = ReplaceDelegateKernels(&tmp_kernels);
     if (ret != RET_OK) {
-      MS_LOG(ERROR) << "NPU delegate replace delegate kernels failed.";
+      dst_kernels->insert(dst_kernels->end(), src_kernels.begin(), src_kernels.end());
+      dst_kernels->insert(dst_kernels->end(), tmp_kernels.begin(), tmp_kernels.end());
+      if (remain_kernel != nullptr) {
+        dst_kernels->push_back(remain_kernel);
+      }
+      MS_LOG(ERROR) << "Inner delegate replace delegate kernels failed.";
       return ret;
     }
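
The recurring pattern in these hunks is releasing heap objects on early-return error paths: the deserialized TensorRT model buffer, the Model created in LoadModelAndCompileByPath, and the InnerContext passed to LiteSession::Init. The following is a minimal, self-contained sketch of the same cleanup expressed with std::unique_ptr; Model, LoadModel, and CompileGraph here are hypothetical stand-ins, not MindSpore's actual API.

// Sketch only: hypothetical LoadModel/CompileGraph, not the MindSpore Lite API.
#include <iostream>
#include <memory>
#include <string>

struct Model {
  explicit Model(std::string path) : path(std::move(path)) {}
  std::string path;
};

// Hypothetical loader: returns nullptr on failure, otherwise owns the model.
std::unique_ptr<Model> LoadModel(const std::string &path) {
  if (path.empty()) {
    return nullptr;
  }
  return std::make_unique<Model>(path);
}

// Hypothetical compiler used only to create a failing branch.
bool CompileGraph(const Model &model) { return !model.path.empty(); }

int LoadModelAndCompile(const std::string &path) {
  auto model = LoadModel(path);  // unique_ptr owns the model from here on
  if (model == nullptr) {
    std::cerr << "Read model file failed" << std::endl;
    return -1;
  }
  if (!CompileGraph(*model)) {
    // Early return: the model is freed automatically, no manual `delete model`.
    std::cerr << "Compile model failed" << std::endl;
    return -1;
  }
  // On success, ownership can be handed off explicitly, e.g. via model.release().
  return 0;
}

int main() { return LoadModelAndCompile("model.ms"); }

The patch itself uses explicit delete because the surrounding code works with raw pointers and transfers ownership to the session on success; the sketch only illustrates the leak that the added delete statements close.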