!10339 [MSLITE][DEVELOP] fix bug of NPU benchmark

From: @yangruoqi713
Reviewed-by: @zhang_xue_tong,@hangangqiang
Signed-off-by: @zhang_xue_tong
This commit is contained in:
mindspore-ci-bot 2020-12-22 19:41:38 +08:00 committed by Gitee
commit 5d4a308aca
7 changed files with 17 additions and 8 deletions

View File

@ -19,6 +19,11 @@
#include "src/runtime/agent/npu/npu_manager.h"
#include "nnacl/pack.h"
namespace mindspore::lite {
// Drop references to the HiAI NPU tensor buffers held by this executor.
// The clears are independent of each other; the shared client_ handle is
// managed elsewhere (NPUManager) and is intentionally not touched here.
NPUExecutor::~NPUExecutor() {
  npu_output_tensors_.clear();
  npu_input_tensors_.clear();
}
int NPUExecutor::Prepare(const std::vector<kernel::LiteKernel *> &kernels) {
this->client_ = mindspore::lite::NPUManager::GetInstance()->GetClient(model_name_);
if (this->client_ == nullptr) {

View File

@ -28,7 +28,7 @@ namespace mindspore::lite {
class NPUExecutor : public Executor {
public:
explicit NPUExecutor(const std::string &model_name) { this->model_name_ = model_name; }
~NPUExecutor() override = default;
~NPUExecutor() override;
int Prepare(const std::vector<kernel::LiteKernel *> &kernels) override;
int Run(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,

View File

@ -70,6 +70,7 @@ void NPUManager::Reset() {
}
models_.clear();
for (auto client : clients_) {
client->UnLoadModel();
client.reset();
}
clients_.clear();
@ -213,7 +214,6 @@ int NPUManager::LoadModel(const std::shared_ptr<hiai::AiModelMngerClient> &clien
}
for (const auto &desc : desc_list) {
MS_LOG(ERROR) << desc->GetName();
auto it = models_.find(desc->GetName());
it->second->is_loaded = true;
it->second->client_ = client;

View File

@ -42,7 +42,7 @@ struct SubGraphModel {
bool is_loaded = false;
int index_;
std::string model_name_;
domi::ModelBufferData *model_buffer_data_;
std::shared_ptr<domi::ModelBufferData> model_buffer_data_;
std::shared_ptr<hiai::AiModelMngerClient> client_;
std::shared_ptr<hiai::AiModelDescription> desc_;
};

View File

@ -55,7 +55,6 @@ void NPUFusionPass::UpdatePreKernels(kernel::LiteKernel *cur_kernel) {
}
}
cur_kernel->set_in_kernels(cur_in_kernels);
kernels->erase(find(kernels->begin(), kernels->end(), in_kernel));
}
}
@ -80,7 +79,6 @@ void NPUFusionPass::UpdatePostKernels(kernel::LiteKernel *cur_kernel) {
}
}
cur_kernel->set_out_kernels(cur_out_kernels);
kernels->erase(find(kernels->begin(), kernels->end(), out_kernel));
}
}
@ -191,10 +189,8 @@ int NPUFusionPass::FormatFusion(kernel::LiteKernel *kernel) {
post_kernel->set_in_kernels(post_in_kernels);
pre_out_kernels.push_back(post_kernel);
}
kernels->erase(find(kernels->begin(), kernels->end(), nc2nh));
}
pre_kernel->set_out_kernels(pre_out_kernels);
kernels->erase(find(kernels->begin(), kernels->end(), kernel));
return RET_OK;
}

View File

@ -33,6 +33,14 @@ namespace mindspore::kernel {
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
// Release the subgraph's NPU operator handles and the owned executor.
// NOTE(review): executor_ appears to be a raw owning pointer — a
// std::unique_ptr member would make this dtor unnecessary (Rule of Zero),
// but the member declaration is outside this hunk, so only the body is fixed.
SubGraphNpuKernel::~SubGraphNpuKernel() {
  subgraph_input_op_.clear();
  subgraph_output_op_.clear();
  // `delete` on a null pointer is a well-defined no-op, so the explicit
  // null check was redundant; reset the pointer to guard against any
  // accidental double-delete during teardown.
  delete executor_;
  executor_ = nullptr;
}
domi::ModelBufferData *SubGraphNpuKernel::BuildIRModel() {
ge::Graph graph("NPUGraph");

View File

@ -36,7 +36,7 @@ class SubGraphNpuKernel : public SubGraphKernel {
subgraph_type_ = kNpuSubGraph;
}
~SubGraphNpuKernel() override = default;
~SubGraphNpuKernel() override;
int Init() override;