!20317 [MSLITE][DEVELOP] sync r1.3 fix bug: delegate model input & output, affinity mode value

Merge pull request !20317 from yangruoqi713/master_bug
i-robot 2021-07-15 06:12:57 +00:00 committed by Gitee
commit 6e4db2a7ea
9 changed files with 43 additions and 7 deletions

View File

@@ -39,7 +39,7 @@ struct Context::Data {
   int32_t thread_num = 2;
   bool enable_parallel_ = false;
   std::vector<int32_t> affinity_core_list_;
-  int affinity_mode_ = 2;
+  int affinity_mode_ = 0;
   std::shared_ptr<Delegate> delegate = nullptr;
 };
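
For reference, a minimal sketch of the affinity-mode numbering this default change relies on. NO_BIND and MID_CPU appear in the check added to the converter header below; the middle name (HIGHER_CPU) is an assumption, but the 0/1/2 meanings follow the new log message in the converter source.

// Sketch only: lite::CpuBindMode values as implied by this pull request.
enum CpuBindMode {
  NO_BIND = 0,     // no core affinity -- the new default for affinity_mode_
  HIGHER_CPU = 1,  // bind big cores first (assumed name)
  MID_CPU = 2,     // bind little cores first -- the old default
};
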
@@ -80,6 +80,7 @@ void Context::SetThreadNum(int32_t thread_num) {
   }
   data_->thread_num = thread_num;
 }
 int32_t Context::GetThreadNum() const {
   if (data_ == nullptr) {
     MS_LOG(ERROR) << "Invalid context.";
@@ -111,9 +112,9 @@ void Context::SetThreadAffinity(int mode) {
     return;
   }
   data_->affinity_mode_ = mode;
   return;
 }
 int Context::GetThreadAffinityMode() const {
   if (data_ == nullptr) {
     MS_LOG(ERROR) << "Invalid context.";
@@ -131,6 +132,7 @@ void Context::SetThreadAffinity(const std::vector<int> &core_list) {
     return;
   }
 std::vector<int32_t> Context::GetThreadAffinityCoreList() const {
   if (data_ == nullptr) {
     MS_LOG(ERROR) << "Invalid context.";
@@ -221,6 +223,7 @@ void CPUDeviceInfo::SetEnableFP16(bool is_fp16) {
   }
   data_->params[kModelOptionCpuEnableFP16] = is_fp16;
 }
 bool CPUDeviceInfo::GetEnableFP16() const {
   if (data_ == nullptr) {
     MS_LOG(ERROR) << "Invalid context.";
@@ -251,6 +254,7 @@ void KirinNPUDeviceInfo::SetFrequency(int frequency) {
   }
   data_->params[kModelOptionKirinNpuFrequency] = frequency;
 }
 int KirinNPUDeviceInfo::GetFrequency() const {
   if (data_ == nullptr) {
     MS_LOG(ERROR) << "Invalid context.";

View File

@@ -60,6 +60,11 @@ Status A2L_ConvertContext(Context *a_context, lite::Context *l_context) {
     cpu_context->SetAllocator(l_context->allocator);
   }
+  if (!IsAffinityModeValid(a_context->GetThreadAffinityMode())) {
+    MS_LOG(ERROR)
+      << "Invalid affinity mode, only supports 0: no affinities, 1: big cores first, 2: little cores first.";
+    return kLiteInputParamInvalid;
+  }
   lite::CpuBindMode mode = A2L_ConvertAffinityMode(a_context->GetThreadAffinityMode());
   lite::DeviceInfo cpu_info = {0};

View File

@@ -55,6 +55,10 @@ inline lite::CpuBindMode A2L_ConvertAffinityMode(int affinity_mode) {
   }
 }
+inline bool IsAffinityModeValid(int affinity_mode) {
+  return affinity_mode >= lite::NO_BIND && affinity_mode <= lite::MID_CPU;
+}
 Status A2L_ConvertContext(Context *a_context, lite::Context *l_context);
 Status A2L_ConvertConfig(const TrainCfg *a_train_cfg, lite::TrainCfg *l_train_cfg);
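
As a sanity check, a self-contained mirror of the new guard with the enum bounds written out as integers (0 and 2, per the log message above); this is illustrative only, not part of the patch.

#include <cassert>

// Assumed values: lite::NO_BIND == 0, lite::MID_CPU == 2.
inline bool IsAffinityModeValidSketch(int affinity_mode) { return affinity_mode >= 0 && affinity_mode <= 2; }

int main() {
  assert(IsAffinityModeValidSketch(0));   // no affinities
  assert(IsAffinityModeValidSketch(1));   // big cores first
  assert(IsAffinityModeValidSketch(2));   // little cores first
  assert(!IsAffinityModeValidSketch(-1) && !IsAffinityModeValidSketch(3));  // rejected -> kLiteInputParamInvalid
  return 0;
}
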

View File

@@ -22,6 +22,14 @@
 #include "src/delegate/npu/transpose_kernel.h"
 namespace mindspore {
 NPUGraph::~NPUGraph() {
+  for (int i = 0; i < all_kernels_.size(); i++) {
+    for (auto output : all_kernels_[i]->outputs()) {
+      if (find(outputs_.begin(), outputs_.end(), output) != outputs_.end()) {
+        free(output.MutableData());
+        output.SetData(nullptr);
+      }
+    }
+  }
   for (auto *kernel : all_kernels_) {
     delete kernel;
   }
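
The intent of the new loop, restated as a standalone sketch with a hypothetical Buffer type standing in for the delegate's tensor wrapper: buffers the NPU delegate malloc'ed for graph outputs are freed exactly once here and then nulled so later tensor destruction cannot free them again.

#include <algorithm>
#include <cstdlib>
#include <vector>

struct Buffer { void *data = nullptr; };  // hypothetical stand-in for the delegate's tensor wrapper

void ReleaseGraphOutputBuffers(const std::vector<Buffer *> &kernel_outputs,
                               const std::vector<Buffer *> &graph_outputs) {
  for (auto *out : kernel_outputs) {
    // Only graph outputs were malloc'ed by the delegate itself; other buffers are owned elsewhere.
    if (std::find(graph_outputs.begin(), graph_outputs.end(), out) != graph_outputs.end()) {
      free(out->data);
      out->data = nullptr;  // keep later teardown from freeing the same pointer twice
    }
  }
}
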

View File

@@ -30,6 +30,7 @@ InnerContext::InnerContext(const Context *context) {
   this->allocator = context->allocator;
   this->thread_num_ = context->thread_num_;
   this->enable_parallel_ = context->enable_parallel_;
+  this->affinity_core_list_ = context->affinity_core_list_;
   SetContextDevice(context);
 #if defined(ENABLE_ARM) && defined(ENABLE_FP16)
   CpuInfo cpu_info;
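
A hedged usage note: with this copy in place, a core list set through the public API (the SetThreadAffinity overload shown earlier in this diff) should now be carried into the inner context rather than dropped; the header path below is assumed.

#include <memory>
#include <vector>
#include "include/api/context.h"  // assumed public header for the cxx_api Context

void ConfigureAffinityByList() {
  auto context = std::make_shared<mindspore::Context>();
  context->SetThreadNum(2);
  // Previously this list never reached InnerContext::affinity_core_list_.
  context->SetThreadAffinity(std::vector<int>{4, 5, 6, 7});
}
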

View File

@@ -710,6 +710,7 @@ void LiteSession::BindThread(bool if_bind) {
 }
 LiteSession::~LiteSession() {
+  delegate_.reset();
   bool expected = false;
   if (!is_running_.compare_exchange_strong(expected, true)) {
     MS_LOG(ERROR) << "Not support multi-threading";

View File

@@ -112,7 +112,12 @@ void MindrtExecutor::TransferGraphOutput() {
                        reinterpret_cast<float *>(dst_tensor->data_c()), dst_tensor->ElementsNum());
     } else {
       dst_tensor->set_data(src_tensor->data());
-      src_tensor->set_data(nullptr);
+      if (src_tensor->own_data() == true && src_tensor->allocator() == nullptr) {
+        dst_tensor->set_own_data(false);
+        src_tensor->IncRefCount();
+      } else {
+        src_tensor->set_data(nullptr);
+      }
     }
     src_tensor->DecRefCount();
   }
@@ -128,8 +133,12 @@
     } else {
       if (dst_tensor->data_type() == src_tensor->data_type()) {
         /* user set graph-output-tensor from outside */
-        src_tensor->set_data(dst_tensor->data());
-        src_tensor->set_own_data(false);
+        if (dst_tensor->data() == nullptr || dst_tensor->own_data() == false) {
+          src_tensor->set_own_data(true);
+        } else {
+          src_tensor->set_data(dst_tensor->data());
+          src_tensor->set_own_data(false);
+        }
         src_tensor->set_allocator(nullptr);
       }
     }
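
Both hunks above coordinate on the tensor's own_data flag; a compact model of that convention, with names assumed from the calls in the diff rather than taken from the actual Tensor class:

struct TensorView {
  void *data = nullptr;
  bool own_data = false;  // true: this tensor frees `data` on release; false: `data` is borrowed
};

// The second hunk's decision: if the user-provided output tensor has no usable buffer of its own,
// the source keeps producing into its own storage; otherwise it borrows the user's buffer and
// must never free it.
inline void BindUserOutput(TensorView *src, TensorView *dst) {
  if (dst->data == nullptr || !dst->own_data) {
    src->own_data = true;
  } else {
    src->data = dst->data;
    src->own_data = false;
  }
}
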

View File

@@ -172,9 +172,11 @@ int Scheduler::ReplaceDelegateKernels(std::vector<kernel::LiteKernel *> *dst_ker
     kernels.push_back((*dst_kernels)[i]->kernel());
   }
+  ms_inputs_ = LiteTensorsToMSTensors(inputs_);
+  ms_outputs_ = LiteTensorsToMSTensors(outputs_);
   auto schema_version = static_cast<SchemaVersion>(VersionManager::GetInstance()->GetSchemaVersion());
-  DelegateModel *model = new (std::nothrow) DelegateModel(
-    &kernels, LiteTensorsToMSTensors(inputs_), LiteTensorsToMSTensors(outputs_), primitives_, schema_version);
+  DelegateModel *model =
+    new (std::nothrow) DelegateModel(&kernels, ms_inputs_, ms_outputs_, primitives_, schema_version);
   if (model == nullptr) {
     MS_LOG(ERROR) << "New delegate model failed.";
     return RET_NULL_PTR;
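
The underlying issue, sketched with hypothetical stand-ins; the assumption implied by this fix is that DelegateModel keeps a reference to the vectors it is constructed with instead of copying them.

#include <vector>

struct MsTensorLike {};

struct DelegateModelLike {
  explicit DelegateModelLike(const std::vector<MsTensorLike> &inputs) : inputs_(inputs) {}
  const std::vector<MsTensorLike> &inputs_;  // reference member: the caller must keep the vector alive
};

std::vector<MsTensorLike> ConvertTensors() { return std::vector<MsTensorLike>(2); }

void Sketch() {
  // Before the fix: a temporary like ConvertTensors() dies at the end of the constructing
  // statement, so the model would later read through a dangling reference.
  // DelegateModelLike *broken = new DelegateModelLike(ConvertTensors());

  // After the fix: keep the converted tensors in storage that outlives the model,
  // mirroring the new ms_inputs_ / ms_outputs_ members of Scheduler.
  std::vector<MsTensorLike> ms_inputs = ConvertTensors();
  DelegateModelLike fixed(ms_inputs);
  (void)fixed;
}
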

View File

@@ -124,6 +124,8 @@ class Scheduler {
   std::vector<Tensor *> *src_tensors_;
   const std::vector<Tensor *> &inputs_;
   const std::vector<Tensor *> &outputs_;
+  std::vector<mindspore::MSTensor> ms_inputs_;
+  std::vector<mindspore::MSTensor> ms_outputs_;
   std::vector<size_t> graph_output_node_indexes_;
   std::map<int, OpParameter *> op_parameters_;
   bool is_train_session_ = false;