Fix prediction error for dynamic-shape models

This commit is contained in:
sjtujayyyy 2022-10-12 15:08:51 +08:00 committed by Steven
parent a0cc9babbf
commit 8f5d7db2d7
4 changed files with 20 additions and 12 deletions

View File

@ -350,9 +350,6 @@ STATUS ModelProcess::UnLoad() {
}
STATUS ModelProcess::SetBatchSize(const std::vector<KernelTensorPtr> &inputs) {
for (size_t i = 0; i < inputs.size(); i++) {
input_infos_[i].buffer_size = inputs[i]->GetData()->size;
}
auto batch_size_tensor = inputs[inputs.size() - 1];
size_t data_type_size = lite::DataTypeSize(batch_size_tensor->GetDtype());
size_t num = 0;
@ -383,9 +380,6 @@ STATUS ModelProcess::SetBatchSize(const std::vector<KernelTensorPtr> &inputs) {
}
STATUS ModelProcess::SetImageSize(const std::vector<KernelTensorPtr> &inputs) {
for (size_t i = 0; i < inputs.size(); i++) {
input_infos_[i].buffer_size = inputs[i]->GetData()->size;
}
auto image_size_tensor = inputs[inputs.size() - 1];
size_t data_type_size = lite::DataTypeSize(image_size_tensor->GetDtype());
size_t num = 0;
@ -471,6 +465,14 @@ bool ModelProcess::IsDynamicBatchSize() { return !GetDynamicBatch().empty(); }
bool ModelProcess::IsDynamicImageSize() { return !GetDynamicImage().empty(); }
// Refresh the cached buffer size of every input from the incoming tensors.
// Only dynamic-shape models need this: their per-predict payload sizes vary,
// while static models keep the sizes recorded at load time.
// NOTE(review): assumes input_infos_ holds at least inputs.size() entries and
// that GetData() is non-null for each input — confirm against the caller.
void ModelProcess::UpdateBufferSize(const std::vector<KernelTensorPtr> &inputs) {
  if (!IsDynamicShape()) {
    return;  // static shapes: previously recorded buffer sizes remain valid
  }
  for (size_t idx = 0; idx < inputs.size(); ++idx) {
    input_infos_[idx].buffer_size = inputs[idx]->GetData()->size;
  }
}
STATUS ModelProcess::CheckAndInitInput(const std::vector<KernelTensorPtr> &inputs) {
aclError ret;
inputs_ = aclmdlCreateDataset();
@ -479,6 +481,7 @@ STATUS ModelProcess::CheckAndInitInput(const std::vector<KernelTensorPtr> &input
MS_LOG(ERROR) << "Check input tensor failed.";
return lite::RET_ERROR;
}
UpdateBufferSize(inputs);
// copy inputs
for (size_t i = 0; i < input_infos_.size(); ++i) {
auto &info = input_infos_[i];

View File

@ -90,6 +90,7 @@ class ModelProcess {
void DestroyInputsDataMem();
void DestroyInputsBuffer();
void DestroyOutputsBuffer();
void UpdateBufferSize(const std::vector<KernelTensorPtr> &inputs);
AclModelOptionsPtr options_;
uint32_t model_id_;

View File

@ -356,9 +356,6 @@ STATUS ModelProcess::UnLoad() {
}
STATUS ModelProcess::SetBatchSize(const std::vector<mindspore::MSTensor> &inputs) {
for (size_t i = 0; i < inputs.size(); i++) {
input_infos_[i].buffer_size = inputs[i].DataSize();
}
auto batch_size_tensor = inputs[inputs.size() - 1];
size_t data_type_size = lite::DataTypeSize(static_cast<enum TypeId>(batch_size_tensor.DataType()));
size_t num = 0;
@ -389,9 +386,6 @@ STATUS ModelProcess::SetBatchSize(const std::vector<mindspore::MSTensor> &inputs
}
STATUS ModelProcess::SetImageSize(const std::vector<mindspore::MSTensor> &inputs) {
for (size_t i = 0; i < inputs.size(); i++) {
input_infos_[i].buffer_size = inputs[i].DataSize();
}
auto image_size_tensor = inputs[inputs.size() - 1];
size_t data_type_size = lite::DataTypeSize(static_cast<enum TypeId>(image_size_tensor.DataType()));
size_t num = 0;
@ -477,6 +471,14 @@ bool ModelProcess::IsDynamicBatchSize() { return !GetDynamicBatch().empty(); }
bool ModelProcess::IsDynamicImageSize() { return !GetDynamicImage().empty(); }
// Refresh the cached buffer size of every input from the incoming tensors.
// Only dynamic-shape models need this: their per-predict payload sizes vary,
// while static models keep the sizes recorded at load time.
// NOTE(review): assumes input_infos_ holds at least inputs.size() entries —
// confirm against the caller.
void ModelProcess::UpdateBufferSize(const std::vector<mindspore::MSTensor> &inputs) {
  if (!IsDynamicShape()) {
    return;  // static shapes: previously recorded buffer sizes remain valid
  }
  for (size_t idx = 0; idx < inputs.size(); ++idx) {
    input_infos_[idx].buffer_size = inputs[idx].DataSize();
  }
}
STATUS ModelProcess::CheckAndInitInput(const std::vector<mindspore::MSTensor> &inputs) {
aclError ret;
inputs_ = aclmdlCreateDataset();
@ -485,6 +487,7 @@ STATUS ModelProcess::CheckAndInitInput(const std::vector<mindspore::MSTensor> &i
MS_LOG(ERROR) << "Check input tensor failed.";
return lite::RET_ERROR;
}
UpdateBufferSize(inputs);
// copy inputs
for (size_t i = 0; i < input_infos_.size(); ++i) {
auto &info = input_infos_[i];

View File

@ -88,6 +88,7 @@ class ModelProcess {
void DestroyInputsDataMem();
void DestroyInputsBuffer();
void DestroyOutputsBuffer();
void UpdateBufferSize(const std::vector<mindspore::MSTensor> &inputs);
AclModelOptions options_;
uint32_t model_id_;