!23247 [MSLITE] Fix bug in TF model's output names.

Merge pull request !23247 from wangshaocong/convert_r1.4
Author: i-robot, 2021-09-11 01:44:43 +00:00 (committed by Gitee)
Commit: 9baffac2d3
3 changed files with 31 additions and 17 deletions

--- File 1 of 3 ---

@@ -209,7 +209,10 @@ int LiteSession::ConvertTensors(const lite::Model *model) {
         MS_LOG(ERROR) << "Graph output shouldn't have data";
         return RET_ERROR;
       }
-      dst_tensor->set_category(Tensor::GRAPH_OUTPUT);
+      // A tensor that is both a graph input and a graph output is treated as an input.
+      if (!dst_tensor->IsGraphInput()) {
+        dst_tensor->set_category(Tensor::GRAPH_OUTPUT);
+      }
     }
     if (src_tensor->name() != nullptr) {
       dst_tensor->set_tensor_name(src_tensor->name()->str());
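
The guard gives GRAPH_INPUT precedence: when the same tensor serves as both a graph input and a graph output, it keeps its input category instead of being re-marked as an output. A minimal standalone sketch of that rule (the simplified Tensor struct below is illustrative only, not the real lite::Tensor):

    #include <iostream>

    enum class Category { VAR, GRAPH_INPUT, GRAPH_OUTPUT };

    struct Tensor {
      Category category = Category::VAR;
      bool IsGraphInput() const { return category == Category::GRAPH_INPUT; }
    };

    // Mirrors the patched branch: only re-categorize if the tensor
    // is not already a graph input.
    void MarkAsGraphOutput(Tensor *t) {
      if (!t->IsGraphInput()) {
        t->category = Category::GRAPH_OUTPUT;
      }
    }

    int main() {
      Tensor shared;
      shared.category = Category::GRAPH_INPUT;  // tensor is a graph input...
      MarkAsGraphOutput(&shared);               // ...and also a graph output
      std::cout << std::boolalpha << shared.IsGraphInput() << "\n";  // true
    }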
@@ -364,16 +367,13 @@ void LiteSession::InitGraphInOutTensorsMap(const lite::Model *model) {
   InitGraphInputMap(model);
   InitGraphOutputNodeMap(model);
   InitGraphOutputTensorMap(model);
-  for (auto *tensor : this->inputs_) {
-    tensor->set_category(Tensor::Category::GRAPH_INPUT);
-  }
-  for (auto *tensor : this->outputs_) {
-    tensor->set_category(Tensor::Category::GRAPH_OUTPUT);
-  }
 }

 void LiteSession::IsolateOutputTensor() {
   for (Tensor *src_tensor : outputs_) {
+    if (src_tensor->IsGraphInput()) {
+      continue;
+    }
     Tensor *new_tensor =
         new Tensor(src_tensor->data_type(), src_tensor->shape(), src_tensor->format(), Tensor::GRAPH_OUTPUT);
     new_tensor->set_allocator(src_tensor->allocator()); /* GPU uses the OpenCL allocator */
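
The two deleted loops were the other half of the bug: because outputs_ was processed after inputs_, a tensor present in both lists had its GRAPH_INPUT category clobbered by GRAPH_OUTPUT, so later IsGraphInput() checks failed. A tiny repro of that overwrite pattern (same illustrative Tensor as in the sketch above):

    #include <vector>

    enum class Category { VAR, GRAPH_INPUT, GRAPH_OUTPUT };
    struct Tensor { Category category = Category::VAR; };

    int main() {
      Tensor shared;  // used as both a graph input and a graph output
      std::vector<Tensor *> inputs{&shared};
      std::vector<Tensor *> outputs{&shared};
      for (auto *t : inputs) t->category = Category::GRAPH_INPUT;
      for (auto *t : outputs) t->category = Category::GRAPH_OUTPUT;  // clobbers GRAPH_INPUT
      // shared is now indistinguishable from a pure output, which is
      // exactly the mis-categorization this PR removes.
    }

With the categories assigned once in ConvertTensors, IsolateOutputTensor can rely on IsGraphInput() and skip shared tensors instead of cloning them.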

--- File 2 of 3 ---

@@ -24,16 +24,24 @@
 namespace mindspore::lite {
 void MindrtExecutor::PrepareInputData(const std::vector<kernel::LiteKernel *> &kernels,
                                       const std::vector<Tensor *> &inputs) {
-  for (size_t i = 0; i < inputs.size(); ++i) {
-    for (size_t j = 0; j < kernels.size(); ++j) {
-      auto in_tensor_size = kernels[j]->in_tensors().size();
-      for (size_t k = 0; k < in_tensor_size; ++k) {
-        if (inputs[i] != kernels[j]->in_tensors()[k]) {
-          continue;
-        }
-        auto data = std::make_shared<OpData<Tensor>>(op_actors_[j]->GetAID(), inputs[i], static_cast<int>(k));
-        input_data_.emplace_back(data);
-      }
+  for (size_t j = 0; j < kernels.size(); ++j) {
+    auto in_tensor_size = kernels[j]->in_tensors().size();
+    for (size_t k = 0; k < in_tensor_size; ++k) {
+      auto tensor = kernels[j]->in_tensors()[k];
+      if (!tensor->IsGraphInput()) {
+        continue;
+      }
+      size_t idx = std::find(inputs.begin(), inputs.end(), tensor) - inputs.begin();
+      if (idx == inputs.size()) {
+        MS_LOG(ERROR) << "The input is not found.";
+        return;
+      }
+      auto data = std::make_shared<OpData<Tensor>>(op_actors_[j]->GetAID(), inputs.at(idx), static_cast<int>(k));
+      if (data == nullptr) {
+        MS_LOG(ERROR) << "new opdata failed.";
+        return;
+      }
+      input_data_.emplace_back(data);
     }
   }
 }
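
The rewrite also inverts the search: instead of scanning every kernel for each session input, it walks kernel input tensors, keeps only those marked IsGraphInput(), and looks up each one's position in inputs. The index-from-iterator idiom used for that lookup, in isolation (the int* vector stands in for the tensor list):

    #include <algorithm>
    #include <iostream>
    #include <vector>

    int main() {
      int a = 1, b = 2;
      std::vector<int *> inputs{&a, &b};

      int *tensor = &b;  // stand-in for kernels[j]->in_tensors()[k]
      // Distance from begin() to the first match; equals inputs.size() if absent.
      size_t idx = std::find(inputs.begin(), inputs.end(), tensor) - inputs.begin();
      if (idx == inputs.size()) {
        std::cout << "The input is not found.\n";
        return 1;
      }
      std::cout << "found at index " << idx << "\n";  // found at index 1
    }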
@@ -42,6 +50,9 @@ void MindrtExecutor::PrepareOutputData(const std::vector<kernel::LiteKernel *> &
                                        const std::vector<Tensor *> &outputs) {
   for (size_t i = 0; i < outputs.size(); ++i) {
     Tensor *graph_output_tensor = outputs[i];
+    if (graph_output_tensor->IsGraphInput()) {
+      continue;
+    }
     auto current_output_map =
         std::find_if(output_tensor_map_->begin(), output_tensor_map_->end(), [&](const auto output_map_tensor) {
           if (graph_output_tensor == output_map_tensor.second) {
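
PrepareOutputData gets the mirror-image guard: a graph output that is also a graph input is skipped here, presumably because after the categorization fix such a tensor is served entirely through the input path and needs no output OpData.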

--- File 3 of 3 ---

@@ -1059,7 +1059,10 @@ STATUS TFModelParser::ConvertOps(const tensorflow::NodeDef &node_def,
   MS_ASSERT(func_graph_ptr != nullptr);
   STATUS status = RET_OK;
   const auto &op_type = node_def.op();
-  if (op_type == "Placeholder" || op_type == "Const" || op_type == "Identity" || op_type == "StopGradient") {
+  if (op_type == "Identity" || op_type == "StopGradient") {
+    return RET_OK;
+  } else if (op_type == "Placeholder" || op_type == "Const") {
+    node_output_num_[node_def.name()] = 1;
     return RET_OK;
   }
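
Previously the parser returned before recording anything for Placeholder and Const nodes, so node_output_num_ had no entry for them and output names referring to those nodes could not be resolved. The new branch registers exactly one output for them while still skipping Identity and StopGradient outright. A sketch of that bookkeeping (node_output_num_ modeled as a plain map from node name to output count, per the diff):

    #include <iostream>
    #include <map>
    #include <string>

    std::map<std::string, int> node_output_num_;

    // Mirrors the patched branch in ConvertOps.
    void ConvertOp(const std::string &name, const std::string &op_type) {
      if (op_type == "Identity" || op_type == "StopGradient") {
        return;  // skipped entirely, as before
      } else if (op_type == "Placeholder" || op_type == "Const") {
        node_output_num_[name] = 1;  // now counted as a single-output node
        return;
      }
      // ... non-trivial ops are converted here ...
    }

    int main() {
      ConvertOp("input_0", "Placeholder");
      ConvertOp("identity_0", "Identity");
      std::cout << node_output_num_["input_0"] << "\n";           // 1
      std::cout << node_output_num_.count("identity_0") << "\n";  // 0
    }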