From 4613066b0d1ed86e8a2c96002fe8f8b2af5332c1 Mon Sep 17 00:00:00 2001
From: mengyuanli
Date: Tue, 1 Jun 2021 21:28:38 +0800
Subject: [PATCH] fix bug of tensorlist and lite session

---
 .../cpu/nnacl/infer/tensorlist_setitem_infer.c        | 2 +-
 mindspore/lite/src/lite_session.cc                    | 2 +-
 .../src/runtime/kernel/arm/base/tensorlist_getitem.cc | 6 +++---
 mindspore/lite/tools/common/node_util.h               | 8 +++++++-
 .../legacy_optimizer/graph/subgraph_tensor_pass.cc    | 8 ++++++++
 .../legacy_optimizer/graph/topological_sort_pass.cc   | 5 +++--
 6 files changed, 23 insertions(+), 8 deletions(-)

diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_setitem_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_setitem_infer.c
index 987d616ae36..8da58f6c13c 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_setitem_infer.c
+++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_setitem_infer.c
@@ -18,7 +18,7 @@
 #include "nnacl/infer/infer_register.h"
 
 int PreJudge(const TensorC *get_index, TensorListC *input0, const TensorC *value_tensor) {
-  if (get_index->data_ == NULL || value_tensor->data_ == NULL) {
+  if (get_index->data_ == NULL) {
     return NNACL_INFER_INVALID;
   }
diff --git a/mindspore/lite/src/lite_session.cc b/mindspore/lite/src/lite_session.cc
index 732e3db5e65..c71a8469570 100644
--- a/mindspore/lite/src/lite_session.cc
+++ b/mindspore/lite/src/lite_session.cc
@@ -376,7 +376,7 @@ void LiteSession::FreePackOpWeight(const std::vector<kernel::LiteKernel *> &kern
     auto inputs = kernel->in_tensors();
     for (auto *tensor : inputs) {
       MS_ASSERT(tensor != nullptr);
-      if (!tensor->IsConst() || tensor->init_ref_count() != 1) {
+      if (!tensor->IsConst()) {
         continue;
       }
       tensor->FreeData();
diff --git a/mindspore/lite/src/runtime/kernel/arm/base/tensorlist_getitem.cc b/mindspore/lite/src/runtime/kernel/arm/base/tensorlist_getitem.cc
index b14b07d1143..680b0f46654 100644
--- a/mindspore/lite/src/runtime/kernel/arm/base/tensorlist_getitem.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/base/tensorlist_getitem.cc
@@ -42,9 +42,9 @@ int TensorListGetItemCPUKernel::Run() {
   dtype_ = input0->tensors_data_type();
   MS_ASSERT(in_tensors_.at(1)->data_c() != nullptr);
   index_ = reinterpret_cast<int *>(in_tensors_.at(1)->data_c())[0];
-  int dim0 = input0->ElementsNum() - 1;
-  if (index_ < 0 || index_ > dim0) {
-    MS_LOG(ERROR) << "index tensor:[" << index_ << "] must be in [0, " << dim0 << "]!";
+  int dim0 = input0->ElementsNum();
+  if (index_ < 0 || index_ >= dim0) {
+    MS_LOG(ERROR) << "index tensor:[" << index_ << "] must be in [0, " << dim0 << ")!";
     return RET_ERROR;
   }
   auto src_ptr = input0->GetTensor(index_);
diff --git a/mindspore/lite/tools/common/node_util.h b/mindspore/lite/tools/common/node_util.h
index 314da61726d..7fcba451927 100644
--- a/mindspore/lite/tools/common/node_util.h
+++ b/mindspore/lite/tools/common/node_util.h
@@ -48,7 +48,13 @@ STATUS BroadCastQuantParam(schema::MetaGraphT *graphT, const std::unique_ptr
                       &inputs, std::vector *outputs);
 
-inline schema::PrimitiveType GetCNodeTType(const schema::CNodeT &cNodeT) { return cNodeT.primitive->value.type; }
+inline schema::PrimitiveType GetCNodeTType(const schema::CNodeT &cNodeT) {
+  if (cNodeT.primitive != nullptr) {
+    return cNodeT.primitive->value.type;
+  } else {
+    return schema::PrimitiveType_NONE;
+  }
+}
 
 inline std::string GetCNodeTTypeName(const schema::CNodeT &cNodeT) {
   return schema::EnumNamePrimitiveType(GetCNodeTType(cNodeT));
diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/subgraph_tensor_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/subgraph_tensor_pass.cc
index f2294dbd97e..135f81fb6d9 100644
--- a/mindspore/lite/tools/converter/legacy_optimizer/graph/subgraph_tensor_pass.cc
+++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/subgraph_tensor_pass.cc
@@ -35,6 +35,14 @@ bool SubgraphTensorPass::IsUsing(schema::MetaGraphT *graph, const uint32_t &tens
       return true;
     }
   }
+  for (const auto &subgraph : graph->subGraph) {
+    if (IsContain(subgraph->inputIndices, tensor_idx)) {
+      return true;
+    }
+    if (IsContain(subgraph->outputIndices, tensor_idx)) {
+      return true;
+    }
+  }
   return false;
 }
 
diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/topological_sort_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/topological_sort_pass.cc
index ff891c9b0f6..16e0dd35eaa 100644
--- a/mindspore/lite/tools/converter/legacy_optimizer/graph/topological_sort_pass.cc
+++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/topological_sort_pass.cc
@@ -31,7 +31,8 @@ STATUS TopologicalSortPass::Run(schema::MetaGraphT *graph) {
   std::vector<size_t> sinked_tensor_idxes;
   // put all const tensor index into sinked_tensor_idxes
   for (size_t i = 0; i < graph->allTensors.size(); i++) {
-    if (graph->allTensors.at(i)->nodeType == NodeType_ValueNode) {
+    if (graph->allTensors.at(i)->nodeType == NodeType_ValueNode ||
+        graph->allTensors.at(i)->nodeType == NodeType_Parameter) {
       sinked_tensor_idxes.insert(sinked_tensor_idxes.end(), i);
     }
   }
@@ -80,7 +81,7 @@ STATUS TopologicalSortPass::Run(schema::MetaGraphT *graph) {
 bool TopologicalSortPass::IsNodeNonDepend(const std::unique_ptr<schema::CNodeT> &node,
                                           const std::vector<size_t> &sinked_tensor_idxes) {
   MS_ASSERT(node != nullptr);
-  if (node->primitive->value.type == schema::PrimitiveType_Merge) {
+  if (node->primitive && node->primitive->value.type == schema::PrimitiveType_Merge) {
     auto node_input_index = node->inputIndex;
     MS_ASSERT(node_input_index.size() % 2 == 0);
     return std::all_of(node_input_index.begin(), node_input_index.begin() + node_input_index.size() / 2,