From ca4a9fad7087e862a238ceff16e4ee663e76af33 Mon Sep 17 00:00:00 2001
From: caifubi
Date: Thu, 15 Dec 2022 09:59:30 +0800
Subject: [PATCH] bugfix for PyNative cache key

The PyNative op cache key was built from a tensor's flattened values
only, so tensors with equal values but different shapes (for example
ones[3x4] and ones[4x3]) collided on the same key. Tag the value and
shape sections of the key, include the shape, and cover the UInt16 and
UInt32 dtypes. ShapeToString is exported with MS_CORE_API so it can be
called from mindspore/ccsrc.

---
 mindspore/ccsrc/backend/graph_compiler/backend.cc |  7 +++++--
 .../device/ascend/kernel/acl/acl_kernel_utils.cc  |  8 ++++----
 mindspore/ccsrc/utils/anfalgo.cc                  | 12 +++++++++++-
 mindspore/core/ir/tensor.h                        |  2 +-
 4 files changed, 21 insertions(+), 8 deletions(-)

diff --git a/mindspore/ccsrc/backend/graph_compiler/backend.cc b/mindspore/ccsrc/backend/graph_compiler/backend.cc
index 32e0b9e818a..7c7faa286ff 100644
--- a/mindspore/ccsrc/backend/graph_compiler/backend.cc
+++ b/mindspore/ccsrc/backend/graph_compiler/backend.cc
@@ -551,6 +551,7 @@ TensorPtr CreateOutputTensor(const AnfNodePtr &output_node, size_t output_index)
 
 void MindRTBackend::RunGraphByActors(const ActorInfo &actor_info, const GraphCompilerInfo &graph_compiler_info,
                                      const VectorRef &args, VectorRef *outputs) {
+  MS_LOG(INFO) << "Start";
   WaitTaskFinish();
   auto inputs = GetRunGraphInputs(graph_compiler_info, args);
   MS_EXCEPTION_IF_NULL(graph_compiler_);
@@ -666,6 +667,7 @@ void MindRTBackend::RunGraphBySingleOp(const GraphCompilerInfo &graph_compiler_i
   auto &op_executor = runtime::OpExecutor::GetInstance();
   op_executor.Register([this]() { BatchBuildCallback(); });
 
+  MS_LOG(INFO) << "Start";
   MS_EXCEPTION_IF_NULL(graph_compiler_);
   const auto &graphs = graph_compiler_info.graphs_;
   auto inputs = GetRunGraphInputs(graph_compiler_info, args);
@@ -748,6 +750,7 @@ void MindRTBackend::RunGraphBySingleOp(const GraphCompilerInfo &graph_compiler_i
   if (is_dynamic_ || root_graph_->has_flag(kFlagUseDynamicShapeProcess)) {
     ClearResource();
   }
+  MS_LOG(INFO) << "End";
 }
 
 void MindRTBackend::RunGraphByCondition(const ActorInfo &actor_info, const GraphCompilerInfo &graph_compiler_info,
@@ -1327,7 +1330,7 @@ void MindRTBackend::RunOpImplDynamic(bool single_op_cache_hit, const OpCompilerI
 void MindRTBackend::RunOp(const session::BackendOpRunInfoPtr &op_run_info, VectorRef *outputs) {
   MS_EXCEPTION_IF_NULL(op_run_info);
   MS_EXCEPTION_IF_NULL(graph_compiler_);
-  MS_LOG(DEBUG) << "RunOp start " << op_run_info->base_op_run_info.op_name;
+  MS_LOG(INFO) << "RunOp start " << op_run_info->base_op_run_info.op_name;
   // Get the device context.
   const auto &device_context =
     device::DeviceContextManager::GetInstance().GetOrCreateDeviceContext({device_name_, device_id_});
@@ -1356,7 +1359,7 @@
 void MindRTBackend::RunOpDynamic(const session::BackendOpRunInfoPtr &op_run_info, VectorRef *outputs) {
   MS_EXCEPTION_IF_NULL(op_run_info);
   MS_EXCEPTION_IF_NULL(graph_compiler_);
-  MS_LOG(DEBUG) << "RunOp start " << op_run_info->base_op_run_info.op_name;
+  MS_LOG(INFO) << "RunOp start " << op_run_info->base_op_run_info.op_name;
   // Get the device context.
   const auto &device_context =
     device::DeviceContextManager::GetInstance().GetOrCreateDeviceContext({device_name_, device_id_});

diff --git a/mindspore/ccsrc/plugin/device/ascend/kernel/acl/acl_kernel_utils.cc b/mindspore/ccsrc/plugin/device/ascend/kernel/acl/acl_kernel_utils.cc
index 52459c25892..2d0d84b5224 100644
--- a/mindspore/ccsrc/plugin/device/ascend/kernel/acl/acl_kernel_utils.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/kernel/acl/acl_kernel_utils.cc
@@ -456,8 +456,8 @@ void AclOpDesc::AddConstInputTensor(const AnfNodePtr &anf_node) {
     }
     auto value = prim->GetAttr(attr_name);
     if (value == nullptr) {
-      MS_LOG(WARNING) << "Attr name " << attr_name
-                      << " isn't in current node, please check adaptor's attr name and index:" << index;
+      MS_LOG(INFO) << "Attr name " << attr_name
+                   << " isn't in current node, please check adaptor's attr name and index:" << index;
       continue;
     }
     ProcessAclAttrs(attr_name, value, SET_ACL_INPUT);
@@ -561,8 +561,8 @@ std::vector<GeTensorDescPtr> AclUtils::GetInputTensorDesc(const AnfNodePtr &anf_
   const auto &add_index_info = GeOpConvertor::GetNeedAddInput(anf_node, true);
   for (const auto &[attr_name, index] : add_index_info) {
     if (already_add_index.count(index) != 0) {
-      MS_LOG(WARNING) << "Current node's input " << index
-                      << " is convert from attr, but already set input, please check adaptor of attr " << attr_name;
+      MS_LOG(INFO) << "Current node's input " << index
+                   << " is convert from attr, but already set input, please check adaptor of attr " << attr_name;
     }
   }
   return res;
diff --git a/mindspore/ccsrc/utils/anfalgo.cc b/mindspore/ccsrc/utils/anfalgo.cc
index d257cbe1106..2ad1dee0d40 100644
--- a/mindspore/ccsrc/utils/anfalgo.cc
+++ b/mindspore/ccsrc/utils/anfalgo.cc
@@ -1700,11 +1700,17 @@ std::string AnfAlgo::GetTensorValueString(const tensor::TensorPtr &tensor) {
   auto dtype = tensor->Dtype();
   MS_EXCEPTION_IF_NULL(dtype);
   size_t data_size = tensor->DataSize();
+  auto shape = tensor->shape();
   std::ostringstream buf;
-  auto fn = [&buf, data_size](auto addr) {
+  auto fn = [&buf, data_size, &shape](auto addr) {
+    // Tensor value.
+    buf << "v";
     for (size_t i = 0; i < data_size; ++i) {
       buf << *(addr + i) << ",";
     }
+    // Tensor shape is necessary.
+    // For example, the values of ones[3x4] and ones[4x3] are the same, but the shapes are different.
+    buf << "s" << tensor::ShapeToString(shape);
   };
 
   if (dtype->type_id() == kNumberTypeBool) {
@@ -1717,8 +1723,12 @@ std::string AnfAlgo::GetTensorValueString(const tensor::TensorPtr &tensor) {
     fn(reinterpret_cast<int8_t *>(tensor->data_c()));
   } else if (dtype->type_id() == kNumberTypeInt16) {
     fn(reinterpret_cast<int16_t *>(tensor->data_c()));
+  } else if (dtype->type_id() == kNumberTypeUInt16) {
+    fn(reinterpret_cast<uint16_t *>(tensor->data_c()));
   } else if (dtype->type_id() == kNumberTypeInt32) {
     fn(reinterpret_cast<int32_t *>(tensor->data_c()));
+  } else if (dtype->type_id() == kNumberTypeUInt32) {
+    fn(reinterpret_cast<uint32_t *>(tensor->data_c()));
   } else if (dtype->type_id() == kNumberTypeInt64) {
     fn(reinterpret_cast<int64_t *>(tensor->data_c()));
   } else if (dtype->type_id() == kNumberTypeFloat16) {
diff --git a/mindspore/core/ir/tensor.h b/mindspore/core/ir/tensor.h
index 18e144e07b7..f82da1dfa20 100644
--- a/mindspore/core/ir/tensor.h
+++ b/mindspore/core/ir/tensor.h
@@ -981,7 +981,7 @@ class MS_CORE_API RowTensor : public MetaSparseTensor {
 using RowTensorPtr = std::shared_ptr<RowTensor>;
 
 // Convert shape vector to string.
-std::string ShapeToString(const ShapeVector &shape);
+MS_CORE_API std::string ShapeToString(const ShapeVector &shape);
 }  // namespace tensor
 }  // namespace mindspore
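
Note: below is a minimal standalone C++ sketch (not part of the applied
diff) of why the cache key must carry the shape as well as the flattened
values. It mimics the patched "v<values>s<shape>" key layout; MakeKey and
the bracketed shape format are illustrative stand-ins, not MindSpore's
actual ShapeToString output.

#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Builds a key in the patched layout: a "v" section with the flattened
// values followed by an "s" section with the shape.
std::string MakeKey(const std::vector<float> &data, const std::vector<int64_t> &shape) {
  std::ostringstream buf;
  buf << "v";
  for (float v : data) {
    buf << v << ",";
  }
  buf << "s[";
  for (int64_t d : shape) {
    buf << d << ",";
  }
  buf << "]";
  return buf.str();
}

int main() {
  // ones[3x4] and ones[4x3]: same twelve values, different shapes.
  std::vector<float> ones(12, 1.0f);
  std::string k1 = MakeKey(ones, {3, 4});
  std::string k2 = MakeKey(ones, {4, 3});
  std::cout << std::boolalpha << (k1 == k2) << "\n";  // prints false
  return 0;
}

With a value-only key, k1 == k2 would be true and ones[4x3] would wrongly
hit the cache entry compiled for ones[3x4]; the shape suffix keeps the two
keys distinct.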