!46830 Bugfix for PyNative cache

Merge pull request !46830 from caifubi/r2.0.0-alpha
commit 39476c8100
i-robot 2022-12-16 01:18:59 +00:00 committed by Gitee
4 changed files with 21 additions and 8 deletions

View File

@@ -551,6 +551,7 @@ TensorPtr CreateOutputTensor(const AnfNodePtr &output_node, size_t output_index)
 void MindRTBackend::RunGraphByActors(const ActorInfo &actor_info, const GraphCompilerInfo &graph_compiler_info,
                                      const VectorRef &args, VectorRef *outputs) {
   MS_LOG(INFO) << "Start";
+  WaitTaskFinish();
   auto inputs = GetRunGraphInputs(graph_compiler_info, args);
   MS_EXCEPTION_IF_NULL(graph_compiler_);
@@ -666,6 +667,7 @@ void MindRTBackend::RunGraphBySingleOp(const GraphCompilerInfo &graph_compiler_i
   auto &op_executor = runtime::OpExecutor::GetInstance();
   op_executor.Register([this]() { BatchBuildCallback(); });
+  MS_LOG(INFO) << "Start";
   MS_EXCEPTION_IF_NULL(graph_compiler_);
   const auto &graphs = graph_compiler_info.graphs_;
   auto inputs = GetRunGraphInputs(graph_compiler_info, args);
@@ -748,6 +750,7 @@ void MindRTBackend::RunGraphBySingleOp(const GraphCompilerInfo &graph_compiler_i
   if (is_dynamic_ || root_graph_->has_flag(kFlagUseDynamicShapeProcess)) {
     ClearResource();
   }
+  MS_LOG(INFO) << "End";
 }

 void MindRTBackend::RunGraphByCondition(const ActorInfo &actor_info, const GraphCompilerInfo &graph_compiler_info,
@@ -1327,7 +1330,7 @@ void MindRTBackend::RunOpImplDynamic(bool single_op_cache_hit, const OpCompilerI
 void MindRTBackend::RunOp(const session::BackendOpRunInfoPtr &op_run_info, VectorRef *outputs) {
   MS_EXCEPTION_IF_NULL(op_run_info);
   MS_EXCEPTION_IF_NULL(graph_compiler_);
-  MS_LOG(DEBUG) << "RunOp start " << op_run_info->base_op_run_info.op_name;
+  MS_LOG(INFO) << "RunOp start " << op_run_info->base_op_run_info.op_name;
   // Get the device context.
   const auto &device_context =
     device::DeviceContextManager::GetInstance().GetOrCreateDeviceContext({device_name_, device_id_});
@@ -1356,7 +1359,7 @@ void MindRTBackend::RunOp(const session::BackendOpRunInfoPtr &op_run_info, Vecto
 void MindRTBackend::RunOpDynamic(const session::BackendOpRunInfoPtr &op_run_info, VectorRef *outputs) {
   MS_EXCEPTION_IF_NULL(op_run_info);
   MS_EXCEPTION_IF_NULL(graph_compiler_);
-  MS_LOG(DEBUG) << "RunOp start " << op_run_info->base_op_run_info.op_name;
+  MS_LOG(INFO) << "RunOp start " << op_run_info->base_op_run_info.op_name;
   // Get the device context.
   const auto &device_context =
     device::DeviceContextManager::GetInstance().GetOrCreateDeviceContext({device_name_, device_id_});

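Note: the WaitTaskFinish() call added to RunGraphByActors makes the cached graph run wait for asynchronous single-op tasks queued by the PyNative front end to drain first. Below is a minimal sketch of that synchronization pattern, assuming an in-flight-task counter; PendingTasks and its methods are hypothetical stand-ins, not the real runtime::OpExecutor API.

#include <condition_variable>
#include <mutex>

// Hypothetical stand-in for the task bookkeeping behind WaitTaskFinish().
// Each async PyNative op task is bracketed by TaskBegin()/TaskEnd(), and
// the graph path blocks in WaitTaskFinish() until nothing is in flight.
class PendingTasks {
 public:
  void TaskBegin() {
    std::lock_guard<std::mutex> lock(mutex_);
    ++pending_;
  }

  void TaskEnd() {
    {
      std::lock_guard<std::mutex> lock(mutex_);
      --pending_;
    }
    cv_.notify_all();
  }

  // Block until every queued op task has completed, so the actor-based
  // graph run never races with tasks dispatched before the graph call.
  void WaitTaskFinish() {
    std::unique_lock<std::mutex> lock(mutex_);
    cv_.wait(lock, [this] { return pending_ == 0; });
  }

 private:
  std::mutex mutex_;
  std::condition_variable cv_;
  int pending_ = 0;
};

In this reading, the op dispatcher brackets each async task with TaskBegin()/TaskEnd(), and the graph path calls WaitTaskFinish() before touching device state.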
View File

@@ -456,8 +456,8 @@ void AclOpDesc::AddConstInputTensor(const AnfNodePtr &anf_node) {
     }
     auto value = prim->GetAttr(attr_name);
     if (value == nullptr) {
-      MS_LOG(WARNING) << "Attr name " << attr_name
-                      << " isn't in current node, please check adaptor's attr name and index:" << index;
+      MS_LOG(INFO) << "Attr name " << attr_name
+                   << " isn't in current node, please check adaptor's attr name and index:" << index;
       continue;
     }
     ProcessAclAttrs(attr_name, value, SET_ACL_INPUT);
@@ -561,8 +561,8 @@ std::vector<GeTensorDescPtr> AclUtils::GetInputTensorDesc(const AnfNodePtr &anf_
   const auto &add_index_info = GeOpConvertor::GetNeedAddInput(anf_node, true);
   for (const auto &[attr_name, index] : add_index_info) {
     if (already_add_index.count(index) != 0) {
-      MS_LOG(WARNING) << "Current node's input " << index
-                      << " is convert from attr, but already set input, please check adaptor of attr " << attr_name;
+      MS_LOG(INFO) << "Current node's input " << index
+                   << " is convert from attr, but already set input, please check adaptor of attr " << attr_name;
     }
   }
   return res;

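For context, the downgraded message fires when an adaptor maps an attribute onto an input index that is already populated; it reports a suspect adaptor mapping rather than a hard error. A generic sketch of that duplicate-index check, with hypothetical names standing in for the real AclUtils code:

#include <cstdio>
#include <set>
#include <string>
#include <utility>
#include <vector>

// Hypothetical mirror of the check in AclUtils::GetInputTensorDesc: inputs
// converted from attributes must not collide with indices already filled
// from real inputs.
void CheckAttrToInputCollisions(const std::set<size_t> &already_add_index,
                                const std::vector<std::pair<std::string, size_t>> &add_index_info) {
  for (const auto &[attr_name, index] : add_index_info) {
    if (already_add_index.count(index) != 0) {
      // Informational only: the adaptor tried to derive input `index` from
      // attribute `attr_name`, but that slot was already set.
      std::printf("input %zu already set, attr %s skipped\n", index, attr_name.c_str());
    }
  }
}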
View File

@@ -1700,11 +1700,17 @@ std::string AnfAlgo::GetTensorValueString(const tensor::TensorPtr &tensor) {
   auto dtype = tensor->Dtype();
   MS_EXCEPTION_IF_NULL(dtype);
   size_t data_size = tensor->DataSize();
+  auto shape = tensor->shape();
   std::ostringstream buf;
-  auto fn = [&buf, data_size](auto addr) {
+  auto fn = [&buf, data_size, &shape](auto addr) {
+    // Tensor value.
+    buf << "v";
     for (size_t i = 0; i < data_size; ++i) {
       buf << *(addr + i) << ",";
     }
+    // Tensor shape is necessary.
+    // For example, the value of ones[3x4] and ones[4x3] are the same, but the shape is different.
+    buf << "s" << tensor::ShapeToString(shape);
   };
   if (dtype->type_id() == kNumberTypeBool) {
@@ -1717,8 +1723,12 @@ std::string AnfAlgo::GetTensorValueString(const tensor::TensorPtr &tensor) {
     fn(reinterpret_cast<uint8_t *>(tensor->data_c()));
   } else if (dtype->type_id() == kNumberTypeInt16) {
     fn(reinterpret_cast<int16_t *>(tensor->data_c()));
+  } else if (dtype->type_id() == kNumberTypeUInt16) {
+    fn(reinterpret_cast<uint16_t *>(tensor->data_c()));
   } else if (dtype->type_id() == kNumberTypeInt32) {
     fn(reinterpret_cast<int32_t *>(tensor->data_c()));
+  } else if (dtype->type_id() == kNumberTypeUInt32) {
+    fn(reinterpret_cast<uint32_t *>(tensor->data_c()));
   } else if (dtype->type_id() == kNumberTypeInt64) {
     fn(reinterpret_cast<int64_t *>(tensor->data_c()));
   } else if (dtype->type_id() == kNumberTypeFloat16) {

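The shape suffix above is the core of the cache fix: GetTensorValueString feeds the PyNative cache key, and two tensors with identical flattened values but different shapes (ones[3x4] vs. ones[4x3], per the commit's own comment) previously produced identical keys. A self-contained sketch of the keying scheme; TensorCacheKey is a hypothetical simplification of the real function, which dispatches on dtype:

#include <cstddef>
#include <cstdint>
#include <sstream>
#include <string>
#include <vector>

// Hypothetical simplification of AnfAlgo::GetTensorValueString: the key is
// "v" + flattened values + "s" + shape. Without the shape suffix, tensors
// holding the same data in different shapes collide in the cache.
std::string TensorCacheKey(const int32_t *data, std::size_t data_size,
                           const std::vector<int64_t> &shape) {
  std::ostringstream buf;
  buf << "v";
  for (std::size_t i = 0; i < data_size; ++i) {
    buf << data[i] << ",";
  }
  buf << "s";
  for (auto dim : shape) {
    buf << dim << ",";
  }
  return buf.str();
}

int main() {
  // Twelve ones viewed as 3x4 and as 4x3: identical values, different shapes.
  std::vector<int32_t> ones(12, 1);
  auto key_a = TensorCacheKey(ones.data(), ones.size(), {3, 4});
  auto key_b = TensorCacheKey(ones.data(), ones.size(), {4, 3});
  return key_a == key_b ? 1 : 0;  // keys now differ, so this returns 0
}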
View File

@@ -981,7 +981,7 @@ class MS_CORE_API RowTensor : public MetaSparseTensor {
 using RowTensorPtr = std::shared_ptr<RowTensor>;
 // Convert shape vector to string.
-std::string ShapeToString(const ShapeVector &shape);
+MS_CORE_API std::string ShapeToString(const ShapeVector &shape);
 }  // namespace tensor
 }  // namespace mindspore
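The header change exports ShapeToString with MS_CORE_API so the new call site in GetTensorValueString, which is compiled into a different module, can link against the core library's definition. A sketch of how such an export macro is conventionally defined; this is an assumption about the pattern, not MindSpore's literal definition:

// Conventional cross-platform export macro; the actual MS_CORE_API in
// MindSpore may differ in detail.
#if defined(_WIN32)
#if defined(BUILDING_CORE_DLL)  // set while compiling the core library itself
#define MS_CORE_API __declspec(dllexport)
#else
#define MS_CORE_API __declspec(dllimport)
#endif
#else  // GCC/Clang: keep the symbol visible despite -fvisibility=hidden
#define MS_CORE_API __attribute__((visibility("default")))
#endif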