Ignore the internal parameter output in the actor runtime

This commit is contained in:
limingqi107 2021-07-01 20:26:53 +08:00
parent ea82e018e8
commit 823fb50be5
3 changed files with 18 additions and 4 deletions

View File

@ -105,7 +105,7 @@ bool DatasetIteratorKernel::ReadDevice(void **addr, size_t *len) {
MS_LOG(INFO) << "Waiting for data...(" << repeat << " / 10)";
continue;
} else {
MS_LOG(ERROR) << "Get data timeout";
MS_LOG(EXCEPTION) << "Get data timeout";
if (profiling_enable_) {
uint64_t end_time_stamp = profiling_op_->GetTimeStamp();
profiling_op_->RecordData(queue_size, start_time_stamp, end_time_stamp);

View File

@ -416,7 +416,9 @@ void KernelActor::EraseInput(OpContext<DeviceTensor> *context) {
auto ret = input_op_datas_.erase(context->sequential_num_);
if (ret == 0) {
std::string error_info = "Erase input data failed: " + GetAID().Name();
SET_OPCONTEXT_FAIL_RET_WITH_ERROR_BY_STRATEGY(strategy_, (*context), error_info);
// The sequential num may be invalid, can't set the promise value of context.
MS_LOG(ERROR) << error_info << ", sequential_num: " << context->sequential_num_;
return;
}
}
@ -424,7 +426,9 @@ void KernelActor::EraseInput(OpContext<DeviceTensor> *context) {
auto ret = input_op_controls_.erase(context->sequential_num_);
if (ret == 0) {
std::string error_info = "Erase input controls failed: " + GetAID().Name();
SET_OPCONTEXT_FAIL_RET_WITH_ERROR_BY_STRATEGY(strategy_, (*context), error_info);
// The sequential num may be invalid, can't set the promise value of context.
MS_LOG(ERROR) << error_info << ", sequential_num: " << context->sequential_num_;
return;
}
}
}

View File

@ -798,7 +798,7 @@ void GraphScheduler::CacheGraphOutputToActor(const GraphCompilerInfo &graph_comp
<< " is device tensor store.";
continue;
} else {
MS_LOG(WARNING) << "Invalid graph output node:" << output_kernel->fullname_with_scope();
MS_LOG(INFO) << "Ignore the internal parameter node:" << output_kernel->DebugString();
continue;
}
@ -1777,6 +1777,10 @@ void GraphScheduler::LinkOutputResultArrowForOutputActor(OutputActor *to_actor,
std::set<std::vector<size_t>> unique_output_positions;
std::set<KernelWithIndex> unique_outputs;
for (const auto &output : outputs) {
if (IsInternalParameter(output.first, graph)) {
MS_LOG(INFO) << "Ignore the internal parameter node:" << output.first->DebugString();
continue;
}
unique_outputs.insert(output);
}
for (const auto &output_with_index : unique_outputs) {
@ -2916,6 +2920,12 @@ void GraphScheduler::DumpOutputActor(const OutputActor *actor, std::ofstream &of
ofs << "\t\t\toutput_node_position:" << device_tensor_store_key.first
<< "\toutput_node_name:" << device_tensor_store_key.second->fullname_with_scope() << "\n";
}
ofs << "\t\tdevice_contexts:" << actor->device_contexts_.size() << "\n ";
for (const auto &device_context : actor->device_contexts_) {
MS_EXCEPTION_IF_NULL(device_context);
ofs << "\t\t\tdevice_context:" << device_context->device_context_key().ToString() << "\n";
}
}
void GraphScheduler::DumpCopyActor(const CopyActor *actor, std::ofstream &ofs) const {