Fix error log messages

This commit is contained in:
zhaosida 2021-12-03 14:48:56 +08:00
parent 22bf786fba
commit 13a1250a42
6 changed files with 16 additions and 15 deletions

View File

@ -171,9 +171,9 @@ void TbeUtils::SaveJsonInfo(const std::string &json_name, const std::string &inf
std::string path = config_path + kCceKernelMeta + json_name + kInfoSuffix;
auto realpath = Common::CreatePrefixPath(path);
if (!realpath.has_value()) {
MS_LOG(WARNING) << "Invalid path is: " << realpath.value()
<< "Please check (1) whether the path exists, (2) whether the path has the access "
<< "permission, (3) whether the path is too long. ";
MS_LOG(WARNING) << "Invalid environment variable '" << kCOMPILER_CACHE_PATH
<< "', the path is: " << realpath.value() << ". Please check (1) whether the path exists, "
<< "(2) whether the path has the access permission, (3) whether the path is too long. ";
return;
}
ChangeFileMode(realpath.value(), S_IWUSR);

View File

@ -1872,8 +1872,8 @@ void SessionBasic::Summary(KernelGraph *graph) {
static bool is_first = true;
if (is_first && !IsSupportSummary()) {
is_first = false;
MS_LOG(ERROR) << "The Summary operator can not collect data correctly. Detail: the data sink mode is used and the"
" sink size(in model.train() python api) is not equal to 1.";
MS_LOG(WARNING) << "The Summary operator can not collect data correctly. Detail: the data sink mode is used and the"
" sink size(in model.train() python api) is not equal to 1.";
}
SetSummaryNodes(graph);
auto summary_outputs = graph->summary_nodes();
@ -2482,7 +2482,7 @@ void PreProcessOnSplitIndex(const KernelGraphPtr &graph, vector<uint32_t> *split
if (split_index_num >= grads_count) {
MS_LOG(WARNING) << "The context configuration all_reduce_fusion_config's upper boundary value should be smaller "
<< "than total grads count: " << grads_count << ", but got: " << *split_index
<< ". Now all AllReduce operations will be fused into one AllReduce operation.";
<< ". Now all AllReduce operators will be fused into one AllReduce operator.";
split_index->clear();
split_index->push_back(grads_count - 1);
} else if (split_index_num < grads_count - 1) {

View File

@ -281,7 +281,7 @@ bool TagRaiseReduce(const std::shared_ptr<kernel::KernelBuildInfo> &kernel_build
}
if (flag) {
auto node_name = AnfAlgo::GetCNodeName(cnode);
MS_LOG(WARNING) << "Node:[" << node_name << "] don't support int64, reduce precision from int64 to int32.";
MS_LOG(WARNING) << "Operator:[" << node_name << "] doesn't support int64, reduce precision from int64 to int32.";
}
return true;
}
@ -488,8 +488,9 @@ KernelSelectStatus SelectCustomKernelInfo(const CNodePtr &kernel_node, KernelTyp
} else if (func_type == kCustomTypeAICPU) {
*kernel_type = KernelType::AICPU_KERNEL;
} else {
MS_LOG(EXCEPTION) << "Unsupported func type for Custom op on Ascend, it should be 'tbe', 'ir_builder', "
<< "'tvm_compute' or 'hybrid', but got [" << func_type << "] for Custom op [" << op_name << "]";
MS_LOG(EXCEPTION) << "Unsupported func type for Custom operator on Ascend, it should be 'tbe', 'ir_builder', "
<< "'tvm_compute' or 'hybrid', but got [" << func_type << "]"
<< " for Custom operator [" << op_name << "]";
}
static const std::map<KernelType, kernel::OpImplyType> kKernelImplyTypeMap{
{KernelType::TBE_KERNEL, kernel::OpImplyType::kTBE},

View File

@ -325,7 +325,7 @@ void TaskGenerator::DumpTaskInfo(const std::string &real_filename) {
}
already_printed = true;
MS_LOG(WARNING) << "The functionality of dumping task debug info is disabled, "
<< "please enable ENABLE_DUMP_IR with '-D on' and recomiple source.";
<< "please recompile the source codes with '-D on' option.";
}
void TaskGenerator::DumpTaskInfo(const string &real_filename,
const std::vector<TaskDebugInfoPtr> &task_debug_info_list) {
@ -335,7 +335,7 @@ void TaskGenerator::DumpTaskInfo(const string &real_filename,
}
already_printed = true;
MS_LOG(WARNING) << "The functionality of dumping task debug info is disabled, "
<< "please enable ENABLE_DUMP_IR with '-D on' and recomiple source.";
<< "please recompile the source codes with '-D on' option.";
}
#endif

View File

@ -431,13 +431,13 @@ void SetKernelInfo(const CNodePtr &kernel_node) {
kernel::CPUKernelRegistrar(op_name, KernelAttr(),
[]() { return std::make_shared<kernel::CustomAOTCpuKernel>(); });
} else {
MS_LOG(EXCEPTION) << "Unsupported func type for Custom op on CPU, it should be 'pyfunc' or 'aot', but got ["
<< tp << "] for Custom op [" << op_name << "]";
MS_LOG(EXCEPTION) << "Unsupported func type for Custom operator on CPU, it should be 'pyfunc' or 'aot', "
<< "but got [" << tp << "] for Custom operator [" << op_name << "]";
}
}
// If Custom op has not set reg info, then infer info from inputs
if (mindspore::kernel::OpLib::FindOp(op_name, kernel::OpImplyType::kCPU) == nullptr) {
MS_LOG(WARNING) << "Not find operator information for Custom op[" << op_name << "]. "
MS_LOG(WARNING) << "Cannot find operator information for Custom operator[" << op_name << "]. "
<< "Infer operator information from inputs. For more details, "
<< "please refer to 'mindspore.ops.Custom' at https://www.mindspore.cn.";
return UpdateCustomKernelBuildInfoAndAttrs(kernel_node);

View File

@ -960,7 +960,7 @@ void KernelRuntime::AssignStaticMemoryValueNode(const session::KernelGraph &grap
}
} else {
if (mem_manager_->MallocMem(kStaticMem, device_address->size_, device_address, graph.graph_id())) {
MS_LOG(EXCEPTION) << "MallocMem kStaticMem failed";
MS_LOG(EXCEPTION) << "MallocStaticMem failed";
}
}
}