forked from mindspore-Ecosystem/mindspore
!41958 optimize INFO level log while dynamic shape
Merge pull request !41958 from zhengzuohe/reduce_info_log
commit ab131258c9

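The whole change applies one pattern across the Ascend dynamic-shape code paths: per-node messages emitted on every InferShape, tiling, or host/device sync call are downgraded from INFO to DEBUG, or dropped where they add nothing, so INFO-level output stays readable when running dynamic-shape graphs. A minimal sketch of the pattern follows; the GLOG_v hint is an assumption based on MindSpore's usual logging control and is not part of this commit:

// Before: printed at INFO for every dynamic-shape node, flooding the log.
MS_LOG(INFO) << "InferShape start, node:" << cnode->fullname_with_scope();
// After: only emitted when DEBUG logging is enabled,
// e.g. by exporting GLOG_v=0 before launching the process (assumption: default level is WARNING, GLOG_v=2).
MS_LOG(DEBUG) << "InferShape start, node:" << cnode->fullname_with_scope();
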
@@ -140,7 +140,7 @@ tensor::TensorPtr GetDependValueTensor(const AnfNodePtr &node, size_t i,
 void InferShape(const CNodePtr &cnode, std::map<uint32_t, tensor::TensorPtr> *depend_tensor_map, void *args) {
   MS_EXCEPTION_IF_NULL(cnode);
   MS_EXCEPTION_IF_NULL(depend_tensor_map);
-  MS_LOG(INFO) << "InferShape start, node:" << cnode->fullname_with_scope();
+  MS_LOG(DEBUG) << "InferShape start, node:" << cnode->fullname_with_scope();
   std::set<int64_t> depend_list = abstract::GetValueDependArgIndices(cnode);
   auto ret = InferShapeForDefiniteOutputNode(cnode);
   if (ret) {

@@ -222,8 +222,8 @@ bool AscendDeviceAddress::SyncHostToDevice(size_t size, const void *host_ptr) co

 bool AscendDeviceAddress::SyncDeviceToHost(const ShapeVector &shape, size_t size, mindspore::TypeId type,
                                            void *host_ptr) const {
-  MS_LOG(INFO) << "SyncDeviceToHost, Device(format:" << format_ << ", type_id:" << TypeIdLabel(type_id_)
-               << ", size:" << size_ << "), Host(type_id:" << TypeIdLabel(type) << ", size:" << size << ")";
+  MS_LOG(DEBUG) << "SyncDeviceToHost, Device(format:" << format_ << ", type_id:" << TypeIdLabel(type_id_)
+                << ", size:" << size_ << "), Host(type_id:" << TypeIdLabel(type) << ", size:" << size << ")";
   if (type_id_ > kMonadTypeBegin && type_id_ < kMonadTypeEnd) {
     return true;
   }

@@ -338,8 +338,9 @@ bool AscendDeviceAddress::SyncDeviceToHostAndConvertFormatBasedOnTransData(const

 bool AscendDeviceAddress::SyncDeviceToHostAndConvertFormat(const ShapeVector &shape, size_t size,
                                                            mindspore::TypeId type, void *host_ptr) const {
-  MS_LOG(INFO) << "SyncDeviceToHostAndConvertFormat, Device(format:" << format_ << ", type_id:" << TypeIdLabel(type_id_)
-               << ", size:" << size_ << "), Host(type_id:" << TypeIdLabel(type) << ", size:" << size << ")";
+  MS_LOG(DEBUG) << "SyncDeviceToHostAndConvertFormat, Device(format:" << format_
+                << ", type_id:" << TypeIdLabel(type_id_) << ", size:" << size_
+                << "), Host(type_id:" << TypeIdLabel(type) << ", size:" << size << ")";
   static const std::unordered_map<mindspore::TypeId, std::string> type_id_name_map = {
     {mindspore::kNumberTypeBool, "bool"}, {mindspore::kNumberTypeInt8, "int8"},
     {mindspore::kNumberTypeInt16, "int16"}, {mindspore::kNumberTypeInt32, "int32"},

@@ -396,9 +397,9 @@ bool AscendDeviceAddress::SyncDeviceToHostAndConvertFormat(const ShapeVector &sh

 bool AscendDeviceAddress::SyncHostToDevice(const ShapeVector &shape, size_t size, mindspore::TypeId type,
                                            const void *host_ptr, const std::string &format) const {
-  MS_LOG(INFO) << "SyncHostToDevice, Device(format:" << format_ << ", type_id:" << TypeIdLabel(type_id_)
-               << ", size:" << size_ << "), Host(format:" << format << ", type_id:" << TypeIdLabel(type)
-               << ", size:" << size << ")";
+  MS_LOG(DEBUG) << "SyncHostToDevice, Device(format:" << format_ << ", type_id:" << TypeIdLabel(type_id_)
+                << ", size:" << size_ << "), Host(format:" << format << ", type_id:" << TypeIdLabel(type)
+                << ", size:" << size << ")";
   if (type_id_ > kMonadTypeBegin && type_id_ < kMonadTypeEnd) {
     return true;
   }

@@ -508,9 +509,9 @@ bool AscendDeviceAddress::SyncDeviceToDevice(const DeviceSync *src_device_addr)

 bool AscendDeviceAddress::AsyncDeviceToDevice(const ShapeVector & /* shape */, size_t size, TypeId type,
                                               const void *src_ptr, const std::string &format) const {
-  MS_LOG(INFO) << "AsyncDeviceToDevice, dst(format:" << format_ << ", type_id:" << TypeIdLabel(type_id_)
-               << ", size:" << size_ << "), src(format:" << format << ", type_id:" << TypeIdLabel(type)
-               << ", size:" << size << ")";
+  MS_LOG(DEBUG) << "AsyncDeviceToDevice, dst(format:" << format_ << ", type_id:" << TypeIdLabel(type_id_)
+                << ", size:" << size_ << "), src(format:" << format << ", type_id:" << TypeIdLabel(type)
+                << ", size:" << size << ")";
   if (type_id_ > kMonadTypeBegin && type_id_ < kMonadTypeEnd) {
     return true;
   }

@@ -562,8 +563,9 @@ bool AscendDeviceAddress::AsyncDeviceToHost(const ShapeVector & /* shape */, siz
 bool AscendDeviceAddress::ConvertFormatAndSyncHostToDevice(const ShapeVector &shape, size_t size,
                                                            mindspore::TypeId type, const void *host_ptr) const {
   bool sync_ok = false;
-  MS_LOG(INFO) << "ConvertFormatAndSyncHostToDevice, Device(format:" << format_ << ", type_id:" << TypeIdLabel(type_id_)
-               << ", size:" << size_ << "), Host(type_id:" << TypeIdLabel(type) << ", size:" << size << ")";
+  MS_LOG(DEBUG) << "ConvertFormatAndSyncHostToDevice, Device(format:" << format_
+                << ", type_id:" << TypeIdLabel(type_id_) << ", size:" << size_
+                << "), Host(type_id:" << TypeIdLabel(type) << ", size:" << size << ")";
   ShapeVector host_shape = shape;
   if (host_shape.empty()) {
     (void)host_shape.emplace_back(1);

@@ -83,7 +83,6 @@ ge::Format GeTypesConvert::GetGeFormat(const std::string &format, size_t shape_s
     {kOpFormat_FRACTAL_ZN_LSTM, ge::Format::FORMAT_FRACTAL_ZN_LSTM},
     {kOpFormat_ND_RNN_BIAS, ge::Format::FORMAT_ND_RNN_BIAS},
     {kOpFormat_FRACTAL_ZN_RNN, ge::Format::FORMAT_FRACTAL_ZN_RNN}};
-  MS_LOG(INFO) << "GetGeFormat format:" << format << " shape_size:" << shape_size;
   if (format == kOpFormat_DEFAULT) {
     return shape_size == k4dSize ? ge::Format::FORMAT_NCHW : ge::Format::FORMAT_ND;
   }

@@ -188,7 +188,6 @@ size_t SetOutputValue(const CNodePtr &cnode, const std::vector<std::vector<int64
 } // namespace

 void DynamicBroadcastGradientArgsKernelMod::Execute() const {
-  MS_LOG(INFO) << "Execute DynamicBroadcastGradientArgsKernel Start";
   auto node = anf_node_.lock();
   MS_EXCEPTION_IF_NULL(node);
   auto cnode = node->cast<CNodePtr>();

@@ -211,7 +210,6 @@ void DynamicBroadcastGradientArgsKernelMod::Execute() const {
   ShapeVector r1_shp{SizeToLong(r1_size)};
   auto output_type = TypeId::kNumberTypeInt64;
   common::AnfAlgo::SetOutputInferTypeAndShape({output_type, output_type}, {r0_shp, r1_shp}, cnode.get());
-  MS_LOG(INFO) << "Execute DynamicBroadcastGradientArgsKernel End";
 }

 bool DynamicBroadcastGradientArgsKernelMod::Launch(const std::vector<AddressPtr> &, const std::vector<AddressPtr> &,

@@ -24,7 +24,6 @@
 namespace mindspore {
 namespace kernel {
 void TensorShapeKernelMod::Execute(void *stream_ptr) const {
-  MS_LOG(INFO) << "Execute TensorShapeKernel Start";
   auto node = anf_node_.lock();
   MS_EXCEPTION_IF_NULL(node);
   auto cnode = node->cast<CNodePtr>();

@@ -44,7 +43,7 @@ void TensorShapeKernelMod::Execute(void *stream_ptr) const {
   MS_EXCEPTION_IF_NULL(output_tensor_for_sync);
   auto data_ptr = static_cast<int64_t *>(output_tensor_for_sync->data_c());
   for (size_t i = 0; i < prev_output_shape.size(); ++i) {
-    MS_LOG(INFO) << "DEBUG prev_output_shape[" << i << "]:" << prev_output_shape[i];
+    MS_LOG(DEBUG) << "DEBUG prev_output_shape[" << i << "]:" << prev_output_shape[i];
     *(data_ptr + i) = prev_output_shape[i];
   }

@@ -67,8 +66,6 @@ void TensorShapeKernelMod::Execute(void *stream_ptr) const {
       MS_LOG(EXCEPTION) << "Execute TensorShapeKernel rtMemcpyAsync failed!";
     }
   }
-
-  MS_LOG(INFO) << "Execute TensorShapeKernel End";
 }

 bool TensorShapeKernelMod::Launch(const std::vector<AddressPtr> &, const std::vector<AddressPtr> &,

@@ -127,7 +127,6 @@ std::vector<int64_t> GetOutputShapes(const CNodePtr &cnode) {
 } // namespace

 void ReshapeKernelMod::Execute() const {
-  MS_LOG(INFO) << "Execute host ReshapeKernel Start";
   auto node = anf_node_.lock();
   MS_EXCEPTION_IF_NULL(node);
   auto cnode = node->cast<CNodePtr>();

@@ -155,12 +154,10 @@ void ReshapeKernelMod::Execute() const {
       MS_LOG(EXCEPTION) << "Host Reshape sync device to device failed.";
     }
   }
-  MS_LOG(INFO) << "Execute host ReshapeKernel End";
 }

 void ReshapeKernelMod::Execute(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &outputs,
                                void *stream_ptr) const {
-  MS_LOG(INFO) << "Execute host ReshapeKernel Start";
   auto node = anf_node_.lock();
   MS_EXCEPTION_IF_NULL(node);
   auto cnode = node->cast<CNodePtr>();

@@ -184,7 +181,6 @@ void ReshapeKernelMod::Execute(const std::vector<AddressPtr> &inputs, const std:
   if (status != RT_ERROR_NONE) {
     MS_LOG(ERROR) << "Call rtMemcpyAsync failed, ret = 0x" << status;
   }
-  MS_LOG(INFO) << "Execute host ReshapeKernel End";
 }

 bool ReshapeKernelMod::Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,

@@ -114,7 +114,7 @@ int DynamicTbeKernelMod::Resize(const BaseOperatorPtr &base_operator, const std:
   const std::map<uint32_t, tensor::TensorPtr> &depend_tensor_map = inputsOnHost;
   ::ge::ComputeGraphPtr ge_graph = std::make_shared<::ge::ComputeGraph>("default");
   optiling::utils::OpRunInfo op_run_info_v2(-1, true, 0);
-  MS_LOG(INFO) << "Start compute tiling of: " << cnode->fullname_with_scope();
+  MS_LOG(DEBUG) << "Start compute tiling of: " << cnode->fullname_with_scope();
   if (!atomic_clean_nodes_.empty()) {
     atomic_compile_info_ = ParseCompileJson(atomic_clean_nodes_[0].lock());
   }

@@ -396,7 +396,6 @@ void TbeKernelCompileManager::SaveSucceedTaskCompileResult(int task_id, const st
   TbeUtils::UpdateCache(json_name);
   if (task_info.is_dynamic) {
     bool save_flag = true;
-    MS_LOG(INFO) << "Save compile info to json file for op: " << json_name;
     TbeUtils::SaveCompileInfo(json_name, compile_info, &save_flag);
     if (!save_flag) {
       MS_LOG(EXCEPTION) << "Save json file failed, op: " << json_name << ", compile_info:" << compile_info;

@@ -391,7 +391,7 @@ bool KernelMeta::ReadIndex(const std::string &bin_dir) {

 void TbeUtils::GetCompileInfo(const AnfNodePtr &node, std::string *compile_info, bool *get_flag) {
   MS_EXCEPTION_IF_NULL(node);
-  MS_LOG(INFO) << "Get compile info from json file start. [" << node->fullname_with_scope() << "]";
+  MS_LOG(DEBUG) << "Get compile info from json file start. [" << node->fullname_with_scope() << "]";
   std::string json_name;
   if (common::AnfAlgo::HasNodeAttr(kAttrJsonFileName, node->cast<CNodePtr>())) {
     json_name = common::AnfAlgo::GetNodeAttr<std::string>(node, kAttrJsonFileName);

@@ -430,11 +430,11 @@ void TbeUtils::GetCompileInfo(const AnfNodePtr &node, std::string *compile_info,
   *compile_info = build_res_json.dump();
   file.close();
   file.clear();
-  MS_LOG(INFO) << "Get compile info from json file success.";
+  MS_LOG(DEBUG) << "Get compile info from json file success.";
 }

 void TbeUtils::SaveCompileInfo(const std::string &json_name, const std::string &build_res, bool *save_flag) {
-  MS_LOG(INFO) << "Save compile info to json file start, op: [" << json_name << "].";
+  MS_LOG(DEBUG) << "Save compile info to json file start, op: [" << json_name << "].";
   auto config_path = TbeUtils::GetOpDebugPath();
   std::string path = config_path + kCceKernelMeta + json_name + kJsonSuffix;
   if (path.size() > PATH_MAX) {

@@ -466,7 +466,7 @@ void TbeUtils::SaveCompileInfo(const std::string &json_name, const std::string &
   file_write << info << std::endl;
   file_write.close();
   file_write.clear();
-  MS_LOG(INFO) << "Save compile info to json file success";
+  MS_LOG(DEBUG) << "Save compile info to json file success.";
 }

 bool TbeUtils::CheckOfflineTune() {

@@ -195,7 +195,6 @@ void OpTilingCalculateAdapter::ConvertAttrs(const CNodePtr &node, ::ge::OpDescPt
     auto attr_name = attr->name();
     auto value = primitive->GetAttr(attr_name);
     if (value == nullptr) {
-      MS_LOG(INFO) << attr_name << "'s value is empty!";
       continue;
     }

@@ -226,7 +225,7 @@ void OpTilingCalculateAdapter::ConvertCompileInfo(const CNodePtr &node, ::ge::Op
   MS_EXCEPTION_IF_NULL(node);
   MS_EXCEPTION_IF_NULL(op_desc);
   MS_EXCEPTION_IF_NULL(*op_desc);
-  MS_LOG(INFO) << "For op " << op_name_ << ", get compile_info: " << op_compile_info_;
+  MS_LOG(DEBUG) << "For op " << op_name_ << ", get compile_info: " << op_compile_info_;
   std::string compile_info_key = std::to_string(std::hash<std::string>()(op_compile_info_));
   (void)::ge::AttrUtils::SetStr(*(*op_desc), COMPILE_INFO_KEY, compile_info_key);
   (void)::ge::AttrUtils::SetStr(*(*op_desc), COMPILE_INFO_JSON, op_compile_info_);

@@ -323,7 +322,7 @@ std::vector<std::tuple<std::size_t, ::ge::NodePtr>> OpTilingCalculateAdapter::Co
   MS_EXCEPTION_IF_NULL(*op_desc);
   auto depends_list_me = abstract::GetValueDependArgIndices(node);
   if (depends_list_me.empty() || AnfAlgo::IsDynamicShapeSkipExecute(node)) {
-    MS_LOG(INFO) << "The node " << op_name_ << " has no infer depend.";
+    MS_LOG(DEBUG) << "The node " << op_name_ << " has no infer depend.";
     return {};
   }
   auto has_input_name_attr = common::AnfAlgo::HasNodeAttr("input_names", node);

@@ -358,7 +357,7 @@ std::vector<std::tuple<std::size_t, ::ge::NodePtr>> OpTilingCalculateAdapter::Co
 void OpTilingCalculateAdapter::AddEdge(const ::ge::NodePtr &ge_node,
                                        const std::vector<std::tuple<std::size_t, ::ge::NodePtr>> &constant_ops) {
   MS_EXCEPTION_IF_NULL(ge_node);
-  MS_LOG(INFO) << "Add edge for op " << op_name_;
+  MS_LOG(DEBUG) << "Add edge for op " << op_name_;
   for (const auto &item : constant_ops) {
     auto index = std::get<0>(item);
     auto constant_op = std::get<1>(item);

@@ -368,7 +367,7 @@ void OpTilingCalculateAdapter::AddEdge(const ::ge::NodePtr &ge_node,

 void OpTilingCalculateAdapter::InitOpIoName(const CNodePtr &node) {
   MS_EXCEPTION_IF_NULL(node);
-  MS_LOG(INFO) << "Get the every input name of " << op_name_;
+  MS_LOG(DEBUG) << "Get the every input name of " << op_name_;
   auto op_info_ptr = mindspore::kernel::tbe::TbeDynamicShapeUtil::FindOp(op_name_, node);
   MS_EXCEPTION_IF_NULL(op_info_ptr);
   auto primitive = common::AnfAlgo::GetCNodePrimitive(node);

@@ -418,7 +417,7 @@ void OpTilingCalculateAdapter::InitOpIoName(const CNodePtr &node) {
                                             const std::string &op_compile_info) {
   MS_EXCEPTION_IF_NULL(node);
   op_name_ = common::AnfAlgo::GetCNodeName(node);
-  MS_LOG(INFO) << "Convert anf node :" << op_name_ << " to ge node.";
+  MS_LOG(DEBUG) << "Convert anf node :" << op_name_ << " to ge node.";
   op_compile_info_ = op_compile_info;
   auto op_type = GetRealOpType(op_name_);
   (void)InitOpIoName(node);

@@ -65,7 +65,6 @@ std::vector<int64_t> CalBroadCastShape(std::vector<int64_t> x_shape, std::vector
   return broadcast_shape;
 }
 abstract::ShapePtr BroadCastInferShape(const std::string &op_name, const std::vector<AbstractBasePtr> &input_args) {
-  MS_LOG(INFO) << "For '" << op_name << "', it's now doing infer shape.";
   const int64_t input_num = 2;
   CheckAndConvertUtils::CheckInputArgs(input_args, kEqual, input_num, op_name);
   auto x_shape_map = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->GetShapeTrack());