!2505 [Code Review] fix code review content

Merge pull request !2505 from jjfeing/master
This commit is contained in:
mindspore-ci-bot 2020-06-23 21:08:35 +08:00 committed by Gitee
commit ce9c68d8da
9 changed files with 40 additions and 38 deletions

View File

@@ -50,6 +50,8 @@ bool CheckHash(const std::string &json_file, const std::string &bin_file, const
} // namespace
const std::string KernelPack::Serialize() const {
+ MS_EXCEPTION_IF_NULL(json_);
+ MS_EXCEPTION_IF_NULL(kernel_);
std::string buffer;
(void)buffer.append((const char *)json_, json_->len + sizeof(json_->len));
(void)buffer.append((const char *)kernel_, kernel_->len + sizeof(kernel_->len));
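Reviewer note: the two added MS_EXCEPTION_IF_NULL checks guard the raw json_/kernel_ pointers before buffer.append reads through them. Below is a minimal standalone sketch of the same guard-before-dereference pattern; THROW_IF_NULL and FlexBlob are hypothetical stand-ins, not MindSpore APIs.

#include <cstddef>
#include <stdexcept>
#include <string>

// Hypothetical stand-in for MS_EXCEPTION_IF_NULL: fail fast with a clear message
// instead of crashing inside the append calls below.
#define THROW_IF_NULL(ptr) \
  do { \
    if ((ptr) == nullptr) { \
      throw std::runtime_error(#ptr " must not be null"); \
    } \
  } while (false)

// Simplified analogue of the length-prefixed blobs held by KernelPack.
struct FlexBlob {
  size_t len;
  char contents[8];
};

std::string Serialize(const FlexBlob *json, const FlexBlob *kernel) {
  THROW_IF_NULL(json);
  THROW_IF_NULL(kernel);
  std::string buffer;
  buffer.append(reinterpret_cast<const char *>(json), json->len + sizeof(json->len));
  buffer.append(reinterpret_cast<const char *>(kernel), kernel->len + sizeof(kernel->len));
  return buffer;
}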

View File

@@ -293,8 +293,10 @@ bool OpLib::GetRefInfo(const std::shared_ptr<OpInfo> &op_info) {
const auto &output_infos = op_info->outputs_ptr();
const auto &input_infos = op_info->inputs_ptr();
for (size_t out_index = 0; out_index < output_infos.size(); out_index++) {
+ MS_EXCEPTION_IF_NULL(output_infos[out_index]);
const auto &out_name = output_infos[out_index]->name();
for (size_t in_index = 0; in_index < input_infos.size(); in_index++) {
+ MS_EXCEPTION_IF_NULL(input_infos[in_index]);
const auto &in_name = input_infos[in_index]->name();
if (out_name == in_name) {
if (op_info->has_ref_index(out_index)) {
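The same review pattern applies here: each element of the input/output info vectors is validated before ->name() is called on it. A brief standalone sketch, with OpIOInfo reduced to a single name field (a hypothetical stand-in, not the real class):

#include <memory>
#include <stdexcept>
#include <string>
#include <vector>

struct OpIOInfo {
  std::string name;  // the only field this sketch needs
};

// Returns true if any output name matches an input name, throwing early on a
// null entry instead of dereferencing it inside the comparison.
bool HasRefPair(const std::vector<std::shared_ptr<OpIOInfo>> &outputs,
                const std::vector<std::shared_ptr<OpIOInfo>> &inputs) {
  for (const auto &out : outputs) {
    if (out == nullptr) {
      throw std::runtime_error("output info must not be null");
    }
    for (const auto &in : inputs) {
      if (in == nullptr) {
        throw std::runtime_error("input info must not be null");
      }
      if (out->name == in->name) {
        return true;
      }
    }
  }
  return false;
}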

View File

@@ -189,7 +189,7 @@ bool TbeKernelJsonCreator::GenInputList(const std::shared_ptr<AnfNode> &anf_node
input_list->emplace_back(input_desc_json);
continue;
}
- MS_LOG(ERROR) << "input num: " << *real_input_index << " is not match op inputs";
+ MS_LOG(ERROR) << "Input num: " << *real_input_index << " is not match op inputs";
return false;
}
if (op_name == "BatchNorm") {
@@ -197,7 +197,7 @@ bool TbeKernelJsonCreator::GenInputList(const std::shared_ptr<AnfNode> &anf_node
auto attr = primitive->GetAttr("is_training");
MS_EXCEPTION_IF_NULL(attr);
bool is_training = GetValue<bool>(attr);
- MS_LOG(INFO) << "op_name" << op_name << ", tensor_name " << input_ptr->name() << ", is_training "
+ MS_LOG(INFO) << "Op_name" << op_name << ", tensor_name " << input_ptr->name() << ", is_training "
<< is_training;
if (is_training) {
(*real_input_index)++;
@@ -230,7 +230,7 @@ bool GetInputNameAndRealNum(const std::shared_ptr<AnfNode> &anf_node, const std:
if (input_ptr->param_type() == kParamDynamic) {
if (*dyn_input_index >= dyn_input_sizes.size()) {
- MS_LOG(ERROR) << "dyn input index" << *dyn_input_index << "is over dyn input num" << dyn_input_sizes.size();
+ MS_LOG(ERROR) << "Dyn input index" << *dyn_input_index << "is over dyn input num" << dyn_input_sizes.size();
return false;
}
*input_num = IntToSize(dyn_input_sizes[*dyn_input_index]);
@@ -314,7 +314,7 @@ bool TbeKernelJsonCreator::GenOutputDescJson(
output_obj_num = real_output_num;
} else {
if (output_idx >= real_output_num) {
- MS_LOG(INFO) << "op:" << op_name << ", output" << output_ptr->name() << " is optional, output is none.";
+ MS_LOG(INFO) << "Op:" << op_name << ", output" << output_ptr->name() << " is optional, output is none.";
std::vector<nlohmann::json> output_list;
nlohmann::json output_obj;
output_obj[kJName] = output_ptr->name();
@@ -389,7 +389,7 @@ bool TbeKernelJsonCreator::GenTbeAttrJson(const std::shared_ptr<AnfNode> &anf_no
attr_obj[kJValid] = false;
} else {
if (attr_ptr->param_type() == kParamRequred && creater_type_ == SINGLE_BUILD) {
- MS_LOG(EXCEPTION) << "op name: " << op_info->op_name() << " attr: " << attr_name
+ MS_LOG(EXCEPTION) << "Op name: " << op_info->op_name() << " attr: " << attr_name
<< " is required, but not set.";
} else {
attr_obj[kJValid] = false;
@@ -451,7 +451,7 @@ void TbeKernelJsonCreator::ParseAttrValue(const std::string &type, const mindspo
auto attr_value = GetValue<std::vector<std::vector<int>>>(value);
(*attr_obj)[kJValue] = attr_value;
} else {
- MS_LOG(EXCEPTION) << "type: " << type << "not support";
+ MS_LOG(EXCEPTION) << "Type: " << type << "not support";
}
}
@@ -536,7 +536,7 @@ std::string TbeKernelJsonCreator::GetDeviceOutputFormat(const AnfNodePtr &anf_no
bool TbeKernelBuild::GetIOSize(const nlohmann::json &kernel_json, std::vector<size_t> *input_size_list,
std::vector<size_t> *output_size_list) {
if (input_size_list == nullptr || output_size_list == nullptr) {
- MS_LOG(ERROR) << "input size or output size is nullptr";
+ MS_LOG(ERROR) << "Input size or output size is nullptr";
return false;
}
input_size_list->clear();
@@ -750,7 +750,7 @@ bool TbeKernelBuild::GenFusionDataInputJson(const std::shared_ptr<mindspore::Anf
MS_EXCEPTION_IF_NULL(index);
std::vector<nlohmann::json> output_desc_list;
if (!data_input) {
- MS_LOG(INFO) << "data input is optional node";
+ MS_LOG(INFO) << "Data input is optional node";
auto name = std::string(kOptional) + std::to_string(*index);
(*data_str)[kJName] = name;
nlohmann::json output_desc;
@@ -766,7 +766,7 @@ bool TbeKernelBuild::GenFusionDataInputJson(const std::shared_ptr<mindspore::Anf
auto kernel_idx = AnfAlgo::VisitKernel(data_input, 0);
auto real_node = kernel_idx.first;
size_t real_idx = kernel_idx.second;
- MS_LOG(INFO) << "real name " << real_node->fullname_with_scope() << " index:" << real_idx;
+ MS_LOG(INFO) << "Real name " << real_node->fullname_with_scope() << " index:" << real_idx;
// kJOutputDesc
nlohmann::json output_desc;
GenDescJson(real_node, real_idx, real_idx, &output_desc, fusion_data_type);
@@ -842,18 +842,18 @@ bool TbeKernelBuild::GenFusionComputeInputJson(const mindspore::CNodePtr &cnode,
auto kernel_idx = AnfAlgo::VisitKernel(input, 0);
auto real_node = kernel_idx.first;
size_t real_idx = kernel_idx.second;
- MS_LOG(INFO) << "real name" << real_node->fullname_with_scope() << "index:" << real_idx;
+ MS_LOG(INFO) << "Real name" << real_node->fullname_with_scope() << "index:" << real_idx;
nlohmann::json input_desc;
GenDescJson(real_node, real_idx, real_idx, &input_desc);
if (is_dynamic_input) {
MS_LOG(INFO) << "node has dynamic input.";
MS_LOG(INFO) << "Node has dynamic input.";
input_desc[kJDynIndex] = (i - 1);
}
input_desc_list_tmp.emplace_back(input_desc);
}
size_t optional_num = GetOptionalInput(cnode, is_dynamic_input);
if (optional_num > 0) {
MS_LOG(INFO) << "node has optional input.";
MS_LOG(INFO) << "Node has optional input.";
for (size_t i = 0; i < optional_num; ++i) {
nlohmann::json optional_input_desc;
optional_input_desc[kJName] = std::string(kOptional) + std::to_string(*index);
@@ -871,7 +871,7 @@ std::vector<size_t> TbeKernelBuild::GetDescOutputIndex(const std::vector<int> &o
std::vector<size_t> desc_output_index = {};
for (size_t idx = 0; idx < output_used_nums.size(); ++idx) {
auto output_use_num_item = output_used_nums[idx];
- MS_LOG(INFO) << "output used num[" << idx << "] = " << output_use_num_item;
+ MS_LOG(INFO) << "Output used num[" << idx << "] = " << output_use_num_item;
desc_output_index.emplace_back(idx);
if (output_use_num_item > 1) {
desc_output_index.emplace_back(idx);
@@ -990,7 +990,7 @@ bool TbeKernelBuild::GetIOSize(const nlohmann::json &fusion_op_list,
auto op_output_desces = op[kJOutputDesc];
if (output_node != real_node) {
// tuple_get item
- MS_LOG(INFO) << "output is a tuple getitem node";
+ MS_LOG(INFO) << "Output is a tuple getitem node";
auto output_desc = op_output_desces[real_idx];
if (output_desc[kJShape].empty()) {
MS_LOG(INFO) << "Fusion error: output_desc's shape is empty. real_index " << real_idx;

View File

@@ -77,7 +77,7 @@ bool TbeOpParallelPreBuild(const std::vector<AnfNodePtr> &anf_nodes) {
return true;
}
- bool TbeOpParallelBuild(std::vector<AnfNodePtr> anf_nodes) {
+ bool TbeOpParallelBuild(const std::vector<AnfNodePtr> &anf_nodes) {
auto build_manger = std::make_shared<ParallelBuildManager>();
MS_EXCEPTION_IF_NULL(build_manger);
set<std::string> processed_kernel;

View File

@@ -27,7 +27,7 @@
namespace mindspore {
namespace kernel {
bool TbeOpParallelPreBuild(const std::vector<AnfNodePtr> &anf_nodes);
- bool TbeOpParallelBuild(std::vector<AnfNodePtr> anf_nodes);
+ bool TbeOpParallelBuild(const std::vector<AnfNodePtr> &anf_nodes);
struct KernelBuildTaskInfo {
AnfNode *node;
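Both the definition above and this declaration now take the node list by const reference, so the vector of shared pointers is no longer copied (and its refcounts no longer bumped) on every call. A minimal sketch of the difference, using a hypothetical NodePtr in place of AnfNodePtr:

#include <memory>
#include <vector>

struct Node {};
using NodePtr = std::shared_ptr<Node>;

// Before: the whole vector is copied per call, touching every shared_ptr refcount.
bool BuildByValue(std::vector<NodePtr> anf_nodes) { return !anf_nodes.empty(); }

// After: read-only access without a copy; call sites stay unchanged.
bool BuildByConstRef(const std::vector<NodePtr> &anf_nodes) { return !anf_nodes.empty(); }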

View File

@@ -20,7 +20,6 @@
namespace mindspore {
namespace kernel {
- constexpr char kDynInputKey[] = "dyn_input_sizes";
constexpr size_t kInputIndex_0 = 0;
constexpr size_t kChannelN = 0;
constexpr size_t kChannelC = 1;
@@ -34,9 +33,9 @@ bool TbeKernelBroadCastSelecter::GetShapeInfo(SupportFormat *support_format) {
output_num_ = 0;
input_shapes_.clear();
output_shapes_.clear();
- if (AnfAlgo::HasNodeAttr(kDynInputKey, cnode_ptr_)) {
+ if (AnfAlgo::HasNodeAttr(kAttrDynInputSizes, cnode_ptr_)) {
MS_LOG(INFO) << "This broadcast node has dynamic input.";
- auto dynamic_size_vec = AnfAlgo::GetNodeAttr<std::vector<int>>(cnode_ptr_, kDynInputKey);
+ auto dynamic_size_vec = AnfAlgo::GetNodeAttr<std::vector<int>>(cnode_ptr_, kAttrDynInputSizes);
if (dynamic_size_vec.empty() || dynamic_size_vec[0] < 2) {
MS_LOG(EXCEPTION) << "dynamic attr set error, please check.";
}
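The per-file kDynInputKey constant is dropped in favour of the shared kAttrDynInputSizes key (the same swap is applied to kKeepDims / kAttrKeepDims and in the kernel-select file below). A small sketch of why one shared constant beats repeated string literals; AttrMap and HasDynamicInput are hypothetical, and the key value mirrors the removed local constant:

#include <string>
#include <unordered_map>
#include <vector>

// One shared definition (normally in a common header) instead of a duplicate
// constexpr char kDynInputKey[] in every selecter file.
constexpr char kAttrDynInputSizes[] = "dyn_input_sizes";

using AttrMap = std::unordered_map<std::string, std::vector<int>>;

bool HasDynamicInput(const AttrMap &attrs) {
  // A typo in a duplicated literal would silently miss the attribute;
  // a shared constant fails to compile instead.
  return attrs.find(kAttrDynInputSizes) != attrs.end();
}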

View File

@@ -23,7 +23,6 @@
namespace mindspore {
namespace kernel {
- constexpr char kKeepDims[] = "keep_dims";
constexpr char kAxis[] = "axis";
constexpr char kTypeInt32[] = "Int32";
constexpr size_t kInputIndex_0 = 0;
@@ -148,12 +147,12 @@ void TbeKernelReduceSelecter::GetReduceAttrAxis() {
}
void TbeKernelReduceSelecter::GetReduceAttrKeepDim() {
- if (!AnfAlgo::HasNodeAttr(kKeepDims, cnode_ptr_)) {
+ if (!AnfAlgo::HasNodeAttr(kAttrKeepDims, cnode_ptr_)) {
MS_LOG(INFO) << "This node does't have keep_attr.";
keep_dims_ = false;
return;
}
- keep_dims_ = AnfAlgo::GetNodeAttr<bool>(cnode_ptr_, kKeepDims);
+ keep_dims_ = AnfAlgo::GetNodeAttr<bool>(cnode_ptr_, kAttrKeepDims);
}
void TbeKernelReduceSelecter::AssignSupportFormat(const std::string &support_format_str,

View File

@@ -39,7 +39,6 @@ constexpr auto kDtype = "dtype";
constexpr auto kFormat = "format";
constexpr auto kPrefixInput = "input";
constexpr auto kPrefixOutput = "output";
- constexpr char kDynInputKey[] = "dyn_input_sizes";
constexpr char kParamTypeDynamic[] = "dynamic";
constexpr char kParamTypeRequre[] = "required";
constexpr char kParamTypeOptional[] = "optional";
@@ -87,8 +86,8 @@ void TbeKernelSelect::GetCommonPatternKernelInfo(const OpInfo &op_info) {
auto primitive = AnfAlgo::GetCNodePrimitive(cnode_ptr_);
MS_EXCEPTION_IF_NULL(primitive);
std::vector<int> dyn_input_sizes;
- if (primitive->HasAttr(kDynInputKey)) {
- dyn_input_sizes = GetValue<std::vector<int>>(primitive->GetAttr(kDynInputKey));
+ if (primitive->HasAttr(kAttrDynInputSizes)) {
+ dyn_input_sizes = GetValue<std::vector<int>>(primitive->GetAttr(kAttrDynInputSizes));
}
// get real input/output num
size_t real_input_tensor_num = AnfAlgo::GetInputTensorNum(cnode_ptr_);

View File

@@ -59,14 +59,14 @@ void TbeUtils::SaveJsonInfo(const std::string &json_name, const std::string &inf
MS_LOG(INFO) << "json file exist, no need to create.";
return;
}
- std::ofstream filewrite;
- filewrite.open(path);
- if (!filewrite.is_open()) {
+ std::ofstream file_write;
+ file_write.open(path);
+ if (!file_write.is_open()) {
return;
}
- filewrite << info << std::endl;
- filewrite.close();
- if (nullptr == realpath(path.c_str(), real_path)) {
+ file_write << info << std::endl;
+ file_write.close();
+ if (realpath(path.c_str(), real_path) == nullptr) {
MS_LOG(INFO) << "dir: " << path << "does not exit.";
return;
}
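Besides the filewrite to file_write rename, the null check is flipped out of Yoda style into realpath(...) == nullptr. A simplified standalone analogue of the save path, assuming a POSIX realpath; this is a sketch, not the MindSpore helper itself:

#include <cstdlib>
#include <fstream>
#include <iostream>
#include <limits.h>
#include <string>

bool SaveText(const std::string &path, const std::string &info) {
  std::ofstream file_write;
  file_write.open(path);
  if (!file_write.is_open()) {
    return false;
  }
  file_write << info << std::endl;
  file_write.close();

  char real_path[PATH_MAX] = {0};
  // Plain comparison order reads naturally; resolve the path only after the file exists.
  if (realpath(path.c_str(), real_path) == nullptr) {
    std::cout << "dir: " << path << " does not exist." << std::endl;
    return false;
  }
  return true;
}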
@@ -144,12 +144,12 @@ uintptr_t KernelManager::GenFuncStub(const mindspore::kernel::KernelPack &kernel
auto kernel_json_info = kernel_pack.kernel_json_info();
*block_dim = kernel_json_info.block_dim;
- string funcname = kernel_json_info.kernel_name;
+ string func_name = kernel_json_info.kernel_name;
string magic = kernel_json_info.magic;
if (!force_reload) {
// use the cached object.
- auto iter = info_table_.find(funcname);
+ auto iter = info_table_.find(func_name);
if (iter != info_table_.end()) {
auto kernelmeta = iter->second;
*block_dim = kernelmeta->block_dim_;
@@ -157,23 +157,24 @@ uintptr_t KernelManager::GenFuncStub(const mindspore::kernel::KernelPack &kernel
}
}
void *module = nullptr;
- if (0 != BinaryRegister((*kernel_pack.GetKernel()), &module, magic)) {
+ if (BinaryRegister((*kernel_pack.GetKernel()), &module, magic) != 0) {
MS_LOG(INFO) << "Call runtime BinaryRegister error.";
return 0;
}
// to diff different funcs.
- uintptr_t funcstub = ++kernel_stub_gen_;
+ uintptr_t func_stub = ++kernel_stub_gen_;
if (RT_ERROR_NONE !=
- rtFunctionRegister(module, reinterpret_cast<void *>(funcstub), funcname.c_str(), funcname.c_str(), 0)) {
+ rtFunctionRegister(module, reinterpret_cast<void *>(func_stub), func_name.c_str(), func_name.c_str(), 0)) {
MS_LOG(INFO) << "Call runtime rtFunctionRegister error.";
return 0;
}
// cache the registered kernelmeta.
- info_table_[funcname] = std::make_shared<KernelMetaInfo>(KernelMetaInfo{funcstub, *block_dim});
- return funcstub;
+ info_table_[func_name] = std::make_shared<KernelMetaInfo>(KernelMetaInfo{func_stub, *block_dim});
+ return func_stub;
}
std::string KernelManager::GetStubFuncName(const KernelPackPtr &kernel_pack) {
MS_EXCEPTION_IF_NULL(kernel_pack);
auto kernel_json_info = kernel_pack->kernel_json_info();
return kernel_json_info.kernel_name;
}
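The renames in GenFuncStub (funcname to func_name, funcstub to func_stub) and the reordered comparisons do not change behaviour; the function still caches one stub id per kernel name. A much-reduced, hypothetical sketch of that cache pattern (Registry and Register are stand-ins, not the runtime API):

#include <cstdint>
#include <map>
#include <string>

class Registry {
 public:
  uintptr_t GenFuncStub(const std::string &func_name) {
    auto iter = info_table_.find(func_name);
    if (iter != info_table_.end()) {
      return iter->second;  // reuse the cached stub
    }
    uintptr_t func_stub = ++kernel_stub_gen_;   // distinct id per registered function
    if (Register(func_name, func_stub) != 0) {  // `ret != 0`, not Yoda-style `0 != ret`
      return 0;
    }
    info_table_[func_name] = func_stub;
    return func_stub;
  }

 private:
  int Register(const std::string &, uintptr_t) { return 0; }  // placeholder: always succeeds
  uintptr_t kernel_stub_gen_ = 0;
  std::map<std::string, uintptr_t> info_table_;
};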