!31559 Code check clean.

Merge pull request !31559 from linqingke/clean_code_318
i-robot 2022-03-19 07:59:58 +00:00 committed by Gitee
commit 95d2d441b1
10 changed files with 17 additions and 18 deletions


@@ -928,14 +928,14 @@ bool AnfRuntimeAlgorithm::IsIndependentNode(const CNodePtr &node) {
 static inline void GetMaxOrDefaultShape(const std::vector<int64_t> &max_shape, std::vector<size_t> *device_shape) {
   if (!max_shape.empty()) {
-    std::transform(max_shape.begin(), max_shape.end(), device_shape->begin(), IntToSize);
+    (void)std::transform(max_shape.begin(), max_shape.end(), device_shape->begin(), IntToSize);
   } else {
     constexpr size_t kDefaultValueForDynamicDim = 16;
     auto tmp_shape = *device_shape;
     auto ConvertNegOneToDefalut = [&kDefaultValueForDynamicDim](size_t size) {
       return static_cast<int64_t>(size) < 0 ? kDefaultValueForDynamicDim : size;
     };
-    std::transform(tmp_shape.begin(), tmp_shape.end(), device_shape->begin(), ConvertNegOneToDefalut);
+    (void)std::transform(tmp_shape.begin(), tmp_shape.end(), device_shape->begin(), ConvertNegOneToDefalut);
   }
 }
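
Reviewer note on the pattern above, as a minimal standalone sketch (not MindSpore code): casting the result of std::transform to void marks the discarded iterator return as intentional, which is what the code check flags, and the lambda shows the same negative-dimension-to-default mapping idea.

// Sketch only: (void)std::transform plus a default-for-dynamic-dim lambda.
#include <algorithm>
#include <cstdint>
#include <vector>

int main() {
  constexpr size_t kDefaultDim = 16;  // stand-in for kDefaultValueForDynamicDim
  std::vector<size_t> shape = {static_cast<size_t>(-1), 8};  // -1 marks a dynamic dim
  // std::transform returns an iterator; the (void) cast documents that it is unused.
  (void)std::transform(shape.begin(), shape.end(), shape.begin(),
                       [kDefaultDim](size_t dim) {
                         return static_cast<int64_t>(dim) < 0 ? kDefaultDim : dim;
                       });
  return shape[0] == kDefaultDim ? 0 : 1;
}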


@@ -35,7 +35,7 @@ class AscendKernelMod : public KernelMod {
   explicit AscendKernelMod(const AnfNodePtr &anf_node_ptr) : KernelMod(anf_node_ptr) {}
   virtual std::vector<TaskInfoPtr> GenTask(const std::vector<AddressPtr> &, const std::vector<AddressPtr> &,
                                            const std::vector<AddressPtr> &, uint32_t) = 0;
-  uint32_t block_dim() { return block_dim_; }
+  uint32_t block_dim() const { return block_dim_; }
   uint32_t stream_id() const { return stream_id_; }
   virtual bool NeedDump() {
 #ifndef ENABLE_SECURITY
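
A minimal sketch (with assumed names, not the real class) of why the check wants block_dim() const: a getter that does not modify state should be const-qualified so it can be called through const references, matching stream_id().

// Sketch only: const-qualifying a read-only getter.
#include <cstdint>

class KernelModSketch {
 public:
  uint32_t block_dim() const { return block_dim_; }  // now callable on const objects
  uint32_t stream_id() const { return stream_id_; }

 private:
  uint32_t block_dim_ = 1;
  uint32_t stream_id_ = 0;
};

int main() {
  const KernelModSketch mod{};
  return static_cast<int>(mod.block_dim() - 1 + mod.stream_id());
}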


@@ -120,7 +120,7 @@ bool AicpuExtInfoHandler::ParseExtInputShape(AicpuExtInfo *aicpu_ext_info) {
   auto input = reinterpret_cast<AicpuShapeAndType *>(aicpu_ext_info->infoMsg);
   for (uint32_t index = 0; index < input_num_; ++index) {
-    input_shape_and_type_.emplace_back(&input[index]);
+    (void)input_shape_and_type_.emplace_back(&input[index]);
   }
   MS_LOG(INFO) << "Node:" << node_name_ << " parse ext input shape success infoLen=" << aicpu_ext_info->infoLen;
   return true;
@@ -138,7 +138,7 @@ bool AicpuExtInfoHandler::ParseExtOutputShape(AicpuExtInfo *aicpu_ext_info) {
   auto output = reinterpret_cast<AicpuShapeAndType *>(aicpu_ext_info->infoMsg);
   for (uint32_t index = 0; index < output_num_; ++index) {
-    output_shape_and_type_.emplace_back(&output[index]);
+    (void)output_shape_and_type_.emplace_back(&output[index]);
   }
   MS_LOG(INFO) << "Node:" << node_name_ << " parse ext output shape success infoLen=" << aicpu_ext_info->infoLen;
   return true;
@@ -237,7 +237,7 @@ void AicpuExtInfoHandler::GetShapeAndType(const NotNull<const AicpuShapeAndType
     if (tmpDim == kDimEndFlag) {
       break;
     }
-    shape->emplace_back(tmpDim);
+    (void)shape->emplace_back(tmpDim);
     MS_LOG(DEBUG) << "Debug tmpDim:" << tmpDim;
   }
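
Context for the (void)emplace_back casts, as a sketch under the assumption that the check targets discarded return values: since C++17, emplace_back returns a reference to the inserted element, and the cast records that the caller deliberately ignores it.

// Sketch only: emplace_back returns a reference since C++17; the (void)
// cast documents that the returned reference is intentionally unused.
#include <cstdint>
#include <vector>

int main() {
  std::vector<int64_t> shape;
  (void)shape.emplace_back(32);            // return value discarded on purpose
  int64_t &last = shape.emplace_back(64);  // or actually use the reference
  last += 0;
  return shape.size() == 2U ? 0 : 1;
}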


@@ -323,7 +323,7 @@ class MemcpyAsyncTaskInfo : public TaskInfo {
   uint64_t dst_max_;
   void *src_;
   uint64_t count_;
-  int32_t kind_;
+  uint32_t kind_;
 };
 class StreamSwitchTaskInfo : public TaskInfo {
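
A hedged note on the int32_t to uint32_t change (the surrounding runtime types are not shown here, so the reason is an assumption): keeping kind_ unsigned avoids an implicit signed/unsigned conversion when it is assigned from an unsigned value.

// Sketch only: matching the member's signedness to the value it stores
// removes an implicit sign conversion the code check would flag.
#include <cstdint>

struct MemcpyTaskSketch {  // hypothetical stand-in, not the real TaskInfo class
  uint64_t count_;
  uint32_t kind_;          // was int32_t; the stored copy-kind value is non-negative (assumed)
};

int main() {
  uint32_t runtime_kind = 2U;                 // assumed unsigned source value
  MemcpyTaskSketch task{128U, runtime_kind};  // no narrowing or sign conversion
  return task.kind_ == runtime_kind ? 0 : 1;
}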


@@ -115,12 +115,12 @@ void AllToAllvCalcParam::CalcMemOffset(const std::vector<size_t> &mem_sizes, con
     if (rank_ids[i] < 0 || static_cast<size_t>(rank_ids[i]) >= rank_size_) {
       MS_LOG(EXCEPTION) << "Invalid rank id " << rank_ids[i] << " at index " << i << " as rank size " << rank_size_;
     }
-    rank_id_map.emplace(rank_ids[i], i);
+    (void)rank_id_map.emplace(rank_ids[i], i);
   }
   size_t offset = 0;
   for (uint32_t i = 0; i < rank_size_; ++i) {
-    (*displs)[i] = offset;
+    (*displs)[i] = SizeToLong(offset);
     auto iter = rank_id_map.find(i);
     if (iter != rank_id_map.end()) {
       (*counts)[i] = real_sizes[iter->second];
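
Two patterns appear in this hunk; the sketch below (with a stand-in helper, since the project's SizeToLong utility is not shown here) makes the size_t to int64_t conversion explicit and marks the ignored pair<iterator, bool> returned by map::emplace.

// Sketch only: explicit size_t -> int64_t conversion plus a discarded
// map::emplace return, mirroring the two changes in this hunk.
#include <cstdint>
#include <map>
#include <vector>

// Hypothetical stand-in for the project's SizeToLong helper.
inline int64_t SizeToLongSketch(size_t v) { return static_cast<int64_t>(v); }

int main() {
  std::map<int64_t, size_t> rank_id_map;
  std::vector<int64_t> rank_ids = {1, 0, 2};
  for (size_t i = 0; i < rank_ids.size(); ++i) {
    // emplace returns std::pair<iterator, bool>; the (void) cast records
    // that the "already present" signal is deliberately ignored here.
    (void)rank_id_map.emplace(rank_ids[i], i);
  }
  std::vector<int64_t> displs(rank_ids.size(), 0);
  size_t offset = 0;
  for (size_t i = 0; i < displs.size(); ++i) {
    displs[i] = SizeToLongSketch(offset);  // explicit, instead of implicit size_t -> int64_t
    offset += 16;
  }
  return displs[2] == 32 ? 0 : 1;
}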


@@ -55,7 +55,7 @@ std::vector<TaskInfoPtr> LabelSetKernel::GenTask(const std::vector<AddressPtr> &
   std::vector<TaskInfoPtr> task_info_list;
   std::shared_ptr<LabelSetTaskInfo> task_info_ptr = std::make_shared<LabelSetTaskInfo>(unique_name_, stream_id, label_);
   MS_EXCEPTION_IF_NULL(task_info_ptr);
-  task_info_list.emplace_back(task_info_ptr);
+  (void)task_info_list.emplace_back(task_info_ptr);
   return task_info_list;
 }
 }  // namespace kernel


@@ -301,8 +301,7 @@ std::vector<size_t> FusionBuildTbeJsonCreator::GetDescOutputIndex(const std::vec
   return desc_output_index;
 }
-bool FusionBuildTbeJsonCreator::AttrsJsonPostProcessing(const AnfNodePtr &anf_node, const OpInfoPtr &op_info_ptr,
-                                                        nlohmann::json *attrs_json) {
+bool FusionBuildTbeJsonCreator::AttrsJsonPostProcessing(const AnfNodePtr &, const OpInfoPtr &, nlohmann::json *) {
   return true;
 }
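
A small sketch of the unused-parameter cleanup above (assumed signature, not the real JSON creator): dropping the parameter names tells both the compiler and the checker that the arguments are intentionally unused while the interface stays unchanged for callers and overriders.

// Sketch only: unnamed parameters mark arguments as intentionally unused.
#include <string>

// Before: bool PostProcess(const std::string &node_name, int *attrs) { return true; }
bool PostProcess(const std::string & /* node_name */, int * /* attrs */) {
  return true;  // nothing to post-process here, parameters unused
}

int main() { return PostProcess("op", nullptr) ? 0 : 1; }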


@@ -87,7 +87,7 @@ bool HasFraczGroupAttrAndSet(const AnfNodePtr &node, size_t index, int64_t group
   if (common::AnfAlgo::HasNodeAttr(kAttrFracZGroupIdx, cnode)) {
     fz_group_idx = common::AnfAlgo::GetNodeAttr<std::vector<int64_t>>(cnode, kAttrFracZGroupIdx);
     if (input_num > fz_group_idx.size()) {
-      fz_group_idx.insert(fz_group_idx.begin(), input_num - fz_group_idx.size(), 1);
+      (void)fz_group_idx.insert(fz_group_idx.begin(), input_num - fz_group_idx.size(), 1);
     }
     if (fz_group_idx[index] == 1) {
       fz_group_idx[index] = groups;


@@ -133,7 +133,7 @@ AnfNodePtr SplitFission::DoFission(const FuncGraphPtr &func_graph, const CNodePt
   int64_t cur_output_index = 0;
   while (num_split - cur_output_index > divisor) {
     auto tuple_getitem = CreateTupleGetItem(func_graph, base_splitv, nodes_num);
-    base_splitv_outputs.push_back(tuple_getitem);
+    (void)base_splitv_outputs.push_back(tuple_getitem);
     CNodePtr new_splitv = CreateSplitVNode(func_graph, tuple_getitem);
     std::vector<int64_t> size_splits_new(size_splits.begin() + nodes_num * divisor,
                                          size_splits.begin() + (nodes_num + 1) * divisor);
@@ -154,7 +154,7 @@ AnfNodePtr SplitFission::DoFission(const FuncGraphPtr &func_graph, const CNodePt
   auto last_node_num_split = num_split - cur_output_index;
   if (last_node_num_split > 1) {
     auto tuple_getitem = CreateTupleGetItem(func_graph, base_splitv, nodes_num);
-    base_splitv_outputs.push_back(tuple_getitem);
+    (void)base_splitv_outputs.push_back(tuple_getitem);
     CNodePtr new_splitv = CreateSplitVNode(func_graph, tuple_getitem);
     std::vector<int64_t> size_splits_new_last(size_splits.begin() + nodes_num * divisor, size_splits.end());
     SetAttrForSplitVNode(new_splitv, size_splits_new_last, split_dim, last_node_num_split);
@@ -168,9 +168,9 @@ AnfNodePtr SplitFission::DoFission(const FuncGraphPtr &func_graph, const CNodePt
     size_splits_base.emplace_back(last_split_size);
   } else {
     auto tuple_getitem = CreateTupleGetItem(func_graph, base_splitv, nodes_num);
-    base_splitv_outputs.push_back(tuple_getitem);
+    (void)base_splitv_outputs.push_back(tuple_getitem);
     (void)make_tuple_inputs.emplace_back(tuple_getitem);
-    size_splits_base.emplace_back(size_splits[size_splits.size() - 1]);
+    (void)size_splits_base.emplace_back(size_splits[size_splits.size() - 1]);
   }
   nodes_num++;
 }
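
One detail worth noting about this hunk: push_back returns void, so the added (void) casts on base_splitv_outputs.push_back are purely for consistency with the checker, whereas the casts on emplace_back genuinely discard the reference it returns since C++17. A minimal contrast, as a sketch:

// Sketch only: push_back returns void (the cast is cosmetic), while
// emplace_back returns a reference whose discard the cast documents.
#include <vector>

int main() {
  std::vector<int> outputs;
  (void)outputs.push_back(1);     // push_back already returns void
  (void)outputs.emplace_back(2);  // emplace_back's reference is discarded
  return outputs.size() == 2U ? 0 : 1;
}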


@@ -162,7 +162,7 @@ CNodePtr CreateDynamicShapeCNode(const FuncGraphPtr &func_graph, const AnfNodePt
 CNodePtr CreateDropoutGenMaskCNode(const FuncGraphPtr &func_graph, const CNodePtr &dropout,
                                    const ValueNodePtr &keep_prob_value, const abstract::ShapePtr &input_shape,
-                                   const bool use_v3, const PatternProcessPass &pass) {
+                                   const bool use_v3, const PatternProcessPass &) {
   MS_EXCEPTION_IF_NULL(func_graph);
   MS_EXCEPTION_IF_NULL(dropout);
   MS_EXCEPTION_IF_NULL(input_shape);