forked from mindspore-Ecosystem/mindspore
pclint clean code.
parent 5b3a5787be
commit 7065684a84
@@ -928,14 +928,14 @@ bool AnfRuntimeAlgorithm::IsIndependentNode(const CNodePtr &node) {
 static inline void GetMaxOrDefaultShape(const std::vector<int64_t> &max_shape, std::vector<size_t> *device_shape) {
   if (!max_shape.empty()) {
-    std::transform(max_shape.begin(), max_shape.end(), device_shape->begin(), IntToSize);
+    (void)std::transform(max_shape.begin(), max_shape.end(), device_shape->begin(), IntToSize);
   } else {
     constexpr size_t kDefaultValueForDynamicDim = 16;
     auto tmp_shape = *device_shape;
     auto ConvertNegOneToDefalut = [&kDefaultValueForDynamicDim](size_t size) {
       return static_cast<int64_t>(size) < 0 ? kDefaultValueForDynamicDim : size;
     };
-    std::transform(tmp_shape.begin(), tmp_shape.end(), device_shape->begin(), ConvertNegOneToDefalut);
+    (void)std::transform(tmp_shape.begin(), tmp_shape.end(), device_shape->begin(), ConvertNegOneToDefalut);
   }
 }
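Note: the recurring pattern in this commit is casting discarded return values to (void). std::transform returns an iterator past the end of the output range; pclint-style checkers flag calls whose non-void result is ignored, and the cast records that the discard is deliberate. A minimal standalone sketch of the pattern, with a lambda standing in for MindSpore's IntToSize helper:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    int main() {
      std::vector<int64_t> max_shape{2, 3, 4};
      std::vector<size_t> device_shape(max_shape.size());
      // std::transform returns an iterator past the last written element;
      // the (void) cast marks the discarded result as intentional.
      (void)std::transform(max_shape.begin(), max_shape.end(), device_shape.begin(),
                           [](int64_t v) { return static_cast<size_t>(v); });
      return 0;
    }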
@@ -35,7 +35,7 @@ class AscendKernelMod : public KernelMod {
   explicit AscendKernelMod(const AnfNodePtr &anf_node_ptr) : KernelMod(anf_node_ptr) {}
   virtual std::vector<TaskInfoPtr> GenTask(const std::vector<AddressPtr> &, const std::vector<AddressPtr> &,
                                            const std::vector<AddressPtr> &, uint32_t) = 0;
-  uint32_t block_dim() { return block_dim_; }
+  uint32_t block_dim() const { return block_dim_; }
   uint32_t stream_id() const { return stream_id_; }
   virtual bool NeedDump() {
 #ifndef ENABLE_SECURITY
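Marking block_dim() const brings it in line with stream_id() just below it and makes the getter callable on const objects. A minimal illustration (KernelInfo is a made-up stand-in, not the real AscendKernelMod):

    #include <cstdint>

    class KernelInfo {
     public:
      // const-qualified: usable through const references, and the checker no
      // longer reports a getter that could be const but is not.
      uint32_t block_dim() const { return block_dim_; }

     private:
      uint32_t block_dim_ = 8;
    };

    int main() {
      const KernelInfo info;
      return info.block_dim() == 8 ? 0 : 1;  // ok only because block_dim() is const
    }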
@@ -120,7 +120,7 @@ bool AicpuExtInfoHandler::ParseExtInputShape(AicpuExtInfo *aicpu_ext_info) {
   auto input = reinterpret_cast<AicpuShapeAndType *>(aicpu_ext_info->infoMsg);

   for (uint32_t index = 0; index < input_num_; ++index) {
-    input_shape_and_type_.emplace_back(&input[index]);
+    (void)input_shape_and_type_.emplace_back(&input[index]);
   }
   MS_LOG(INFO) << "Node:" << node_name_ << " parse ext input shape success infoLen=" << aicpu_ext_info->infoLen;
   return true;
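The same (void) treatment applies to emplace_back: since C++17 it returns a reference to the inserted element, which is presumably what triggers the unused-return diagnostic at these sites. Sketch:

    #include <vector>

    int main() {
      std::vector<int> values;
      // C++17 emplace_back returns a reference to the new element; casting to
      // (void) documents that the reference is not needed.
      (void)values.emplace_back(42);
      return values.front() == 42 ? 0 : 1;
    }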
@@ -138,7 +138,7 @@ bool AicpuExtInfoHandler::ParseExtOutputShape(AicpuExtInfo *aicpu_ext_info) {

   auto output = reinterpret_cast<AicpuShapeAndType *>(aicpu_ext_info->infoMsg);
   for (uint32_t index = 0; index < output_num_; ++index) {
-    output_shape_and_type_.emplace_back(&output[index]);
+    (void)output_shape_and_type_.emplace_back(&output[index]);
   }
   MS_LOG(INFO) << "Node:" << node_name_ << " parse ext output shape success infoLen=" << aicpu_ext_info->infoLen;
   return true;
@@ -236,7 +236,7 @@ void AicpuExtInfoHandler::GetShapeAndType(NotNull<const AicpuShapeAndType *> sha
     if (tmpDim == kDimEndFlag) {
       break;
     }
-    shape->emplace_back(tmpDim);
+    (void)shape->emplace_back(tmpDim);
     MS_LOG(DEBUG) << "Debug tmpDim:" << tmpDim;
   }
@@ -323,7 +323,7 @@ class MemcpyAsyncTaskInfo : public TaskInfo {
   uint64_t dst_max_;
   void *src_;
   uint64_t count_;
-  int32_t kind_;
+  uint32_t kind_;
 };

 class StreamSwitchTaskInfo : public TaskInfo {
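Switching kind_ from int32_t to uint32_t plausibly aligns the member with an unsigned copy-kind parameter on the runtime side, removing an implicit signed-to-unsigned conversion that pclint reports. A toy illustration with a hypothetical LaunchMemcpy (not the real runtime API):

    #include <cstdint>
    #include <iostream>

    // Hypothetical stand-in for a runtime call taking an unsigned copy-kind flag.
    void LaunchMemcpy(uint64_t count, uint32_t kind) {
      std::cout << "copy " << count << " bytes, kind " << kind << "\n";
    }

    struct MemcpyDesc {
      uint64_t count_;
      uint32_t kind_;  // was int32_t; unsigned now matches the callee exactly
    };

    int main() {
      MemcpyDesc desc{128, 1};
      LaunchMemcpy(desc.count_, desc.kind_);  // no implicit sign conversion
      return 0;
    }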
@@ -115,12 +115,12 @@ void AllToAllvCalcParam::CalcMemOffset(const std::vector<size_t> &mem_sizes, con
     if (rank_ids[i] < 0 || static_cast<size_t>(rank_ids[i]) >= rank_size_) {
       MS_LOG(EXCEPTION) << "Invalid rank id " << rank_ids[i] << " at index " << i << " as rank size " << rank_size_;
     }
-    rank_id_map.emplace(rank_ids[i], i);
+    (void)rank_id_map.emplace(rank_ids[i], i);
   }

   size_t offset = 0;
   for (uint32_t i = 0; i < rank_size_; ++i) {
-    (*displs)[i] = offset;
+    (*displs)[i] = SizeToLong(offset);
     auto iter = rank_id_map.find(i);
     if (iter != rank_id_map.end()) {
       (*counts)[i] = real_sizes[iter->second];
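Here (*displs)[i] = SizeToLong(offset) makes the size_t-to-int64_t conversion explicit instead of implicit. A simplified sketch of such a helper (the real SizeToLong lives in MindSpore's conversion utilities and may handle bounds differently):

    #include <cassert>
    #include <cstdint>
    #include <limits>
    #include <vector>

    // Simplified stand-in: explicit, checked size_t -> int64_t conversion.
    inline int64_t SizeToLong(size_t v) {
      assert(v <= static_cast<size_t>(std::numeric_limits<int64_t>::max()));
      return static_cast<int64_t>(v);
    }

    int main() {
      std::vector<int64_t> displs(4, 0);
      size_t offset = 96;
      displs[1] = SizeToLong(offset);  // explicit conversion, no lint finding
      return displs[1] == 96 ? 0 : 1;
    }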
@@ -55,7 +55,7 @@ std::vector<TaskInfoPtr> LabelSetKernel::GenTask(const std::vector<AddressPtr> &
   std::vector<TaskInfoPtr> task_info_list;
   std::shared_ptr<LabelSetTaskInfo> task_info_ptr = std::make_shared<LabelSetTaskInfo>(unique_name_, stream_id, label_);
   MS_EXCEPTION_IF_NULL(task_info_ptr);
-  task_info_list.emplace_back(task_info_ptr);
+  (void)task_info_list.emplace_back(task_info_ptr);
   return task_info_list;
 }
 }  // namespace kernel
@@ -301,8 +301,7 @@ std::vector<size_t> FusionBuildTbeJsonCreator::GetDescOutputIndex(const std::vec
   return desc_output_index;
 }

-bool FusionBuildTbeJsonCreator::AttrsJsonPostProcessing(const AnfNodePtr &anf_node, const OpInfoPtr &op_info_ptr,
-                                                        nlohmann::json *attrs_json) {
+bool FusionBuildTbeJsonCreator::AttrsJsonPostProcessing(const AnfNodePtr &, const OpInfoPtr &, nlohmann::json *) {
   return true;
 }
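Dropping the names of unused parameters keeps the signature intact (the method presumably satisfies a base-class contract) while silencing unused-parameter warnings. A minimal standalone version of the idiom (AttrsPostProcessing is a simplified stand-in for the method above):

    #include <string>

    // The contract fixes the signature even though this implementation needs
    // none of the arguments; unnamed parameters suppress the warning without
    // changing any caller.
    bool AttrsPostProcessing(const std::string & /* node_name */, int /* op_id */) {
      return true;
    }

    int main() { return AttrsPostProcessing("node", 0) ? 0 : 1; }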
@@ -87,7 +87,7 @@ bool HasFraczGroupAttrAndSet(const AnfNodePtr &node, size_t index, int64_t group
   if (common::AnfAlgo::HasNodeAttr(kAttrFracZGroupIdx, cnode)) {
     fz_group_idx = common::AnfAlgo::GetNodeAttr<std::vector<int64_t>>(cnode, kAttrFracZGroupIdx);
     if (input_num > fz_group_idx.size()) {
-      fz_group_idx.insert(fz_group_idx.begin(), input_num - fz_group_idx.size(), 1);
+      (void)fz_group_idx.insert(fz_group_idx.begin(), input_num - fz_group_idx.size(), 1);
     }
     if (fz_group_idx[index] == 1) {
       fz_group_idx[index] = groups;
@@ -133,7 +133,7 @@ AnfNodePtr SplitFission::DoFission(const FuncGraphPtr &func_graph, const CNodePt
   int64_t cur_output_index = 0;
   while (num_split - cur_output_index > divisor) {
     auto tuple_getitem = CreateTupleGetItem(func_graph, base_splitv, nodes_num);
-    base_splitv_outputs.push_back(tuple_getitem);
+    (void)base_splitv_outputs.push_back(tuple_getitem);
     CNodePtr new_splitv = CreateSplitVNode(func_graph, tuple_getitem);
     std::vector<int64_t> size_splits_new(size_splits.begin() + nodes_num * divisor,
                                          size_splits.begin() + (nodes_num + 1) * divisor);
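Unlike std::transform and emplace_back, std::vector::push_back returns void, so the (void) cast in this hunk and the two that follow changes nothing for the compiler; presumably it is applied mechanically for consistency across all container-mutation calls:

    #include <vector>

    int main() {
      std::vector<int> v;
      (void)v.push_back(1);  // push_back returns void; the cast is purely stylistic
      return v.size() == 1 ? 0 : 1;
    }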
@@ -154,7 +154,7 @@ AnfNodePtr SplitFission::DoFission(const FuncGraphPtr &func_graph, const CNodePt
   auto last_node_num_split = num_split - cur_output_index;
   if (last_node_num_split > 1) {
     auto tuple_getitem = CreateTupleGetItem(func_graph, base_splitv, nodes_num);
-    base_splitv_outputs.push_back(tuple_getitem);
+    (void)base_splitv_outputs.push_back(tuple_getitem);
     CNodePtr new_splitv = CreateSplitVNode(func_graph, tuple_getitem);
     std::vector<int64_t> size_splits_new_last(size_splits.begin() + nodes_num * divisor, size_splits.end());
     SetAttrForSplitVNode(new_splitv, size_splits_new_last, split_dim, last_node_num_split);
@@ -168,9 +168,9 @@ AnfNodePtr SplitFission::DoFission(const FuncGraphPtr &func_graph, const CNodePt
       size_splits_base.emplace_back(last_split_size);
     } else {
       auto tuple_getitem = CreateTupleGetItem(func_graph, base_splitv, nodes_num);
-      base_splitv_outputs.push_back(tuple_getitem);
+      (void)base_splitv_outputs.push_back(tuple_getitem);
       (void)make_tuple_inputs.emplace_back(tuple_getitem);
-      size_splits_base.emplace_back(size_splits[size_splits.size() - 1]);
+      (void)size_splits_base.emplace_back(size_splits[size_splits.size() - 1]);
     }
     nodes_num++;
   }
@@ -162,7 +162,7 @@ CNodePtr CreateDynamicShapeCNode(const FuncGraphPtr &func_graph, const AnfNodePt

 CNodePtr CreateDropoutGenMaskCNode(const FuncGraphPtr &func_graph, const CNodePtr &dropout,
                                    const ValueNodePtr &keep_prob_value, const abstract::ShapePtr &input_shape,
-                                   const bool use_v3, const PatternProcessPass &pass) {
+                                   const bool use_v3, const PatternProcessPass &) {
   MS_EXCEPTION_IF_NULL(func_graph);
   MS_EXCEPTION_IF_NULL(dropout);
   MS_EXCEPTION_IF_NULL(input_shape);