!31434 codecheck clean

Merge pull request !31434 from wuweikang/clean
i-robot 2022-03-19 05:38:41 +00:00 committed by Gitee
commit 5b3a5787be
22 changed files with 42 additions and 37 deletions

@@ -141,7 +141,7 @@ AnfNodePtr ReduceSumOptimizer::NewAssistValueNode(const CNodePtr &cnode, const K
   for (auto &iter : value_tuple->value()) {
     auto item = GetValue<int64_t>(iter->cast<ScalarPtr>());
     if (item < 0) {
-      axes_value.emplace_back(item + x_shape->shape().size());
+      (void)axes_value.emplace_back(item + x_shape->shape().size());
    } else {
       axes_value.emplace_back(item);
     }
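
Almost every hunk in this PR applies the same codecheck rule: when a call's return value is intentionally ignored (emplace_back, insert, transform, copy), the result is explicitly discarded with a (void) cast so static analysis stops flagging it. A minimal standalone sketch of the idiom (illustrative code, not MindSpore's):

// (void)-discard idiom: silence "return value ignored" diagnostics on purpose.
#include <algorithm>
#include <iterator>
#include <vector>

int main() {
  std::vector<int> src{1, 2, 3};
  std::vector<int> dst;
  // Since C++17, emplace_back returns a reference to the new element;
  // the cast records that the result is deliberately unused.
  (void)dst.emplace_back(0);
  // std::transform returns the output iterator; discarded the same way.
  (void)std::transform(src.begin(), src.end(), std::back_inserter(dst), [](int v) { return v * 2; });
  return 0;
}

The same idiom accounts for the transform, insert, and copy hunks throughout the rest of the diff.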

@@ -21,7 +21,7 @@
 namespace mindspore {
 std::vector<Output> CellBase::operator()(const std::vector<Input> &inputs) const { return Clone()->Construct(inputs); }
-ParameterCell::ParameterCell(const ParameterCell &cell) {
+ParameterCell::ParameterCell(const ParameterCell &cell) : Cell<ParameterCell>(cell) {
   auto tmp_ptr = cell.tensor_.Clone();
   tensor_ = *tmp_ptr;
   MSTensor::DestroyTensorPtr(tmp_ptr);
@@ -37,7 +37,7 @@ ParameterCell &ParameterCell::operator=(const ParameterCell &cell) {
   return *this;
 }
-ParameterCell::ParameterCell(ParameterCell &&cell) : tensor_(cell.tensor_) {}
+ParameterCell::ParameterCell(ParameterCell &&cell) : Cell<ParameterCell>(cell), tensor_(cell.tensor_) {}
 ParameterCell &ParameterCell::operator=(ParameterCell &&cell) {
   if (&cell == this) {
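
Both ParameterCell hunks fix the same defect class: a copy or move constructor that omits its base class from the initializer list default-constructs the base subobject instead of copying or moving it. A sketch with hypothetical types:

struct Base {
  Base() = default;
  Base(const Base &) = default;
  int state = 0;
};

struct Derived : Base {
  Derived() = default;
  // Without ": Base(other)" the base would be default-constructed and
  // other.state silently lost; codecheck flags the omission.
  Derived(const Derived &other) : Base(other), value(other.value) {}
  int value = 0;
};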

@@ -19,8 +19,8 @@
 namespace mindspore {
 VectorRef MSTensorRef::Convert(const std::vector<MSTensor> &tensors) {
   VectorRef res;
-  std::transform(tensors.begin(), tensors.end(), std::back_inserter(res),
-                 [](const MSTensor &t) { return MSTensorRef(t); });
+  (void)std::transform(tensors.begin(), tensors.end(), std::back_inserter(res),
+                       [](const MSTensor &t) { return MSTensorRef(t); });
   return res;
 }

@@ -36,7 +36,7 @@ class AscendKernelMod : public KernelMod {
   virtual std::vector<TaskInfoPtr> GenTask(const std::vector<AddressPtr> &, const std::vector<AddressPtr> &,
                                            const std::vector<AddressPtr> &, uint32_t) = 0;
   uint32_t block_dim() { return block_dim_; }
-  uint32_t stream_id() { return stream_id_; }
+  uint32_t stream_id() const { return stream_id_; }
   virtual bool NeedDump() {
 #ifndef ENABLE_SECURITY
     const auto &dump_json = DumpJsonParser::GetInstance();
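
stream_id() only reads a member, so it can be const-qualified and therefore callable through const references to the kernel. A minimal sketch of the rule (hypothetical class):

#include <cstdint>

class Kernel {
 public:
  std::uint32_t stream_id() const { return stream_id_; }  // read-only, so const

 private:
  std::uint32_t stream_id_ = 0;
};

// Compiles only because stream_id() is const-qualified.
void Inspect(const Kernel &k) { (void)k.stream_id(); }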

@@ -81,7 +81,8 @@ bool MPICollective::CreateCommGroup(const std::string &name, const std::vector<u
   CHECK_RET(rtSetDevice(local_rank_id_), RT_ERROR_NONE, "Call rtSetDevice error.");
   HcclRootInfo rootInfo;
   if (static_cast<unsigned int>(rank_id_) == ranks[0]) {
-    CHECK_RET(HcclGetRootInfo(&rootInfo), ::HcclResult::HCCL_SUCCESS, "HcclGetRootInfo failed.");
+    CHECK_RET(static_cast<int32_t>(HcclGetRootInfo(&rootInfo)), static_cast<int32_t>(::HcclResult::HCCL_SUCCESS),
+              "HcclGetRootInfo failed.");
   }
   MPI_Group mpi_group = MPI_GROUP_NULL;
   CHECK_RET(MPI_Group_incl(comm_group_world_, group_ranks.size(), group_ranks.data(), &mpi_group), MPI_SUCCESS,
@@ -102,9 +103,9 @@ bool MPICollective::CreateCommGroup(const std::string &name, const std::vector<u
     return false;
   }
-  CHECK_RET(HcclCommInitRootInfo(static_cast<uint32_t>(ranks.size()), &rootInfo, static_cast<uint32_t>(group_rank[0]),
-                                 &group_hcomm),
-            ::HcclResult::HCCL_SUCCESS, "HcclCommInitRootInfo failed.");
+  CHECK_RET(static_cast<int32_t>(HcclCommInitRootInfo(static_cast<uint32_t>(ranks.size()), &rootInfo,
+                                                      static_cast<uint32_t>(group_rank[0]), &group_hcomm)),
+            static_cast<int32_t>(::HcclResult::HCCL_SUCCESS), "HcclCommInitRootInfo failed.");
   group_comm_[name] = group_hcomm;
   group_info_[name] = {group_rank[0], static_cast<int>(ranks.size())};
   return true;
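
Both HCCL hunks cast the call result and the expected value to int32_t before handing them to CHECK_RET, which presumably compares and logs its arguments; the casts keep the comparison and any stream output well-formed without implicit enum conversions. A reduced sketch (the macro below is a stand-in, not MindSpore's CHECK_RET):

#include <cstdint>
#include <iostream>

enum class HcclResult { HCCL_SUCCESS = 0, HCCL_E_PARA = 1 };

// Stand-in for a CHECK_RET-style macro that compares and logs its arguments.
#define CHECK_RET(expr, expected, msg)                    \
  do {                                                    \
    auto ret = (expr);                                    \
    if (ret != (expected)) {                              \
      std::cerr << (msg) << " ret: " << ret << std::endl; \
    }                                                     \
  } while (0)

HcclResult FakeHcclCall() { return HcclResult::HCCL_SUCCESS; }

int main() {
  // The enum has no implicit conversion the logger can use, so both sides
  // are cast to int32_t before the comparison and the stream insertion.
  CHECK_RET(static_cast<std::int32_t>(FakeHcclCall()), static_cast<std::int32_t>(HcclResult::HCCL_SUCCESS),
            "FakeHcclCall failed.");
  return 0;
}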

@@ -187,7 +187,7 @@ bool AicpuExtInfoHandler::UpdateOutputShapeAndType(uint32_t output_index, const
   }
   std::vector<int64_t> tmp_shape;
-  std::transform(shape.begin(), shape.end(), std::back_inserter(tmp_shape), SizeToLong);
+  (void)std::transform(shape.begin(), shape.end(), std::back_inserter(tmp_shape), SizeToLong);
   if (output_index >= output_shape_and_type_.size()) {
     MS_LOG(ERROR) << "Invalid output_index: " << output_index
                   << " the size of output_shape_and_type_ is: " << output_shape_and_type_.size();

@@ -51,8 +51,8 @@ AicpuTask::~AicpuTask() {
 void AicpuTask::Distribute() {
   MS_LOG(INFO) << "InitAicpuTask start.";
   std::vector<void *> io_addrs;
-  io_addrs.insert(io_addrs.end(), task_info_->input_data_addrs().begin(), task_info_->input_data_addrs().end());
-  io_addrs.insert(io_addrs.end(), task_info_->output_data_addrs().begin(), task_info_->output_data_addrs().end());
+  (void)io_addrs.insert(io_addrs.end(), task_info_->input_data_addrs().begin(), task_info_->input_data_addrs().end());
+  (void)io_addrs.insert(io_addrs.end(), task_info_->output_data_addrs().begin(), task_info_->output_data_addrs().end());
   auto io_addrs_num = static_cast<uint32_t>(io_addrs.size());
   auto io_addrs_size = static_cast<uint32_t>(io_addrs_num * sizeof(void *));
   constexpr uint32_t io_addr_offset = sizeof(aicpu::AicpuParamHead);

@@ -26,7 +26,7 @@
 namespace mindspore::ge::model_runner {
 class LabelGuard {
  public:
-  explicit LabelGuard(void *label_info) : label_info_(reinterpret_cast<uintptr_t>(label_info)) {}
+  explicit LabelGuard(const void *label_info) : label_info_(reinterpret_cast<uintptr_t>(label_info)) {}
   ~LabelGuard();
   void *GetLabelInfo() noexcept { return reinterpret_cast<void *>(label_info_); }
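
The guard only stores the address, so accepting const void * widens the constructor to pointers-to-const without changing behavior; reinterpret_cast to uintptr_t takes a const pointer as-is. Sketch (hypothetical class name):

#include <cstdint>

class AddressGuard {
 public:
  // const void * lets callers pass const data without a const_cast.
  explicit AddressGuard(const void *p) : addr_(reinterpret_cast<std::uintptr_t>(p)) {}
  std::uintptr_t address() const { return addr_; }

 private:
  std::uintptr_t addr_;
};

int main() {
  const int x = 42;
  AddressGuard g(&x);  // accepted: only the address is stored
  return g.address() == 0 ? 1 : 0;
}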

@@ -336,7 +336,10 @@ class StreamSwitchTaskInfo : public TaskInfo {
         value_addr_(value_addr),
         cond_(cond),
         data_type_(data_type) {}
-  ~StreamSwitchTaskInfo() override {}
+  ~StreamSwitchTaskInfo() override {
+    input_addr_ = nullptr;
+    value_addr_ = nullptr;
+  }
   int64_t true_stream_id() const { return true_stream_id_; }
   void *input_addr() const { return input_addr_; }
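
The destructor change follows the repository's cleanup convention of resetting raw pointer members on destruction; the pointers are presumably non-owning, so nothing is freed, they are merely cleared. Sketch with hypothetical names:

// Non-owning raw pointers cleared in the destructor so no dangling values
// remain in the object's memory after it is torn down.
class SwitchTask {
 public:
  ~SwitchTask() {
    input_addr_ = nullptr;
    value_addr_ = nullptr;
  }

 private:
  void *input_addr_ = nullptr;
  void *value_addr_ = nullptr;
};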

@@ -105,7 +105,7 @@ void AllToAllvCalcParam::CalcMemOffset(const std::vector<size_t> &mem_sizes, con
       MS_LOG(EXCEPTION) << "Invalid rank id " << rank_ids[i] << " at index " << i << " as rank size " << rank_size_;
     }
     (*counts)[LongToSize(rank_ids[i])] = static_cast<int64_t>(real_sizes[i]);
-    (*displs)[LongToSize(rank_ids[i])] = mem_offset[i];
+    (*displs)[LongToSize(rank_ids[i])] = static_cast<int64_t>(mem_offset[i]);
   }
   return;
 }
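
mem_offset holds size_t values while *displs stores int64_t, so the assignment changes signedness; the explicit static_cast documents a conversion codecheck would otherwise report as implicit. Minimal sketch:

#include <cstddef>
#include <cstdint>
#include <vector>

int main() {
  std::vector<std::size_t> mem_offset{0, 128, 384};  // unsigned byte offsets
  std::vector<std::int64_t> displs(mem_offset.size());
  for (std::size_t i = 0; i < mem_offset.size(); ++i) {
    // size_t -> int64_t flips signedness; the cast makes that visible.
    displs[i] = static_cast<std::int64_t>(mem_offset[i]);
  }
  return 0;
}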

@@ -96,7 +96,7 @@ void HcclMetadataInfo(const CNodePtr &kernel_node, std::vector<std::shared_ptr<K
   std::vector<TypeId> outputs_type;
   size_t output_num = common::AnfAlgo::GetOutputTensorNum(kernel_node);
   for (size_t output_index = 0; output_index < output_num; ++output_index) {
-    outputs_format.emplace_back(GetKernelFormat(kernel_node, output_index));
+    (void)outputs_format.emplace_back(GetKernelFormat(kernel_node, output_index));
     if (op_name == kReceive) {
       outputs_type.push_back(recv_type);
     } else {

@@ -210,7 +210,7 @@ bool FusionBuildTbeJsonCreator::GenInputsJson(const AnfNodePtr &anf_node, nlohma
       optional_input_desc[kJShape] = kJNull;
       optional_input_desc[kJDataType] = 0;
       optional_index_++;
-      input_desc_list_tmp.emplace_back(optional_input_desc);
+      (void)input_desc_list_tmp.emplace_back(optional_input_desc);
     }
   std::vector<nlohmann::json> input_desc_list;
   TbeAdapter::InputOrderPass<nlohmann::json>(cnode, input_desc_list_tmp, &input_desc_list);
@@ -293,7 +293,7 @@ void FusionBuildTbeJsonCreator::GenReusedOutputDesc(const AnfNodePtr &anf_node,
 std::vector<size_t> FusionBuildTbeJsonCreator::GetDescOutputIndex(const std::vector<int64_t> &output_used_nums) {
   std::vector<size_t> desc_output_index = {};
   for (size_t idx = 0; idx < output_used_nums.size(); ++idx) {
-    desc_output_index.emplace_back(idx);
+    (void)desc_output_index.emplace_back(idx);
     if (output_used_nums[idx] > 1) {
       desc_output_index.emplace_back(idx);
     }

@@ -29,7 +29,7 @@ namespace mindspore {
 namespace opt {
 class BnupdateEltwiseFusionPass : public FusionBasePass {
  public:
-  explicit BnupdateEltwiseFusionPass(FusionIdAllocatorPtr idAllocator)
+  explicit BnupdateEltwiseFusionPass(const FusionIdAllocatorPtr &idAllocator)
       : FusionBasePass("BnupdateEltwiseFusionPass", idAllocator) {
     PassSwitchManager::GetInstance().RegistLicPass(name(), OptPassEnum::BnupdateEltwiseFusionPass);
   }
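
FusionIdAllocatorPtr is presumably a std::shared_ptr alias; taking it by value at every pass constructor copies the handle and bumps the atomic reference count, so the fusion passes now take it by const reference (the same change appears in the next two hunks). Sketch with assumed stand-in types:

#include <memory>
#include <string>
#include <utility>

struct FusionIdAllocator {};  // stand-in for the real allocator type
using FusionIdAllocatorPtr = std::shared_ptr<FusionIdAllocator>;

class FusionBasePass {
 public:
  // const& avoids a shared_ptr copy (an atomic ref-count increment) per
  // call; the member initialization below still shares ownership.
  FusionBasePass(std::string name, const FusionIdAllocatorPtr &id_allocator)
      : name_(std::move(name)), id_allocator_(id_allocator) {}

 private:
  std::string name_;
  FusionIdAllocatorPtr id_allocator_;
};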

@@ -29,7 +29,7 @@ namespace mindspore {
 namespace opt {
 class ConvBnReduceFusionPass : public FusionBasePass {
  public:
-  explicit ConvBnReduceFusionPass(FusionIdAllocatorPtr idAllocator)
+  explicit ConvBnReduceFusionPass(const FusionIdAllocatorPtr &idAllocator)
       : FusionBasePass("ConvBnReduceFusionPass", idAllocator) {
     PassSwitchManager::GetInstance().RegistLicPass(name(), OptPassEnum::ConvBnReduceFusionPass);
   }

@@ -29,7 +29,8 @@ namespace mindspore {
 namespace opt {
 class EltwiseFusionPass : public FusionBasePass {
  public:
-  explicit EltwiseFusionPass(FusionIdAllocatorPtr idAllocator) : FusionBasePass("EltwiseFusionPass", idAllocator) {
+  explicit EltwiseFusionPass(const FusionIdAllocatorPtr &idAllocator)
+      : FusionBasePass("EltwiseFusionPass", idAllocator) {
     PassSwitchManager::GetInstance().RegistLicPass(name(), OptPassEnum::EltwiseFusionPass);
   }
   ~EltwiseFusionPass() override = default;

@@ -186,7 +186,7 @@ CNodePtr DealRefAndSpiltUnSupportedTransdata::DealRefForMultipleOutput(
   auto ref_infos = op_info->ref_infos();
   std::vector<AnfNodePtr> make_tuple_inputs;
   AbstractBasePtrList abstract_list;
-  make_tuple_inputs.emplace_back(NewValueNode(prim::kPrimMakeTuple));
+  (void)make_tuple_inputs.emplace_back(NewValueNode(prim::kPrimMakeTuple));
   size_t output_num = common::AnfAlgo::GetOutputTensorNum(cnode);
   for (size_t output_index = 0; output_index < output_num; ++output_index) {
     CNodePtr final_node = CreatTupleGetItemNode(func_graph, cnode, output_index);

@@ -146,10 +146,10 @@ std::vector<KernelWithIndex> GetCNodeNeighborFraczNodes(const FuncGraphManagerPt
     if (AnfAlgo::GetOutputFormat(cnode, i) == kOpFormat_FRAC_Z) {
       auto output = GetOutputItem(manager, cnode, groups, i);
       if (output != nullptr) {
-        std::transform(node_user[output].begin(), node_user[output].end(), std::back_inserter(ret),
-                       [](KernelWithIndex node_index) {
-                         return KernelWithIndex{node_index.first, node_index.second - 1};
-                       });
+        (void)std::transform(node_user[output].begin(), node_user[output].end(), std::back_inserter(ret),
+                             [](KernelWithIndex node_index) {
+                               return KernelWithIndex{node_index.first, node_index.second - 1};
+                             });
       }
     }
   }
@@ -205,7 +205,7 @@ bool SetAttrFraczGroup(const FuncGraphPtr &func_graph, const CNodePtr &cnode) {
       continue;
     }
     auto next_nodes = GetNeighborFraczNodes(manager, node_index.first, node_index.second, groups);
-    std::copy(next_nodes.begin(), next_nodes.end(), std::back_inserter(todo));
+    (void)std::copy(next_nodes.begin(), next_nodes.end(), std::back_inserter(todo));
   }
   return true;
 }
@@ -229,7 +229,7 @@ bool SetAttrFraczGroup(const FuncGraphPtr &func_graph, const ParameterPtr &param
       continue;
     }
     auto next_nodes = GetNeighborFraczNodes(manager, node_index.first, node_index.second, groups);
-    std::copy(next_nodes.begin(), next_nodes.end(), std::back_inserter(todo));
+    (void)std::copy(next_nodes.begin(), next_nodes.end(), std::back_inserter(todo));
   }
   return true;
 }

@@ -160,7 +160,7 @@ AnfNodePtr SyncBnSplit::SyncBNSplitForTBE(const FuncGraphPtr &func_graph, const
   std::vector<AnfNodePtr> allreduce_mul_outputs;
   for (size_t i = 0; i < bn_training_reduce_outputs.size(); ++i) {
     auto allreduce_mul_output = CreateAllReduceAndMul(func_graph, bn_training_reduce_outputs[i], cnode, *this);
-    allreduce_mul_outputs.emplace_back(allreduce_mul_output);
+    (void)allreduce_mul_outputs.emplace_back(allreduce_mul_output);
   }
   // Create BNTrainingUpdate node
@@ -239,7 +239,6 @@ AnfNodePtr CreateAllReduceAndMul(const FuncGraphPtr &graph, const AnfNodePtr &al
   if (opid_pos == std::string::npos || opid_pos + kPositionOffset >= sync_bn_opname.size()) {
     MS_LOG(EXCEPTION) << "Op[" << sync_bn_cnode->DebugString() << "] has no opid."
                       << trace::DumpSourceLines(sync_bn_cnode);
-    return nullptr;
   }
   int64_t opid = std::stol(sync_bn_opname.substr(opid_pos + kPositionOffset));
   // user defined fusion should be greater than 1
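
MS_LOG(EXCEPTION) throws, so the return nullptr; after it can never execute and codecheck reports it as dead code; the hunk deletes only the unreachable statement. A reduced illustration with a hypothetical throwing macro:

#include <cstddef>
#include <stdexcept>
#include <string>

// Stand-in for MS_LOG(EXCEPTION): it always throws, so it never returns.
#define LOG_EXCEPTION(msg) throw std::runtime_error(msg)

int ParseOpId(const std::string &name, std::size_t pos) {
  if (pos == std::string::npos) {
    LOG_EXCEPTION("op name has no opid: " + name);
    // return -1;  // dead code: the macro above always throws
  }
  return std::stoi(name.substr(pos));
}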

@@ -35,7 +35,7 @@ void AddNewOutputs(const FuncGraphPtr &func_graph, const AnfNodePtr &new_splitv,
   MS_EXCEPTION_IF_NULL(inputs);
   std::vector<AnfNodePtr> new_splitv_output;
   CreateMultipleOutputsOfAnfNode(func_graph, new_splitv, LongToSize(outputs_num), &new_splitv_output);
-  inputs->insert(inputs->end(), new_splitv_output.begin(), new_splitv_output.end());
+  (void)inputs->insert(inputs->end(), new_splitv_output.begin(), new_splitv_output.end());
 }
 AnfNodePtr CreateTupleGetItem(const FuncGraphPtr &func_graph, const AnfNodePtr &input, int64_t index) {
@@ -89,7 +89,7 @@ void SetAttrAndAbstractForBaseSplitv(const CNodePtr &origin_cnode, const CNodePt
   auto num_split_l = LongToSize(num_split);
   for (size_t i = 0; i < num_split_l; ++i) {
     output_shape[split_dim_l] = LongToSize(size_splits_base[i]);
-    base_output_shapes_base.emplace_back(output_shape);
+    (void)base_output_shapes_base.emplace_back(output_shape);
     common::AnfAlgo::SetOutputInferTypeAndShape({type_id}, {output_shape}, base_splitv_outputs[i].get());
   }
   common::AnfAlgo::SetOutputInferTypeAndShape(base_type_ids, base_output_shapes_base, base_splitv.get());
@@ -169,7 +169,7 @@ AnfNodePtr SplitFission::DoFission(const FuncGraphPtr &func_graph, const CNodePt
     } else {
       auto tuple_getitem = CreateTupleGetItem(func_graph, base_splitv, nodes_num);
       base_splitv_outputs.push_back(tuple_getitem);
-      make_tuple_inputs.emplace_back(tuple_getitem);
+      (void)make_tuple_inputs.emplace_back(tuple_getitem);
       size_splits_base.emplace_back(size_splits[size_splits.size() - 1]);
     }
     nodes_num++;

@@ -167,7 +167,7 @@ const AnfNodePtr TopKSplit::Process(const FuncGraphPtr &func_graph, const AnfNod
   }
   // Copy a new node to check supported.
   std::vector<AnfNodePtr> new_inputs{NewValueNode(std::make_shared<Primitive>(kTopKOpName))};
-  new_inputs.insert(new_inputs.end(), cnode->inputs().begin() + 1, cnode->inputs().end());
+  (void)new_inputs.insert(new_inputs.end(), cnode->inputs().begin() + 1, cnode->inputs().end());
   CNodePtr new_cnode = NewCNode(new_inputs, func_graph);
   MS_EXCEPTION_IF_NULL(new_cnode);
   new_cnode->set_abstract(cnode->abstract());

@@ -24,7 +24,7 @@ namespace mindspore {
 namespace opt {
 class MatmulBiasaddFusion : public PatternProcessPassWithSwitch {
  public:
-  explicit MatmulBiasaddFusion(bool multigraph = true, string pass_name = "matmul_biasadd_fusion")
+  explicit MatmulBiasaddFusion(bool multigraph = true, const string &pass_name = "matmul_biasadd_fusion")
       : PatternProcessPassWithSwitch(pass_name, multigraph) {
     x0_ = std::make_shared<Var>();
     x1_ = std::make_shared<Var>();

@@ -110,7 +110,8 @@ std::vector<int64_t> CalGenMaskOutputShape(const std::vector<int64_t> &shape) {
 std::vector<int64_t> CalGenMaskV3OutputShape(const std::vector<int64_t> &shape, TypeId type) {
   // [*dim, M, N] -> [*dim, N/16, M/16, 16, 16] if M%16=0 and N%16=0
-  if (shape.size() >= 2 && shape[shape.size() - 1] % kCubeSize == 0 && shape[shape.size() - 2] % kCubeSize == 0) {
+  if (shape.size() >= 2 && shape[shape.size() - 1] % static_cast<int64_t>(kCubeSize) == 0 &&
+      shape[shape.size() - 2] % static_cast<int64_t>(kCubeSize) == 0) {
     auto fnz_shape = trans::TransShapeToDevice(shape, kOpFormat_FRAC_NZ, type);
     return fnz_shape;
   }