pclint code clean

yuchaojie 2021-07-30 10:08:07 +08:00
parent 09cbb960e8
commit 210bfeb6e0
9 changed files with 47 additions and 50 deletions

@@ -27,7 +27,7 @@
 namespace mindspore {
 namespace opt {
 void Conv2DBackpropEltwiseFusionPass::MatchConv2DBackpropInputEltwise(const CNodePtr &cnode,
-                                                                      const session::KernelGraph &kernel_graph,
+                                                                      const session::KernelGraph &,
                                                                       FusedNodeRecord *candidate_fusion) {
   MS_EXCEPTION_IF_NULL(cnode);
   MS_EXCEPTION_IF_NULL(candidate_fusion);
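PC-lint flags parameters that are declared but never read inside the function body. Leaving the parameter unnamed keeps the signature (and therefore every caller and override) unchanged while telling both lint and the compiler that the argument is intentionally ignored. A minimal sketch of the pattern, with hypothetical names:

```cpp
struct Graph {};

// Before: `graph` is never read, so lint reports an unused parameter.
// void MatchPattern(const Graph &graph, int *out) { *out = 1; }

// After: the parameter stays in the signature but is unnamed, silencing the warning.
void MatchPattern(const Graph & /* graph */, int *out) { *out = 1; }

int main() {
  Graph g;
  int result = 0;
  MatchPattern(g, &result);  // call sites are unaffected
  return result == 1 ? 0 : 1;
}
```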

@@ -71,11 +71,11 @@ CNodePtr CreateFusionOp(const std::vector<AnfNodePtr> &inputs_list, const std::v
   std::vector<std::string> input_names;
   for (uint8_t i = 0; i < inputs_list.size(); i++) {
-    input_names.emplace_back("input" + std::to_string(i));
+    (void)input_names.emplace_back("input" + std::to_string(i));
   }
   std::vector<std::string> output_names;
   for (uint8_t i = 0; i < outputs_list.size(); i++) {
-    output_names.emplace_back("output" + std::to_string(i));
+    (void)output_names.emplace_back("output" + std::to_string(i));
   }
   ValuePtr input_names_v = MakeValue(input_names);
@@ -111,8 +111,8 @@ kernel::KernelBuildInfoPtr CreateFusionOpKernelInfo(const std::vector<AnfNodePtr
   std::vector<TypeId> inputs_data_type;
   for (const auto &input : inputs_list) {
     auto real_input = AnfAlgo::VisitKernel(input, 0);
-    inputs_format.emplace_back(AnfAlgo::GetOutputFormat(real_input.first, real_input.second));
-    inputs_data_type.emplace_back(AnfAlgo::GetOutputDeviceDataType(real_input.first, real_input.second));
+    (void)inputs_format.emplace_back(AnfAlgo::GetOutputFormat(real_input.first, real_input.second));
+    (void)inputs_data_type.emplace_back(AnfAlgo::GetOutputDeviceDataType(real_input.first, real_input.second));
   }
   // outputs format and data type
   std::vector<std::string> outputs_format;
@@ -121,13 +121,13 @@ kernel::KernelBuildInfoPtr CreateFusionOpKernelInfo(const std::vector<AnfNodePtr
     if (AnfAlgo::GetCNodeName(output) == prim::kPrimTupleGetItem->name()) {
       auto tuple_getitem = output->cast<CNodePtr>();
       MS_EXCEPTION_IF_NULL(tuple_getitem);
-      outputs_format.emplace_back(AnfAlgo::GetOutputFormat(
+      (void)outputs_format.emplace_back(AnfAlgo::GetOutputFormat(
         tuple_getitem->input(kIndex1), LongToSize(GetValue<int64_t>(GetValueNode(tuple_getitem->input(kIndex2))))));
-      outputs_data_type.emplace_back(AnfAlgo::GetOutputDeviceDataType(
+      (void)outputs_data_type.emplace_back(AnfAlgo::GetOutputDeviceDataType(
         tuple_getitem->input(kIndex1), LongToSize(GetValue<int64_t>(GetValueNode(tuple_getitem->input(kIndex2))))));
     } else {
-      outputs_format.emplace_back(AnfAlgo::GetOutputFormat(output, 0));
-      outputs_data_type.emplace_back(AnfAlgo::GetOutputDeviceDataType(output, 0));
+      (void)outputs_format.emplace_back(AnfAlgo::GetOutputFormat(output, 0));
+      (void)outputs_data_type.emplace_back(AnfAlgo::GetOutputDeviceDataType(output, 0));
     }
   }
   builder.SetInputsFormat(inputs_format);
@@ -292,7 +292,7 @@ void GetFusionScopeOutputNodeList(session::KernelGraph *kernel_graph,
     std::vector<AnfNodePtr> tuple_getitem_nodes;
     for (auto &user : manager->node_users()[node]) {
       if (AnfAlgo::CheckPrimitiveType(user.first, prim::kPrimTupleGetItem)) {
-        tuple_getitem_nodes.emplace_back(user.first);
+        (void)tuple_getitem_nodes.emplace_back(user.first);
       }
     }
     std::sort(tuple_getitem_nodes.begin(), tuple_getitem_nodes.end(), TupleGetitemNodeCompare);
@@ -386,7 +386,7 @@ void RemoveCircle(const session::KernelGraph &kernel_graph,
   for (auto &[fusion_id, fusion_info] : *buffer_fusion_infos) {
     bool has_circle = CheckCircle(kernel_graph, fusion_info);
     if (has_circle) {
-      fusion_ids.emplace_back(fusion_id);
+      (void)fusion_ids.emplace_back(fusion_id);
     }
   }
@@ -475,8 +475,8 @@ bool UbPatternFusion::ReplaceFusionOp(std::unordered_map<int64_t, BufferFusionIn
   for (const auto &out_node : buffer_fusion_info.outputs_list) {
     size_t out_num = AnfAlgo::GetOutputTensorNum(out_node);
     for (size_t idx = 0; idx < out_num; ++idx) {
-      types.emplace_back(AnfAlgo::GetOutputInferDataType(out_node, idx));
-      shapes.emplace_back(AnfAlgo::GetOutputInferShape(out_node, idx));
+      (void)types.emplace_back(AnfAlgo::GetOutputInferDataType(out_node, idx));
+      (void)shapes.emplace_back(AnfAlgo::GetOutputInferShape(out_node, idx));
     }
   }
   if (types.empty() || shapes.empty()) {
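The repeated `(void)` casts throughout this file address PC-lint's ignored-return-value diagnostic (traditionally message 534): since C++17, `emplace_back` returns a reference to the newly inserted element, and the cast documents that this reference is deliberately discarded. A self-contained sketch of the idiom:

```cpp
#include <cstddef>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> input_names;
  for (std::size_t i = 0; i < 3; ++i) {
    // C++17 emplace_back returns std::string&; casting to void records
    // that the returned reference is intentionally unused.
    (void)input_names.emplace_back("input" + std::to_string(i));
  }
  return input_names.size() == 3 ? 0 : 1;
}
```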

@@ -65,10 +65,10 @@ void ConvertReduceAttrFraczAnd6HD(const CNodePtr &cnode) {
   for (auto elem : axis) {
     switch (elem) {
       case kAxis_H:
-        convert_axis.emplace_back(kAxis_6HD_H);
+        (void)convert_axis.emplace_back(kAxis_6HD_H);
         break;
       case kAxis_W:
-        convert_axis.emplace_back(kAxis_6HD_W);
+        (void)convert_axis.emplace_back(kAxis_6HD_W);
         break;
       default:
         MS_LOG(INFO) << "reduce axis is axis : [" << elem << "]"

@@ -196,23 +196,23 @@ AnfNodePtr AddLSTMInputGradNode(const FuncGraphPtr &func_graph, const CNodePtr &
                                          AnfAlgo::GetOutputInferShape(dynamic_rnn_grad_cnode->input(kIndex6), 0)[0],
                                          AnfAlgo::GetOutputInferShape(dynamic_rnn_grad_cnode->input(kIndex6), 0)[1]};
       AnfAlgo::SetOutputInferTypeAndShape({kNumberTypeFloat32}, {reshape_out_shape}, reshape.get());
-      basic_lstm_cell_c_state_grad_inputs.emplace_back(reshape);
+      (void)basic_lstm_cell_c_state_grad_inputs.emplace_back(reshape);
     } else {
-      basic_lstm_cell_c_state_grad_inputs.emplace_back(lstm_split_c_outputs[idx - 1]);
+      (void)basic_lstm_cell_c_state_grad_inputs.emplace_back(lstm_split_c_outputs[idx - 1]);
     }
-    basic_lstm_cell_c_state_grad_inputs.emplace_back(lstm_split_dy_outputs[idx]);
+    (void)basic_lstm_cell_c_state_grad_inputs.emplace_back(lstm_split_dy_outputs[idx]);
     if (i == 0) {
-      basic_lstm_cell_c_state_grad_inputs.emplace_back(dynamic_rnn_grad_cnode->input(kIndex10));
-      basic_lstm_cell_c_state_grad_inputs.emplace_back(dynamic_rnn_grad_cnode->input(kIndex11));
+      (void)basic_lstm_cell_c_state_grad_inputs.emplace_back(dynamic_rnn_grad_cnode->input(kIndex10));
+      (void)basic_lstm_cell_c_state_grad_inputs.emplace_back(dynamic_rnn_grad_cnode->input(kIndex11));
     } else {
-      basic_lstm_cell_c_state_grad_inputs.emplace_back(pre_split_outputs[1]);
-      basic_lstm_cell_c_state_grad_inputs.emplace_back(pre_basic_lstm_cell_c_state_grad_outputs[1]);
+      (void)basic_lstm_cell_c_state_grad_inputs.emplace_back(pre_split_outputs[1]);
+      (void)basic_lstm_cell_c_state_grad_inputs.emplace_back(pre_basic_lstm_cell_c_state_grad_outputs[1]);
     }
-    basic_lstm_cell_c_state_grad_inputs.emplace_back(lstm_split_i_outputs[idx]);
-    basic_lstm_cell_c_state_grad_inputs.emplace_back(lstm_split_j_outputs[idx]);
-    basic_lstm_cell_c_state_grad_inputs.emplace_back(lstm_split_f_outputs[idx]);
-    basic_lstm_cell_c_state_grad_inputs.emplace_back(lstm_split_o_outputs[idx]);
-    basic_lstm_cell_c_state_grad_inputs.emplace_back(lstm_split_tanh_outputs[idx]);
+    (void)basic_lstm_cell_c_state_grad_inputs.emplace_back(lstm_split_i_outputs[idx]);
+    (void)basic_lstm_cell_c_state_grad_inputs.emplace_back(lstm_split_j_outputs[idx]);
+    (void)basic_lstm_cell_c_state_grad_inputs.emplace_back(lstm_split_f_outputs[idx]);
+    (void)basic_lstm_cell_c_state_grad_inputs.emplace_back(lstm_split_o_outputs[idx]);
+    (void)basic_lstm_cell_c_state_grad_inputs.emplace_back(lstm_split_tanh_outputs[idx]);
     auto basic_lstm_cell_c_state_grad = func_graph->NewCNode(basic_lstm_cell_c_state_grad_inputs);
     MS_EXCEPTION_IF_NULL(basic_lstm_cell_c_state_grad);
     basic_lstm_cell_c_state_grad->set_abstract(basic_lstm_cell_c_state_grad_nodes[i]->abstract());
@@ -225,8 +225,8 @@ AnfNodePtr AddLSTMInputGradNode(const FuncGraphPtr &func_graph, const CNodePtr &
     // Create MatMul
     std::vector<AnfNodePtr> matmul_inputs = {NewValueNode(std::make_shared<Primitive>(prim::kPrimMatMul->name()))};
-    matmul_inputs.emplace_back(basic_lstm_cell_c_state_grad_outputs[0]);
-    matmul_inputs.emplace_back(dynamic_rnn_grad_cnode->input(kIndex2));
+    (void)matmul_inputs.emplace_back(basic_lstm_cell_c_state_grad_outputs[0]);
+    (void)matmul_inputs.emplace_back(dynamic_rnn_grad_cnode->input(kIndex2));
     auto matmul = func_graph->NewCNode(matmul_inputs);
     MS_EXCEPTION_IF_NULL(matmul);
     matmul->set_abstract(matmul_nodes[i]->abstract());
@@ -483,8 +483,8 @@ AnfNodePtr CreateValueNode(const FuncGraphPtr &func_graph, const CNodePtr &dynam
   return value_node;
 }
 
-AnfNodePtr CreateDbReduceSum(const FuncGraphPtr &func_graph, const CNodePtr &dynamic_rnn_grad_cnode,
-                             const AnfNodePtr &lstm_input_grad, const AnfNodePtr &value_node) {
+AnfNodePtr CreateDbReduceSum(const FuncGraphPtr &func_graph, const CNodePtr &, const AnfNodePtr &lstm_input_grad,
+                             const AnfNodePtr &value_node) {
   MS_EXCEPTION_IF_NULL(func_graph);
   // Create node
   auto batch_matmul = CreateBatchMatMul2(func_graph, lstm_input_grad, value_node);

@@ -29,7 +29,7 @@ namespace opt {
 namespace {
 constexpr size_t kAvgPool3DInputNum = 1;
 constexpr size_t k5DInferDims = 5;
-constexpr size_t kC0 = 16;
+constexpr int64_t kC0 = 16;
 constexpr size_t kDHWDimNum = 3;
 constexpr size_t kNCDHWDimNum = 5;
@@ -153,8 +153,8 @@ AnfNodePtr ConstructFilter(const FuncGraphPtr &func_graph, const std::vector<int
   auto tensor_data = reinterpret_cast<float16 *>(assist_tensor->data_c());
   int64_t cnt = c1 * kd * kh * kw;
   for (int64_t i = 0; i < cnt; ++i) {
-    for (size_t j = 0; j < kC0; ++j) {
-      for (size_t k = 0; k < kC0; ++k) {
+    for (int64_t j = 0; j < kC0; ++j) {
+      for (int64_t k = 0; k < kC0; ++k) {
         float t = j == k ? val : 0;
         *tensor_data = float16(t);
         ++tensor_data;
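
Making `kC0` an `int64_t` lets the `j`/`k` loop counters be `int64_t` as well, so `j < kC0` compares two signed values instead of mixing `size_t` with the surrounding `int64_t` shape arithmetic, which is what lint objects to. A compilable sketch of the fixed loop shape, with placeholder values:

```cpp
#include <cstdint>

constexpr int64_t kC0 = 16;  // was: constexpr size_t kC0 = 16;

int64_t CountDiagonal(int64_t c1) {
  int64_t diag = 0;
  for (int64_t i = 0; i < c1; ++i) {
    // With int64_t j and int64_t kC0, j < kC0 compares two signed values;
    // with the old size_t kC0 this was a signed/unsigned mismatch.
    for (int64_t j = 0; j < kC0; ++j) {
      for (int64_t k = 0; k < kC0; ++k) {
        if (j == k) ++diag;
      }
    }
  }
  return diag;
}

int main() { return CountDiagonal(2) == 2 * kC0 ? 0 : 1; }
```
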
@@ -172,7 +172,7 @@ AnfNodePtr ConstructFilter(const FuncGraphPtr &func_graph, const std::vector<int
 AnfNodePtr ConstructMultiplier(const FuncGraphPtr &func_graph, int64_t fn, int64_t fc, int64_t fd, int64_t fh,
                                int64_t fw, int64_t dd, int64_t dh, int64_t dw, int64_t kd, int64_t kh, int64_t kw,
-                               int64_t sd, int64_t sh, int64_t sw, const std::vector<int64_t> &pad_list, bool ceil_mode,
+                               int64_t sd, int64_t sh, int64_t sw, const std::vector<int64_t> &pad_list,
                                bool count_include_pad) {
   MS_EXCEPTION_IF_NULL(func_graph);
   // assist tensor 2
@@ -288,7 +288,7 @@ const AnfNodePtr AvgPool3DFusion::Process(const FuncGraphPtr &func_graph, const
   // assist node 2
   if ((!IsZeroPads(pad_list) || ceil_mode) && !divisor_override) {
     auto multiplier = ConstructMultiplier(func_graph, fn, fc, fd, fh, fw, dout, dh, dw, kd, kh, kw, sd, sh, sw,
-                                          pad_list, ceil_mode, count_include_pad);
+                                          pad_list, count_include_pad);
     new_inputs.push_back(multiplier);
   }
   auto new_3d = func_graph->NewCNode(new_inputs);

@@ -34,7 +34,7 @@ constexpr size_t kStridesDims = 3;
 constexpr size_t kOrigShapeDims = 5;
 constexpr size_t kShapeDims = 6;
 constexpr size_t kPadDims = 6;
-constexpr size_t kC0 = 16;
+constexpr int64_t kC0 = 16;
 
 void GetAttrs(const AnfNodePtr &node, std::vector<int64_t> *kernel_size, std::vector<int64_t> *strides,
               std::vector<int64_t> *pad_list, std::vector<int64_t> *origin_input_shape, bool *ceil_mode,
@@ -120,8 +120,8 @@ AnfNodePtr ConstructFilter(const FuncGraphPtr &func_graph, const std::vector<int
   auto tensor_data = reinterpret_cast<float16 *>(assist_tensor->data_c());
   int64_t cnt = c1 * kd * kh * kw;
   for (int64_t i = 0; i < cnt; ++i) {
-    for (size_t j = 0; j < kC0; ++j) {
-      for (size_t k = 0; k < kC0; ++k) {
+    for (int64_t j = 0; j < kC0; ++j) {
+      for (int64_t k = 0; k < kC0; ++k) {
         float t = j == k ? val : 0;
         *tensor_data = float16(t);
         ++tensor_data;
@@ -140,7 +140,7 @@ AnfNodePtr ConstructFilter(const FuncGraphPtr &func_graph, const std::vector<int
 AnfNodePtr ConstructMultiplier(const FuncGraphPtr &func_graph, const std::vector<size_t> &ori_shape,
                                const std::vector<int64_t> &ori_input_shape, const std::vector<int64_t> &kernel_size,
                                const std::vector<int64_t> &strides, const std::vector<int64_t> &pad_list,
-                               bool ceil_mode, bool count_include_pad) {
+                               bool count_include_pad) {
   MS_EXCEPTION_IF_NULL(func_graph);
   // assist tensor 2
   std::vector<int64_t> grad_shape;
@@ -246,8 +246,8 @@ const AnfNodePtr AvgPool3DGradFusion::Process(const FuncGraphPtr &func_graph, co
   // assist node 2
   if (divisor_override == 0 && (!IsZeroPads(pad_list) || ceil_mode)) {
-    auto multiplier = ConstructMultiplier(func_graph, dims_in, origin_input_shape, kernel_size, strides, pad_list,
-                                          ceil_mode, count_include_pad);
+    auto multiplier =
+      ConstructMultiplier(func_graph, dims_in, origin_input_shape, kernel_size, strides, pad_list, count_include_pad);
     new_inputs.push_back(multiplier);
   }
   auto new_3d_grad = func_graph->NewCNode(new_inputs);

@@ -117,8 +117,8 @@ bool ParameterTransOpFusion::Run(const FuncGraphPtr &func_graph) {
       auto cast = trans_road[kIndex1];
       if (param_format == format && param_dtype != dtype) {
         AnfAlgo::SetSelectKernelBuildInfo(GetKernelBuildInfo(cast, format, param_dtype, dtype), cast.get());
-        manager->Replace(trans_road[kIndex2], final_node);
-        manager->Replace(cur_transop, cast);
+        (void)manager->Replace(trans_road[kIndex2], final_node);
+        (void)manager->Replace(cur_transop, cast);
       }
       changed = true;
     }
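The same ignored-return rule applies beyond containers: `Replace` on the graph manager reports success as a `bool`, and when the caller does not branch on it, the `(void)` cast makes the discard explicit. A generic sketch (the `Manager` type here is illustrative, not the MindSpore class):

```cpp
// Hypothetical manager whose Replace mirrors the shape of the call above.
struct Manager {
  bool Replace(int old_node, int new_node) { return old_node != new_node; }
};

int main() {
  Manager manager;
  // Status deliberately ignored: the cast satisfies lint's return-value check.
  (void)manager.Replace(1, 2);
  return 0;
}
```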

@@ -84,11 +84,8 @@ ValueNodePtr CreateKeepPorbValueNode(const FuncGraphPtr &func_graph, const AnfNo
   MS_EXCEPTION_IF_NULL(data_ptr);
   // keep_prob's datatype is same with input data
   if (type_id == kNumberTypeFloat16) {
-    std::vector<float16> half_data = {float16(keep_prob)};
-    auto ret_code = memcpy_s(data_ptr, LongToSize(keep_prob_tensor->data().nbytes()), half_data.data(), kFloat16Len);
-    if (ret_code != 0) {
-      MS_LOG(EXCEPTION) << "Failed to copy data into Tensor.";
-    }
+    auto *val16 = reinterpret_cast<float16 *>(data_ptr);
+    *val16 = float16(keep_prob);
   } else {
     auto *val = reinterpret_cast<float *>(data_ptr);
     *val = keep_prob;
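The rewritten branch replaces a `memcpy_s` of a single `float16` (plus a return-code check that can never meaningfully fire for a two-byte copy into an adequately sized tensor buffer) with a direct store through a typed pointer, mirroring the existing `float` branch. A sketch of the simplification using `uint16_t` as a stand-in for the MindSpore-specific `float16`:

```cpp
#include <cstdint>

int main() {
  uint16_t buffer[4] = {0};       // stand-in for the tensor's data_c() buffer
  void *data_ptr = buffer;
  const uint16_t value = 0x3C00;  // stand-in for float16(keep_prob)

  // Before: memcpy_s(data_ptr, buffer_size, &value, sizeof(value)) followed by a
  // return-code check, i.e. a dead error path for a 2-byte copy.
  // After: a direct typed store with no status to inspect.
  auto *val16 = reinterpret_cast<uint16_t *>(data_ptr);
  *val16 = value;

  return buffer[0] == value ? 0 : 1;
}
```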

@@ -997,7 +997,7 @@ std::vector<int64_t> GetNodeOutputUsedNum(const session::KernelGraph &kernel_gra
       auto out_getitem_ptr = out_getitem.first->cast<CNodePtr>();
       MS_EXCEPTION_IF_NULL(out_getitem_ptr);
       auto getitem_input2 = out_getitem_ptr->input(kInputNodeOutputIndexInTupleGetItem);
-      auto output_idx = GetValue<int64_t>(GetValueNode(getitem_input2));
+      auto output_idx = LongToSize(GetValue<int64_t>(GetValueNode(getitem_input2)));
       output_used_num[output_idx] = SizeToLong(manager->node_users()[out_getitem.first].size());
     }
   }
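`GetValue<int64_t>` produces a signed index that is then used to subscript a container, an implicit `int64_t` to `size_t` conversion that lint reports; wrapping it in `LongToSize` makes the conversion explicit. A sketch with a hypothetical `LongToSize` written to match the helper's apparent intent (a checked signed-to-unsigned cast):

```cpp
#include <cstddef>
#include <cstdint>
#include <stdexcept>
#include <vector>

// Assumed behaviour of the LongToSize helper: checked signed-to-unsigned cast.
size_t LongToSize(int64_t v) {
  if (v < 0) throw std::out_of_range("negative index");
  return static_cast<size_t>(v);
}

int main() {
  std::vector<int64_t> output_used_num(4, 0);
  int64_t raw_idx = 2;  // stand-in for GetValue<int64_t>(GetValueNode(...))
  // Explicit conversion: no implicit int64_t -> size_t warning at the subscript.
  auto output_idx = LongToSize(raw_idx);
  output_used_num[output_idx] = 1;
  return output_used_num[2] == 1 ? 0 : 1;
}
```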