diff --git a/mindspore/ccsrc/transform/graph_ir/convert.cc b/mindspore/ccsrc/transform/graph_ir/convert.cc index 02d4cca89ed..b1abe240277 100644 --- a/mindspore/ccsrc/transform/graph_ir/convert.cc +++ b/mindspore/ccsrc/transform/graph_ir/convert.cc @@ -1591,13 +1591,13 @@ AnfNodePtr DfGraphConvertor::GetRealOpNode(AnfNodePtr node) { // make_tuple apply inputs:make_tuple, [tuple_items,] if (IsPrimitiveCNode(node_inputs[1], prim::kPrimMakeTuple)) { auto tuple_inputs = node->cast()->inputs(); - if (tuple_inputs.size() < IntToSize(index + 1)) { + if (tuple_inputs.size() < LongToSize(index + 1L)) { MS_LOG(ERROR) << "make tuple input items node not correct! size:" << tuple_inputs.size() << ", item index:" << index; error_ = FAILED; return node; } - return GetRealOpNode(tuple_inputs[IntToSize(index + 1)]); + return GetRealOpNode(tuple_inputs[LongToSize(index + 1L)]); } return GetRealOpNode(node_inputs[1]); } diff --git a/mindspore/core/abstract/prim_arrays.cc b/mindspore/core/abstract/prim_arrays.cc index 60d3963cd5a..9a16ae58a76 100644 --- a/mindspore/core/abstract/prim_arrays.cc +++ b/mindspore/core/abstract/prim_arrays.cc @@ -1164,7 +1164,7 @@ AbstractBasePtr InferImplDynamicStitch(const AnalysisEnginePtr &, const Primitiv const AbstractBasePtrList &args_spec_list) { MS_EXCEPTION_IF_NULL(primitive); auto prim_name = primitive->name(); - CheckAndConvertUtils::CheckInteger("input number", args_spec_list.size(), kEqual, 2, prim_name); + (void)CheckAndConvertUtils::CheckInteger("input number", args_spec_list.size(), kEqual, 2, prim_name); for (const auto &item : args_spec_list) { MS_EXCEPTION_IF_NULL(item); } diff --git a/mindspore/core/abstract/prim_nn.cc b/mindspore/core/abstract/prim_nn.cc index 50894ca204d..bf9c167eda9 100644 --- a/mindspore/core/abstract/prim_nn.cc +++ b/mindspore/core/abstract/prim_nn.cc @@ -80,7 +80,7 @@ AbstractBasePtr InferImplPooling(const AnalysisEnginePtr &, const PrimitivePtr & auto pad_mode_ptr = primitive->GetAttr("pad_mode"); if (pad_mode_ptr != nullptr) { int64_t pad_mode; - CheckAndConvertUtils::GetPadModEnumValue(pad_mode_ptr, &pad_mode, true); + (void)CheckAndConvertUtils::GetPadModEnumValue(pad_mode_ptr, &pad_mode, true); if (pad_mode == PadMode::VALID) { padding = 0; } else if (pad_mode == PadMode::SAME) { @@ -322,7 +322,7 @@ AbstractBasePtr InferImplConv2D(const AnalysisEnginePtr &, const PrimitivePtr &p std::vector padding = CheckAttrIntOrTuple(op_name, primitive->GetAttr("pad"), padding_start_idx, padding_num_element); int64_t pad_mode; - CheckAndConvertUtils::GetPadModEnumValue(primitive->GetAttr("pad_mode"), &pad_mode); + (void)CheckAndConvertUtils::GetPadModEnumValue(primitive->GetAttr("pad_mode"), &pad_mode); std::vector output_hw; std::vector pad_list; std::vector output_hw_min; diff --git a/mindspore/core/load_mindir/anf_model_parser.cc b/mindspore/core/load_mindir/anf_model_parser.cc index f9b64dafdbc..68c1bbd0e8b 100644 --- a/mindspore/core/load_mindir/anf_model_parser.cc +++ b/mindspore/core/load_mindir/anf_model_parser.cc @@ -533,7 +533,7 @@ bool MSANFModelParser::GetAttrValueForCNode(const PrimitivePtr &prim, const mind ValuePtr res = ObtainCNodeAttrInSingleScalarForm(attr_proto); const std::string &op_type = prim->name(); if (!IsLite()) { - CheckAndConvertUtils::ConvertAttrValueInLoad(op_type, attr_name, &res); + (void)CheckAndConvertUtils::ConvertAttrValueInLoad(op_type, attr_name, &res); } prim->AddAttr(attr_name, res); break; diff --git a/mindspore/core/load_mindir/load_model.cc b/mindspore/core/load_mindir/load_model.cc index 
73bbb28e378..115c67fa1c8 100644 --- a/mindspore/core/load_mindir/load_model.cc +++ b/mindspore/core/load_mindir/load_model.cc @@ -226,9 +226,9 @@ std::shared_ptr LoadMindIR(const std::string &file_name, bool is_lite return nullptr; } - int file_size = files.size(); + size_t file_size = files.size(); mind_ir::GraphProto *mod_graph = origin_model.mutable_graph(); - for (auto file_index = 0; file_index < file_size; file_index++) { + for (size_t file_index = 0; file_index < file_size; file_index++) { mind_ir::GraphProto param_graph; if (!ParseGraphProto(¶m_graph, files[file_index], dec_key, key_len, dec_mode)) { return nullptr; diff --git a/mindspore/core/ops/add.cc b/mindspore/core/ops/add.cc index ed4548fe297..790e0b28819 100644 --- a/mindspore/core/ops/add.cc +++ b/mindspore/core/ops/add.cc @@ -28,7 +28,7 @@ namespace { abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { MS_EXCEPTION_IF_NULL(primitive); auto prim_name = primitive->name(); - CheckAndConvertUtils::CheckInteger("input numbers", input_args.size(), kEqual, 2, prim_name); + (void)CheckAndConvertUtils::CheckInteger("input numbers", SizeToLong(input_args.size()), kEqual, 2, prim_name); for (const auto &item : input_args) { MS_EXCEPTION_IF_NULL(item); } @@ -40,7 +40,7 @@ TypePtr InferType(const PrimitivePtr &prim, const std::vector & MS_EXCEPTION_IF_NULL(item); } auto op_name = prim->name(); - CheckAndConvertUtils::CheckInteger("Add infer", input_args.size(), kGreaterEqual, 2, op_name); + (void)CheckAndConvertUtils::CheckInteger("Add infer", SizeToLong(input_args.size()), kGreaterEqual, 2, op_name); std::map types; types.emplace("x", input_args[0]->BuildType()); types.emplace("y", input_args[1]->BuildType()); diff --git a/mindspore/core/ops/adder.cc b/mindspore/core/ops/adder.cc index 9e14aa3aeb7..54e38414280 100644 --- a/mindspore/core/ops/adder.cc +++ b/mindspore/core/ops/adder.cc @@ -33,14 +33,14 @@ void Adder::Init(const int64_t in_channel, const int64_t out_channel, const std: set_format(format); } -void Adder::set_in_channel(const int64_t in_channel) { this->AddAttr(kInChannel, MakeValue(in_channel)); } +void Adder::set_in_channel(const int64_t in_channel) { (void)this->AddAttr(kInChannel, MakeValue(in_channel)); } int64_t Adder::get_in_channel() const { auto value_ptr = GetAttr(kInChannel); return GetValue(value_ptr); } -void Adder::set_out_channel(const int64_t out_channel) { this->AddAttr(kOutChannel, MakeValue(out_channel)); } +void Adder::set_out_channel(const int64_t out_channel) { (void)this->AddAttr(kOutChannel, MakeValue(out_channel)); } int64_t Adder::get_out_channel() const { auto value_ptr = GetAttr(kOutChannel); @@ -66,28 +66,28 @@ PadMode Adder::get_pad_mode() const { return PadMode(GetValue(value_ptr)); } -void Adder::set_stride(const std::vector &stride) { this->AddAttr(kStride, MakeValue(stride)); } +void Adder::set_stride(const std::vector &stride) { (void)this->AddAttr(kStride, MakeValue(stride)); } std::vector Adder::get_stride() const { auto value_ptr = GetAttr(kStride); return GetValue>(value_ptr); } -void Adder::set_pad_list(const std::vector &pad_list) { this->AddAttr(kPadList, MakeValue(pad_list)); } +void Adder::set_pad_list(const std::vector &pad_list) { (void)this->AddAttr(kPadList, MakeValue(pad_list)); } std::vector Adder::get_pad_list() const { auto value_ptr = GetAttr(kPadList); return GetValue>(value_ptr); } -void Adder::set_dilation(const std::vector &dilation) { this->AddAttr(kDilation, MakeValue(dilation)); } +void Adder::set_dilation(const 
std::vector &dilation) { (void)this->AddAttr(kDilation, MakeValue(dilation)); } std::vector Adder::get_dilation() const { auto value_ptr = GetAttr(kDilation); return GetValue>(value_ptr); } -void Adder::set_group(const int64_t group) { this->AddAttr(kGroup, MakeValue(group)); } +void Adder::set_group(const int64_t group) { (void)this->AddAttr(kGroup, MakeValue(group)); } int64_t Adder::get_group() const { auto value_ptr = GetAttr(kGroup); diff --git a/mindspore/core/ops/addn.cc b/mindspore/core/ops/addn.cc index e08f3f6d3d7..7962b20912d 100644 --- a/mindspore/core/ops/addn.cc +++ b/mindspore/core/ops/addn.cc @@ -70,7 +70,8 @@ TypePtr AddNInferType(const PrimitivePtr &prim, const std::vectorisa() ? input_args[0]->cast()->elements() : input_args[0]->cast()->elements(); - CheckAndConvertUtils::CheckInteger("concat element num", SizeToLong(elements.size()), kGreaterEqual, 1, prim->name()); + (void)CheckAndConvertUtils::CheckInteger("concat element num", SizeToLong(elements.size()), kGreaterEqual, 1, + prim->name()); std::map types; types.emplace("element_0", elements[0]->BuildType()); for (size_t i = 0; i < elements.size(); ++i) { @@ -90,7 +91,7 @@ AbstractBasePtr AddNInfer(const abstract::AnalysisEnginePtr &, const PrimitivePt const std::vector &input_args) { MS_EXCEPTION_IF_NULL(primitive); auto prim_name = primitive->name(); - CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kGreaterEqual, 1, prim_name); + (void)CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kGreaterEqual, 1, prim_name); for (const auto &item : input_args) { MS_EXCEPTION_IF_NULL(item); } diff --git a/mindspore/core/ops/arg_max.cc b/mindspore/core/ops/arg_max.cc index 26487f823ba..c080a65769f 100644 --- a/mindspore/core/ops/arg_max.cc +++ b/mindspore/core/ops/arg_max.cc @@ -30,7 +30,7 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector out_shape; for (size_t i = 0; i < x_shape.size(); ++i) { if (SizeToLong(i) != axis) { - out_shape.emplace_back(x_shape[i]); + (void)out_shape.emplace_back(x_shape[i]); } } return std::make_shared(out_shape); @@ -38,7 +38,7 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { MS_EXCEPTION_IF_NULL(prim); - CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, 1, prim->name()); + (void)CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, 1, prim->name()); for (const auto &item : input_args) { MS_EXCEPTION_IF_NULL(item); } @@ -51,8 +51,8 @@ void ArgMax::Init(const int64_t axis, const TypeId output_type) { set_output_type(output_type); } -void ArgMax::set_axis(const int64_t axis) { this->AddAttr(kAxis, MakeValue(axis)); } -void ArgMax::set_output_type(const TypeId output_type) { this->AddAttr(kOutputType, TypeIdToType(output_type)); } +void ArgMax::set_axis(const int64_t axis) { (void)this->AddAttr(kAxis, MakeValue(axis)); } +void ArgMax::set_output_type(const TypeId output_type) { (void)this->AddAttr(kOutputType, TypeIdToType(output_type)); } int64_t ArgMax::get_axis() const { return GetValue(GetAttr(kAxis)); } TypeId ArgMax::get_output_type() const { diff --git a/mindspore/core/ops/arg_min.cc b/mindspore/core/ops/arg_min.cc index 1bf95849668..c54efb7f26c 100644 --- a/mindspore/core/ops/arg_min.cc +++ b/mindspore/core/ops/arg_min.cc @@ -24,8 +24,8 @@ void ArgMin::Init(const int64_t axis, const TypeId output_type) { set_output_type(output_type); } -void ArgMin::set_axis(const int64_t 
axis) { this->AddAttr(kAxis, MakeValue(axis)); } -void ArgMin::set_output_type(const TypeId output_type) { this->AddAttr(kOutputType, TypeIdToType(output_type)); } +void ArgMin::set_axis(const int64_t axis) { (void)this->AddAttr(kAxis, MakeValue(axis)); } +void ArgMin::set_output_type(const TypeId output_type) { (void)this->AddAttr(kOutputType, TypeIdToType(output_type)); } int64_t ArgMin::get_axis() const { return GetValue(GetAttr(kAxis)); } @@ -51,7 +51,7 @@ AbstractBasePtr ArgMinInfer(const abstract::AnalysisEnginePtr &, const Primitive std::vector out_shape; for (int64_t i = 0; i < x_rank; i++) { if (i != axis) { - out_shape.push_back(x_shape[i]); + out_shape.push_back(x_shape[LongToSize(i)]); } } diff --git a/mindspore/core/ops/asin.cc b/mindspore/core/ops/asin.cc index 90a52ef8ca8..fb78967c815 100644 --- a/mindspore/core/ops/asin.cc +++ b/mindspore/core/ops/asin.cc @@ -26,7 +26,7 @@ AbstractBasePtr AsinInfer(const abstract::AnalysisEnginePtr &, const PrimitivePt const std::vector &input_args) { MS_EXCEPTION_IF_NULL(primitive); auto prim_name = primitive->name(); - CheckAndConvertUtils::CheckInteger("Asin_infer", SizeToLong(input_args.size()), kEqual, 1, prim_name); + (void)CheckAndConvertUtils::CheckInteger("Asin_infer", SizeToLong(input_args.size()), kEqual, 1, prim_name); // Infer Shape auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape]; diff --git a/mindspore/core/ops/assert.cc b/mindspore/core/ops/assert.cc index 1eff1ee8870..9f9cafaa1c2 100644 --- a/mindspore/core/ops/assert.cc +++ b/mindspore/core/ops/assert.cc @@ -40,19 +40,19 @@ AbstractBasePtr AssertInfer(const abstract::AnalysisEnginePtr &, const Primitive auto op_name = primitive->name(); TypePtr condition; if (!(input_args[0]->BuildType()->type_id() == kObjectTypeTensorType)) { - auto condition_value = GetValue>(input_args[0]->BuildValue()); - CheckAndConvertUtils::CheckInteger("condition's rank", condition_value.size(), kLessEqual, 1, op_name); - if (condition_value.size() == 1) { - CheckAndConvertUtils::CheckInteger("condition[0]", condition_value[0], kEqual, 1, op_name); + auto condition_values = GetValue>(input_args[0]->BuildValue()); + CheckAndConvertUtils::CheckInteger("condition's rank", SizeToLong(condition_values.size()), kLessEqual, 1, op_name); + if (condition_values.size() == 1) { + CheckAndConvertUtils::CheckInteger("condition[0]", SizeToLong(condition_values[0]), kEqual, 1, op_name); } condition = TypeIdToType(kNumberTypeBool); } else { auto condition_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape]; - CheckAndConvertUtils::CheckInteger("condition's rank", condition_shape[0], kLessEqual, 1, op_name); + (void)CheckAndConvertUtils::CheckInteger("condition's rank", condition_shape[0], kLessEqual, 1, op_name); if (condition_shape[0] == 1) { auto condition_value = reinterpret_cast(input_args[0]->BuildValue()->cast()->data_c()); MS_EXCEPTION_IF_NULL(condition_value); - CheckAndConvertUtils::CheckInteger("condition[0]", *condition_value, kEqual, 1, op_name); + (void)CheckAndConvertUtils::CheckInteger("condition[0]", *condition_value, kEqual, 1, op_name); } condition = input_args[0]->BuildType(); } @@ -63,7 +63,7 @@ AbstractBasePtr AssertInfer(const abstract::AnalysisEnginePtr &, const Primitive auto inputs_type = input_args[1]->BuildType()->cast()->elements(); for (auto dtype : inputs_type) { std::set template_types = {kTensorType}; - CheckAndConvertUtils::CheckSubClass("input", dtype, template_types, op_name); + 
(void)CheckAndConvertUtils::CheckSubClass("input", dtype, template_types, op_name); } return std::make_shared(kInt32, output_shape); } diff --git a/mindspore/core/ops/atan.cc b/mindspore/core/ops/atan.cc index 6f83bb9b3cc..1fe651e49ea 100644 --- a/mindspore/core/ops/atan.cc +++ b/mindspore/core/ops/atan.cc @@ -24,7 +24,7 @@ AbstractBasePtr AtanInfer(const abstract::AnalysisEnginePtr &, const PrimitivePt const std::vector &input_args) { MS_EXCEPTION_IF_NULL(primitive); auto prim_name = primitive->name(); - CheckAndConvertUtils::CheckInteger("Atan_infer", int64_t(input_args.size()), kEqual, 1, prim_name); + (void)CheckAndConvertUtils::CheckInteger("Atan_infer", SizeToLong(input_args.size()), kEqual, 1, prim_name); // Infer Shape auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape]; diff --git a/mindspore/core/ops/audio_spectrogram.cc b/mindspore/core/ops/audio_spectrogram.cc index 7d1461d4e72..21e46affe3e 100644 --- a/mindspore/core/ops/audio_spectrogram.cc +++ b/mindspore/core/ops/audio_spectrogram.cc @@ -73,7 +73,7 @@ int64_t AudioSpectrogram::get_window_size() const { return GetValue(value_ptr); } -void AudioSpectrogram::set_stride(const int64_t stride) { this->AddAttr(kStride, MakeValue(stride)); } +void AudioSpectrogram::set_stride(const int64_t stride) { (void)this->AddAttr(kStride, MakeValue(stride)); } int64_t AudioSpectrogram::get_stride() const { auto value_ptr = GetAttr(kStride); return GetValue(value_ptr); @@ -100,7 +100,7 @@ int64_t GetFftLength(int64_t length) { return SizeToLong(1 << (unsigned int)shift); } -void AudioSpectrogram::set_mag_square(const bool mag_square) { this->AddAttr(kMagSquare, MakeValue(mag_square)); } +void AudioSpectrogram::set_mag_square(const bool mag_square) { (void)this->AddAttr(kMagSquare, MakeValue(mag_square)); } bool AudioSpectrogram::get_mag_square() const { auto value_ptr = GetAttr(kMagSquare); return GetValue(value_ptr); diff --git a/mindspore/core/ops/avg_pool_3d.cc b/mindspore/core/ops/avg_pool_3d.cc index 7f5ba763d7a..31f65304fb9 100644 --- a/mindspore/core/ops/avg_pool_3d.cc +++ b/mindspore/core/ops/avg_pool_3d.cc @@ -107,7 +107,7 @@ void GetPadsByPadding(int64_t in_d, int64_t in_h, int64_t in_w, int64_t kernel_d pad_list->push_back(static_cast(std::floor(pad_w / 2))); pad_list->push_back(pad_w - pad_list->at(4)); } else if (pad_mode == PadMode::PAD) { - (void)pad_list->assign(padding.begin(), padding.end()); + pad_list->assign(padding.begin(), padding.end()); } } diff --git a/mindspore/core/ops/batch_matmul.cc b/mindspore/core/ops/batch_matmul.cc index d9798f13048..b73d895a29b 100644 --- a/mindspore/core/ops/batch_matmul.cc +++ b/mindspore/core/ops/batch_matmul.cc @@ -69,8 +69,8 @@ abstract::ShapePtr BatchMatmulInferShape(const PrimitivePtr &primitive, ShapeVector x_max_shape = x_shape_map[kMaxShape]; ShapeVector y_min_shape = y_shape_map[kMinShape]; ShapeVector y_max_shape = y_shape_map[kMaxShape]; - (void)CheckAndConvertUtils::CheckMinMaxShape(x_shp, &x_min_shape, &x_max_shape); - (void)CheckAndConvertUtils::CheckMinMaxShape(y_shp, &y_min_shape, &y_max_shape); + CheckAndConvertUtils::CheckMinMaxShape(x_shp, &x_min_shape, &x_max_shape); + CheckAndConvertUtils::CheckMinMaxShape(y_shp, &y_min_shape, &y_max_shape); // Additional check for dynamic shape // Last infer will be real shape values bool x_not_dyn = diff --git a/mindspore/core/ops/batch_norm.cc b/mindspore/core/ops/batch_norm.cc index 0fd59a80b11..78ae7bc6899 100644 --- a/mindspore/core/ops/batch_norm.cc +++ 
b/mindspore/core/ops/batch_norm.cc @@ -100,7 +100,7 @@ AbstractBasePtr BatchNormInfer(const abstract::AnalysisEnginePtr &, const Primit TypeError); if (!GetValue(primitive->GetAttr(kIsTraining))) { - CheckAndConvertUtils::CheckInteger("mean rank", SizeToLong(mean.size()), kEqual, 1, prim_name); + (void)CheckAndConvertUtils::CheckInteger("mean rank", SizeToLong(mean.size()), kEqual, 1, prim_name); CheckAndConvertUtils::Check("mean shape", mean, kEqual, "variance shape", variance, prim_name, TypeError); CheckAndConvertUtils::Check("mean shape", mean, kEqual, "scale shape", scale, prim_name, TypeError); } diff --git a/mindspore/core/ops/batch_to_space.cc b/mindspore/core/ops/batch_to_space.cc index e6988cb076c..180f59abb50 100644 --- a/mindspore/core/ops/batch_to_space.cc +++ b/mindspore/core/ops/batch_to_space.cc @@ -48,7 +48,7 @@ AbstractBasePtr BatchToSpaceInfer(const abstract::AnalysisEnginePtr &, const Pri const std::vector &input_args) { MS_EXCEPTION_IF_NULL(primitive); auto prim_name = primitive->name(); - CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, 1, prim_name); + (void)CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, 1, prim_name); for (const auto &item : input_args) { MS_EXCEPTION_IF_NULL(item); } @@ -56,7 +56,7 @@ AbstractBasePtr BatchToSpaceInfer(const abstract::AnalysisEnginePtr &, const Pri prim_name); auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape]; - CheckAndConvertUtils::CheckInteger("x rank", SizeToLong(x_shape.size()), kEqual, 4, prim_name); + (void)CheckAndConvertUtils::CheckInteger("x rank", SizeToLong(x_shape.size()), kEqual, 4, prim_name); auto block_size = GetValue>(primitive->GetAttr(kBlockSize)); auto crops = GetValue>>(primitive->GetAttr(kCrops)); auto out_shape = x_shape; @@ -66,8 +66,8 @@ AbstractBasePtr BatchToSpaceInfer(const abstract::AnalysisEnginePtr &, const Pri CheckAndConvertUtils::Check("x block shape prod", x_block_prod, kGreaterThan, "crops sum", 4, prim_name); out_shape[i + 2] = x_block_prod - crops_sum; } - CheckAndConvertUtils::CheckInteger("x_shape[0] % (block_size[0]*block_size[1])", - out_shape[0] % (block_size[0] * block_size[1]), kEqual, 0, prim_name); + (void)CheckAndConvertUtils::CheckInteger("x_shape[0] % (block_size[0]*block_size[1])", + out_shape[0] % (block_size[0] * block_size[1]), kEqual, 0, prim_name); out_shape[0] /= block_size[0] * block_size[1]; auto ret = input_args[0]->Broaden(); diff --git a/mindspore/core/ops/batch_to_space_nd.cc b/mindspore/core/ops/batch_to_space_nd.cc index 54fb767ff71..70db427ffb3 100644 --- a/mindspore/core/ops/batch_to_space_nd.cc +++ b/mindspore/core/ops/batch_to_space_nd.cc @@ -33,11 +33,11 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector>(primitive->GetAttr(kBlockShape)); auto crops = GetValue>>(primitive->GetAttr(kCrops)); - int64_t size = block_shape.size(); - for (int64_t i = 0; i < size; i++) { + size_t size = block_shape.size(); + for (size_t i = 0; i < size; i++) { block_shape_prod = block_shape_prod * block_shape[i]; auto x_block_prod = out_shape[i + offset] * block_shape[i]; auto crops_sum = crops[i][0] + crops[i][1]; @@ -62,14 +62,14 @@ TypePtr InferType(const std::vector &input_args) { } // namespace void BatchToSpaceND::set_crops(std::vector> crops) { - CheckAndConvertUtils::CheckInteger(kCrops, SizeToLong(crops.size()), kEqual, 2, this->name()); - int64_t h = crops.size(); - int64_t w = crops[0].size(); - std::vector 
temp_w = {2, 2}; + (void)CheckAndConvertUtils::CheckInteger(kCrops, SizeToLong(crops.size()), kEqual, 2, this->name()); + size_t h = crops.size(); + size_t w = crops[0].size(); + std::vector temp_w = {2, 2}; CheckAndConvertUtils::Check(kCrops, {h, w}, kEqual, "paddings_shape(2,2)", temp_w, this->name()); - for (int64_t i = 0; i < h; i++) { - for (int64_t j = 0; j < w; j++) { - CheckAndConvertUtils::CheckInteger(kCrops, crops[i][j], kGreaterEqual, 0, this->name()); + for (size_t i = 0; i < h; i++) { + for (size_t j = 0; j < w; j++) { + (void)CheckAndConvertUtils::CheckInteger(kCrops, crops[i][j], kGreaterEqual, 0, this->name()); } } this->AddAttr(kCrops, MakeValue(crops)); @@ -81,8 +81,8 @@ std::vector> BatchToSpaceND::get_crops() const { } void BatchToSpaceND::set_block_shape(std::vector block_shape) { CheckAndConvertUtils::CheckInteger(kBlockShape, SizeToLong(block_shape.size()), kEqual, 2, this->name()); - for (int64_t i = 0; i < (int64_t)block_shape.size(); i++) { - CheckAndConvertUtils::CheckInteger(kBlockShape, block_shape[i], kGreaterEqual, 1, this->name()); + for (size_t i = 0; i < block_shape.size(); i++) { + (void)CheckAndConvertUtils::CheckInteger(kBlockShape, block_shape[i], kGreaterEqual, 1, this->name()); } this->AddAttr(kBlockShape, MakeValue(block_shape)); } diff --git a/mindspore/core/ops/bias_add.cc b/mindspore/core/ops/bias_add.cc index 78aaa5d8c96..df4daa0b252 100644 --- a/mindspore/core/ops/bias_add.cc +++ b/mindspore/core/ops/bias_add.cc @@ -35,15 +35,15 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector(prim_name, input_args, 1); MS_EXCEPTION_IF_NULL(x); MS_EXCEPTION_IF_NULL(bias); - CheckAndConvertUtils::CheckInteger("arg size", input_args.size(), kEqual, 2, prim_name); + CheckAndConvertUtils::CheckInteger("arg size", SizeToLong(input_args.size()), kEqual, 2, prim_name); auto shape_map = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape()); auto input_shape = shape_map[kShape]; auto min_shape = shape_map[kMinShape]; auto max_shape = shape_map[kMaxShape]; CheckAndConvertUtils::CheckInRange("bias_add_infer", input_shape.size(), kIncludeBoth, {2, 5}, prim_name); auto bias_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[1]->BuildShape())[kShape]; - CheckAndConvertUtils::CheckInteger("bias rank", bias_shape.size(), kEqual, 1, prim_name); - CheckAndConvertUtils::CheckInteger("x rank", input_shape.size(), kGreaterEqual, 2, prim_name); + CheckAndConvertUtils::CheckInteger("bias rank", SizeToLong(bias_shape.size()), kEqual, 1, prim_name); + CheckAndConvertUtils::CheckInteger("x rank", SizeToLong(input_shape.size()), kGreaterEqual, 2, prim_name); auto data_format_ptr = primitive->GetAttr("format"); int64_t data_format = Format::NCHW; if (data_format_ptr != nullptr) { @@ -71,7 +71,7 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { MS_EXCEPTION_IF_NULL(prim); auto prim_name = prim->name(); - CheckAndConvertUtils::CheckInteger("biasadd_infer", input_args.size(), kEqual, 2, prim_name); + CheckAndConvertUtils::CheckInteger("biasadd_infer", SizeToLong(input_args.size()), kEqual, 2, prim_name); for (const auto &item : input_args) { MS_EXCEPTION_IF_NULL(item); } diff --git a/mindspore/core/ops/binary_cross_entropy.cc b/mindspore/core/ops/binary_cross_entropy.cc index 91926b06633..0a33426862e 100644 --- a/mindspore/core/ops/binary_cross_entropy.cc +++ b/mindspore/core/ops/binary_cross_entropy.cc @@ -50,7 +50,8 @@ abstract::ShapePtr BinaryCrossEntroyInferShape(const 
PrimitivePtr &primitive, } TypePtr BinaryCrossEntroyInferType(const PrimitivePtr &prim, const std::vector &input_args) { - CheckAndConvertUtils::CheckInteger("binary_cross_entropy_infer", input_args.size(), kEqual, 3, prim->name()); + (void)CheckAndConvertUtils::CheckInteger("binary_cross_entropy_infer", SizeToLong(input_args.size()), kEqual, 3, + prim->name()); for (const auto &item : input_args) { MS_EXCEPTION_IF_NULL(item); } diff --git a/mindspore/core/ops/broadcast_to.cc b/mindspore/core/ops/broadcast_to.cc index 353bd78711c..fec49f67200 100644 --- a/mindspore/core/ops/broadcast_to.cc +++ b/mindspore/core/ops/broadcast_to.cc @@ -35,7 +35,8 @@ abstract::ShapePtr BroadcastToInferShape(const PrimitivePtr &primitive, } else { flag = true; } - if (flag == true) { + + if (flag) { for (size_t i = 0; i < input_x.size(); i++) { if (input_x[i] == -1) { if (i < outer_dim_offset) { diff --git a/mindspore/core/ops/clip.cc b/mindspore/core/ops/clip.cc index f53b44e1e33..fb8d09acc7c 100644 --- a/mindspore/core/ops/clip.cc +++ b/mindspore/core/ops/clip.cc @@ -26,14 +26,14 @@ void Clip::Init(const float max, const float min) { this->set_min(min); } -void Clip::set_max(const float max) { this->AddAttr(kMax, MakeValue(max)); } +void Clip::set_max(const float max) { (void)this->AddAttr(kMax, MakeValue(max)); } float Clip::get_max() const { auto value_ptr = this->GetAttr(kMax); return GetValue(value_ptr); } -void Clip::set_min(const float min) { this->AddAttr(kMin, MakeValue(min)); } +void Clip::set_min(const float min) { (void)this->AddAttr(kMin, MakeValue(min)); } float Clip::get_min() const { auto value_ptr = this->GetAttr(kMin); diff --git a/mindspore/core/ops/concat.cc b/mindspore/core/ops/concat.cc index 7b1a788a698..f7b2ec710c3 100644 --- a/mindspore/core/ops/concat.cc +++ b/mindspore/core/ops/concat.cc @@ -27,7 +27,7 @@ int64_t Concat::get_axis() const { return GetValue(value_ptr); } -void Concat::set_axis(const int64_t axis) { this->AddAttr(kAxis, MakeValue(axis)); } +void Concat::set_axis(const int64_t axis) { (void)this->AddAttr(kAxis, MakeValue(axis)); } AbstractBasePtr ConcatInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, const std::vector &input_args) { @@ -40,7 +40,8 @@ AbstractBasePtr ConcatInfer(const abstract::AnalysisEnginePtr &, const Primitive auto input_tuple = input_args[0]->cast(); MS_EXCEPTION_IF_NULL(input_tuple); auto elements = input_tuple->elements(); - (void)CheckAndConvertUtils::CheckInteger("concat element num", elements.size(), kGreaterEqual, 1, prim_name); + (void)CheckAndConvertUtils::CheckInteger("concat element num", SizeToLong(elements.size()), kGreaterEqual, 1, + prim_name); auto element0 = elements[0]->cast(); MS_EXCEPTION_IF_NULL(element0); auto element0_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(element0->BuildShape())[kShape]; diff --git a/mindspore/core/ops/constant_of_shape.cc b/mindspore/core/ops/constant_of_shape.cc index ee5c38bf7af..c04b9687c72 100644 --- a/mindspore/core/ops/constant_of_shape.cc +++ b/mindspore/core/ops/constant_of_shape.cc @@ -24,7 +24,8 @@ namespace ops { namespace { abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { MS_EXCEPTION_IF_NULL(primitive); - CheckAndConvertUtils::CheckInteger("input args size", SizeToLong(input_args.size()), kEqual, 1, "ConstantOfShape"); + (void)CheckAndConvertUtils::CheckInteger("input args size", SizeToLong(input_args.size()), kEqual, 1, + "ConstantOfShape"); auto input_shape = 
CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape]; return std::make_shared(input_shape); } @@ -41,14 +42,14 @@ void ConstantOfShape::Init(int64_t data_type, const std::vector &value) { this->set_value(value); } -void ConstantOfShape::set_data_type(int64_t data_type) { this->AddAttr(kDataType, MakeValue(data_type)); } +void ConstantOfShape::set_data_type(int64_t data_type) { (void)this->AddAttr(kDataType, MakeValue(data_type)); } int64_t ConstantOfShape::get_data_type() const { auto value_ptr = this->GetAttr(kDataType); return GetValue(value_ptr); } -void ConstantOfShape::set_value(const std::vector &value) { this->AddAttr(kValue, MakeValue(value)); } +void ConstantOfShape::set_value(const std::vector &value) { (void)this->AddAttr(kValue, MakeValue(value)); } std::vector ConstantOfShape::get_value() const { auto value_ptr = this->GetAttr(kValue); diff --git a/mindspore/core/ops/conv2d.cc b/mindspore/core/ops/conv2d.cc index 7d979115a97..cfe7c3ef0ec 100644 --- a/mindspore/core/ops/conv2d.cc +++ b/mindspore/core/ops/conv2d.cc @@ -106,8 +106,8 @@ abstract::ShapePtr Conv2dInferShape(const PrimitivePtr &primitive, const std::ve auto w_shape_map = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[1]->BuildShape()); auto x_shape = x_shape_map[kShape]; auto w_shape = w_shape_map[kShape]; - CheckAndConvertUtils::CheckInteger("x shape size", x_shape.size(), kEqual, 4, primitive->name()); - CheckAndConvertUtils::CheckInteger("w shape size", w_shape.size(), kEqual, 4, primitive->name()); + CheckAndConvertUtils::CheckInteger("x shape size", SizeToLong(x_shape.size()), kEqual, 4, primitive->name()); + CheckAndConvertUtils::CheckInteger("w shape size", SizeToLong(w_shape.size()), kEqual, 4, primitive->name()); auto x_min_shape = x_shape_map[kMinShape]; auto x_max_shape = x_shape_map[kMaxShape]; auto w_min_shape = w_shape_map[kMinShape]; @@ -313,11 +313,12 @@ Format Conv2D::get_format() const { AbstractBasePtr Conv2dInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, const std::vector &input_args) { - CheckAndConvertUtils::CheckInteger("Conv2d infer", input_args.size(), kGreaterEqual, 2, primitive->name()); + CheckAndConvertUtils::CheckInteger("Conv2d infer", SizeToLong(input_args.size()), kGreaterEqual, 2, + primitive->name()); const std::set valid_types = {kInt8, kInt32, kInt64, kFloat16, kFloat32}; std::map types; - (void)types.emplace("x", input_args[0]->BuildType()); - (void)types.emplace("w", input_args[1]->BuildType()); + types.emplace("x", input_args[0]->BuildType()); + types.emplace("w", input_args[1]->BuildType()); CheckAndConvertUtils::CheckTensorTypeSame(types, valid_types, primitive->name()); return abstract::MakeAbstract(Conv2dInferShape(primitive, input_args), Conv2dInferType(primitive, input_args)); } diff --git a/mindspore/core/ops/conv2d_transpose.cc b/mindspore/core/ops/conv2d_transpose.cc index a9d1bd9d2cd..9bc7d1cd0a1 100644 --- a/mindspore/core/ops/conv2d_transpose.cc +++ b/mindspore/core/ops/conv2d_transpose.cc @@ -54,7 +54,7 @@ void Conv2DTranspose::set_out_channel(int64_t out_channel) { void Conv2DTranspose::set_kernel_size(const std::vector &kernel_size) { CheckAndConvertUtils::CheckInteger(kKernelSize, SizeToLong(kernel_size.size()), kEqual, 2, name()); for (int64_t item : kernel_size) { - CheckAndConvertUtils::CheckInteger(kKernelSize, item, kGreaterEqual, 1, name()); + (void)CheckAndConvertUtils::CheckInteger(kKernelSize, item, kGreaterEqual, 1, name()); } AddAttr(kKernelSize, MakeValue(kernel_size)); } @@ -62,7 
+62,7 @@ void Conv2DTranspose::set_kernel_size(const std::vector &kernel_size) { void Conv2DTranspose::set_stride(const std::vector &stride) { CheckAndConvertUtils::CheckInteger(kStride, SizeToLong(stride.size()), kEqual, 2, name()); for (int64_t item : stride) { - CheckAndConvertUtils::CheckInteger(kStride, item, kGreaterEqual, 1, name()); + (void)CheckAndConvertUtils::CheckInteger(kStride, item, kGreaterEqual, 1, name()); } AddAttr(kStride, MakeValue(stride)); } diff --git a/mindspore/core/ops/cos.cc b/mindspore/core/ops/cos.cc index 700580ab40f..845261b3f6e 100644 --- a/mindspore/core/ops/cos.cc +++ b/mindspore/core/ops/cos.cc @@ -13,11 +13,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ +#include "ops/cos.h" #include #include #include -#include "ops/cos.h" namespace mindspore { namespace ops { @@ -44,7 +44,7 @@ TypePtr InferType(const PrimitivePtr &prim, const std::vector & AbstractBasePtr CosInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, const std::vector &input_args) { return std::make_shared(InferType(primitive, input_args), - InferShape(primitive, input_args)->shape()); + InferShape(primitive, input_args)); } REGISTER_PRIMITIVE_C(kNameCos, Cos); } // namespace ops diff --git a/mindspore/core/ops/crop.cc b/mindspore/core/ops/crop.cc index 766c4a8c8b9..8f6dd23864e 100644 --- a/mindspore/core/ops/crop.cc +++ b/mindspore/core/ops/crop.cc @@ -27,14 +27,14 @@ void Crop::Init(const int64_t axis, const std::vector &offsets) { this->set_offsets(offsets); } -void Crop::set_axis(const int64_t axis) { this->AddAttr(kAxis, MakeValue(axis)); } +void Crop::set_axis(const int64_t axis) { (void)this->AddAttr(kAxis, MakeValue(axis)); } int64_t Crop::get_axis() const { auto value_ptr = this->GetAttr(kAxis); return GetValue(value_ptr); } -void Crop::set_offsets(const std::vector &offsets) { this->AddAttr(kOffsets, MakeValue(offsets)); } +void Crop::set_offsets(const std::vector &offsets) { (void)this->AddAttr(kOffsets, MakeValue(offsets)); } std::vector Crop::get_offsets() const { auto value_ptr = this->GetAttr(kOffsets); @@ -44,7 +44,7 @@ AbstractBasePtr CropInfer(const abstract::AnalysisEnginePtr &, const PrimitivePt const std::vector &input_args) { MS_EXCEPTION_IF_NULL(primitive); auto prim_name = primitive->name(); - CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, 2, prim_name); + (void)CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, 2, prim_name); for (const auto &item : input_args) { MS_EXCEPTION_IF_NULL(item); } diff --git a/mindspore/core/ops/custom.cc b/mindspore/core/ops/custom.cc index e01cf2f17af..3e4b7562fd1 100644 --- a/mindspore/core/ops/custom.cc +++ b/mindspore/core/ops/custom.cc @@ -35,10 +35,10 @@ std::string Custom::get_type() const { void Custom::set_attr(const std::map> &attrs) { ValuePtrList value_ptr_list; for (const auto &attr : attrs) { - value_ptr_list.emplace_back(MakeValue(attr.first)); - value_ptr_list.emplace_back(MakeValue>(attr.second)); + (void)value_ptr_list.emplace_back(MakeValue(attr.first)); + (void)value_ptr_list.emplace_back(MakeValue>(attr.second)); } - this->AddAttr(kAttr, MakeValue(value_ptr_list)); + (void)this->AddAttr(kAttr, MakeValue(value_ptr_list)); } std::map> Custom::get_attr() const { diff --git a/mindspore/core/ops/custom_predict.cc b/mindspore/core/ops/custom_predict.cc index 9d3f86d9a15..c3a0079e4d4 100644 --- a/mindspore/core/ops/custom_predict.cc +++ 
b/mindspore/core/ops/custom_predict.cc @@ -26,7 +26,7 @@ void CustomPredict::Init(const int64_t output_num, const float weight_threshold) this->set_weight_threshold(weight_threshold); } -void CustomPredict::set_output_num(const int64_t output_num) { this->AddAttr(kOutputNum, MakeValue(output_num)); } +void CustomPredict::set_output_num(const int64_t output_num) { (void)this->AddAttr(kOutputNum, MakeValue(output_num)); } int64_t CustomPredict::get_output_num() const { auto value_ptr = this->GetAttr(kOutputNum); diff --git a/mindspore/core/ops/detection_post_process.cc b/mindspore/core/ops/detection_post_process.cc index 076fd1a6434..4a1b951d533 100644 --- a/mindspore/core/ops/detection_post_process.cc +++ b/mindspore/core/ops/detection_post_process.cc @@ -43,7 +43,7 @@ int64_t DetectionPostProcess::get_input_size() const { return GetValue(value_ptr); } -void DetectionPostProcess::set_scale(const std::vector &scale) { this->AddAttr(kScale, MakeValue(scale)); } +void DetectionPostProcess::set_scale(const std::vector &scale) { (void)this->AddAttr(kScale, MakeValue(scale)); } std::vector DetectionPostProcess::get_scale() const { auto value_ptr = this->GetAttr(kScale); return GetValue>(value_ptr); @@ -113,8 +113,8 @@ AbstractBasePtr DetectionPostProcessInfer(const abstract::AnalysisEnginePtr &, c const std::vector &input_args) { MS_EXCEPTION_IF_NULL(primitive); auto prim_name = primitive->name(); - CheckAndConvertUtils::CheckInteger("detection_post_process_infer", SizeToLong(input_args.size()), kEqual, 3, - prim_name); + (void)CheckAndConvertUtils::CheckInteger("detection_post_process_infer", SizeToLong(input_args.size()), kEqual, 3, + prim_name); MS_EXCEPTION_IF_NULL(input_args[0]); MS_EXCEPTION_IF_NULL(input_args[1]); MS_EXCEPTION_IF_NULL(input_args[2]); diff --git a/mindspore/core/ops/equal.cc b/mindspore/core/ops/equal.cc index 648622f7147..496a80fd190 100644 --- a/mindspore/core/ops/equal.cc +++ b/mindspore/core/ops/equal.cc @@ -30,7 +30,7 @@ namespace { abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { MS_EXCEPTION_IF_NULL(primitive); auto op_name = primitive->name(); - CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 2, op_name); + (void)CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, 2, op_name); for (const auto &item : input_args) { MS_EXCEPTION_IF_NULL(item); } @@ -40,7 +40,7 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { MS_EXCEPTION_IF_NULL(prim); auto op_name = prim->name(); - CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kGreaterEqual, 2, op_name); + (void)CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kGreaterEqual, 2, op_name); for (const auto &item : input_args) { MS_EXCEPTION_IF_NULL(item); } diff --git a/mindspore/core/ops/exp.cc b/mindspore/core/ops/exp.cc index ddaf794d8f3..44aec9e1dc5 100644 --- a/mindspore/core/ops/exp.cc +++ b/mindspore/core/ops/exp.cc @@ -46,15 +46,14 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { MS_EXCEPTION_IF_NULL(prim); - auto op_name = prim->name(); - CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 1, op_name); + (void)CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, 1, prim->name()); for (const auto &item : input_args) { MS_EXCEPTION_IF_NULL(item); } std::map types; types.emplace("x", 
input_args[0]->BuildType()); std::set valid_params_types = {kTensorType}; - CheckAndConvertUtils::CheckSubClass("x_type", input_args[0]->BuildType(), valid_params_types, op_name); + CheckAndConvertUtils::CheckSubClass("x_type", input_args[0]->BuildType(), valid_params_types, prim->name()); return CheckAndConvertUtils::CheckTensorTypeSame(types, common_valid_types, prim->name()); } } // namespace diff --git a/mindspore/core/ops/expand_dims.cc b/mindspore/core/ops/expand_dims.cc index f347c9e7d8f..d37ad15e555 100644 --- a/mindspore/core/ops/expand_dims.cc +++ b/mindspore/core/ops/expand_dims.cc @@ -31,7 +31,7 @@ AbstractBasePtr ExpandDimsInfer(const abstract::AnalysisEnginePtr &, const Primi const std::vector &input_args) { MS_EXCEPTION_IF_NULL(primitive); auto prim_name = primitive->name(); - CheckAndConvertUtils::CheckInteger("input numbers", input_args.size(), kEqual, 2, prim_name); + (void)CheckAndConvertUtils::CheckInteger("input numbers", SizeToLong(input_args.size()), kEqual, 2, prim_name); for (const auto &item : input_args) { MS_EXCEPTION_IF_NULL(item); } diff --git a/mindspore/core/ops/fft_real.cc b/mindspore/core/ops/fft_real.cc index 294059c12e6..bc22d8f2904 100644 --- a/mindspore/core/ops/fft_real.cc +++ b/mindspore/core/ops/fft_real.cc @@ -28,7 +28,7 @@ AbstractBasePtr FftRealInfer(const abstract::AnalysisEnginePtr &, const Primitiv const std::vector &input_args) { MS_EXCEPTION_IF_NULL(primitive); auto prim_name = primitive->name(); - CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 1, prim_name); + (void)CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, 1, prim_name); for (const auto &item : input_args) { MS_EXCEPTION_IF_NULL(item); } diff --git a/mindspore/core/ops/fill.cc b/mindspore/core/ops/fill.cc index 311db04e6d6..2eef2b63e39 100644 --- a/mindspore/core/ops/fill.cc +++ b/mindspore/core/ops/fill.cc @@ -26,7 +26,7 @@ AbstractBasePtr FillInfer(const abstract::AnalysisEnginePtr &, const PrimitivePt const std::vector &input_args) { MS_EXCEPTION_IF_NULL(primitive); auto prim_name = primitive->name(); - CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, 3, prim_name); + (void)CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, 3, prim_name); for (const auto &item : input_args) { MS_EXCEPTION_IF_NULL(item); } @@ -47,11 +47,11 @@ AbstractBasePtr FillInfer(const abstract::AnalysisEnginePtr &, const PrimitivePt tensor::TensorPtr tensor = std::make_shared(x_type_id, out_shape); auto mem_size = IntToSize(tensor->ElementsNum()); if (x_type_id == kNumberTypeInt) { - auto num = GetValue(x_value); - SetTensorData(tensor->data_c(), num, mem_size); + auto int_value = GetValue(x_value); + SetTensorData(tensor->data_c(), int_value, mem_size); } else if (x_type_id == kNumberTypeFloat || x_type_id == kNumberTypeFloat32) { - auto num = GetValue(x_value); - SetTensorData(tensor->data_c(), num, mem_size); + auto float_value = GetValue(x_value); + SetTensorData(tensor->data_c(), float_value, mem_size); } else { MS_LOG(ERROR) << " Fill not supported to flod the constant type " << input_args[2]->ToString(); } diff --git a/mindspore/core/ops/flatten.cc b/mindspore/core/ops/flatten.cc index a6c421e7e76..f19abba5e7d 100644 --- a/mindspore/core/ops/flatten.cc +++ b/mindspore/core/ops/flatten.cc @@ -24,7 +24,8 @@ namespace { abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { MS_EXCEPTION_IF_NULL(primitive); auto 
prim_name = primitive->name(); - CheckAndConvertUtils::CheckInteger("input args size", input_args.size(), kGreaterEqual, 1, prim_name); + (void)CheckAndConvertUtils::CheckInteger("input args size", SizeToLong(input_args.size()), kGreaterEqual, 1, + prim_name); auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape]; auto prod = 1; int64_t size = x_shape.size(); diff --git a/mindspore/core/ops/fused_batch_norm.cc b/mindspore/core/ops/fused_batch_norm.cc index ca527080dc9..68f197a17e2 100644 --- a/mindspore/core/ops/fused_batch_norm.cc +++ b/mindspore/core/ops/fused_batch_norm.cc @@ -27,11 +27,11 @@ void FusedBatchNorm::Init(const int64_t mode, const float epsilon, const float m this->set_momentum(momentum); } -void FusedBatchNorm::set_mode(const int64_t mode) { this->AddAttr(kMode, MakeValue(mode)); } +void FusedBatchNorm::set_mode(const int64_t mode) { (void)this->AddAttr(kMode, MakeValue(mode)); } -void FusedBatchNorm::set_epsilon(const float epsilon) { this->AddAttr(kEpsilon, MakeValue(epsilon)); } +void FusedBatchNorm::set_epsilon(const float epsilon) { (void)this->AddAttr(kEpsilon, MakeValue(epsilon)); } -void FusedBatchNorm::set_momentum(const float momentum) { this->AddAttr(kMomentum, MakeValue(momentum)); } +void FusedBatchNorm::set_momentum(const float momentum) { (void)this->AddAttr(kMomentum, MakeValue(momentum)); } int64_t FusedBatchNorm::get_mode() const { auto value_ptr = this->GetAttr(kMode); diff --git a/mindspore/core/ops/fusion/activation.cc b/mindspore/core/ops/fusion/activation.cc index d270fa59a60..ed807f93585 100644 --- a/mindspore/core/ops/fusion/activation.cc +++ b/mindspore/core/ops/fusion/activation.cc @@ -23,16 +23,15 @@ namespace mindspore { namespace ops { -void Activation::set_alpha(const float alpha) { this->AddAttr(kAlpha, MakeValue(alpha)); } +void Activation::set_alpha(const float alpha) { (void)this->AddAttr(kAlpha, MakeValue(alpha)); } -void Activation::set_min_val(const float min_val) { this->AddAttr(kMinVal, MakeValue(min_val)); } +void Activation::set_min_val(const float min_val) { (void)this->AddAttr(kMinVal, MakeValue(min_val)); } -void Activation::set_max_val(const float max_val) { this->AddAttr(kMaxVal, MakeValue(max_val)); } +void Activation::set_max_val(const float max_val) { (void)this->AddAttr(kMaxVal, MakeValue(max_val)); } void Activation::set_activation_type(const ActivationType &activation_type) { - int64_t swi; - swi = activation_type; - this->AddAttr(kActivationType, MakeValue(swi)); + int64_t swi = activation_type; + (void)this->AddAttr(kActivationType, MakeValue(swi)); } float Activation::get_alpha() const { diff --git a/mindspore/core/ops/fusion/add_fusion.cc b/mindspore/core/ops/fusion/add_fusion.cc index 1635fd58cc1..b510a5c3e4f 100644 --- a/mindspore/core/ops/fusion/add_fusion.cc +++ b/mindspore/core/ops/fusion/add_fusion.cc @@ -26,9 +26,8 @@ namespace mindspore { namespace ops { void AddFusion::set_activation_type(const ActivationType activation_type) { - int64_t swi; - swi = activation_type; - this->AddAttr(kActivationType, MakeValue(swi)); + int64_t swi = activation_type; + (void)this->AddAttr(kActivationType, MakeValue(swi)); } ActivationType AddFusion::get_activation_type() const { auto value_ptr = GetAttr(kActivationType); diff --git a/mindspore/core/ops/fusion/adder_fusion.cc b/mindspore/core/ops/fusion/adder_fusion.cc index d788d184605..edcea6d36db 100644 --- a/mindspore/core/ops/fusion/adder_fusion.cc +++ b/mindspore/core/ops/fusion/adder_fusion.cc @@ -37,7 +37,7 @@ void 
AdderFusion::Init(const int64_t in_channel, const int64_t out_channel, cons void AdderFusion::set_activation_type(const ActivationType activation_type) { int64_t swi = activation_type; - this->AddAttr(kActivationType, MakeValue(swi)); + (void)this->AddAttr(kActivationType, MakeValue(swi)); } ActivationType AdderFusion::get_activation_type() const { diff --git a/mindspore/core/ops/fusion/arg_max_fusion.cc b/mindspore/core/ops/fusion/arg_max_fusion.cc index 35ac8bdc9fc..9966998fac2 100644 --- a/mindspore/core/ops/fusion/arg_max_fusion.cc +++ b/mindspore/core/ops/fusion/arg_max_fusion.cc @@ -25,11 +25,11 @@ void ArgMaxFusion::Init(const bool keep_dims, const bool out_max_value, const in set_top_k(top_k); } -void ArgMaxFusion::set_keep_dims(const bool keep_dims) { this->AddAttr(kKeepDims, MakeValue(keep_dims)); } +void ArgMaxFusion::set_keep_dims(const bool keep_dims) { (void)this->AddAttr(kKeepDims, MakeValue(keep_dims)); } void ArgMaxFusion::set_out_max_value(const bool out_max_value) { - this->AddAttr(kOutMaxValue, MakeValue(out_max_value)); + (void)this->AddAttr(kOutMaxValue, MakeValue(out_max_value)); } -void ArgMaxFusion::set_top_k(const int64_t top_k) { this->AddAttr(kTopK, MakeValue(top_k)); } +void ArgMaxFusion::set_top_k(const int64_t top_k) { (void)this->AddAttr(kTopK, MakeValue(top_k)); } bool ArgMaxFusion::get_keep_dims() const { return GetValue(GetAttr(kKeepDims)); } bool ArgMaxFusion::get_out_max_value() const { return GetValue(GetAttr(kOutMaxValue)); } diff --git a/mindspore/core/ops/fusion/arg_min_fusion.cc b/mindspore/core/ops/fusion/arg_min_fusion.cc index d72ebe8f31a..d48a4c7d53b 100644 --- a/mindspore/core/ops/fusion/arg_min_fusion.cc +++ b/mindspore/core/ops/fusion/arg_min_fusion.cc @@ -25,9 +25,9 @@ void ArgMinFusion::Init(bool keep_dims, bool out_max_value, int64_t top_k, int64 set_top_k(top_k); } -void ArgMinFusion::set_keep_dims(const bool keep_dims) { this->AddAttr(kKeepDims, MakeValue(keep_dims)); } -void ArgMinFusion::set_out_max_value(bool out_max_value) { AddAttr(kOutMaxValue, MakeValue(out_max_value)); } -void ArgMinFusion::set_top_k(int64_t top_k) { this->AddAttr(kTopK, MakeValue(top_k)); } +void ArgMinFusion::set_keep_dims(const bool keep_dims) { (void)this->AddAttr(kKeepDims, MakeValue(keep_dims)); } +void ArgMinFusion::set_out_max_value(bool out_max_value) { (void)AddAttr(kOutMaxValue, MakeValue(out_max_value)); } +void ArgMinFusion::set_top_k(int64_t top_k) { (void)this->AddAttr(kTopK, MakeValue(top_k)); } bool ArgMinFusion::get_keep_dims() const { return GetValue(GetAttr(kKeepDims)); } bool ArgMinFusion::get_out_max_value() const { diff --git a/mindspore/core/ops/fusion/avg_pool_fusion.cc b/mindspore/core/ops/fusion/avg_pool_fusion.cc index 5ca9dd5afaa..ce6535b6136 100644 --- a/mindspore/core/ops/fusion/avg_pool_fusion.cc +++ b/mindspore/core/ops/fusion/avg_pool_fusion.cc @@ -31,12 +31,11 @@ void AvgPoolFusion::Init(const std::vector &kernel_size, const std::vec this->set_activation_type(activation_type); } -void AvgPoolFusion::set_global(const bool global) { AddAttr(kGlobal, MakeValue(global)); } +void AvgPoolFusion::set_global(const bool global) { (void)AddAttr(kGlobal, MakeValue(global)); } void AvgPoolFusion::set_activation_type(ActivationType activation_type) { - int64_t swi; - swi = activation_type; - this->AddAttr(kActivationType, MakeValue(swi)); + int64_t swi = activation_type; + (void)this->AddAttr(kActivationType, MakeValue(swi)); } bool AvgPoolFusion::get_global() const { @@ -58,7 +57,7 @@ abstract::ShapePtr InferShape(const PrimitivePtr 
&primitive, const std::vector>(primitive->GetAttr(kKernelSize)); auto pad_mode = PadMode(GetValue(primitive->GetAttr(kPadMode))); auto batch = in_shape[0]; diff --git a/mindspore/core/ops/fusion/conv2d_backprop_filter_fusion.cc b/mindspore/core/ops/fusion/conv2d_backprop_filter_fusion.cc index c06535ece72..732ead683f0 100644 --- a/mindspore/core/ops/fusion/conv2d_backprop_filter_fusion.cc +++ b/mindspore/core/ops/fusion/conv2d_backprop_filter_fusion.cc @@ -43,11 +43,11 @@ void Conv2DBackpropFilterFusion::Init(const int64_t out_channel, const std::vect void Conv2DBackpropFilterFusion::set_activation_type(const ActivationType activation_type) { int64_t swi = activation_type; - this->AddAttr(kActivationType, MakeValue(swi)); + (void)this->AddAttr(kActivationType, MakeValue(swi)); } void Conv2DBackpropFilterFusion::set_in_channel(const int64_t in_channel) { - this->AddAttr(kInChannel, MakeValue(in_channel)); + (void)this->AddAttr(kInChannel, MakeValue(in_channel)); } ActivationType Conv2DBackpropFilterFusion::get_activation_type() const { diff --git a/mindspore/core/ops/fusion/conv2d_backprop_input_fusion.cc b/mindspore/core/ops/fusion/conv2d_backprop_input_fusion.cc index 088c73dc2dc..71439e06cca 100644 --- a/mindspore/core/ops/fusion/conv2d_backprop_input_fusion.cc +++ b/mindspore/core/ops/fusion/conv2d_backprop_input_fusion.cc @@ -41,12 +41,13 @@ void Conv2DBackpropInputFusion::Init(int64_t in_channel, int64_t out_channel, co this->set_activation_type(activation_type); } -void Conv2DBackpropInputFusion::set_in_channel(int64_t in_channel) { this->AddAttr(kInChannel, MakeValue(in_channel)); } +void Conv2DBackpropInputFusion::set_in_channel(int64_t in_channel) { + (void)this->AddAttr(kInChannel, MakeValue(in_channel)); +} void Conv2DBackpropInputFusion::set_activation_type(const ActivationType &activation_type) { - int64_t swi; - swi = activation_type; - this->AddAttr(kActivationType, MakeValue(swi)); + int64_t swi = activation_type; + (void)this->AddAttr(kActivationType, MakeValue(swi)); } int64_t Conv2DBackpropInputFusion::get_in_channel() const { auto value_ptr = GetAttr(kInChannel); diff --git a/mindspore/core/ops/fusion/conv2d_fusion.cc b/mindspore/core/ops/fusion/conv2d_fusion.cc index b516c8734da..291e440da8d 100644 --- a/mindspore/core/ops/fusion/conv2d_fusion.cc +++ b/mindspore/core/ops/fusion/conv2d_fusion.cc @@ -38,12 +38,13 @@ void Conv2DFusion::Init(int64_t in_channel, int64_t out_channel, const std::vect this->set_pad_list(pad_list); this->set_activation_type(activation_type); } -void Conv2DFusion::set_in_channel(const int64_t in_channel) { this->AddAttr(kInChannel, MakeValue(in_channel)); } -void Conv2DFusion::set_pad_list(const std::vector &pad_list) { this->AddAttr(kPadList, MakeValue(pad_list)); } +void Conv2DFusion::set_in_channel(const int64_t in_channel) { (void)this->AddAttr(kInChannel, MakeValue(in_channel)); } +void Conv2DFusion::set_pad_list(const std::vector &pad_list) { + (void)this->AddAttr(kPadList, MakeValue(pad_list)); +} void Conv2DFusion::set_activation_type(const ActivationType &activation_type) { - int64_t swi; - swi = activation_type; - this->AddAttr(kActivationType, MakeValue(swi)); + int64_t swi = activation_type; + (void)this->AddAttr(kActivationType, MakeValue(swi)); } int64_t Conv2DFusion::get_in_channel() const { auto value_ptr = GetAttr(kInChannel); diff --git a/mindspore/core/ops/fusion/conv2d_transpose_fusion.cc b/mindspore/core/ops/fusion/conv2d_transpose_fusion.cc index 1c09872eb2d..b7490a704af 100644 --- 
a/mindspore/core/ops/fusion/conv2d_transpose_fusion.cc +++ b/mindspore/core/ops/fusion/conv2d_transpose_fusion.cc @@ -40,32 +40,33 @@ void Conv2dTransposeFusion::Init(int64_t in_channel, int64_t out_channel, const } void Conv2dTransposeFusion::set_kernel_size(const std::vector &kernel_size) { - CheckAndConvertUtils::CheckInteger(kKernelSize, SizeToLong(kernel_size.size()), kEqual, 2, name()); + (void)CheckAndConvertUtils::CheckInteger(kKernelSize, SizeToLong(kernel_size.size()), kEqual, 2, name()); for (int64_t item : kernel_size) { - CheckAndConvertUtils::CheckInteger(kKernelSize, item, kGreaterEqual, 1, name()); + (void)CheckAndConvertUtils::CheckInteger(kKernelSize, item, kGreaterEqual, 1, name()); } - AddAttr(kKernelSize, MakeValue(kernel_size)); + (void)AddAttr(kKernelSize, MakeValue(kernel_size)); } void Conv2dTransposeFusion::set_dilation(const std::vector &dilation) { - CheckAndConvertUtils::CheckInteger(kDilation, SizeToLong(dilation.size()), kEqual, 2, name()); + (void)CheckAndConvertUtils::CheckInteger(kDilation, SizeToLong(dilation.size()), kEqual, 2, name()); for (int64_t item : dilation) { - CheckAndConvertUtils::CheckInteger(kDilation, item, kGreaterEqual, 1, name()); + (void)CheckAndConvertUtils::CheckInteger(kDilation, item, kGreaterEqual, 1, name()); } - AddAttr(kDilation, MakeValue(dilation)); + (void)AddAttr(kDilation, MakeValue(dilation)); } void Conv2dTransposeFusion::set_output_paddings(const std::vector &output_paddings) { - CheckAndConvertUtils::CheckInteger(kOutputPaddings, output_paddings.size(), kGreaterEqual, 1, name()); + (void)CheckAndConvertUtils::CheckInteger(kOutputPaddings, SizeToLong(output_paddings.size()), kGreaterEqual, 1, + name()); for (int64_t item : output_paddings) { - CheckAndConvertUtils::CheckInteger(kOutputPaddings, item, kGreaterEqual, 0, name()); + (void)CheckAndConvertUtils::CheckInteger(kOutputPaddings, item, kGreaterEqual, 0, name()); } - AddAttr(kOutputPaddings, MakeValue(output_paddings)); + (void)AddAttr(kOutputPaddings, MakeValue(output_paddings)); } void Conv2dTransposeFusion::set_activation_type(ActivationType activation_type) { int64_t swi = activation_type; - this->AddAttr(kActivationType, MakeValue(swi)); + (void)this->AddAttr(kActivationType, MakeValue(swi)); } std::vector Conv2dTransposeFusion::get_output_paddings() const { diff --git a/mindspore/core/ops/fusion/div_fusion.cc b/mindspore/core/ops/fusion/div_fusion.cc index 4fc79cd025c..b1c919f6687 100644 --- a/mindspore/core/ops/fusion/div_fusion.cc +++ b/mindspore/core/ops/fusion/div_fusion.cc @@ -23,9 +23,8 @@ namespace ops { void DivFusion::Init(const ActivationType &activation_type) { this->set_activation_type(activation_type); } void DivFusion::set_activation_type(const ActivationType &activation_type) { - int64_t swi; - swi = activation_type; - this->AddAttr(kActivationType, MakeValue(swi)); + int64_t swi = activation_type; + (void)this->AddAttr(kActivationType, MakeValue(swi)); } ActivationType DivFusion::get_activation_type() const { diff --git a/mindspore/core/ops/fusion/embedding_lookup_fusion.cc b/mindspore/core/ops/fusion/embedding_lookup_fusion.cc index 934321bdb02..b243fef9ef8 100644 --- a/mindspore/core/ops/fusion/embedding_lookup_fusion.cc +++ b/mindspore/core/ops/fusion/embedding_lookup_fusion.cc @@ -20,7 +20,7 @@ namespace mindspore { namespace ops { -void EmbeddingLookupFusion::set_max_norm(const float max_norm) { this->AddAttr(kMaxNorm, MakeValue(max_norm)); } +void EmbeddingLookupFusion::set_max_norm(const float max_norm) { (void)this->AddAttr(kMaxNorm, 
MakeValue(max_norm)); } float EmbeddingLookupFusion::get_max_norm() const { auto value_ptr = GetAttr(kMaxNorm); return GetValue(value_ptr); diff --git a/mindspore/core/ops/fusion/exp_fusion.cc b/mindspore/core/ops/fusion/exp_fusion.cc index 02d65ff43d4..e3f216605a7 100644 --- a/mindspore/core/ops/fusion/exp_fusion.cc +++ b/mindspore/core/ops/fusion/exp_fusion.cc @@ -29,11 +29,11 @@ void ExpFusion::Init(const float base, const float scale, const float shift) { this->set_shift(shift); } -void ExpFusion::set_base(const float base) { this->AddAttr(kBase, MakeValue(base)); } +void ExpFusion::set_base(const float base) { (void)this->AddAttr(kBase, MakeValue(base)); } -void ExpFusion::set_scale(const float scale) { this->AddAttr(kScale, MakeValue(scale)); } +void ExpFusion::set_scale(const float scale) { (void)this->AddAttr(kScale, MakeValue(scale)); } -void ExpFusion::set_shift(const float shift) { this->AddAttr(kShift, MakeValue(shift)); } +void ExpFusion::set_shift(const float shift) { (void)this->AddAttr(kShift, MakeValue(shift)); } float ExpFusion::get_base() const { auto value_ptr = GetAttr(kBase); diff --git a/mindspore/core/ops/fusion/full_connection.cc b/mindspore/core/ops/fusion/full_connection.cc index 651a10a2419..9581d9ea2a8 100644 --- a/mindspore/core/ops/fusion/full_connection.cc +++ b/mindspore/core/ops/fusion/full_connection.cc @@ -20,19 +20,19 @@ namespace mindspore { namespace ops { -void FullConnection::set_has_bias(const bool has_bias) { this->AddAttr(kHasBias, MakeValue(has_bias)); } +void FullConnection::set_has_bias(const bool has_bias) { (void)this->AddAttr(kHasBias, MakeValue(has_bias)); } + bool FullConnection::get_has_bias() const { return GetValue(GetAttr(kHasBias)); } -void FullConnection::set_axis(const int64_t axis) { this->AddAttr(kAxis, MakeValue(axis)); } +void FullConnection::set_axis(const int64_t axis) { (void)this->AddAttr(kAxis, MakeValue(axis)); } int64_t FullConnection::get_axis() const { return GetValue(GetAttr(kAxis)); } -void FullConnection::set_use_axis(const bool use_axis) { this->AddAttr(kUseAxis, MakeValue(use_axis)); } +void FullConnection::set_use_axis(const bool use_axis) { (void)this->AddAttr(kUseAxis, MakeValue(use_axis)); } bool FullConnection::get_use_axis() const { return GetValue(GetAttr(kUseAxis)); } void FullConnection::set_activation_type(const ActivationType &activation_type) { - int64_t swi; - swi = activation_type; - this->AddAttr(kActivationType, MakeValue(swi)); + int64_t swi = activation_type; + (void)this->AddAttr(kActivationType, MakeValue(swi)); } ActivationType FullConnection::get_activation_type() const { auto value_ptr = GetAttr(kActivationType); @@ -58,9 +58,9 @@ AbstractBasePtr FullConnectionInfer(const abstract::AnalysisEnginePtr &, const P auto prim_axis = GetValue(primitive->GetAttr(kAxis)); auto has_bias = GetValue(primitive->GetAttr(kHasBias)); if (has_bias) { - CheckAndConvertUtils::CheckInteger("input_args.size()", input_args.size(), kEqual, 3, prim_name); + (void)CheckAndConvertUtils::CheckInteger("input_args.size()", input_args.size(), kEqual, 3, prim_name); } else { - CheckAndConvertUtils::CheckInteger("input_args.size()", input_args.size(), kEqual, 2, prim_name); + (void)CheckAndConvertUtils::CheckInteger("input_args.size()", input_args.size(), kEqual, 2, prim_name); } auto use_axis = GetValue(primitive->GetAttr(kUseAxis)); if (use_axis && (prim_axis < 1 || prim_axis > (int64_t)input0_shape.size())) { @@ -68,7 +68,7 @@ AbstractBasePtr FullConnectionInfer(const abstract::AnalysisEnginePtr &, const P } int64_t 
new_k = 1; if (use_axis) { - for (size_t t = prim_axis; t < input0_shape.size(); t++) { + for (size_t t = LongToSize(prim_axis); t < input0_shape.size(); t++) { new_k *= input0_shape[t]; } if (new_k != input1_shape[1]) { @@ -85,8 +85,8 @@ AbstractBasePtr FullConnectionInfer(const abstract::AnalysisEnginePtr &, const P } std::vector out_shape = {(int64_t)input0_shape.size()}; if (use_axis) { - out_shape.resize(prim_axis + 1); - out_shape[prim_axis] = input1_shape[0]; + out_shape.resize(LongToSize(prim_axis) + 1); + out_shape[LongToSize(prim_axis)] = input1_shape[0]; } else { int64_t total = 1; for (size_t i = 0; i < input0_shape.size(); i++) { diff --git a/mindspore/core/ops/fusion/l2_normalize_fusion.cc b/mindspore/core/ops/fusion/l2_normalize_fusion.cc index 3c8198fb442..29341d864b9 100644 --- a/mindspore/core/ops/fusion/l2_normalize_fusion.cc +++ b/mindspore/core/ops/fusion/l2_normalize_fusion.cc @@ -28,9 +28,8 @@ void L2NormalizeFusion::Init(const std::vector &axis, const float epsil } void L2NormalizeFusion::set_activation_type(const ActivationType &activation_type) { - int64_t swi; - swi = activation_type; - this->AddAttr(kActivationType, MakeValue(swi)); + int64_t swi = activation_type; + (void)this->AddAttr(kActivationType, MakeValue(swi)); } ActivationType L2NormalizeFusion::get_activation_type() const { diff --git a/mindspore/core/ops/fusion/layer_norm_fusion.cc b/mindspore/core/ops/fusion/layer_norm_fusion.cc index 4fa127ff1fe..5a12e31daac 100644 --- a/mindspore/core/ops/fusion/layer_norm_fusion.cc +++ b/mindspore/core/ops/fusion/layer_norm_fusion.cc @@ -27,7 +27,7 @@ void LayerNormFusion::Init(const int64_t begin_norm_axis, const int64_t begin_pa } void LayerNormFusion::set_elementwise_affine(const bool elementwise_affine) { - AddAttr(kElementwiseAffine, MakeValue(elementwise_affine)); + (void)AddAttr(kElementwiseAffine, MakeValue(elementwise_affine)); } bool LayerNormFusion::get_elementwise_affine() const { diff --git a/mindspore/core/ops/fusion/max_pool_fusion.cc b/mindspore/core/ops/fusion/max_pool_fusion.cc index ada2bee6262..9aafcb1391c 100644 --- a/mindspore/core/ops/fusion/max_pool_fusion.cc +++ b/mindspore/core/ops/fusion/max_pool_fusion.cc @@ -31,12 +31,11 @@ void MaxPoolFusion::Init(const std::vector &kernel_size, const std::vec this->set_activation_type(activation_type); } -void MaxPoolFusion::set_global(const bool global) { AddAttr(kGlobal, MakeValue(global)); } +void MaxPoolFusion::set_global(const bool global) { (void)AddAttr(kGlobal, MakeValue(global)); } void MaxPoolFusion::set_activation_type(ActivationType activation_type) { - int64_t swi; - swi = activation_type; - this->AddAttr(kActivationType, MakeValue(swi)); + int64_t swi = activation_type; + (void)this->AddAttr(kActivationType, MakeValue(swi)); } bool MaxPoolFusion::get_global() const { @@ -58,7 +57,7 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector>(primitive->GetAttr(kKernelSize)); auto pad_mode = PadMode(GetValue(primitive->GetAttr(kPadMode))); auto batch = in_shape[0]; diff --git a/mindspore/core/ops/fusion/mul_fusion.cc b/mindspore/core/ops/fusion/mul_fusion.cc index 429686439c2..d29e927a221 100644 --- a/mindspore/core/ops/fusion/mul_fusion.cc +++ b/mindspore/core/ops/fusion/mul_fusion.cc @@ -24,9 +24,8 @@ namespace mindspore { namespace ops { void MulFusion::set_activation_type(const ActivationType &activation_type) { - int64_t swi; - swi = activation_type; - this->AddAttr(kActivationType, MakeValue(swi)); + int64_t swi = activation_type; + 
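
The full_connection.cc hunk replaces a raw signed initializer for a size_t loop index with LongToSize(prim_axis), making the signed-to-unsigned conversion explicit before indexing. A standalone sketch of the idea; this LongToSize is a simplified stand-in for MindSpore's conversion helper, not its actual implementation:

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <vector>

// Simplified stand-in: convert a signed axis to size_t, rejecting negatives
// instead of letting them wrap around to huge unsigned values.
size_t LongToSize(int64_t v) {
  if (v < 0) {
    throw std::out_of_range("negative value cannot index a container");
  }
  return static_cast<size_t>(v);
}

int main() {
  std::vector<int64_t> shape = {2, 3, 4, 5};
  int64_t prim_axis = 1;
  int64_t new_k = 1;
  // Mirrors the patched loop: start the unsigned index from a checked conversion.
  for (size_t t = LongToSize(prim_axis); t < shape.size(); ++t) {
    new_k *= shape[t];
  }
  std::cout << "flattened tail size: " << new_k << std::endl;  // 3 * 4 * 5 = 60
  return 0;
}
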
(void)this->AddAttr(kActivationType, MakeValue(swi)); } ActivationType MulFusion::get_activation_type() const { auto value_ptr = GetAttr(kActivationType); diff --git a/mindspore/core/ops/fusion/pad_fusion.cc b/mindspore/core/ops/fusion/pad_fusion.cc index be0643e9310..17818349164 100644 --- a/mindspore/core/ops/fusion/pad_fusion.cc +++ b/mindspore/core/ops/fusion/pad_fusion.cc @@ -29,13 +29,12 @@ void PadFusion::Init(const PaddingMode &padding_mode, const float constant_value } void PadFusion::set_padding_mode(const PaddingMode &padding_mode) { - int64_t swi; - swi = padding_mode; - this->AddAttr(kPaddingMode, MakeValue(swi)); + int64_t swi = padding_mode; + (void)this->AddAttr(kPaddingMode, MakeValue(swi)); } void PadFusion::set_constant_value(const float constant_value) { - this->AddAttr(kConstantValue, MakeValue(constant_value)); + (void)this->AddAttr(kConstantValue, MakeValue(constant_value)); } PaddingMode PadFusion::get_padding_mode() const { diff --git a/mindspore/core/ops/fusion/partial_fusion.cc b/mindspore/core/ops/fusion/partial_fusion.cc index 709e28a9e16..cb12699e39a 100644 --- a/mindspore/core/ops/fusion/partial_fusion.cc +++ b/mindspore/core/ops/fusion/partial_fusion.cc @@ -21,7 +21,7 @@ namespace mindspore { namespace ops { void PartialFusion::Init(const int64_t sub_graph_index) { this->set_sub_graph_index(sub_graph_index); } void PartialFusion::set_sub_graph_index(const int64_t sub_graph_index) { - this->AddAttr(kSubGraphIndex, MakeValue(sub_graph_index)); + (void)this->AddAttr(kSubGraphIndex, MakeValue(sub_graph_index)); } int64_t PartialFusion::get_sub_graph_index() const { auto value_ptr = GetAttr(kSubGraphIndex); diff --git a/mindspore/core/ops/fusion/pow_fusion.cc b/mindspore/core/ops/fusion/pow_fusion.cc index 648c1742961..4a8abdd8122 100644 --- a/mindspore/core/ops/fusion/pow_fusion.cc +++ b/mindspore/core/ops/fusion/pow_fusion.cc @@ -28,8 +28,8 @@ void PowFusion::Init(const float &scale, const float &shift) { this->set_shift(shift); } -void PowFusion::set_scale(const float &scale) { this->AddAttr(kScale, MakeValue(scale)); } -void PowFusion::set_shift(const float &shift) { this->AddAttr(kShift, MakeValue(shift)); } +void PowFusion::set_scale(const float &scale) { (void)this->AddAttr(kScale, MakeValue(scale)); } +void PowFusion::set_shift(const float &shift) { (void)this->AddAttr(kShift, MakeValue(shift)); } float PowFusion::get_scale() const { return GetValue(GetAttr(kScale)); } float PowFusion::get_shift() const { return GetValue(GetAttr(kShift)); } diff --git a/mindspore/core/ops/fusion/prelu_fusion.cc b/mindspore/core/ops/fusion/prelu_fusion.cc index cae628c8828..4566d9b669c 100644 --- a/mindspore/core/ops/fusion/prelu_fusion.cc +++ b/mindspore/core/ops/fusion/prelu_fusion.cc @@ -29,10 +29,10 @@ void PReLUFusion::Init(const bool channel_shared, const std::vector &slop } void PReLUFusion::set_channel_shared(const bool channel_shared) { - this->AddAttr(kChannelShared, MakeValue(channel_shared)); + (void)this->AddAttr(kChannelShared, MakeValue(channel_shared)); } -void PReLUFusion::set_slope(const std::vector &slope) { this->AddAttr(kSlope, MakeValue(slope)); } +void PReLUFusion::set_slope(const std::vector &slope) { (void)this->AddAttr(kSlope, MakeValue(slope)); } bool PReLUFusion::get_channel_shared() const { auto value_ptr = GetAttr(kChannelShared); diff --git a/mindspore/core/ops/fusion/reduce_fusion.cc b/mindspore/core/ops/fusion/reduce_fusion.cc index ec0b99702f0..4eada9e2f45 100644 --- a/mindspore/core/ops/fusion/reduce_fusion.cc +++ 
b/mindspore/core/ops/fusion/reduce_fusion.cc @@ -26,19 +26,18 @@ namespace mindspore { namespace ops { -void ReduceFusion::set_keep_dims(const bool keep_dims) { this->AddAttr(kKeepDims, MakeValue(keep_dims)); } +void ReduceFusion::set_keep_dims(const bool keep_dims) { (void)this->AddAttr(kKeepDims, MakeValue(keep_dims)); } void ReduceFusion::set_mode(const ReduceMode mode) { - int64_t swi; - swi = mode; - this->AddAttr(kMode, MakeValue(swi)); + int64_t swi = mode; + (void)this->AddAttr(kMode, MakeValue(swi)); } void ReduceFusion::set_reduce_to_end(const bool reduce_to_end) { - this->AddAttr(kReduceToEnd, MakeValue(reduce_to_end)); + (void)this->AddAttr(kReduceToEnd, MakeValue(reduce_to_end)); } -void ReduceFusion::set_coeff(const float coeff) { this->AddAttr(kCoeff, MakeValue(coeff)); } +void ReduceFusion::set_coeff(const float coeff) { (void)this->AddAttr(kCoeff, MakeValue(coeff)); } bool ReduceFusion::get_keep_dims() const { auto value_ptr = GetAttr(kKeepDims); diff --git a/mindspore/core/ops/fusion/scale_fusion.cc b/mindspore/core/ops/fusion/scale_fusion.cc index 23fa6243e9d..cf1bb04d1da 100644 --- a/mindspore/core/ops/fusion/scale_fusion.cc +++ b/mindspore/core/ops/fusion/scale_fusion.cc @@ -26,9 +26,8 @@ void ScaleFusion::Init(const int64_t axis, const ActivationType &activation_type } void ScaleFusion::set_activation_type(const ActivationType &activation_type) { - int64_t swi; - swi = activation_type; - this->AddAttr(kActivationType, MakeValue(swi)); + int64_t swi = activation_type; + (void)this->AddAttr(kActivationType, MakeValue(swi)); } ActivationType ScaleFusion::get_activation_type() const { diff --git a/mindspore/core/ops/fusion/slice_fusion.cc b/mindspore/core/ops/fusion/slice_fusion.cc index f85a534e77c..ecb6df7278c 100644 --- a/mindspore/core/ops/fusion/slice_fusion.cc +++ b/mindspore/core/ops/fusion/slice_fusion.cc @@ -22,7 +22,7 @@ namespace mindspore { namespace ops { void SliceFusion::Init(const std::vector &axes) { this->set_axes(axes); } -void SliceFusion::set_axes(const std::vector &axes) { this->AddAttr(kAxes, MakeValue(axes)); } +void SliceFusion::set_axes(const std::vector &axes) { (void)this->AddAttr(kAxes, MakeValue(axes)); } std::vector SliceFusion::get_axes() const { auto value_ptr = GetAttr(kAxes); @@ -52,7 +52,7 @@ AbstractBasePtr SliceFusionInfer(const abstract::AnalysisEnginePtr &, const Prim CheckAndConvertUtils::Check("len of size", (int64_t)size.size(), kEqual, "len x's dim", SizeToLong(x_shape_len)); for (size_t i = 0; i < x_shape_len; i++) { - CheckAndConvertUtils::CheckInteger("input size[" + std::to_string(i) + "]", size[i], kGreaterThan, 0, ""); + (void)CheckAndConvertUtils::CheckInteger("input size[" + std::to_string(i) + "]", size[i], kGreaterThan, 0, ""); if (x_shape[i] < (begin[i] + size[i])) { auto y = begin[i] + size[i]; MS_EXCEPTION(ValueError) << "For " + op_name + "slice shape can't bigger than origin shape " + diff --git a/mindspore/core/ops/fusion/sub_fusion.cc b/mindspore/core/ops/fusion/sub_fusion.cc index a3cde51cba6..e15c588de26 100644 --- a/mindspore/core/ops/fusion/sub_fusion.cc +++ b/mindspore/core/ops/fusion/sub_fusion.cc @@ -23,9 +23,8 @@ namespace ops { void SubFusion::Init(const ActivationType &activation_type) { this->set_activation_type(activation_type); } void SubFusion::set_activation_type(const ActivationType &activation_type) { - int64_t swi; - swi = activation_type; - this->AddAttr(kActivationType, MakeValue(swi)); + int64_t swi = activation_type; + (void)this->AddAttr(kActivationType, MakeValue(swi)); } ActivationType 
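
A second recurring cleanup in these fusion setters (DivFusion, FullConnection, ReduceFusion, ScaleFusion, SubFusion) collapses the two-step "int64_t swi; swi = x;" into a single initialization before storing the enum as an int64 attribute. A self-contained sketch of that store/load round trip — the enum values and attribute map here are illustrative, and the scoped enum needs explicit casts where MindSpore's unscoped enums convert implicitly:

#include <cstdint>
#include <iostream>
#include <map>
#include <string>

enum class ActivationType : int64_t { NO_ACTIVATION = 0, RELU = 1, SIGMOID = 2 };

std::map<std::string, int64_t> attrs;

// Store the enum as a plain int64 attribute, as the fusion setters do.
void set_activation_type(ActivationType activation_type) {
  int64_t swi = static_cast<int64_t>(activation_type);  // initialize at declaration
  attrs["activation_type"] = swi;
}

// Read it back by converting the stored integer to the enum.
ActivationType get_activation_type() {
  return static_cast<ActivationType>(attrs["activation_type"]);
}

int main() {
  set_activation_type(ActivationType::RELU);
  std::cout << "round trip ok: "
            << (get_activation_type() == ActivationType::RELU) << std::endl;
  return 0;
}
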
SubFusion::get_activation_type() const { diff --git a/mindspore/core/ops/fusion/tile_fusion.cc b/mindspore/core/ops/fusion/tile_fusion.cc index 5789dc07e67..02c71e06042 100644 --- a/mindspore/core/ops/fusion/tile_fusion.cc +++ b/mindspore/core/ops/fusion/tile_fusion.cc @@ -22,7 +22,7 @@ namespace mindspore { namespace ops { void TileFusion::Init(const std::vector &dims) { this->set_dims(dims); } -void TileFusion::set_dims(const std::vector &dims) { this->AddAttr(kDims, MakeValue(dims)); } +void TileFusion::set_dims(const std::vector &dims) { (void)this->AddAttr(kDims, MakeValue(dims)); } std::vector TileFusion::get_dims() const { auto value_ptr = GetAttr(kDims); diff --git a/mindspore/core/ops/fusion/topk_fusion.cc b/mindspore/core/ops/fusion/topk_fusion.cc index 933c00a4890..16b0bed679f 100644 --- a/mindspore/core/ops/fusion/topk_fusion.cc +++ b/mindspore/core/ops/fusion/topk_fusion.cc @@ -26,9 +26,9 @@ void TopKFusion::Init(const bool sorted, const int64_t axis, const int64_t large this->set_sorted(sorted); } -void TopKFusion::set_axis(const int64_t axis) { this->AddAttr(kAxis, MakeValue(axis)); } +void TopKFusion::set_axis(const int64_t axis) { (void)this->AddAttr(kAxis, MakeValue(axis)); } -void TopKFusion::set_largest(const int64_t largest) { this->AddAttr(kLargest, MakeValue(largest)); } +void TopKFusion::set_largest(const int64_t largest) { (void)this->AddAttr(kLargest, MakeValue(largest)); } int64_t TopKFusion::get_axis() const { auto value_ptr = GetAttr(kAxis); diff --git a/mindspore/core/ops/gather_nd.cc b/mindspore/core/ops/gather_nd.cc index aed66cb04c9..528fb708595 100644 --- a/mindspore/core/ops/gather_nd.cc +++ b/mindspore/core/ops/gather_nd.cc @@ -28,7 +28,7 @@ namespace { abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { MS_EXCEPTION_IF_NULL(primitive); auto prim_name = primitive->name(); - CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, 2, prim_name); + (void)CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, 2, prim_name); for (const auto &item : input_args) { MS_EXCEPTION_IF_NULL(item); } @@ -36,13 +36,13 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vectorBuildShape())[kShape]; auto input_rank = input_shape.size(); auto indices_rank = indices_shape.size(); - CheckAndConvertUtils::CheckInteger("Input of indices data", input_rank, kGreaterEqual, - indices_shape[indices_rank - 1], prim_name); + (void)CheckAndConvertUtils::CheckInteger("Input of indices data", SizeToLong(input_rank), kGreaterEqual, + indices_shape[indices_rank - 1], prim_name); std::vector output_shape; for (size_t i = 0; i < indices_rank - 1; i++) { output_shape.push_back(indices_shape[i]); } - for (size_t i = indices_shape[indices_rank - 1]; i < input_rank; ++i) { + for (size_t i = LongToSize(indices_shape[indices_rank - 1]); i < input_rank; ++i) { output_shape.push_back(input_shape[i]); } return std::make_shared(output_shape); diff --git a/mindspore/core/ops/gelu.cc b/mindspore/core/ops/gelu.cc index 2d6426a959c..27360a6f83a 100644 --- a/mindspore/core/ops/gelu.cc +++ b/mindspore/core/ops/gelu.cc @@ -30,7 +30,7 @@ namespace { abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { MS_EXCEPTION_IF_NULL(primitive); auto op_name = primitive->name(); - CheckAndConvertUtils::CheckInteger("gelu infer", input_args.size(), kEqual, 1, op_name); + (void)CheckAndConvertUtils::CheckInteger("gelu infer", 
SizeToLong(input_args.size()), kEqual, 1, op_name); MS_EXCEPTION_IF_NULL(input_args[0]); auto shape_map = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape()); auto in_shape = shape_map[kShape]; @@ -44,7 +44,7 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { MS_EXCEPTION_IF_NULL(prim); auto op_name = prim->name(); - CheckAndConvertUtils::CheckInteger("gelu infer", input_args.size(), kEqual, 1, op_name); + (void)CheckAndConvertUtils::CheckInteger("gelu infer", SizeToLong(input_args.size()), kEqual, 1, op_name); std::map types; const std::set valid_types = {kFloat16, kFloat32}; MS_EXCEPTION_IF_NULL(input_args[0]); diff --git a/mindspore/core/ops/grad/activation_grad.cc b/mindspore/core/ops/grad/activation_grad.cc index afc2638b1d9..67752c2d871 100644 --- a/mindspore/core/ops/grad/activation_grad.cc +++ b/mindspore/core/ops/grad/activation_grad.cc @@ -33,7 +33,7 @@ void ActivationGrad::Init(const ActivationType &type, const float alpha) { void ActivationGrad::set_activation_type(const ActivationType &type) { int64_t swi = type; - this->AddAttr(kActivationType, MakeValue(swi)); + (void)this->AddAttr(kActivationType, MakeValue(swi)); } ActivationType ActivationGrad::get_activation_type() const { @@ -41,7 +41,7 @@ ActivationType ActivationGrad::get_activation_type() const { return ActivationType(GetValue(value_ptr)); } -void ActivationGrad::set_alpha(const float alpha) { this->AddAttr(kAlpha, MakeValue(alpha)); } +void ActivationGrad::set_alpha(const float alpha) { (void)this->AddAttr(kAlpha, MakeValue(alpha)); } float ActivationGrad::get_alpha() const { auto value_ptr = GetAttr(kAlpha); diff --git a/mindspore/core/ops/grad/batch_norm_grad.cc b/mindspore/core/ops/grad/batch_norm_grad.cc index b2db2ef5bba..34bf4b0741e 100644 --- a/mindspore/core/ops/grad/batch_norm_grad.cc +++ b/mindspore/core/ops/grad/batch_norm_grad.cc @@ -27,10 +27,7 @@ void BatchNormGrad::Init(const bool is_training, const float epsilon) { this->set_epsilon(epsilon); } -void BatchNormGrad::set_epsilon(const float epsilon) { - // CheckAndConvertUtils::CheckInRange(kEpsilon, epsilon, kIncludeRight, {0, 1}, this->name()); - this->AddAttr(kEpsilon, MakeValue(epsilon)); -} +void BatchNormGrad::set_epsilon(const float epsilon) { (void)this->AddAttr(kEpsilon, MakeValue(epsilon)); } float BatchNormGrad::get_epsilon() const { auto value_ptr = this->GetAttr(kEpsilon); diff --git a/mindspore/core/ops/grad/bias_add_grad.cc b/mindspore/core/ops/grad/bias_add_grad.cc index 165c7e30b9c..5b419b696d8 100644 --- a/mindspore/core/ops/grad/bias_add_grad.cc +++ b/mindspore/core/ops/grad/bias_add_grad.cc @@ -40,7 +40,7 @@ std::vector GetFormatShape(const int64_t &format, const std::vector &input_args) { MS_EXCEPTION_IF_NULL(primitive); auto prim_name = primitive->name(); - CheckAndConvertUtils::CheckInteger("input numbers", input_args.size(), kEqual, 1, prim_name); + (void)CheckAndConvertUtils::CheckInteger("input numbers", SizeToLong(input_args.size()), kEqual, 1, prim_name); for (const auto &item : input_args) { MS_EXCEPTION_IF_NULL(item); } @@ -60,7 +60,7 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { MS_EXCEPTION_IF_NULL(prim); auto prim_name = prim->name(); - CheckAndConvertUtils::CheckInteger("BiasAddGrad infer", input_args.size(), kEqual, 1, prim_name); + (void)CheckAndConvertUtils::CheckInteger("BiasAddGrad infer", SizeToLong(input_args.size()), kEqual, 1, prim_name); MS_EXCEPTION_IF_NULL(input_args[0]); auto x_type_map 
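
Alongside the (void) casts, the gelu.cc and bias_add_grad.cc hunks pass SizeToLong(input_args.size()) instead of the raw size_t, so the narrowing to the int64_t parameter is explicit at the call site. A hedged standalone sketch; both helpers below are simplified stand-ins shaped like the originals, not MindSpore's code:

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <limits>
#include <stdexcept>
#include <string>
#include <vector>

// Stand-in for SizeToLong: explicit, checked narrowing from size_t to int64_t.
int64_t SizeToLong(size_t v) {
  if (v > static_cast<size_t>(std::numeric_limits<int64_t>::max())) {
    throw std::out_of_range("size_t value does not fit in int64_t");
  }
  return static_cast<int64_t>(v);
}

// Mimics the shape of CheckAndConvertUtils::CheckInteger: validate, then return the value.
int64_t CheckInteger(const std::string &name, int64_t value, int64_t expected,
                     const std::string &op) {
  if (value != expected) {
    throw std::invalid_argument(op + ": " + name + " must equal " + std::to_string(expected));
  }
  return value;
}

int main() {
  std::vector<int> input_args = {0};
  (void)CheckInteger("input number", SizeToLong(input_args.size()), 1, "GeLU");
  std::cout << "arity check passed" << std::endl;
  return 0;
}
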
= input_args[0]->BuildType(); MS_EXCEPTION_IF_NULL(x_type_map); diff --git a/mindspore/core/ops/grad/binary_cross_entropy_grad.cc b/mindspore/core/ops/grad/binary_cross_entropy_grad.cc index 5ff9c8b95d9..c2384cccd17 100644 --- a/mindspore/core/ops/grad/binary_cross_entropy_grad.cc +++ b/mindspore/core/ops/grad/binary_cross_entropy_grad.cc @@ -30,9 +30,9 @@ abstract::ShapePtr BinaryCrossEntroyGradInferShape(const PrimitivePtr &primitive auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape]; auto y_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[1]->BuildShape())[kShape]; auto weight_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[2]->BuildShape())[kShape]; - CheckAndConvertUtils::Check("x shape", x_shape, kEqual, "y shape", y_shape, prim_name); + (void)CheckAndConvertUtils::Check("x shape", x_shape, kEqual, "y shape", y_shape, prim_name); if (weight_shape.size() < 1) { - CheckAndConvertUtils::Check("y shape", y_shape, kEqual, "weight shape", weight_shape, prim_name); + (void)CheckAndConvertUtils::Check("y shape", y_shape, kEqual, "weight shape", weight_shape, prim_name); } return std::make_shared(x_shape); } @@ -58,7 +58,7 @@ void BinaryCrossEntropyGrad::Init(const Reduction &reduction) { set_reduction(re void BinaryCrossEntropyGrad::set_reduction(const Reduction &reduction) { int64_t swi = reduction; - this->AddAttr(kReduction, MakeValue(swi)); + (void)this->AddAttr(kReduction, MakeValue(swi)); } Reduction BinaryCrossEntropyGrad::get_reduction() const { auto value_ptr = GetAttr(kReduction); diff --git a/mindspore/core/ops/grad/bn_grad.cc b/mindspore/core/ops/grad/bn_grad.cc index 6078551111c..14542c5f850 100644 --- a/mindspore/core/ops/grad/bn_grad.cc +++ b/mindspore/core/ops/grad/bn_grad.cc @@ -26,14 +26,14 @@ void BNGrad::Init(const float eps, const float momentum) { this->set_momentum(momentum); } -void BNGrad::set_eps(const float eps) { this->AddAttr(kEps, MakeValue(eps)); } +void BNGrad::set_eps(const float eps) { (void)this->AddAttr(kEps, MakeValue(eps)); } float BNGrad::get_eps() const { auto value_ptr = this->GetAttr(kEps); return GetValue(value_ptr); } -void BNGrad::set_momentum(const float momentum) { this->AddAttr(kMomentum, MakeValue(momentum)); } +void BNGrad::set_momentum(const float momentum) { (void)this->AddAttr(kMomentum, MakeValue(momentum)); } float BNGrad::get_momentum() const { auto value_ptr = this->GetAttr(kMomentum); diff --git a/mindspore/core/ops/grad/conv2d_backprop_filter.cc b/mindspore/core/ops/grad/conv2d_backprop_filter.cc index 1a859a11973..428581d51bb 100644 --- a/mindspore/core/ops/grad/conv2d_backprop_filter.cc +++ b/mindspore/core/ops/grad/conv2d_backprop_filter.cc @@ -66,7 +66,7 @@ void Conv2DBackpropFilter::Init(const int64_t out_channel, const std::vectorAddAttr(kOutChannel, MakeValue(out_channel)); + (void)this->AddAttr(kOutChannel, MakeValue(out_channel)); } int64_t Conv2DBackpropFilter::get_out_channel() const { @@ -75,7 +75,7 @@ int64_t Conv2DBackpropFilter::get_out_channel() const { } void Conv2DBackpropFilter::set_kernel_size(const std::vector &kernel_size) { - this->AddAttr(kKernelSize, MakeValue(kernel_size)); + (void)this->AddAttr(kKernelSize, MakeValue(kernel_size)); } std::vector Conv2DBackpropFilter::get_kernel_size() const { @@ -85,7 +85,7 @@ std::vector Conv2DBackpropFilter::get_kernel_size() const { void Conv2DBackpropFilter::set_pad_mode(const PadMode &pad_mode) { int64_t swi = pad_mode; - this->AddAttr(kPadMode, MakeValue(swi)); + 
(void)this->AddAttr(kPadMode, MakeValue(swi)); } PadMode Conv2DBackpropFilter::get_pad_mode() const { @@ -94,7 +94,7 @@ PadMode Conv2DBackpropFilter::get_pad_mode() const { } void Conv2DBackpropFilter::set_pad_list(const std::vector &pad_list) { - this->AddAttr(kPadList, MakeValue(pad_list)); + (void)this->AddAttr(kPadList, MakeValue(pad_list)); } std::vector Conv2DBackpropFilter::get_pad_list() const { @@ -102,7 +102,7 @@ std::vector Conv2DBackpropFilter::get_pad_list() const { return GetValue>(value_ptr); } -void Conv2DBackpropFilter::set_mode(const int64_t mode) { this->AddAttr(kMode, MakeValue(mode)); } +void Conv2DBackpropFilter::set_mode(const int64_t mode) { (void)this->AddAttr(kMode, MakeValue(mode)); } int64_t Conv2DBackpropFilter::get_mode() const { auto value_ptr = GetAttr(kMode); @@ -125,7 +125,7 @@ std::vector Conv2DBackpropFilter::get_dilation() const { return GetValue>(value_ptr); } -void Conv2DBackpropFilter::set_group(const int64_t group) { this->AddAttr(kGroup, MakeValue(group)); } +void Conv2DBackpropFilter::set_group(const int64_t group) { (void)this->AddAttr(kGroup, MakeValue(group)); } int64_t Conv2DBackpropFilter::get_group() const { auto value_ptr = GetAttr(kGroup); @@ -147,7 +147,7 @@ AbstractBasePtr Conv2DBackpropFilterInfer(const abstract::AnalysisEnginePtr &, c MS_EXCEPTION_IF_NULL(primitive); auto prim_name = primitive->name(); // check - CheckAndConvertUtils::CheckInteger("input size", input_args.size(), kGreaterEqual, 3, prim_name); + (void)CheckAndConvertUtils::CheckInteger("input size", SizeToLong(input_args.size()), kGreaterEqual, 3, prim_name); for (const auto &item : input_args) { MS_EXCEPTION_IF_NULL(item); } diff --git a/mindspore/core/ops/grad/conv2d_backprop_input.cc b/mindspore/core/ops/grad/conv2d_backprop_input.cc index e6934a53495..bc3b0a0d29d 100644 --- a/mindspore/core/ops/grad/conv2d_backprop_input.cc +++ b/mindspore/core/ops/grad/conv2d_backprop_input.cc @@ -37,7 +37,7 @@ void SetPadList(const PrimitivePtr &primitive, const std::vector &dout_ // default pad mode is valid auto attr_pad_list_prt = primitive->GetAttr(kPadList); int64_t pad_mode; - CheckAndConvertUtils::GetPadModEnumValue(primitive->GetAttr(kPadMode), &pad_mode, true); + (void)CheckAndConvertUtils::GetPadModEnumValue(primitive->GetAttr(kPadMode), &pad_mode, true); ShapeVector pad_list = {0, 0, 0, 0}; if (!attr_pad_list_prt->isa()) { pad_list = GetValue(attr_pad_list_prt); @@ -60,7 +60,7 @@ void SetPadList(const PrimitivePtr &primitive, const std::vector &dout_ } else if (pad_mode == PAD) { pad_list = GetValue>(primitive->GetAttr(kPad)); } - primitive->AddAttr(kPadList, MakeValue(pad_list)); + (void)primitive->AddAttr(kPadList, MakeValue(pad_list)); } abstract::ShapePtr Conv2DBackpropInputInferShape(const PrimitivePtr &primitive, @@ -93,7 +93,7 @@ AbstractBasePtr Conv2DBackpropInputInfer(const abstract::AnalysisEnginePtr &, co MS_EXCEPTION_IF_NULL(primitive); auto prim_name = primitive->name(); // check - CheckAndConvertUtils::CheckInteger("input size", input_args.size(), kGreaterEqual, 3, prim_name); + (void)CheckAndConvertUtils::CheckInteger("input size", input_args.size(), kGreaterEqual, 3, prim_name); for (const auto &item : input_args) { MS_EXCEPTION_IF_NULL(item); } @@ -119,55 +119,55 @@ void Conv2DBackpropInput::Init(int64_t out_channel, const std::vector & } void Conv2DBackpropInput::set_out_channel(int64_t out_channel) { - AddAttr(kOutChannel, - MakeValue(CheckAndConvertUtils::CheckInteger(kOutChannel, out_channel, kGreaterThan, 0, name()))); + (void)AddAttr(kOutChannel, 
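
The Conv2DBackpropInput setters just below validate inside the attribute write, e.g. AddAttr(kOutChannel, MakeValue(CheckInteger(...))): this composes because the checker returns the value it validated. A minimal sketch of that validate-then-store pattern, using simplified stand-ins rather than the real CheckAndConvertUtils API:

#include <cstdint>
#include <iostream>
#include <map>
#include <stdexcept>
#include <string>

std::map<std::string, int64_t> attrs;

// The checker hands the validated value back, so it can sit inline in the store.
int64_t CheckGreaterThan(const std::string &name, int64_t value, int64_t bound) {
  if (value <= bound) {
    throw std::invalid_argument(name + " must be > " + std::to_string(bound));
  }
  return value;
}

void AddAttr(const std::string &name, int64_t value) { attrs[name] = value; }

void set_out_channel(int64_t out_channel) {
  // Validation and storage in one expression, mirroring the patched setter.
  AddAttr("out_channel", CheckGreaterThan("out_channel", out_channel, 0));
}

int main() {
  set_out_channel(64);
  std::cout << "out_channel = " << attrs["out_channel"] << std::endl;
  return 0;
}
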
+ MakeValue(CheckAndConvertUtils::CheckInteger(kOutChannel, out_channel, kGreaterThan, 0, name()))); } void Conv2DBackpropInput::set_kernel_size(const std::vector &kernel_size) { - AddAttr(kKernelSize, MakeValue(CheckAndConvertUtils::CheckPositiveVector(kKernelSize, kernel_size, name()))); + (void)AddAttr(kKernelSize, MakeValue(CheckAndConvertUtils::CheckPositiveVector(kKernelSize, kernel_size, name()))); } void Conv2DBackpropInput::set_stride(const std::vector &stride) { - AddAttr(kStride, MakeValue(CheckAndConvertUtils::CheckPositiveVector(kStride, stride, name()))); + (void)AddAttr(kStride, MakeValue(CheckAndConvertUtils::CheckPositiveVector(kStride, stride, name()))); } void Conv2DBackpropInput::set_dilation(const std::vector &dilation) { - AddAttr(kDilation, MakeValue(CheckAndConvertUtils::CheckPositiveVector(kDilation, dilation, name()))); + (void)AddAttr(kDilation, MakeValue(CheckAndConvertUtils::CheckPositiveVector(kDilation, dilation, name()))); } void Conv2DBackpropInput::set_pad_mode(const PadMode &pad_mode) { std::vector pad = get_pad(); if (pad_mode == PAD) { for (auto item : pad) { - CheckAndConvertUtils::Check(kPadItem, item, kGreaterEqual, "zeros_list", 0, name()); + (void)CheckAndConvertUtils::Check(kPadItem, item, kGreaterEqual, "zeros_list", 0, name()); } } else { - CheckAndConvertUtils::Check(kPad, pad, kEqual, "zeros_list", {0, 0, 0, 0}, name()); + (void)CheckAndConvertUtils::Check(kPad, pad, kEqual, "zeros_list", {0, 0, 0, 0}, name()); } int64_t swi = pad_mode; - AddAttr(kPadMode, MakeValue(swi)); + (void)AddAttr(kPadMode, MakeValue(swi)); } void Conv2DBackpropInput::set_pad(const std::vector &pad) { - CheckAndConvertUtils::CheckInteger("pad_size", SizeToLong(pad.size()), kEqual, 4, name()); - AddAttr(kPad, MakeValue(CheckAndConvertUtils::CheckPositiveVector(kPad, pad, name()))); + (void)CheckAndConvertUtils::CheckInteger("pad_size", SizeToLong(pad.size()), kEqual, 4, name()); + (void)AddAttr(kPad, MakeValue(CheckAndConvertUtils::CheckPositiveVector(kPad, pad, name()))); } void Conv2DBackpropInput::set_mode(int64_t mode) { - AddAttr(kMode, MakeValue(CheckAndConvertUtils::CheckInteger(kMode, mode, kEqual, 1, name()))); + (void)AddAttr(kMode, MakeValue(CheckAndConvertUtils::CheckInteger(kMode, mode, kEqual, 1, name()))); } void Conv2DBackpropInput::set_group(int64_t group) { - AddAttr(kGroup, MakeValue(CheckAndConvertUtils::CheckInteger(kGroup, group, kGreaterThan, 0, name()))); + (void)AddAttr(kGroup, MakeValue(CheckAndConvertUtils::CheckInteger(kGroup, group, kGreaterThan, 0, name()))); } void Conv2DBackpropInput::set_format(const Format &format) { int64_t f = format; - AddAttr(kFormat, MakeValue(f)); + (void)AddAttr(kFormat, MakeValue(f)); } void Conv2DBackpropInput::set_pad_list(const std::vector &pad_list) { - this->AddAttr(kPadList, MakeValue(pad_list)); + (void)this->AddAttr(kPadList, MakeValue(pad_list)); } int64_t Conv2DBackpropInput::get_out_channel() const { diff --git a/mindspore/core/ops/grad/de_conv2d_grad_filter.cc b/mindspore/core/ops/grad/de_conv2d_grad_filter.cc index 15a77fe12bb..bccee4719f3 100644 --- a/mindspore/core/ops/grad/de_conv2d_grad_filter.cc +++ b/mindspore/core/ops/grad/de_conv2d_grad_filter.cc @@ -39,7 +39,9 @@ void DeConv2DGradFilter::Init(const int64_t in_channel, const int64_t out_channe set_has_bias(has_bias); } -void DeConv2DGradFilter::set_in_channel(const int64_t in_channel) { this->AddAttr(kInChannel, MakeValue(in_channel)); } +void DeConv2DGradFilter::set_in_channel(const int64_t in_channel) { + (void)this->AddAttr(kInChannel, 
MakeValue(in_channel));
+}
 
 int64_t DeConv2DGradFilter::get_in_channel() const {
   auto value_ptr = GetAttr(kInChannel);
@@ -47,7 +49,7 @@ int64_t DeConv2DGradFilter::get_in_channel() const {
 }
 
 void DeConv2DGradFilter::set_out_channel(const int64_t out_channel) {
-  this->AddAttr(kOutChannel, MakeValue(out_channel));
+  (void)this->AddAttr(kOutChannel, MakeValue(out_channel));
 }
 
 int64_t DeConv2DGradFilter::get_out_channel() const {
@@ -56,7 +58,7 @@ int64_t DeConv2DGradFilter::get_out_channel() const {
 }
 
 void DeConv2DGradFilter::set_kernel_size(const std::vector<int64_t> &kernel_size) {
-  this->AddAttr(kKernelSize, MakeValue(kernel_size));
+  (void)this->AddAttr(kKernelSize, MakeValue(kernel_size));
 }
 
 std::vector<int64_t> DeConv2DGradFilter::get_kernel_size() const {
@@ -66,7 +68,7 @@ std::vector<int64_t> DeConv2DGradFilter::get_kernel_size() const {
 
 void DeConv2DGradFilter::set_pad_mode(const PadMode &pad_mode) {
   int64_t swi = pad_mode;
-  this->AddAttr(kPadMode, MakeValue(swi));
+  (void)this->AddAttr(kPadMode, MakeValue(swi));
 }
 
 PadMode DeConv2DGradFilter::get_pad_mode() const {
@@ -75,7 +77,7 @@ PadMode DeConv2DGradFilter::get_pad_mode() const {
 }
 
 void DeConv2DGradFilter::set_pad_list(const std::vector<int64_t> &pad_list) {
-  this->AddAttr(kPadList, MakeValue(pad_list));
+  (void)this->AddAttr(kPadList, MakeValue(pad_list));
 }
 
 std::vector<int64_t> DeConv2DGradFilter::get_pad_list() const {
@@ -83,7 +85,9 @@ std::vector<int64_t> DeConv2DGradFilter::get_pad_list() const {
   return GetValue<std::vector<int64_t>>(value_ptr);
 }
 
-void DeConv2DGradFilter::set_stride(const std::vector<int64_t> &stride) { this->AddAttr(kStride, MakeValue(stride)); }
+void DeConv2DGradFilter::set_stride(const std::vector<int64_t> &stride) {
+  (void)this->AddAttr(kStride, MakeValue(stride));
+}
 
 std::vector<int64_t> DeConv2DGradFilter::get_stride() const {
   auto value_ptr = GetAttr(kStride);
@@ -91,7 +95,7 @@ std::vector<int64_t> DeConv2DGradFilter::get_stride() const {
 }
 
 void DeConv2DGradFilter::set_dilation(const std::vector<int64_t> &dilation) {
-  this->AddAttr(kDilation, MakeValue(dilation));
+  (void)this->AddAttr(kDilation, MakeValue(dilation));
 }
 
 std::vector<int64_t> DeConv2DGradFilter::get_dilation() const {
@@ -99,7 +103,7 @@ std::vector<int64_t> DeConv2DGradFilter::get_dilation() const {
   return GetValue<std::vector<int64_t>>(value_ptr);
 }
 
-void DeConv2DGradFilter::set_group(const int64_t group) { this->AddAttr(kGroup, MakeValue(group)); }
+void DeConv2DGradFilter::set_group(const int64_t group) { (void)this->AddAttr(kGroup, MakeValue(group)); }
 
 int64_t DeConv2DGradFilter::get_group() const {
   auto value_ptr = GetAttr(kGroup);
@@ -108,7 +112,7 @@ int64_t DeConv2DGradFilter::get_group() const {
 
 void DeConv2DGradFilter::set_format(const Format &format) {
   int64_t swi = format;
-  this->AddAttr(kFormat, MakeValue(swi));
+  (void)this->AddAttr(kFormat, MakeValue(swi));
 }
 
 Format DeConv2DGradFilter::get_format() const {
@@ -118,7 +122,7 @@ Format DeConv2DGradFilter::get_format() const {
 
 void DeConv2DGradFilter::set_activation_type(const ActivationType &activation_type) {
   int64_t swi = activation_type;
-  this->AddAttr(kActivationType, MakeValue(swi));
+  (void)this->AddAttr(kActivationType, MakeValue(swi));
 }
 
 ActivationType DeConv2DGradFilter::get_activation_type() const {
@@ -126,7 +130,7 @@ ActivationType DeConv2DGradFilter::get_activation_type() const {
   return ActivationType(GetValue<int64_t>(value_ptr));
 }
 
-void DeConv2DGradFilter::set_has_bias(const bool has_bias) { this->AddAttr(kHasBias, MakeValue(has_bias)); }
+void DeConv2DGradFilter::set_has_bias(const bool has_bias) { (void)this->AddAttr(kHasBias, MakeValue(has_bias)); }
 
 bool DeConv2DGradFilter::get_has_bias() const {
   auto value_ptr
= GetAttr(kHasBias); diff --git a/mindspore/core/ops/grad/dropout_grad.cc b/mindspore/core/ops/grad/dropout_grad.cc index df2307d7ecc..f9ebc951d4b 100644 --- a/mindspore/core/ops/grad/dropout_grad.cc +++ b/mindspore/core/ops/grad/dropout_grad.cc @@ -23,7 +23,7 @@ void DropoutGrad::Init(const float keep_prob) { this->set_keep_prob(keep_prob); void DropoutGrad::set_keep_prob(const float keep_prob) { CheckAndConvertUtils::CheckInRange(kKeepProb, keep_prob, kIncludeRight, {0.0, 1.0}, this->name()); - this->AddAttr(kKeepProb, MakeValue(keep_prob)); + (void)this->AddAttr(kKeepProb, MakeValue(keep_prob)); } float DropoutGrad::get_keep_prob() const { @@ -44,8 +44,8 @@ TypePtr DropoutGradInferType(const PrimitivePtr &prim, const std::vectorname(); auto mask_dtype = input_args[1]->BuildType(); auto dy_dtype = input_args[0]->BuildType(); - CheckAndConvertUtils::CheckTensorTypeValid("mask", mask_dtype, {kTensorType}, op_name); - CheckAndConvertUtils::CheckTensorTypeValid("dy", dy_dtype, {kFloat16, kFloat32}, op_name); + (void)CheckAndConvertUtils::CheckTensorTypeValid("mask", mask_dtype, {kTensorType}, op_name); + (void)CheckAndConvertUtils::CheckTensorTypeValid("dy", dy_dtype, {kFloat16, kFloat32}, op_name); auto tensor_type = dy_dtype->cast(); MS_EXCEPTION_IF_NULL(tensor_type); auto data_type = tensor_type->element(); diff --git a/mindspore/core/ops/grad/flatten_grad.cc b/mindspore/core/ops/grad/flatten_grad.cc index 1e172b5bfe1..33735814a2a 100644 --- a/mindspore/core/ops/grad/flatten_grad.cc +++ b/mindspore/core/ops/grad/flatten_grad.cc @@ -22,7 +22,7 @@ AbstractBasePtr FlattenGradInfer(const abstract::AnalysisEnginePtr &, const Prim const std::vector &input_args) { MS_EXCEPTION_IF_NULL(primitive); auto prim_name = primitive->name(); - CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, 2, prim_name); + (void)CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, 2, prim_name); for (const auto &item : input_args) { MS_EXCEPTION_IF_NULL(item); } diff --git a/mindspore/core/ops/grad/group_conv2d_grad_input.cc b/mindspore/core/ops/grad/group_conv2d_grad_input.cc index f74924d01fe..7bf56c2db05 100644 --- a/mindspore/core/ops/grad/group_conv2d_grad_input.cc +++ b/mindspore/core/ops/grad/group_conv2d_grad_input.cc @@ -102,7 +102,7 @@ std::vector GroupConv2DGradInput::get_dilation() const { return GetValue>(value_ptr); } -void GroupConv2DGradInput::set_group(const int64_t &group) { this->AddAttr(kGroup, MakeValue(group)); } +void GroupConv2DGradInput::set_group(const int64_t &group) { (void)this->AddAttr(kGroup, MakeValue(group)); } int64_t GroupConv2DGradInput::get_group() const { auto value_ptr = GetAttr(kGroup); @@ -137,7 +137,7 @@ ActivationType GroupConv2DGradInput::get_activation_type() const { return ActivationType(GetValue(value_ptr)); } -void GroupConv2DGradInput::set_has_bias(const bool has_bias) { this->AddAttr(kHasBias, MakeValue(has_bias)); } +void GroupConv2DGradInput::set_has_bias(const bool has_bias) { (void)this->AddAttr(kHasBias, MakeValue(has_bias)); } bool GroupConv2DGradInput::get_has_bias() const { auto value_ptr = GetAttr(kHasBias); @@ -147,7 +147,8 @@ AbstractBasePtr GroupConv2DGradInputInfer(const abstract::AnalysisEnginePtr &, c const std::vector &input_args) { MS_EXCEPTION_IF_NULL(primitive); auto prim_name = primitive->name(); - CheckAndConvertUtils::CheckInteger("group_conv_2D_infer", input_args.size(), kGreaterEqual, 2, prim_name); + (void)CheckAndConvertUtils::CheckInteger("group_conv_2D_infer", 
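
DropoutGrad::set_keep_prob in the hunk above validates with CheckInRange(kKeepProb, keep_prob, kIncludeRight, {0.0, 1.0}, ...) before storing, i.e. the probability must lie in the half-open interval (0, 1]. A standalone sketch of that check; the helper name and interval handling are simplified assumptions, not the real utility:

#include <iostream>
#include <stdexcept>
#include <string>

// Half-open interval (lo, hi]: the right endpoint is included, the left is not,
// matching the kIncludeRight convention used by the setter.
void CheckInRangeIncludeRight(const std::string &name, float value, float lo, float hi) {
  if (!(value > lo && value <= hi)) {
    throw std::invalid_argument(name + " must be in (" + std::to_string(lo) + ", " +
                                std::to_string(hi) + "]");
  }
}

int main() {
  float keep_prob = 0.8f;
  CheckInRangeIncludeRight("keep_prob", keep_prob, 0.0f, 1.0f);
  std::cout << "keep_prob accepted: " << keep_prob << std::endl;
  return 0;
}
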
SizeToLong(input_args.size()), kGreaterEqual, 2, + prim_name); MS_EXCEPTION_IF_NULL(input_args[0]); // Infer shape diff --git a/mindspore/core/ops/grad/layer_norm_grad.cc b/mindspore/core/ops/grad/layer_norm_grad.cc index a5699e95530..382727bb44d 100644 --- a/mindspore/core/ops/grad/layer_norm_grad.cc +++ b/mindspore/core/ops/grad/layer_norm_grad.cc @@ -26,7 +26,7 @@ AbstractBasePtr LayerNormGradInfer(const abstract::AnalysisEnginePtr &, const Pr // Outputs: x_backprob, gamma_backprob, beta_backprob MS_EXCEPTION_IF_NULL(primitive); auto op_name = primitive->name(); - CheckAndConvertUtils::CheckInteger("input numbers", input_args.size(), kEqual, 5, op_name); + (void)CheckAndConvertUtils::CheckInteger("input numbers", SizeToLong(input_args.size()), kEqual, 5, op_name); auto x_backprob = input_args[0]->Broaden(); auto gamma_backprob = input_args[4]->Broaden(); auto beta_backprob = input_args[4]->Broaden(); @@ -41,10 +41,10 @@ void LayerNormGrad::Init(const int64_t begin_norm_axis, const int64_t begin_para this->set_begin_params_axis(begin_params_axis); } void LayerNormGrad::set_begin_norm_axis(const int64_t begin_norm_axis) { - this->AddAttr(kBeginNormAxis, MakeValue(begin_norm_axis)); + (void)this->AddAttr(kBeginNormAxis, MakeValue(begin_norm_axis)); } void LayerNormGrad::set_begin_params_axis(const int64_t begin_params_axis) { - this->AddAttr(kBeginParamsAxis, MakeValue(begin_params_axis)); + (void)this->AddAttr(kBeginParamsAxis, MakeValue(begin_params_axis)); } int64_t LayerNormGrad::get_begin_norm_axis() const { auto value_ptr = this->GetAttr(kBeginNormAxis); diff --git a/mindspore/core/ops/grad/maximum_grad.cc b/mindspore/core/ops/grad/maximum_grad.cc index eb060a98730..6d700a4520d 100644 --- a/mindspore/core/ops/grad/maximum_grad.cc +++ b/mindspore/core/ops/grad/maximum_grad.cc @@ -24,9 +24,9 @@ void MaximumGrad::Init(const bool grad_x, const bool grad_y) { set_grad_y(grad_y); } -void MaximumGrad::set_grad_x(const bool grad_x) { this->AddAttr(kGradX, MakeValue(grad_x)); } +void MaximumGrad::set_grad_x(const bool grad_x) { (void)this->AddAttr(kGradX, MakeValue(grad_x)); } -void MaximumGrad::set_grad_y(const bool grad_y) { this->AddAttr(kGradY, MakeValue(grad_y)); } +void MaximumGrad::set_grad_y(const bool grad_y) { (void)this->AddAttr(kGradY, MakeValue(grad_y)); } bool MaximumGrad::get_grad_x() const { auto value_ptr = GetAttr(kGradX); diff --git a/mindspore/core/ops/grad/minimum_grad.cc b/mindspore/core/ops/grad/minimum_grad.cc index 5a1cd154ec4..87de1a6683a 100644 --- a/mindspore/core/ops/grad/minimum_grad.cc +++ b/mindspore/core/ops/grad/minimum_grad.cc @@ -24,9 +24,9 @@ void MinimumGrad::Init(const bool grad_x, const bool grad_y) { set_grad_y(grad_y); } -void MinimumGrad::set_grad_x(const bool grad_x) { this->AddAttr(kGradX, MakeValue(grad_x)); } +void MinimumGrad::set_grad_x(const bool grad_x) { (void)this->AddAttr(kGradX, MakeValue(grad_x)); } -void MinimumGrad::set_grad_y(const bool grad_y) { this->AddAttr(kGradY, MakeValue(grad_y)); } +void MinimumGrad::set_grad_y(const bool grad_y) { (void)this->AddAttr(kGradY, MakeValue(grad_y)); } bool MinimumGrad::get_grad_x() const { auto value_ptr = GetAttr(kGradX); diff --git a/mindspore/core/ops/grad/pooling_grad.cc b/mindspore/core/ops/grad/pooling_grad.cc index 2e9c12e2cec..a9664ca16fc 100644 --- a/mindspore/core/ops/grad/pooling_grad.cc +++ b/mindspore/core/ops/grad/pooling_grad.cc @@ -43,14 +43,14 @@ PoolMode PoolingGrad::get_pool_mode() const { return PoolMode(GetValue(value_ptr)); } -void PoolingGrad::set_window(const std::vector 
&window) { this->AddAttr(kWindow, MakeValue(window)); } +void PoolingGrad::set_window(const std::vector &window) { (void)this->AddAttr(kWindow, MakeValue(window)); } std::vector PoolingGrad::get_window() const { auto value_ptr = GetAttr(kWindow); return GetValue>(value_ptr); } -void PoolingGrad::set_stride(const std::vector &stride) { this->AddAttr(kStride, MakeValue(stride)); } +void PoolingGrad::set_stride(const std::vector &stride) { (void)this->AddAttr(kStride, MakeValue(stride)); } std::vector PoolingGrad::get_stride() const { auto value_ptr = GetAttr(kStride); @@ -94,7 +94,7 @@ Format PoolingGrad::get_format() const { return Format(GetValue(value_ptr)); } -void PoolingGrad::set_global(const bool global) { this->AddAttr(kGlobal, MakeValue(global)); } +void PoolingGrad::set_global(const bool global) { (void)this->AddAttr(kGlobal, MakeValue(global)); } bool PoolingGrad::get_global() const { auto value_ptr = GetAttr(kGlobal); diff --git a/mindspore/core/ops/grad/power_grad.cc b/mindspore/core/ops/grad/power_grad.cc index 97fdfbaa86d..39738aee894 100644 --- a/mindspore/core/ops/grad/power_grad.cc +++ b/mindspore/core/ops/grad/power_grad.cc @@ -26,19 +26,19 @@ namespace mindspore { namespace ops { -void PowerGrad::set_power(const float power) { this->AddAttr(kPower, MakeValue(power)); } +void PowerGrad::set_power(const float power) { (void)this->AddAttr(kPower, MakeValue(power)); } float PowerGrad::get_power() const { auto value_ptr = GetAttr(kPower); return GetValue(value_ptr); } -void PowerGrad::set_scale(const float scale) { this->AddAttr(kScale, MakeValue(scale)); } +void PowerGrad::set_scale(const float scale) { (void)this->AddAttr(kScale, MakeValue(scale)); } float PowerGrad::get_scale() const { auto value_ptr = GetAttr(kScale); return GetValue(value_ptr); } -void PowerGrad::set_shift(const float shift) { this->AddAttr(kShift, MakeValue(shift)); } +void PowerGrad::set_shift(const float shift) { (void)this->AddAttr(kShift, MakeValue(shift)); } float PowerGrad::get_shift() const { auto value_ptr = GetAttr(kShift); return GetValue(value_ptr); diff --git a/mindspore/core/ops/grad/resize_grad.cc b/mindspore/core/ops/grad/resize_grad.cc index 39c36631c83..c4165a2f75b 100644 --- a/mindspore/core/ops/grad/resize_grad.cc +++ b/mindspore/core/ops/grad/resize_grad.cc @@ -32,10 +32,12 @@ void ResizeGrad::Init(const ResizeMethod method, const bool align_corners) { void ResizeGrad::set_method(const ResizeMethod method) { auto swi = (int64_t)method; - this->AddAttr(kMethod, MakeValue(swi)); + (void)this->AddAttr(kMethod, MakeValue(swi)); } -void ResizeGrad::set_align_corners(const bool align_corners) { this->AddAttr(kAlignCorners, MakeValue(align_corners)); } +void ResizeGrad::set_align_corners(const bool align_corners) { + (void)this->AddAttr(kAlignCorners, MakeValue(align_corners)); +} ResizeMethod ResizeGrad::get_method() const { auto value_ptr = GetAttr(kMethod); diff --git a/mindspore/core/ops/grad/sigmoid_cross_entropy_with_logits_grad.cc b/mindspore/core/ops/grad/sigmoid_cross_entropy_with_logits_grad.cc index 35994473d93..61824e4435b 100644 --- a/mindspore/core/ops/grad/sigmoid_cross_entropy_with_logits_grad.cc +++ b/mindspore/core/ops/grad/sigmoid_cross_entropy_with_logits_grad.cc @@ -31,15 +31,15 @@ AbstractBasePtr SigmoidCrossEntropyWithLogitsGradInfer(const abstract::AnalysisE const std::vector &input_args) { MS_EXCEPTION_IF_NULL(primitive); auto prim_name = primitive->name(); - CheckAndConvertUtils::CheckInteger("sigmoid_cross_entropy_with_logits_grad_infer", 
SizeToLong(input_args.size()), - kEqual, 3, prim_name); + (void)CheckAndConvertUtils::CheckInteger("sigmoid_cross_entropy_with_logits_grad_infer", + SizeToLong(input_args.size()), kEqual, 3, prim_name); // Infer Shape auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape]; auto y_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[1]->BuildShape())[kShape]; auto dout_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[2]->BuildShape())[kShape]; - CheckAndConvertUtils::Check("x_shape", x_shape, kEqual, "y_shape", y_shape, prim_name, TypeError); - CheckAndConvertUtils::Check("x_shape", x_shape, kEqual, "dout_shape", dout_shape, prim_name, TypeError); + (void)CheckAndConvertUtils::Check("x_shape", x_shape, kEqual, "y_shape", y_shape, prim_name, TypeError); + (void)CheckAndConvertUtils::Check("x_shape", x_shape, kEqual, "dout_shape", dout_shape, prim_name, TypeError); // Infer type const std::set valid_types = {kBool, kInt, kInt8, kInt16, kInt32, kInt64, kUInt, kUInt8, diff --git a/mindspore/core/ops/grad/smooth_l1_loss_grad.cc b/mindspore/core/ops/grad/smooth_l1_loss_grad.cc index 43405433dff..178068f2242 100644 --- a/mindspore/core/ops/grad/smooth_l1_loss_grad.cc +++ b/mindspore/core/ops/grad/smooth_l1_loss_grad.cc @@ -26,7 +26,7 @@ namespace mindspore { namespace ops { void SmoothL1LossGrad::Init(const float beta) { this->set_beta(beta); } -void SmoothL1LossGrad::set_beta(const float beta) { this->AddAttr(kBeta, MakeValue(beta)); } +void SmoothL1LossGrad::set_beta(const float beta) { (void)this->AddAttr(kBeta, MakeValue(beta)); } float SmoothL1LossGrad::get_beta() const { auto value_ptr = this->GetAttr(kBeta); @@ -37,7 +37,8 @@ AbstractBasePtr SmoothL1LossGradInfer(const abstract::AnalysisEnginePtr &, const const std::vector &input_args) { MS_EXCEPTION_IF_NULL(primitive); auto prim_name = primitive->name(); - CheckAndConvertUtils::CheckInteger("smooth_l1_loss_grad_infer", input_args.size(), kEqual, 3, prim_name); + (void)CheckAndConvertUtils::CheckInteger("smooth_l1_loss_grad_infer", SizeToLong(input_args.size()), kEqual, 3, + prim_name); // Infer shape auto prediction = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape]; diff --git a/mindspore/core/ops/grad/strided_slice_grad.cc b/mindspore/core/ops/grad/strided_slice_grad.cc index 738c9d07ab1..0ea58632337 100644 --- a/mindspore/core/ops/grad/strided_slice_grad.cc +++ b/mindspore/core/ops/grad/strided_slice_grad.cc @@ -33,46 +33,46 @@ void StridedSliceGrad::Init(int64_t begin_mask, int64_t end_mask, int64_t ellips } void StridedSliceGrad::set_begin_mask(int64_t begin_mask) { - CheckAndConvertUtils::CheckInteger(kBeginMask, begin_mask, kGreaterEqual, 0, this->name()); - this->AddAttr(kBeginMask, MakeValue(begin_mask)); + (void)CheckAndConvertUtils::CheckInteger(kBeginMask, begin_mask, kGreaterEqual, 0, this->name()); + (void)this->AddAttr(kBeginMask, MakeValue(begin_mask)); } int64_t StridedSliceGrad::get_begin_mask() const { auto value_ptr = GetAttr(kBeginMask); return GetValue(value_ptr); } void StridedSliceGrad::set_end_mask(int64_t end_mask) { - CheckAndConvertUtils::CheckInteger(kEndMask, end_mask, kGreaterEqual, 0, this->name()); - this->AddAttr(kEndMask, MakeValue(end_mask)); + (void)CheckAndConvertUtils::CheckInteger(kEndMask, end_mask, kGreaterEqual, 0, this->name()); + (void)this->AddAttr(kEndMask, MakeValue(end_mask)); } int64_t StridedSliceGrad::get_end_mask() const { auto value_ptr = GetAttr(kEndMask); return 
GetValue(value_ptr); } void StridedSliceGrad::set_ellipsis_mask(int64_t ellipsis_mask) { - CheckAndConvertUtils::CheckInteger(kEllipsisMask, ellipsis_mask, kGreaterEqual, 0, this->name()); + (void)CheckAndConvertUtils::CheckInteger(kEllipsisMask, ellipsis_mask, kGreaterEqual, 0, this->name()); std::bitset bs(ellipsis_mask); std::ostringstream buffer; if (bs.count() > 1) { buffer << "For" << this->name() << ", only support one ellipsis in the index, but got " << this->get_end_mask(); MS_EXCEPTION(ValueError) << buffer.str(); } - this->AddAttr(kEllipsisMask, MakeValue(ellipsis_mask)); + (void)this->AddAttr(kEllipsisMask, MakeValue(ellipsis_mask)); } int64_t StridedSliceGrad::get_ellipsis_mask() const { auto value_ptr = GetAttr(kEllipsisMask); return GetValue(value_ptr); } void StridedSliceGrad::set_new_axis_mask(int64_t new_axis_mask) { - CheckAndConvertUtils::CheckInteger(kNewAxisMask, new_axis_mask, kGreaterEqual, 0, this->name()); - this->AddAttr(kNewAxisMask, MakeValue(new_axis_mask)); + (void)CheckAndConvertUtils::CheckInteger(kNewAxisMask, new_axis_mask, kGreaterEqual, 0, this->name()); + (void)this->AddAttr(kNewAxisMask, MakeValue(new_axis_mask)); } int64_t StridedSliceGrad::get_new_axis_mask() const { auto value_ptr = GetAttr(kNewAxisMask); return GetValue(value_ptr); } void StridedSliceGrad::set_shrink_axis_mask(int64_t shrink_axis_mask) { - CheckAndConvertUtils::CheckInteger(kShrinkAxisMask, shrink_axis_mask, kGreaterEqual, 0, this->name()); - this->AddAttr(kShrinkAxisMask, MakeValue(shrink_axis_mask)); + (void)CheckAndConvertUtils::CheckInteger(kShrinkAxisMask, shrink_axis_mask, kGreaterEqual, 0, this->name()); + (void)this->AddAttr(kShrinkAxisMask, MakeValue(shrink_axis_mask)); } int64_t StridedSliceGrad::get_shrink_axis_mask() const { auto value_ptr = GetAttr(kShrinkAxisMask); diff --git a/mindspore/core/ops/gru.cc b/mindspore/core/ops/gru.cc index da2a84718c5..4b0f4752d2e 100644 --- a/mindspore/core/ops/gru.cc +++ b/mindspore/core/ops/gru.cc @@ -33,33 +33,33 @@ void GRU::Init(bool bidirectional, int64_t cell_depth, float keep_prob, float ce this->set_gate_order(gate_order); } -void GRU::set_bidirectional(bool bidirectional) { AddAttr(kBidirectional, MakeValue(bidirectional)); } +void GRU::set_bidirectional(bool bidirectional) { (void)AddAttr(kBidirectional, MakeValue(bidirectional)); } -void GRU::set_cell_depth(int64_t cell_depth) { AddAttr(kCellDepth, MakeValue(cell_depth)); } +void GRU::set_cell_depth(int64_t cell_depth) { (void)AddAttr(kCellDepth, MakeValue(cell_depth)); } -void GRU::set_keep_prob(float keep_prob) { AddAttr(kKeepProb, MakeValue(keep_prob)); } +void GRU::set_keep_prob(float keep_prob) { (void)AddAttr(kKeepProb, MakeValue(keep_prob)); } -void GRU::set_cell_clip(float cell_clip) { AddAttr(kCellClip, MakeValue(cell_clip)); } +void GRU::set_cell_clip(float cell_clip) { (void)AddAttr(kCellClip, MakeValue(cell_clip)); } void GRU::set_num_proj(int64_t num_proj) { - CheckAndConvertUtils::CheckInteger(kNumProj, num_proj, kGreaterThan, 0, this->name()); - AddAttr(kNumProj, MakeValue(num_proj)); + (void)CheckAndConvertUtils::CheckInteger(kNumProj, num_proj, kGreaterThan, 0, this->name()); + (void)AddAttr(kNumProj, MakeValue(num_proj)); } -void GRU::set_time_major(bool time_major) { AddAttr(kTimeMajor, MakeValue(time_major)); } +void GRU::set_time_major(bool time_major) { (void)AddAttr(kTimeMajor, MakeValue(time_major)); } -void GRU::set_reset_after(bool reset_after) { AddAttr(kResetAfter, MakeValue(reset_after)); } +void GRU::set_reset_after(bool reset_after) { 
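
StridedSliceGrad::set_ellipsis_mask in the hunk above rejects masks with more than one bit set via std::bitset::count(), since only one ellipsis may appear in an index. A standalone sketch of that single-bit validation; the 64-bit width is an assumption, as the original's bitset width is not visible in this excerpt:

#include <bitset>
#include <cstdint>
#include <iostream>
#include <stdexcept>

void set_ellipsis_mask(int64_t ellipsis_mask) {
  if (ellipsis_mask < 0) {
    throw std::invalid_argument("ellipsis_mask must be non-negative");
  }
  // Count the set bits: each bit marks an ellipsis position, and at most one is legal.
  std::bitset<64> bs(static_cast<uint64_t>(ellipsis_mask));
  if (bs.count() > 1) {
    throw std::invalid_argument("only one ellipsis is supported in the index");
  }
}

int main() {
  set_ellipsis_mask(0b0100);  // accepted: a single ellipsis bit
  std::cout << "single-ellipsis mask accepted" << std::endl;
  return 0;
}
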
(void)AddAttr(kResetAfter, MakeValue(reset_after)); } -void GRU::set_is_training(bool is_training) { AddAttr(kIsTraining, MakeValue(is_training)); } +void GRU::set_is_training(bool is_training) { (void)AddAttr(kIsTraining, MakeValue(is_training)); } void GRU::set_activation(ActivationType activation) { int64_t swi = activation; - AddAttr(kActivation, MakeValue(swi)); + (void)AddAttr(kActivation, MakeValue(swi)); } void GRU::set_gate_order(GateOrderMode gate_order) { int64_t swi = gate_order; - AddAttr(kGateOrder, MakeValue(swi)); + (void)AddAttr(kGateOrder, MakeValue(swi)); } bool GRU::get_bidirectional() const { diff --git a/mindspore/core/ops/layer_norm.cc b/mindspore/core/ops/layer_norm.cc index 98ee4f96baa..f5590647fad 100644 --- a/mindspore/core/ops/layer_norm.cc +++ b/mindspore/core/ops/layer_norm.cc @@ -99,13 +99,15 @@ AbstractBasePtr LayerNormInfer(const abstract::AnalysisEnginePtr &, const Primit auto input_min_shape = input_shape->min_shape(); auto input_max_shape = input_shape->max_shape(); if (input_min_shape.empty() || input_max_shape.empty()) { - shapes_list.emplace_back(std::make_shared(mean_var_shape)); - shapes_list.emplace_back(std::make_shared(mean_var_shape)); + (void)shapes_list.emplace_back(std::make_shared(mean_var_shape)); + (void)shapes_list.emplace_back(std::make_shared(mean_var_shape)); } else { auto mean_var_shape_min = CalLayerNormMeanAndVarShape(begin_norm_axis, input_min_shape); auto mean_var_shape_max = CalLayerNormMeanAndVarShape(begin_norm_axis, input_min_shape); - shapes_list.emplace_back(std::make_shared(mean_var_shape, mean_var_shape_min, mean_var_shape_max)); - shapes_list.emplace_back(std::make_shared(mean_var_shape, mean_var_shape_min, mean_var_shape_max)); + (void)shapes_list.emplace_back( + std::make_shared(mean_var_shape, mean_var_shape_min, mean_var_shape_max)); + (void)shapes_list.emplace_back( + std::make_shared(mean_var_shape, mean_var_shape_min, mean_var_shape_max)); } return abstract::MakeAbstract(std::make_shared(shapes_list), std::make_shared(types_list)); diff --git a/mindspore/core/ops/leaky_relu.cc b/mindspore/core/ops/leaky_relu.cc index 54f8fb78b2e..b5750329844 100644 --- a/mindspore/core/ops/leaky_relu.cc +++ b/mindspore/core/ops/leaky_relu.cc @@ -29,7 +29,7 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { MS_EXCEPTION_IF_NULL(prim); - CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 1, prim->name()); + (void)CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, 1, prim->name()); for (const auto &item : input_args) { MS_EXCEPTION_IF_NULL(item); } @@ -48,7 +48,7 @@ float LeakyRelu::get_negative_slope() const { return GetValue(GetAttr(kNe AbstractBasePtr LeakyReluInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, const std::vector &input_args) { return std::make_shared(InferType(primitive, input_args), - InferShape(primitive, input_args)->shape()); + InferShape(primitive, input_args)); } REGISTER_PRIMITIVE_C(kNameLeakyRelu, LeakyRelu); } // namespace ops diff --git a/mindspore/core/ops/log.cc b/mindspore/core/ops/log.cc index 21c0cd23fde..bc65b2740d0 100644 --- a/mindspore/core/ops/log.cc +++ b/mindspore/core/ops/log.cc @@ -30,7 +30,7 @@ namespace { abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { MS_EXCEPTION_IF_NULL(primitive); auto prim_name = primitive->name(); - CheckAndConvertUtils::CheckInteger("input numbers", input_args.size(), kEqual, 1, 
prim_name); + (void)CheckAndConvertUtils::CheckInteger("input numbers", input_args.size(), kEqual, 1, prim_name); for (const auto &item : input_args) { MS_EXCEPTION_IF_NULL(item); } @@ -47,7 +47,7 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { MS_EXCEPTION_IF_NULL(prim); auto op_name = prim->name(); - CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 1, op_name); + (void)CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 1, op_name); for (const auto &item : input_args) { MS_EXCEPTION_IF_NULL(item); } diff --git a/mindspore/core/ops/logical_not.cc b/mindspore/core/ops/logical_not.cc index b64e262e275..cc215908fbc 100644 --- a/mindspore/core/ops/logical_not.cc +++ b/mindspore/core/ops/logical_not.cc @@ -14,9 +14,10 @@ * limitations under the License. */ +#include "ops/logical_not.h" + #include -#include "ops/logical_not.h" #include "ops/op_utils.h" namespace mindspore { diff --git a/mindspore/core/ops/lrn.cc b/mindspore/core/ops/lrn.cc index f1d1e1a3db1..c498d4666bf 100644 --- a/mindspore/core/ops/lrn.cc +++ b/mindspore/core/ops/lrn.cc @@ -27,7 +27,7 @@ namespace mindspore { namespace ops { void LRN::set_depth_radius(const int64_t depth_radius) { - CheckAndConvertUtils::CheckInteger(kDepthRadius, depth_radius, kGreaterEqual, 0, this->name()); + (void)CheckAndConvertUtils::CheckInteger(kDepthRadius, depth_radius, kGreaterEqual, 0, this->name()); this->AddAttr(kDepthRadius, MakeValue(depth_radius)); } @@ -79,7 +79,8 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vectorname(); auto in_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape]; - CheckAndConvertUtils::CheckInteger("input shape", in_shape.size(), kEqual, 4, prim_name); + (void)CheckAndConvertUtils::CheckInteger("input shape", SizeToLong(in_shape.size()), kEqual, 4, prim_name); + return std::make_shared(in_shape); } diff --git a/mindspore/core/ops/lsh_projection.cc b/mindspore/core/ops/lsh_projection.cc index 6d7d101e05f..3f3670ec64c 100644 --- a/mindspore/core/ops/lsh_projection.cc +++ b/mindspore/core/ops/lsh_projection.cc @@ -34,14 +34,14 @@ AbstractBasePtr LshProjectionInfer(const abstract::AnalysisEnginePtr &, const Pr auto op_name = primitive->name(); auto input0 = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape]; auto input1 = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[1]->BuildShape())[kShape]; - CheckAndConvertUtils::CheckInteger("input0_shape", SizeToLong(input0.size()), kEqual, 2, op_name); - CheckAndConvertUtils::CheckInteger("input0_shape_dimen_1", input0[1], kLessEqual, 32, op_name); - CheckAndConvertUtils::CheckInteger("input1_shape", SizeToLong(input1.size()), kGreaterEqual, 1, op_name); + (void)CheckAndConvertUtils::CheckInteger("input0_shape", SizeToLong(input0.size()), kEqual, 2, op_name); + (void)CheckAndConvertUtils::CheckInteger("input0_shape_dimen_1", input0[1], kLessEqual, 32, op_name); + (void)CheckAndConvertUtils::CheckInteger("input1_shape", SizeToLong(input1.size()), kGreaterEqual, 1, op_name); if (input_args.size() == 3) { auto input2 = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[2]->BuildShape())[kShape]; - CheckAndConvertUtils::CheckInteger("input2_shape", SizeToLong(input2.size()), kEqual, 1, op_name); - CheckAndConvertUtils::CheckInteger("input2_shape_dimen_0", input2[0], kEqual, input1[0], op_name); + (void)CheckAndConvertUtils::CheckInteger("input2_shape", 
SizeToLong(input2.size()), kEqual, 1, op_name); + (void)CheckAndConvertUtils::CheckInteger("input2_shape_dimen_0", input2[0], kEqual, input1[0], op_name); } std::vector out_shape; diff --git a/mindspore/core/ops/lstm.cc b/mindspore/core/ops/lstm.cc index 720defdb88f..6bd09f61c61 100644 --- a/mindspore/core/ops/lstm.cc +++ b/mindspore/core/ops/lstm.cc @@ -31,7 +31,7 @@ AbstractBasePtr LstmInfer(const PrimitivePtr &primitive, const std::vectorname(); - CheckAndConvertUtils::CheckInteger("lstm_prim_infer", SizeToLong(input_args.size()), kEqual, 4, prim_name); + (void)CheckAndConvertUtils::CheckInteger("lstm_prim_infer", SizeToLong(input_args.size()), kEqual, 4, prim_name); auto x_input_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape]; auto h_input_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[1]->BuildShape())[kShape]; auto c_input_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[2]->BuildShape())[kShape]; @@ -47,9 +47,10 @@ AbstractBasePtr LstmInfer(const PrimitivePtr &primitive, const std::vector(primitive->GetAttr(kNumDirections)); int64_t hidden_size = GetValue(primitive->GetAttr(kHidden_size)); int64_t input_size = input_x_size; - CheckAndConvertUtils::CheckInteger("h_shape[0]", h_input_shape[0], kEqual, num_layers * num_directions, prim_name); - CheckAndConvertUtils::CheckInteger("h_shape[1]", h_input_shape[1], kEqual, x_input_shape[1], prim_name); - CheckAndConvertUtils::CheckInteger("h_shape[2]", h_input_shape[2], kEqual, hidden_size, prim_name); + (void)CheckAndConvertUtils::CheckInteger("h_shape[0]", h_input_shape[0], kEqual, num_layers * num_directions, + prim_name); + (void)CheckAndConvertUtils::CheckInteger("h_shape[1]", h_input_shape[1], kEqual, x_input_shape[1], prim_name); + (void)CheckAndConvertUtils::CheckInteger("h_shape[2]", h_input_shape[2], kEqual, hidden_size, prim_name); std::vector y_shape = {x_input_shape[0], x_input_shape[1], hidden_size * num_directions}; @@ -74,7 +75,7 @@ AbstractBasePtr LstmInfer(const PrimitivePtr &primitive, const std::vector x_shape = {x_input_shape}; - // std::vector h_shape = {h_input_shape}; + std::vector c_shape = {c_input_shape}; std::vector reverse_shape = {current_offset, 1}; std::vector state_shape = {1, 1}; diff --git a/mindspore/core/ops/mat_mul.cc b/mindspore/core/ops/mat_mul.cc index 4ae0776f461..1ca4a83ead3 100644 --- a/mindspore/core/ops/mat_mul.cc +++ b/mindspore/core/ops/mat_mul.cc @@ -63,8 +63,8 @@ abstract::ShapePtr MatMulInferShape(const PrimitivePtr &primitive, const std::ve ShapeVector x_max_shape = x_shape_map[kMaxShape]; ShapeVector y_min_shape = y_shape_map[kMinShape]; ShapeVector y_max_shape = y_shape_map[kMaxShape]; - (void)CheckAndConvertUtils::CheckMinMaxShape(x_shp, &x_min_shape, &x_max_shape); - (void)CheckAndConvertUtils::CheckMinMaxShape(y_shp, &y_min_shape, &y_max_shape); + CheckAndConvertUtils::CheckMinMaxShape(x_shp, &x_min_shape, &x_max_shape); + CheckAndConvertUtils::CheckMinMaxShape(y_shp, &y_min_shape, &y_max_shape); // Additional check for dynamic shape // Last infer will be real shape values bool x_not_dyn = @@ -98,8 +98,8 @@ TypePtr MatMulInferType(const PrimitivePtr &prim, const std::vector valid_types = {kInt8, kInt16, kInt32, kInt64, kFloat16, kFloat32, kFloat64}; std::map types; - (void)types.emplace("x", input_args[0]->BuildType()); - (void)types.emplace("w", input_args[1]->BuildType()); + types.emplace("x", input_args[0]->BuildType()); + types.emplace("w", input_args[1]->BuildType()); return 
CheckAndConvertUtils::CheckTensorTypeSame(types, valid_types, prim->name()); } } // namespace @@ -125,7 +125,8 @@ bool MatMul::get_transpose_b() const { AbstractBasePtr MatMulInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, const std::vector &input_args) { - CheckAndConvertUtils::CheckInteger("MatMul infer", input_args.size(), kGreaterEqual, 2, primitive->name()); + (void)CheckAndConvertUtils::CheckInteger("MatMul infer", SizeToLong(input_args.size()), kGreaterEqual, 2, + primitive->name()); return abstract::MakeAbstract(MatMulInferShape(primitive, input_args), MatMulInferType(primitive, input_args)); } // Add diff --git a/mindspore/core/ops/max_pool.cc b/mindspore/core/ops/max_pool.cc index bf4999a2450..a3f9c88a97f 100644 --- a/mindspore/core/ops/max_pool.cc +++ b/mindspore/core/ops/max_pool.cc @@ -87,7 +87,8 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector>(primitive->GetAttr(kKernelSize)); auto pad_mode_value = (primitive->GetAttr(kPadMode)); auto pad_mode = PadMode(GetValue(pad_mode_value)); diff --git a/mindspore/core/ops/merge.cc b/mindspore/core/ops/merge.cc index b5b760354cf..97c759a8367 100644 --- a/mindspore/core/ops/merge.cc +++ b/mindspore/core/ops/merge.cc @@ -36,7 +36,7 @@ AbstractBasePtr MergeInfer(const abstract::AnalysisEnginePtr &, const PrimitiveP args.insert({"input[" + std::to_string(i) + "]", inputs_type[i]}); } std::set template_type = common_valid_types; - (void)template_type.emplace(kBool); + template_type.emplace(kBool); auto infered_type = CheckAndConvertUtils::CheckScalarOrTensorTypesSame(args, template_type, op_name); std::vector in_shape0 = inputs_shape[0]->cast()->shape(); diff --git a/mindspore/core/ops/mfcc.cc b/mindspore/core/ops/mfcc.cc index b697ef66577..9b9f2d324e0 100644 --- a/mindspore/core/ops/mfcc.cc +++ b/mindspore/core/ops/mfcc.cc @@ -27,8 +27,8 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vectorname(); auto first_input_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape]; auto second_input_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[1]->BuildShape())[kShape]; - CheckAndConvertUtils::CheckInteger("first input rank", SizeToLong(first_input_shape.size()), kEqual, 3, prim_name); - CheckAndConvertUtils::CheckInteger("second input rank", SizeToLong(second_input_shape.size()), kEqual, 1, prim_name); + (void)CheckAndConvertUtils::CheckInteger("input 0 rank", SizeToLong(first_input_shape.size()), kEqual, 3, prim_name); + (void)CheckAndConvertUtils::CheckInteger("input 1 rank", SizeToLong(second_input_shape.size()), kEqual, 1, prim_name); std::vector out_shape = {first_input_shape[0], first_input_shape[1], GetValue(primitive->GetAttr(kDctCoeffNum))}; return std::make_shared(out_shape); diff --git a/mindspore/core/ops/minimum.cc b/mindspore/core/ops/minimum.cc index 2cc820607e6..7637879de22 100644 --- a/mindspore/core/ops/minimum.cc +++ b/mindspore/core/ops/minimum.cc @@ -20,9 +20,9 @@ #include #include #include +#include "abstract/primitive_infer_map.h" #include "ops/op_utils.h" #include "utils/check_convert_utils.h" -#include "abstract/primitive_infer_map.h" namespace mindspore { namespace ops { @@ -35,7 +35,7 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { MS_EXCEPTION_IF_NULL(prim); - CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 2, prim->name()); + CheckAndConvertUtils::CheckInteger("input number", 
SizeToLong(input_args.size()), kEqual, 2, prim->name()); if (std::any_of(input_args.begin(), input_args.end(), [](const AbstractBasePtr &a) { return a == nullptr; })) { MS_LOG(EXCEPTION) << "nullptr"; } diff --git a/mindspore/core/ops/not_equal.cc b/mindspore/core/ops/not_equal.cc index e2cb5cdac66..d88b765da13 100644 --- a/mindspore/core/ops/not_equal.cc +++ b/mindspore/core/ops/not_equal.cc @@ -30,7 +30,7 @@ namespace { abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { MS_EXCEPTION_IF_NULL(primitive); auto op_name = primitive->name(); - CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 2, op_name); + (void)CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, 2, op_name); for (const auto &item : input_args) { MS_EXCEPTION_IF_NULL(item); } @@ -40,7 +40,7 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { MS_EXCEPTION_IF_NULL(prim); auto op_name = prim->name(); - CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kGreaterEqual, 2, op_name); + (void)CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kGreaterEqual, 2, op_name); for (const auto &item : input_args) { MS_EXCEPTION_IF_NULL(item); } diff --git a/mindspore/core/ops/one_hot.cc b/mindspore/core/ops/one_hot.cc index 327ba44f160..620d891bd47 100644 --- a/mindspore/core/ops/one_hot.cc +++ b/mindspore/core/ops/one_hot.cc @@ -23,7 +23,7 @@ namespace mindspore { namespace ops { void OneHot::Init(const int64_t axis) { this->set_axis(axis); } -void OneHot::set_axis(const int64_t axis) { this->AddAttr(kAxis, MakeValue(axis)); } +void OneHot::set_axis(const int64_t axis) { (void)this->AddAttr(kAxis, MakeValue(axis)); } int64_t OneHot::get_axis() const { return GetValue(GetAttr(kAxis)); } namespace { @@ -31,7 +31,7 @@ abstract::ShapePtr OneHotInferShape(const PrimitivePtr &primitive, const std::ve MS_EXCEPTION_IF_NULL(primitive); auto op_name = primitive->name(); int64_t axis = GetValue(primitive->GetAttr(kAxis)); - CheckAndConvertUtils::CheckInteger("one_hot infer", input_args.size(), kEqual, 4, op_name); + (void)CheckAndConvertUtils::CheckInteger("one_hot infer", SizeToLong(input_args.size()), kEqual, 4, op_name); MS_EXCEPTION_IF_NULL(input_args[0]); auto shape_map = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape()); auto in_shape = shape_map[kShape]; @@ -40,7 +40,7 @@ abstract::ShapePtr OneHotInferShape(const PrimitivePtr &primitive, const std::ve CheckAndConvertUtils::CheckInRange("axis", axis, kIncludeBoth, {-1, SizeToLong(in_shape.size())}, op_name); MS_EXCEPTION_IF_NULL(input_args[1]); auto depth_val = GetValue(input_args[1]->BuildValue()); - CheckAndConvertUtils::CheckInteger("depth", depth_val, kGreaterEqual, 0, op_name); + (void)CheckAndConvertUtils::CheckInteger("depth", depth_val, kGreaterEqual, 0, op_name); if (min_shape.size() == 0 || max_shape.size() == 0) { if (axis >= 0) { in_shape.insert(in_shape.begin() + axis, depth_val); diff --git a/mindspore/core/ops/op_utils.cc b/mindspore/core/ops/op_utils.cc index b8f0f9d8fe2..05abcf43b4b 100644 --- a/mindspore/core/ops/op_utils.cc +++ b/mindspore/core/ops/op_utils.cc @@ -54,7 +54,7 @@ std::vector CalBroadCastShape(std::vector x_shape, std::vector } abstract::ShapePtr BroadCastInferShape(const std::string &op_name, const std::vector &input_args) { MS_LOG(INFO) << "Do infer shape for op " << op_name; - CheckAndConvertUtils::CheckInteger("input numbers", 
input_args.size(), kGreaterEqual, 2, op_name); + (void)CheckAndConvertUtils::CheckInteger("input numbers", SizeToLong(input_args.size()), kGreaterEqual, 2, op_name); for (const auto &item : input_args) { MS_EXCEPTION_IF_NULL(item); } diff --git a/mindspore/core/ops/pack.cc b/mindspore/core/ops/pack.cc index af42e8757f8..3e97de5a0bd 100644 --- a/mindspore/core/ops/pack.cc +++ b/mindspore/core/ops/pack.cc @@ -21,12 +21,11 @@ namespace ops { namespace { std::vector _get_pack_shape(std::vector x_shapes, std::vector x_types, int64_t axis, const std::string &name) { - CheckAndConvertUtils::CheckInteger("len of input_x", (int64_t)x_shapes.size(), kGreaterEqual, 1, name); - CheckAndConvertUtils::CheckSubClass("input_x[0]", x_types[0], {TypeIdToType(kObjectTypeTensorType)}, name); + (void)CheckAndConvertUtils::CheckInteger("len of input_x", (int64_t)x_shapes.size(), kGreaterEqual, 1, name); + (void)CheckAndConvertUtils::CheckSubClass("input_x[0]", x_types[0], {TypeIdToType(kObjectTypeTensorType)}, name); auto output_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(x_shapes[0])[kShape]; int64_t rank_base = SizeToLong(output_shape.size()); int64_t N = SizeToLong(x_shapes.size()); - // CheckAndConvertUtils::CheckInRange("axis", axis, kIncludeBoth, {-rank_base-1, rank_base}, name); if (axis < 0) { axis = axis + rank_base + 1; } diff --git a/mindspore/core/ops/pad.cc b/mindspore/core/ops/pad.cc index d43ea8f4553..33c5e674fcd 100644 --- a/mindspore/core/ops/pad.cc +++ b/mindspore/core/ops/pad.cc @@ -38,7 +38,8 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector out_shape; for (int64_t i = 0; i < int64_t(paddings_attr.size() / 2); i++) { - out_shape.emplace_back(x_shape[LongToSize(i)] + paddings_attr[LongToSize(i)][0] + paddings_attr[LongToSize(i)][1]); + (void)out_shape.emplace_back(x_shape[LongToSize(i)] + paddings_attr[LongToSize(i)][0] + + paddings_attr[LongToSize(i)][1]); } return std::make_shared(out_shape); } diff --git a/mindspore/core/ops/prelu.cc b/mindspore/core/ops/prelu.cc index b87abe9d71b..a724be22ea1 100644 --- a/mindspore/core/ops/prelu.cc +++ b/mindspore/core/ops/prelu.cc @@ -28,8 +28,8 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { MS_EXCEPTION_IF_NULL(prim); - CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, 2, prim->name()); + (void)CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, 2, prim->name()); for (const auto &item : input_args) { MS_EXCEPTION_IF_NULL(item); } diff --git a/mindspore/core/ops/primitive_c.h b/mindspore/core/ops/primitive_c.h index b25243767f3..ad7ec8c1a65 100644 --- a/mindspore/core/ops/primitive_c.h +++ b/mindspore/core/ops/primitive_c.h @@ -55,6 +55,9 @@ class OpPrimCRegisterHelper { OpPrimCRegister::GetInstance().SetPrimCMap(kname, fn); } ~OpPrimCRegisterHelper() = default; + + private: + int id_{0}; }; #define REGISTER_PRIMITIVE_C(kname, primc) \ diff --git a/mindspore/core/ops/prior_box.cc b/mindspore/core/ops/prior_box.cc index 2ec1f078f1d..3c26756303d 100644 --- a/mindspore/core/ops/prior_box.cc +++ b/mindspore/core/ops/prior_box.cc @@ -60,14 +60,14 @@ int64_t PriorBox::get_image_size_h() const { return GetValue(value_ptr); } -void PriorBox::set_step_w(const float step_w) { this->AddAttr(kStepW, MakeValue(step_w)); } +void PriorBox::set_step_w(const float step_w) { (void)this->AddAttr(kStepW, MakeValue(step_w)); } float PriorBox::get_step_w() const { auto value_ptr = GetAttr(kStepW); return 
GetValue(value_ptr); } -void PriorBox::set_step_h(const float step_h) { this->AddAttr(kStepH, MakeValue(step_h)); } +void PriorBox::set_step_h(const float step_h) { (void)this->AddAttr(kStepH, MakeValue(step_h)); } float PriorBox::get_step_h() const { auto value_ptr = GetAttr(kStepH); @@ -115,14 +115,14 @@ AbstractBasePtr PriorBoxInfer(const abstract::AnalysisEnginePtr &, const Primiti MS_EXCEPTION_IF_NULL(input_args[0]); std::vector different_aspect_ratios{1.0f}; auto aspect_ratios = GetValue>(primitive->GetAttr(kAspectRatios)); - for (int64_t i = 0; i < (int64_t)aspect_ratios.size(); i++) { + for (size_t i = 0; i < aspect_ratios.size(); i++) { float ratio = aspect_ratios[i]; bool exist = std::any_of(different_aspect_ratios.begin(), different_aspect_ratios.end(), [&](float v) { return abs(ratio - v) < 1e-6; }); if (!exist) { - different_aspect_ratios.emplace_back(ratio); + (void)different_aspect_ratios.emplace_back(ratio); if (GetValue(primitive->GetAttr(kFlip))) { - different_aspect_ratios.emplace_back(1.0f / ratio); + (void)different_aspect_ratios.emplace_back(1.0f / ratio); } } } diff --git a/mindspore/core/ops/range.cc b/mindspore/core/ops/range.cc index ca56e34ee91..642760a0b83 100644 --- a/mindspore/core/ops/range.cc +++ b/mindspore/core/ops/range.cc @@ -25,25 +25,25 @@ namespace mindspore { namespace ops { -void Range::set_d_type(const int64_t d_type) { this->AddAttr(kDType, MakeValue(d_type)); } +void Range::set_d_type(const int64_t d_type) { (void)this->AddAttr(kDType, MakeValue(d_type)); } int64_t Range::get_d_type() const { auto value_ptr = GetAttr(kDType); return GetValue(value_ptr); } -void Range::set_start(const int64_t start) { this->AddAttr(kStart, MakeValue(start)); } +void Range::set_start(const int64_t start) { (void)this->AddAttr(kStart, MakeValue(start)); } int64_t Range::get_start() const { return GetValue(GetAttr(kStart)); } -void Range::set_limit(const int64_t limit) { this->AddAttr(kLimit, MakeValue(limit)); } +void Range::set_limit(const int64_t limit) { (void)this->AddAttr(kLimit, MakeValue(limit)); } int64_t Range::get_limit() const { auto value_ptr = GetAttr(kLimit); return GetValue(value_ptr); } -void Range::set_delta(const int64_t delta) { this->AddAttr(kDelta, MakeValue(delta)); } +void Range::set_delta(const int64_t delta) { (void)this->AddAttr(kDelta, MakeValue(delta)); } int64_t Range::get_delta() const { auto value_ptr = GetAttr(kDelta); diff --git a/mindspore/core/ops/real_div.cc b/mindspore/core/ops/real_div.cc index 4773f4ffbad..642d152a82f 100644 --- a/mindspore/core/ops/real_div.cc +++ b/mindspore/core/ops/real_div.cc @@ -29,7 +29,7 @@ namespace { abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { MS_EXCEPTION_IF_NULL(primitive); auto prim_name = primitive->name(); - CheckAndConvertUtils::CheckInteger("input numbers", SizeToLong(input_args.size()), kEqual, 2, prim_name); + (void)CheckAndConvertUtils::CheckInteger("input numbers", SizeToLong(input_args.size()), kEqual, 2, prim_name); for (const auto &item : input_args) { MS_EXCEPTION_IF_NULL(item); } @@ -42,7 +42,7 @@ TypePtr InferType(const PrimitivePtr &prim, const std::vector & MS_EXCEPTION_IF_NULL(item); } auto op_name = prim->name(); - CheckAndConvertUtils::CheckInteger("RealDiv infer", input_args.size(), kGreaterEqual, 2, op_name); + (void)CheckAndConvertUtils::CheckInteger("RealDiv infer", SizeToLong(input_args.size()), kGreaterEqual, 2, op_name); std::map types; types.emplace("x", input_args[0]->BuildType()); types.emplace("y", 
input_args[1]->BuildType()); diff --git a/mindspore/core/ops/reciprocal.cc b/mindspore/core/ops/reciprocal.cc index 515f8c406a6..7887efbdeac 100644 --- a/mindspore/core/ops/reciprocal.cc +++ b/mindspore/core/ops/reciprocal.cc @@ -30,7 +30,7 @@ namespace { abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { MS_EXCEPTION_IF_NULL(primitive); auto prim_name = primitive->name(); - CheckAndConvertUtils::CheckInteger("input numbers", input_args.size(), kEqual, 1, prim_name); + (void)CheckAndConvertUtils::CheckInteger("input numbers", SizeToLong(input_args.size()), kEqual, 1, prim_name); for (const auto &item : input_args) { MS_EXCEPTION_IF_NULL(item); } diff --git a/mindspore/core/ops/reduce.cc b/mindspore/core/ops/reduce.cc index c19c733bdf5..331dcd7ae3a 100644 --- a/mindspore/core/ops/reduce.cc +++ b/mindspore/core/ops/reduce.cc @@ -35,7 +35,7 @@ void reduce_one_axis(const int64_t one_axis, const int64_t dim, std::set infer_shape_reduce(std::vector input_x_shape, const ValuePtr axis_value, const bool keep_dims) { - int64_t dim = input_x_shape.size(); + int64_t dim = SizeToLong(input_x_shape.size()); std::set axis_reduce; if (axis_value == nullptr) { std::vector vec; @@ -48,19 +48,19 @@ std::vector infer_shape_reduce(std::vector input_x_shape, cons if (axis_value_elem.size() == 1) { reduce_one_axis(axis_value_elem[0], dim, axis_reduce); } else { - int64_t size = axis_value_elem.size(); - for (int64_t i = 0; i < size; i++) { - reduce_one_axis(axis_value_elem[LongToSize(i)], dim, axis_reduce); + size_t size = axis_value_elem.size(); + for (size_t i = 0; i < size; i++) { + reduce_one_axis(axis_value_elem[i], dim, axis_reduce); } } std::vector out_shape; for (int64_t i = 0; i < dim; i++) { if (axis_reduce.find(i) != axis_reduce.end()) { if (keep_dims) { - out_shape.emplace_back(1); + (void)out_shape.emplace_back(1); } } else { - out_shape.emplace_back(input_x_shape[LongToSize(i)]); + (void)out_shape.emplace_back(input_x_shape[LongToSize(i)]); } } return out_shape; diff --git a/mindspore/core/ops/relu.cc b/mindspore/core/ops/relu.cc index a24ae7c7615..743939ab491 100644 --- a/mindspore/core/ops/relu.cc +++ b/mindspore/core/ops/relu.cc @@ -30,7 +30,7 @@ namespace { abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { MS_EXCEPTION_IF_NULL(primitive); auto prim_name = primitive->name(); - CheckAndConvertUtils::CheckInteger("input numbers", input_args.size(), kGreaterEqual, 1, prim_name); + (void)CheckAndConvertUtils::CheckInteger("input numbers", SizeToLong(input_args.size()), kGreaterEqual, 1, prim_name); CheckAndConvertUtils::CheckArgs(prim_name, input_args, 0); auto x = input_args[0]->BuildShape(); MS_EXCEPTION_IF_NULL(x); @@ -41,7 +41,7 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { MS_EXCEPTION_IF_NULL(prim); auto prim_name = prim->name(); - CheckAndConvertUtils::CheckInteger("ReLU infer", input_args.size(), kEqual, 1, prim_name); + (void)CheckAndConvertUtils::CheckInteger("ReLU infer", SizeToLong(input_args.size()), kEqual, 1, prim_name); MS_EXCEPTION_IF_NULL(input_args[0]); auto x_type = input_args[0]->BuildType(); (void)CheckAndConvertUtils::CheckTensorTypeValid("input_x", x_type, common_valid_types, prim_name); diff --git a/mindspore/core/ops/relu6.cc b/mindspore/core/ops/relu6.cc index e1b4e1b177d..ba9e27983b5 100644 --- a/mindspore/core/ops/relu6.cc +++ b/mindspore/core/ops/relu6.cc @@ -29,7 +29,7 @@ namespace { abstract::ShapePtr InferShape(const PrimitivePtr 
&primitive, const std::vector &input_args) { MS_EXCEPTION_IF_NULL(primitive); auto prim_name = primitive->name(); - CheckAndConvertUtils::CheckInteger("input numbers", input_args.size(), kGreaterEqual, 1, prim_name); + (void)CheckAndConvertUtils::CheckInteger("input numbers", SizeToLong(input_args.size()), kGreaterEqual, 1, prim_name); for (const auto &item : input_args) { MS_EXCEPTION_IF_NULL(item); } diff --git a/mindspore/core/ops/resize_bilinear.cc b/mindspore/core/ops/resize_bilinear.cc index 81d731bd71b..b4d6a50ccad 100644 --- a/mindspore/core/ops/resize_bilinear.cc +++ b/mindspore/core/ops/resize_bilinear.cc @@ -46,7 +46,7 @@ AbstractBasePtr ResizeBilinearInfer(const abstract::AnalysisEnginePtr &, const P const std::vector &input_args) { MS_EXCEPTION_IF_NULL(primitive); auto prim_name = primitive->name(); - CheckAndConvertUtils::CheckInteger("resize_bilinear_infer", SizeToLong(input_args.size()), kEqual, 1, prim_name); + (void)CheckAndConvertUtils::CheckInteger("infer", SizeToLong(input_args.size()), kEqual, 1, prim_name); // Infer shape auto input_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape]; diff --git a/mindspore/core/ops/reverse_sequence.cc b/mindspore/core/ops/reverse_sequence.cc index 1d187f05db4..d735e94423b 100644 --- a/mindspore/core/ops/reverse_sequence.cc +++ b/mindspore/core/ops/reverse_sequence.cc @@ -27,8 +27,8 @@ void ReverseSequence::Init(const int64_t seq_dim, const int64_t batch_dim) { this->set_seq_dim(seq_dim); this->set_batch_dim(batch_dim); } -void ReverseSequence::set_seq_dim(const int64_t seq_dim) { this->AddAttr(kSeqDim, MakeValue(seq_dim)); } -void ReverseSequence::set_batch_dim(const int64_t batch_dim) { this->AddAttr(kBatchDim, MakeValue(batch_dim)); } +void ReverseSequence::set_seq_dim(const int64_t seq_dim) { (void)this->AddAttr(kSeqDim, MakeValue(seq_dim)); } +void ReverseSequence::set_batch_dim(const int64_t batch_dim) { (void)this->AddAttr(kBatchDim, MakeValue(batch_dim)); } int64_t ReverseSequence::get_seq_dim() const { return GetValue(GetAttr(kSeqDim)); } int64_t ReverseSequence::get_batch_dim() const { diff --git a/mindspore/core/ops/rfft.cc b/mindspore/core/ops/rfft.cc index b1aa6c5b3ef..f5dc5dba8d2 100644 --- a/mindspore/core/ops/rfft.cc +++ b/mindspore/core/ops/rfft.cc @@ -42,7 +42,7 @@ TypePtr InferType(const PrimitivePtr &primitive, const std::vectorset_fft_length(fft_length); } -void Rfft::set_fft_length(const int64_t fft_length) { this->AddAttr(kFftLength, MakeValue(fft_length)); } +void Rfft::set_fft_length(const int64_t fft_length) { (void)this->AddAttr(kFftLength, MakeValue(fft_length)); } int64_t Rfft::get_fft_length() const { return GetValue(GetAttr(kFftLength)); } diff --git a/mindspore/core/ops/scale.cc b/mindspore/core/ops/scale.cc index 9ccbb7f4803..336a4962167 100644 --- a/mindspore/core/ops/scale.cc +++ b/mindspore/core/ops/scale.cc @@ -20,7 +20,7 @@ namespace mindspore { namespace ops { void Scale::Init(const int64_t axis) { set_axis(axis); } -void Scale::set_axis(const int64_t axis) { this->AddAttr(kAxis, MakeValue(axis)); } +void Scale::set_axis(const int64_t axis) { (void)this->AddAttr(kAxis, MakeValue(axis)); } int64_t Scale::get_axis() const { return GetValue(GetAttr(kAxis)); } REGISTER_PRIMITIVE_C(kNameScale, Scale); diff --git a/mindspore/core/ops/scatter_nd.cc b/mindspore/core/ops/scatter_nd.cc index 0009e36d59f..2dce1f0bb66 100644 --- a/mindspore/core/ops/scatter_nd.cc +++ b/mindspore/core/ops/scatter_nd.cc @@ -27,12 +27,12 @@ abstract::ShapePtr InferShape(const std::vector 
&input_args) { auto shape_value = input_args[2]->BuildValue(); auto shape_value_element = GetValue>(shape_value); for (const auto &shape : shape_value_element) { - CheckAndConvertUtils::CheckInteger("shape value", shape, kGreaterThan, 0, "ScatterNd"); + (void)CheckAndConvertUtils::CheckInteger("shape value", shape, kGreaterThan, 0, "ScatterNd"); } auto indices_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape]; auto update_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[1]->BuildShape())[kShape]; - CheckAndConvertUtils::CheckInteger("indices_shape[0] and update_shape[0]", indices_shape[0], kEqual, update_shape[0], - "ScatterNd"); + (void)CheckAndConvertUtils::CheckInteger("indices_shape[0] and update_shape[0]", indices_shape[0], kEqual, + update_shape[0], "ScatterNd"); return std::make_shared(shape_value_element); } diff --git a/mindspore/core/ops/smooth_l1_loss.cc b/mindspore/core/ops/smooth_l1_loss.cc index bd18edd4cdd..3926b7b87d6 100644 --- a/mindspore/core/ops/smooth_l1_loss.cc +++ b/mindspore/core/ops/smooth_l1_loss.cc @@ -26,7 +26,7 @@ namespace mindspore { namespace ops { void SmoothL1Loss::Init(const float beta) { this->set_beta(beta); } -void SmoothL1Loss::set_beta(const float beta) { this->AddAttr(kBeta, MakeValue(beta)); } +void SmoothL1Loss::set_beta(const float beta) { (void)this->AddAttr(kBeta, MakeValue(beta)); } float SmoothL1Loss::get_beta() const { auto value_ptr = this->GetAttr(kBeta); @@ -37,7 +37,7 @@ AbstractBasePtr SmoothL1LossInfer(const abstract::AnalysisEnginePtr &, const Pri const std::vector &input_args) { MS_EXCEPTION_IF_NULL(primitive); auto prim_name = primitive->name(); - (void)CheckAndConvertUtils::CheckInteger("smooth_l1_loss_infer", input_args.size(), kEqual, 2, prim_name); + (void)CheckAndConvertUtils::CheckInteger("smooth_l1_loss_infer", SizeToLong(input_args.size()), kEqual, 2, prim_name); // Infer shape auto prediction = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape]; diff --git a/mindspore/core/ops/softmax.cc b/mindspore/core/ops/softmax.cc index 746fa3007ac..177cc978eec 100644 --- a/mindspore/core/ops/softmax.cc +++ b/mindspore/core/ops/softmax.cc @@ -26,7 +26,7 @@ namespace mindspore { namespace ops { -void Softmax::set_axis(const std::vector &axis) { this->AddAttr(kAxis, MakeValue(axis)); } +void Softmax::set_axis(const std::vector &axis) { (void)this->AddAttr(kAxis, MakeValue(axis)); } std::vector Softmax::get_axis() const { auto value_ptr = GetAttr(kAxis); @@ -36,7 +36,7 @@ std::vector Softmax::get_axis() const { void Softmax::Init(const int64_t axis) { auto op_name = this->name(); std::vector axis_vec = {axis}; - CheckAndConvertUtils::CheckInteger("axis_len", axis_vec.size(), kEqual, 1, op_name); + (void)CheckAndConvertUtils::CheckInteger("axis_len", SizeToLong(axis_vec.size()), kEqual, 1, op_name); auto rank = SizeToLong(axis_vec.size()); for (auto &item : axis_vec) { CheckAndConvertUtils::CheckInRange("axis", item, kIncludeLeft, {-rank, rank}, op_name); diff --git a/mindspore/core/ops/softmax_cross_entropy_with_logits.cc b/mindspore/core/ops/softmax_cross_entropy_with_logits.cc index 4f7108b4892..a49dec12a7e 100644 --- a/mindspore/core/ops/softmax_cross_entropy_with_logits.cc +++ b/mindspore/core/ops/softmax_cross_entropy_with_logits.cc @@ -30,8 +30,8 @@ AbstractBasePtr SoftmaxCrossEntropyWithLogitsInfer(const abstract::AnalysisEngin const std::vector &input_args) { MS_EXCEPTION_IF_NULL(primitive); auto prim_name = primitive->name(); - 
CheckAndConvertUtils::CheckInteger("softmax_cross_entropy_with_logics_infer", SizeToLong(input_args.size()), kEqual, - 2, prim_name); + (void)CheckAndConvertUtils::CheckInteger("softmax_cross_entropy_with_logics_infer", SizeToLong(input_args.size()), + kEqual, 2, prim_name); // Infer shape auto logits_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape]; diff --git a/mindspore/core/ops/space_to_batch.cc b/mindspore/core/ops/space_to_batch.cc index a4b422acac0..946666ddb7d 100644 --- a/mindspore/core/ops/space_to_batch.cc +++ b/mindspore/core/ops/space_to_batch.cc @@ -30,16 +30,16 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vectorname(); auto input_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape]; - CheckAndConvertUtils::CheckInteger("input shape", SizeToLong(input_shape.size()), kEqual, 4, prim_name); + (void)CheckAndConvertUtils::CheckInteger("input shape", SizeToLong(input_shape.size()), kEqual, 4, prim_name); std::vector output_shape(input_shape.size()); auto block_shape_vector = GetValue>(primitive->GetAttr(kBlockSize)); auto paddings = GetValue>>(primitive->GetAttr(kPaddings)); for (size_t i = 0; i < 2; i++) { - auto padded = LongToSize(output_shape[i + 2] + paddings[i][0] + paddings[i][1]); - CheckAndConvertUtils::CheckInteger("padded shape", padded % block_shape_vector.size(), kEqual, 0, prim_name); + auto padded = output_shape[i + 2] + paddings[i][0] + paddings[i][1]; + (void)CheckAndConvertUtils::CheckInteger("padded shape", padded % block_shape_vector.size(), kEqual, 0, prim_name); output_shape[i + 2] = padded / block_shape_vector.size(); } - output_shape[0] *= block_shape_vector.size() * block_shape_vector.size(); + output_shape[0] *= SizeToLong(block_shape_vector.size() * block_shape_vector.size()); return std::make_shared(output_shape); } @@ -60,7 +60,7 @@ void SpaceToBatch::set_paddings(const std::vector> &padding CheckAndConvertUtils::Check(kPaddings, {h, w}, kEqual, "paddings_shape(2,2)", temp_w, this->name()); for (size_t i = 0; i < LongToSize(h); i++) { for (size_t j = 0; j < LongToSize(w); j++) { - CheckAndConvertUtils::CheckInteger(kPadding, paddings[i][j], kGreaterEqual, 0, this->name()); + (void)CheckAndConvertUtils::CheckInteger(kPadding, paddings[i][j], kGreaterEqual, 0, this->name()); } } } @@ -84,7 +84,7 @@ void SpaceToBatch::Init(const std::vector block_size, const std::vector AbstractBasePtr SpaceToBatchInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, const std::vector &input_args) { return std::make_shared(InferType(primitive, input_args), - InferShape(primitive, input_args)->shape()); + InferShape(primitive, input_args)); } REGISTER_PRIMITIVE_C(kNameSpaceToBatch, SpaceToBatch); } // namespace ops diff --git a/mindspore/core/ops/space_to_batch_nd.cc b/mindspore/core/ops/space_to_batch_nd.cc index de0c261248d..bdb222e5b1e 100644 --- a/mindspore/core/ops/space_to_batch_nd.cc +++ b/mindspore/core/ops/space_to_batch_nd.cc @@ -30,14 +30,14 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vectorname(); auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape]; - CheckAndConvertUtils::CheckInteger("input_x rank", x_shape.size(), kEqual, 4, prim_name); + (void)CheckAndConvertUtils::CheckInteger("input_x rank", SizeToLong(x_shape.size()), kEqual, 4, prim_name); auto out_shape = x_shape; int64_t block_shape_prod = 1; - const int64_t offset = 2; + const size_t offset 
= 2; auto block_shape = GetValue>(primitive->GetAttr(kBlockShape)); auto padding = GetValue>>(primitive->GetAttr(kPaddings)); - int64_t size = block_shape.size(); - for (int64_t i = 0; i < size; i++) { + size_t size = block_shape.size(); + for (size_t i = 0; i < size; i++) { int64_t padded = out_shape[i + offset] + padding[i][0] + padding[i][1]; if (padded % block_shape[i] != 0) { MS_EXCEPTION(ValueError) << prim_name << " padded[" << i << "]" << padded << "should be divisible by block_shape[" @@ -60,14 +60,14 @@ TypePtr InferType(const PrimitivePtr &prim, const std::vector & } // namespace void SpaceToBatchND::set_paddings(std::vector> paddings) { - CheckAndConvertUtils::CheckInteger(kPaddings, SizeToLong(paddings.size()), kEqual, 2, this->name()); - int64_t h = paddings.size(); - int64_t w = paddings[0].size(); - std::vector temp_w = {2, 2}; + (void)CheckAndConvertUtils::CheckInteger(kPaddings, SizeToLong(paddings.size()), kEqual, 2, this->name()); + size_t h = paddings.size(); + size_t w = paddings[0].size(); + std::vector temp_w = {2, 2}; CheckAndConvertUtils::Check(kPaddings, {h, w}, kEqual, "paddings_shape(2,2)", temp_w, this->name()); - for (int64_t i = 0; i < h; i++) { - for (int64_t j = 0; j < w; j++) { - CheckAndConvertUtils::CheckInteger(kPaddings, SizeToLong(paddings[i][j]), kGreaterEqual, 0, this->name()); + for (size_t i = 0; i < h; i++) { + for (size_t j = 0; j < w; j++) { + (void)CheckAndConvertUtils::CheckInteger(kPaddings, SizeToLong(paddings[i][j]), kGreaterEqual, 0, this->name()); } } this->AddAttr(kPaddings, MakeValue(paddings)); @@ -78,9 +78,9 @@ std::vector> SpaceToBatchND::get_paddings() const { return GetValue>>(value_ptr); } void SpaceToBatchND::set_block_shape(std::vector block_shape) { - CheckAndConvertUtils::CheckInteger(kBlockShape, SizeToLong(block_shape.size()), kEqual, 2, this->name()); - for (int64_t i = 0; i < (int64_t)block_shape.size(); i++) { - CheckAndConvertUtils::CheckInteger(kBlockShape, SizeToLong(block_shape[i]), kGreaterEqual, 1, this->name()); + (void)CheckAndConvertUtils::CheckInteger(kBlockShape, SizeToLong(block_shape.size()), kEqual, 2, this->name()); + for (size_t i = 0; i < block_shape.size(); i++) { + (void)CheckAndConvertUtils::CheckInteger(kBlockShape, SizeToLong(block_shape[i]), kGreaterEqual, 1, this->name()); } this->AddAttr(kBlockShape, MakeValue(block_shape)); } diff --git a/mindspore/core/ops/space_to_depth.cc b/mindspore/core/ops/space_to_depth.cc index d433982e1f6..bc66c2f2cbe 100644 --- a/mindspore/core/ops/space_to_depth.cc +++ b/mindspore/core/ops/space_to_depth.cc @@ -24,7 +24,7 @@ void SpaceToDepth::Init(const int64_t block_size, const Format &format) { } void SpaceToDepth::set_block_size(const int64_t block_size) { - CheckAndConvertUtils::Check(kBlockSize, block_size, kGreaterEqual, "", 2, this->name()); + (void)CheckAndConvertUtils::Check(kBlockSize, block_size, kGreaterEqual, "", 2, this->name()); AddAttr(kBlockSize, MakeValue(block_size)); } diff --git a/mindspore/core/ops/sparse_to_dense.cc b/mindspore/core/ops/sparse_to_dense.cc index 5668d8ac1a8..c9533f2f7e5 100644 --- a/mindspore/core/ops/sparse_to_dense.cc +++ b/mindspore/core/ops/sparse_to_dense.cc @@ -28,7 +28,7 @@ AbstractBasePtr SparseToDenseInfer(const abstract::AnalysisEnginePtr &, const Pr const std::vector &input_args) { MS_EXCEPTION_IF_NULL(primitive); auto prim_name = primitive->name(); - CheckAndConvertUtils::CheckInteger("input numbers", input_args.size(), kEqual, 3, prim_name); + (void)CheckAndConvertUtils::CheckInteger("input numbers", 
SizeToLong(input_args.size()), kEqual, 3, prim_name); for (const auto &item : input_args) { MS_EXCEPTION_IF_NULL(item); } diff --git a/mindspore/core/ops/squeeze.cc b/mindspore/core/ops/squeeze.cc index 4fa07111f87..f8ca859c8c1 100644 --- a/mindspore/core/ops/squeeze.cc +++ b/mindspore/core/ops/squeeze.cc @@ -38,7 +38,7 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector("axis_or_elememt", item, kIncludeBoth, {-len, len + 1}, op_name); auto idx = item >= 0 ? item : len + item; - if (in_shape[idx] != 1) { + if (in_shape[LongToSize(idx)] != 1L) { MS_EXCEPTION(ValueError) << "Cannot select an axis to squeeze out which has size not equal to one."; } } @@ -46,7 +46,7 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector(primitive->GetAttr(kAxis)), input_args.size()); auto infer_type0 = input_args[0]->BuildType()->cast()->element(); - for (int64_t i = 1; i < SizeToLong(input_args.size()); i++) { + for (size_t i = 1; i < input_args.size(); i++) { if (input_args[i]->BuildType()->cast()->element() == infer_type0) { MS_LOG(ERROR) << "All input should have the same data type!input[" << i << "] data type = " << input_args[i]->BuildType()->cast()->element(); diff --git a/mindspore/core/ops/strided_slice.cc b/mindspore/core/ops/strided_slice.cc index 33360004737..fe72ec212c7 100644 --- a/mindspore/core/ops/strided_slice.cc +++ b/mindspore/core/ops/strided_slice.cc @@ -43,15 +43,16 @@ void EllipsisInferShape(const PrimitivePtr &primitive, const std::vector new_axis_pos = strided_slice_prim->TenToTwo(strided_slice_prim->get_new_axis_mask()); std::vector shrink_axis_pos = strided_slice_prim->TenToTwo(strided_slice_prim->get_shrink_axis_mask()); - int64_t num = 0; + size_t num = 0; for (size_t n = j + 1; n < slice_len; n++) { if (new_axis_pos[n] == 1) { num++; } } - int64_t ellipsis_occupied_dims = x_rank - i - (slice_len - (j + 1)) + num; - infer_shape->insert(infer_shape->end(), x_shape.begin() + i, x_shape.begin() + i + ellipsis_occupied_dims); + size_t ellipsis_occupied_dims = x_rank - i - (slice_len - (j + 1)) + num; + (void)infer_shape->insert(infer_shape->end(), x_shape.begin() + i, + x_shape.begin() + SizeToLong(i + ellipsis_occupied_dims)); j += 1; i += ellipsis_occupied_dims; @@ -200,7 +201,7 @@ TypePtr StridedSliceInferType(const std::vector &input_args) { } // namespace void StridedSlice::set_begin_mask(const int64_t begin_mask) { - CheckAndConvertUtils::CheckInteger(kBeginMask, begin_mask, kGreaterEqual, 0, this->name()); + (void)CheckAndConvertUtils::CheckInteger(kBeginMask, begin_mask, kGreaterEqual, 0, this->name()); this->AddAttr(kBeginMask, MakeValue(begin_mask)); } int64_t StridedSlice::get_begin_mask() const { @@ -208,7 +209,7 @@ int64_t StridedSlice::get_begin_mask() const { return GetValue(value_ptr); } void StridedSlice::set_end_mask(const int64_t end_mask) { - CheckAndConvertUtils::CheckInteger(kEndMask, end_mask, kGreaterEqual, 0, this->name()); + (void)CheckAndConvertUtils::CheckInteger(kEndMask, end_mask, kGreaterEqual, 0, this->name()); this->AddAttr(kEndMask, MakeValue(end_mask)); } int64_t StridedSlice::get_end_mask() const { @@ -216,7 +217,7 @@ int64_t StridedSlice::get_end_mask() const { return GetValue(value_ptr); } void StridedSlice::set_ellipsis_mask(const int64_t ellipsis_mask) { - CheckAndConvertUtils::CheckInteger(kEllipsisMask, ellipsis_mask, kGreaterEqual, 0, this->name()); + (void)CheckAndConvertUtils::CheckInteger(kEllipsisMask, ellipsis_mask, kGreaterEqual, 0, this->name()); std::bitset bs(ellipsis_mask); 
std::ostringstream buffer; if (bs.count() > 1) { @@ -230,7 +231,7 @@ int64_t StridedSlice::get_ellipsis_mask() const { return GetValue(value_ptr); } void StridedSlice::set_new_axis_mask(const int64_t new_axis_mask) { - CheckAndConvertUtils::CheckInteger(kNewAxisMask, new_axis_mask, kGreaterEqual, 0, this->name()); + (void)CheckAndConvertUtils::CheckInteger(kNewAxisMask, new_axis_mask, kGreaterEqual, 0, this->name()); this->AddAttr(kNewAxisMask, MakeValue(new_axis_mask)); } int64_t StridedSlice::get_new_axis_mask() const { @@ -238,7 +239,7 @@ int64_t StridedSlice::get_new_axis_mask() const { return GetValue(value_ptr); } void StridedSlice::set_shrink_axis_mask(const int64_t shrink_axis_mask) { - CheckAndConvertUtils::CheckInteger(kShrinkAxisMask, shrink_axis_mask, kGreaterEqual, 0, this->name()); + (void)CheckAndConvertUtils::CheckInteger(kShrinkAxisMask, shrink_axis_mask, kGreaterEqual, 0, this->name()); this->AddAttr(kShrinkAxisMask, MakeValue(shrink_axis_mask)); } int64_t StridedSlice::get_shrink_axis_mask() const { @@ -296,13 +297,13 @@ int64_t StridedSlice::compute_slicing_length(int64_t start_pos, int64_t end_pos, if (start_pos < -x_dim || end_pos >= x_dim) { slicing_length = 0; } else { - if (0 < start_pos && start_pos < x_dim) { + if (start_pos > 0 && start_pos < x_dim) { start_pos += -x_dim; } if (start_pos >= x_dim) { start_pos = -1; } - if (0 <= end_pos && end_pos < x_dim) { + if (end_pos >= 0 && end_pos < x_dim) { end_pos += -x_dim; } if (end_pos < -x_dim - 1) { diff --git a/mindspore/core/ops/sub.cc b/mindspore/core/ops/sub.cc index 47ddc5599ed..9d0d4c8e27f 100644 --- a/mindspore/core/ops/sub.cc +++ b/mindspore/core/ops/sub.cc @@ -30,7 +30,7 @@ namespace { abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector &input_args) { MS_EXCEPTION_IF_NULL(primitive); auto prim_name = primitive->name(); - CheckAndConvertUtils::CheckInteger("input numbers", input_args.size(), kEqual, 2, prim_name); + (void)CheckAndConvertUtils::CheckInteger("input numbers", SizeToLong(input_args.size()), kEqual, 2, prim_name); for (const auto &item : input_args) { MS_EXCEPTION_IF_NULL(item); } @@ -42,7 +42,7 @@ TypePtr InferType(const PrimitivePtr &prim, const std::vector & MS_EXCEPTION_IF_NULL(item); } auto op_name = prim->name(); - CheckAndConvertUtils::CheckInteger("Sub infer", input_args.size(), kGreaterEqual, 2, op_name); + (void)CheckAndConvertUtils::CheckInteger("Sub infer", SizeToLong(input_args.size()), kGreaterEqual, 2, op_name); std::map types; types.emplace("x", input_args[0]->BuildType()); types.emplace("y", input_args[1]->BuildType()); diff --git a/mindspore/core/ops/tan.cc b/mindspore/core/ops/tan.cc index 4946b90cde1..a069644db6d 100644 --- a/mindspore/core/ops/tan.cc +++ b/mindspore/core/ops/tan.cc @@ -30,7 +30,7 @@ AbstractBasePtr TanInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr const std::vector &input_args) { MS_EXCEPTION_IF_NULL(primitive); auto prim_name = primitive->name(); - CheckAndConvertUtils::CheckInteger("tan_infer", SizeToLong(input_args.size()), kEqual, 1, prim_name); + (void)CheckAndConvertUtils::CheckInteger("tan_infer", SizeToLong(input_args.size()), kEqual, 1, prim_name); // Infer Shape auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape]; diff --git a/mindspore/core/ops/topk.cc b/mindspore/core/ops/topk.cc index 8bd825eb154..e861dc7d4e5 100644 --- a/mindspore/core/ops/topk.cc +++ b/mindspore/core/ops/topk.cc @@ -22,7 +22,7 @@ namespace mindspore { namespace ops { void TopK::Init(const 
bool sorted) { this->set_sorted(sorted); } -void TopK::set_sorted(const bool sorted) { this->AddAttr(kSorted, MakeValue(sorted)); } +void TopK::set_sorted(const bool sorted) { (void)this->AddAttr(kSorted, MakeValue(sorted)); } bool TopK::get_sorted() const { auto value_ptr = this->GetAttr(kSorted); @@ -32,7 +32,7 @@ AbstractBasePtr TopKInfer(const abstract::AnalysisEnginePtr &, const PrimitivePt const std::vector &input_args) { MS_EXCEPTION_IF_NULL(primitive); auto prim_name = primitive->name(); - (void)CheckAndConvertUtils::CheckInteger("top_k_infer", input_args.size(), kEqual, 2, prim_name); + (void)CheckAndConvertUtils::CheckInteger("top_k_infer", SizeToLong(input_args.size()), kEqual, 2, prim_name); // Infer dtype auto output1_type = kInt32; diff --git a/mindspore/core/ops/transpose.cc b/mindspore/core/ops/transpose.cc index a3214439114..72359d787a0 100644 --- a/mindspore/core/ops/transpose.cc +++ b/mindspore/core/ops/transpose.cc @@ -47,8 +47,8 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector tmp(p_value); for (auto it = tmp.begin(); it != tmp.end();) { @@ -84,7 +84,8 @@ TypePtr InferType(const PrimitivePtr &prim, const std::vector & AbstractBasePtr TransposeInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, const std::vector &input_args) { MS_EXCEPTION_IF_NULL(primitive); - CheckAndConvertUtils::CheckInteger("Transpose infer", input_args.size(), kGreaterEqual, 1, primitive->name()); + (void)CheckAndConvertUtils::CheckInteger("Transpose infer", SizeToLong(input_args.size()), kGreaterEqual, 1, + primitive->name()); auto type = InferType(primitive, input_args); auto shape = InferShape(primitive, input_args); return abstract::MakeAbstract(shape, type); diff --git a/mindspore/core/ops/uniform_real.cc b/mindspore/core/ops/uniform_real.cc index 728f609eb23..00946eb26d0 100644 --- a/mindspore/core/ops/uniform_real.cc +++ b/mindspore/core/ops/uniform_real.cc @@ -27,9 +27,9 @@ void UniformReal::Init(int64_t seed, int64_t seed2) { this->set_seed2(seed2); } -void UniformReal::set_seed(int64_t seed) { this->AddAttr(kSeed, MakeValue(seed)); } +void UniformReal::set_seed(int64_t seed) { (void)this->AddAttr(kSeed, MakeValue(seed)); } -void UniformReal::set_seed2(int64_t seed2) { this->AddAttr(kSeed2, MakeValue(seed2)); } +void UniformReal::set_seed2(int64_t seed2) { (void)this->AddAttr(kSeed2, MakeValue(seed2)); } int64_t UniformReal::get_seed() const { auto value_ptr = GetAttr(kSeed); diff --git a/mindspore/core/ops/unpack.cc b/mindspore/core/ops/unpack.cc index b64cc6bc9ce..2c5abf64f6b 100644 --- a/mindspore/core/ops/unpack.cc +++ b/mindspore/core/ops/unpack.cc @@ -31,15 +31,14 @@ AbstractBasePtr UnpackInfer(const abstract::AnalysisEnginePtr &, const Primitive auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape]; int64_t dim = SizeToLong(x_shape.size()); int64_t axis = GetValue(primitive->GetAttr(kAxis)); - // CheckAndConvertUtils::CheckInRange("axis value", axis, kIncludeLeft, {-dim, dim}, prim_name); if (axis < 0) { axis = axis + dim; } auto output_num = x_shape[LongToSize(axis)]; - CheckAndConvertUtils::CheckInteger("output_num", output_num, kGreaterThan, 0, prim_name); + (void)CheckAndConvertUtils::CheckInteger("output_num", output_num, kGreaterThan, 0, prim_name); auto output_valid_check = x_shape[axis] - output_num; - CheckAndConvertUtils::CheckInteger("The dimension which to unpack divides output_num", output_valid_check, kEqual, 0, - prim_name); + 
(void)CheckAndConvertUtils::CheckInteger("The dimension which to unpack divides output_num", output_valid_check, + kEqual, 0, prim_name); std::vector infer_shape(x_shape.begin(), x_shape.begin() + axis); infer_shape.insert(infer_shape.end(), x_shape.begin() + axis + 1, x_shape.end()); AbstractBasePtrList output; diff --git a/mindspore/core/ops/unsorted_segment_sum.cc b/mindspore/core/ops/unsorted_segment_sum.cc index 6d05d9f0740..670f8e57d36 100644 --- a/mindspore/core/ops/unsorted_segment_sum.cc +++ b/mindspore/core/ops/unsorted_segment_sum.cc @@ -37,16 +37,17 @@ AbstractBasePtr UnsortedSegmentSumInfer(const abstract::AnalysisEnginePtr &, con CheckAndConvertUtils::CheckInteger("x_shape", SizeToLong(x_shape.size()), kGreaterThan, 0, prim_name); auto shp = x_shape; auto segment_ids_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[1]->BuildShape())[kShape]; - CheckAndConvertUtils::CheckInteger("segment_ids_shape", SizeToLong(segment_ids_shape.size()), kGreaterThan, 0, - prim_name); + (void)CheckAndConvertUtils::CheckInteger("segment_ids_shape", SizeToLong(segment_ids_shape.size()), kGreaterThan, 0, + prim_name); CheckAndConvertUtils::Check("input_x", int64_t(x_shape.size()), kGreaterEqual, "segment_ids_shape", int64_t(segment_ids_shape.size()), prim_name); if ((x_shape.end() != find(x_shape.begin(), x_shape.end(), -1)) && (segment_ids_shape.end() != find(segment_ids_shape.begin(), segment_ids_shape.end(), -1))) { - int64_t size = segment_ids_shape.size(); - for (int64_t i = 0; i < size; ++i) { - CheckAndConvertUtils::Check("segment_ids_shp", segment_ids_shape[i], kEqual, "x_shape", x_shape[i], prim_name); + size_t size = segment_ids_shape.size(); + for (size_t i = 0; i < size; ++i) { + CheckAndConvertUtils::Check("segment_ids_shp", SizeToLong(segment_ids_shape[i]), kEqual, "x_shape", + SizeToLong(x_shape[i]), prim_name); } } @@ -56,7 +57,7 @@ AbstractBasePtr UnsortedSegmentSumInfer(const abstract::AnalysisEnginePtr &, con int64_t size_segment_ids_shp = segment_ids_shape.size(); int64_t size_x_shpe = x_shape.size(); for (int64_t i = size_segment_ids_shp; i < size_x_shpe; ++i) { - shp.emplace_back(x_shape[i]); + (void)shp.emplace_back(x_shape[i]); } return std::make_shared(x_type, shp); diff --git a/mindspore/core/ops/unsqueeze.cc b/mindspore/core/ops/unsqueeze.cc index 8d7eed6d259..30c5dccee7a 100644 --- a/mindspore/core/ops/unsqueeze.cc +++ b/mindspore/core/ops/unsqueeze.cc @@ -21,16 +21,17 @@ namespace mindspore { namespace ops { -void Unsqueeze::Init(const std::vector axis) { this->set_axis(axis); } +void Unsqueeze::Init(const std::vector axis) { (void)this->set_axis(axis); } -void Unsqueeze::set_axis(std::vector axis) { this->AddAttr(kAxis, MakeValue(axis)); } +void Unsqueeze::set_axis(const std::vector axis) { (void)this->AddAttr(kAxis, MakeValue(axis)); } std::vector Unsqueeze::get_axis() const { return GetValue>(GetAttr(kAxis)); } AbstractBasePtr UnsqueezeInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, const std::vector &input_args) { MS_EXCEPTION_IF_NULL(primitive); auto prim_name = primitive->name(); - CheckAndConvertUtils::CheckInteger("unsqueeze_infer", input_args.size(), kEqual, 1, prim_name); + (void)CheckAndConvertUtils::CheckInteger("unsqueeze_infer", SizeToLong(input_args.size()), kEqual, 1, prim_name); + MS_EXCEPTION_IF_NULL(input_args[0]); auto input = input_args[0]; @@ -51,13 +52,13 @@ AbstractBasePtr UnsqueezeInfer(const abstract::AnalysisEnginePtr &, const Primit size_t ax_itr = 0; for (size_t i = 0; i < sz; i++) { if (ax_itr < 
dim_rank && dims[ax_itr] == (int64_t)i) { - out_shape.emplace_back(1); + (void)out_shape.emplace_back(1); ax_itr++; } else if (ax_itr < dim_rank && dims[ax_itr] + sz == i) { - out_shape.emplace_back(1); + (void)out_shape.emplace_back(1); ax_itr++; } else { - out_shape.emplace_back(input_shape[in_itr]); + (void)out_shape.emplace_back(input_shape[in_itr]); in_itr++; } } diff --git a/mindspore/core/ops/unstack.cc b/mindspore/core/ops/unstack.cc index 745a7599b05..e1cdf20f4ed 100644 --- a/mindspore/core/ops/unstack.cc +++ b/mindspore/core/ops/unstack.cc @@ -18,7 +18,6 @@ namespace mindspore { namespace ops { - void Unstack::Init(const int64_t axis) { this->set_axis(axis); } void Unstack::set_axis(const int64_t axis) { AddAttr(kAxis, MakeValue(axis)); } int64_t Unstack::get_axis() const { return GetValue(GetAttr(kAxis)); } @@ -29,15 +28,14 @@ AbstractBasePtr UnstackInfer(const abstract::AnalysisEnginePtr &, const Primitiv auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape]; int64_t dim = x_shape.size(); int64_t axis = GetValue(primitive->GetAttr(kAxis)); - // CheckAndConvertUtils::CheckInRange("axis value", axis, kIncludeLeft, {-dim, dim}, prim_name); if (axis < 0) { axis = axis + dim; } auto output_num = x_shape[LongToSize(axis)]; - CheckAndConvertUtils::CheckInteger("output_num", output_num, kGreaterThan, 0, prim_name); + (void)CheckAndConvertUtils::CheckInteger("output_num", output_num, kGreaterThan, 0, prim_name); auto output_valid_check = x_shape[LongToSize(axis)] - output_num; - CheckAndConvertUtils::CheckInteger("The dimension which to unstack divides output_num", output_valid_check, kEqual, 0, - prim_name); + (void)CheckAndConvertUtils::CheckInteger("The dimension which to unstack divides output_num", output_valid_check, + kEqual, 0, prim_name); std::vector infer_shape(x_shape.begin(), x_shape.begin() + axis); infer_shape.insert(infer_shape.end(), x_shape.begin() + axis + 1, x_shape.end()); AbstractBasePtrList output; diff --git a/mindspore/core/ops/where.cc b/mindspore/core/ops/where.cc index 08dca023a06..67758a822f3 100644 --- a/mindspore/core/ops/where.cc +++ b/mindspore/core/ops/where.cc @@ -39,7 +39,7 @@ AbstractBasePtr WhereInfer(const abstract::AnalysisEnginePtr &, const PrimitiveP auto num1 = input_args[1]->BuildValue()->cast()->ElementsNum(); auto input2_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[2]->BuildShape())[kShape]; auto num2 = input_args[2]->BuildValue()->cast()->ElementsNum(); - size_t nummax = num > num1 ? num : (num1 > num2 ? num1 : num2); + auto nummax = num > num1 ? num : (num1 > num2 ? 
num1 : num2); size_t axisout = 0; size_t temp = 0; for (size_t j = 0; j < input0_shape.size(); j++) { @@ -60,7 +60,7 @@ AbstractBasePtr WhereInfer(const abstract::AnalysisEnginePtr &, const PrimitiveP return std::make_shared(input0_type, input0_shape); } } - input0_shape[axisout] = nummax; + input0_shape[axisout] = (int64_t)nummax; return std::make_shared(input0_type, input0_shape); } REGISTER_PRIMITIVE_C(kNameWhere, Where); diff --git a/mindspore/nn/dynamic_lr.py b/mindspore/nn/dynamic_lr.py index b5053cecc1a..ff81befaf28 100644 --- a/mindspore/nn/dynamic_lr.py +++ b/mindspore/nn/dynamic_lr.py @@ -372,7 +372,7 @@ def warmup_lr(learning_rate, total_step, step_per_epoch, warmup_epoch): for i in range(total_step): current_epoch = math.floor(i / step_per_epoch) warmup_epoch, tmp_epoch = function(warmup_epoch, current_epoch) - lr.append(learning_rate * tmp_epoch/ warmup_epoch) + lr.append(learning_rate * tmp_epoch / warmup_epoch) return lr diff --git a/mindspore/nn/learning_rate_schedule.py b/mindspore/nn/learning_rate_schedule.py index 1bc708933c2..46c7065e6e5 100644 --- a/mindspore/nn/learning_rate_schedule.py +++ b/mindspore/nn/learning_rate_schedule.py @@ -462,7 +462,7 @@ class WarmUpLR(LearningRateSchedule): self.cast = P.Cast() def construct(self, global_step): - warmup_percent = self.cast(self.min(global_step, self.warmup_steps), mstype.float32)/ self.warmup_steps + warmup_percent = self.cast(self.min(global_step, self.warmup_steps), mstype.float32) / self.warmup_steps return self.learning_rate * warmup_percent
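
The hunks above repeat a small number of mechanical fixes, and the standalone sketches that follow restate each pattern outside the MindSpore tree; every helper they define (CheckInteger, SizeToLong, and so on) is a local stand-in with an assumed signature, not the project's actual declaration. The most common change is the (void) cast in front of calls such as CheckAndConvertUtils::CheckInteger, which are made purely for their validation side effect. A minimal sketch of why the cast is wanted, assuming the checker returns the validated value:

#include <cstdint>
#include <stdexcept>
#include <string>

// Local stand-in for a validate-and-return checker; [[nodiscard]] makes the
// compiler flag any call whose result is silently dropped.
[[nodiscard]] int64_t CheckInteger(const std::string &arg_name, int64_t value, int64_t expected,
                                   const std::string &op_name) {
  if (value != expected) {
    throw std::invalid_argument(op_name + ": " + arg_name + " must be " + std::to_string(expected));
  }
  return value;
}

int main() {
  // The call is made for its side effect (the range check); the explicit
  // (void) cast documents the intentional discard and silences the warning.
  (void)CheckInteger("input numbers", 2, 2, "Add");
  return 0;
}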
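
The second recurring change routes container sizes through SizeToLong (and indices back through LongToSize) instead of letting size_t convert implicitly to int64_t. A plausible shape for such helpers, with the range checks that make the conversion explicit (assumed, not the project's exact implementation):

#include <cstddef>
#include <cstdint>
#include <limits>
#include <stdexcept>
#include <vector>

// Explicit, range-checked conversions instead of implicit signed/unsigned ones.
int64_t SizeToLong(size_t v) {
  if (v > static_cast<size_t>(std::numeric_limits<int64_t>::max())) {
    throw std::out_of_range("size_t value does not fit into int64_t");
  }
  return static_cast<int64_t>(v);
}

size_t LongToSize(int64_t v) {
  if (v < 0) {
    throw std::out_of_range("negative value cannot be used as an index");
  }
  return static_cast<size_t>(v);
}

int main() {
  std::vector<int> input_args(2);
  int64_t n = SizeToLong(input_args.size());  // container size into a signed API
  return input_args[LongToSize(n - 1)];       // signed index back into a container
}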
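
The loop-counter rewrites (for example in space_to_batch_nd.cc, prior_box.cc, and reduce.cc) keep the induction variable in the same signedness domain as the bound. The classic trap they avoid, shown on an empty container:

#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> paddings;  // empty on purpose
  // With a signed counter, `i < paddings.size() - 1` compares against an
  // unsigned value: size() - 1 wraps to SIZE_MAX and the loop runs (reading
  // out of bounds) instead of not running at all.
  //   for (int64_t i = 0; i < paddings.size() - 1; ++i) { ... }
  // Keeping the counter and the bound in one unsigned domain avoids it:
  for (size_t i = 0; i + 1 < paddings.size(); ++i) {
    std::printf("%d\n", paddings[i]);
  }
  return 0;  // prints nothing for the empty vector, as expected
}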
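
one_hot.cc inserts the depth value into the input shape at the requested axis. The same rule in isolation, under the assumption that a negative axis means "append at the end":

#include <cassert>
#include <cstdint>
#include <vector>

// Insert `depth` into the shape at `axis`; a negative axis appends.
std::vector<int64_t> OneHotShape(std::vector<int64_t> shape, int64_t axis, int64_t depth) {
  if (axis >= 0) {
    shape.insert(shape.begin() + axis, depth);
  } else {
    shape.push_back(depth);
  }
  return shape;
}

int main() {
  assert((OneHotShape({2, 3}, 0, 10) == std::vector<int64_t>{10, 2, 3}));
  assert((OneHotShape({2, 3}, -1, 10) == std::vector<int64_t>{2, 3, 10}));
  return 0;
}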
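
prior_box.cc collects the distinct aspect ratios, comparing with a 1e-6 tolerance and adding the reciprocal of each new ratio when the flip attribute is set. The same filtering, standalone:

#include <algorithm>
#include <cassert>
#include <cmath>
#include <vector>

// Keep each ratio once (within 1e-6), optionally with its reciprocal,
// starting from the implicit ratio 1.0.
std::vector<float> DistinctAspectRatios(const std::vector<float> &ratios, bool flip) {
  std::vector<float> out{1.0f};
  for (float ratio : ratios) {
    bool exist = std::any_of(out.begin(), out.end(),
                             [ratio](float v) { return std::fabs(ratio - v) < 1e-6f; });
    if (!exist) {
      out.emplace_back(ratio);
      if (flip) {
        out.emplace_back(1.0f / ratio);
      }
    }
  }
  return out;
}

int main() {
  auto r = DistinctAspectRatios({1.0f, 2.0f}, true);
  assert(r.size() == 3);  // {1.0, 2.0, 0.5}: the duplicate 1.0 was skipped
  return 0;
}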
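
reduce.cc's infer_shape_reduce keeps a reduced axis as extent 1 when keep_dims is set and drops it otherwise. Restated over plain vectors:

#include <cassert>
#include <cstdint>
#include <set>
#include <vector>

// Every axis in `axes` is either dropped or, with keep_dims, collapsed to 1.
std::vector<int64_t> ReduceShape(const std::vector<int64_t> &in, const std::set<int64_t> &axes,
                                 bool keep_dims) {
  std::vector<int64_t> out;
  const int64_t dim = static_cast<int64_t>(in.size());
  for (int64_t i = 0; i < dim; ++i) {
    if (axes.find(i) != axes.end()) {
      if (keep_dims) {
        out.emplace_back(1);
      }
    } else {
      out.emplace_back(in[static_cast<size_t>(i)]);
    }
  }
  return out;
}

int main() {
  assert((ReduceShape({4, 5, 6}, {1}, true) == std::vector<int64_t>{4, 1, 6}));
  assert((ReduceShape({4, 5, 6}, {1}, false) == std::vector<int64_t>{4, 6}));
  return 0;
}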
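
scatter_nd.cc takes the output shape directly from the shape operand after two checks: every extent is positive, and indices and updates agree on their leading dimension. In isolation:

#include <cassert>
#include <cstdint>
#include <stdexcept>
#include <vector>

// The shape operand *is* the output shape, once validated.
std::vector<int64_t> ScatterNdShape(const std::vector<int64_t> &indices_shape,
                                    const std::vector<int64_t> &update_shape,
                                    const std::vector<int64_t> &shape_value) {
  for (int64_t s : shape_value) {
    if (s <= 0) {
      throw std::invalid_argument("every shape value must be > 0");
    }
  }
  if (indices_shape.at(0) != update_shape.at(0)) {
    throw std::invalid_argument("indices_shape[0] must equal update_shape[0]");
  }
  return shape_value;
}

int main() {
  assert((ScatterNdShape({5, 2}, {5}, {4, 4}) == std::vector<int64_t>{4, 4}));
  return 0;
}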
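
space_to_batch.cc pads the two spatial dimensions, requires each padded extent to divide evenly, shrinks them by the block factor, and multiplies the batch dimension by the square of that factor. A sketch of the arithmetic, treating the block as a single scalar (an assumption; the operator stores it as an attribute, and the hunk above divides by the size of a block-shape vector):

#include <cassert>
#include <cstdint>
#include <stdexcept>
#include <vector>

// NCHW in, NCHW out: pad H and W, require divisibility, shrink the spatial
// dimensions by `block` and grow the batch dimension by block * block.
std::vector<int64_t> SpaceToBatchShape(std::vector<int64_t> shape, int64_t block,
                                       const int64_t paddings[2][2]) {
  for (size_t i = 0; i < 2; ++i) {
    const int64_t padded = shape[i + 2] + paddings[i][0] + paddings[i][1];
    if (padded % block != 0) {
      throw std::invalid_argument("padded spatial extent must be divisible by the block size");
    }
    shape[i + 2] = padded / block;
  }
  shape[0] *= block * block;
  return shape;
}

int main() {
  const int64_t paddings[2][2] = {{0, 2}, {0, 0}};
  assert((SpaceToBatchShape({1, 3, 6, 8}, 2, paddings) == std::vector<int64_t>{4, 3, 4, 4}));
  return 0;
}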
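
squeeze.cc normalizes a possibly negative axis against the rank and then insists the selected extent equal 1, which is what the new LongToSize(idx) indexing feeds. Standalone:

#include <cassert>
#include <cstdint>
#include <stdexcept>
#include <vector>

// Normalize a possibly negative axis, then require the extent there to be 1.
size_t CheckSqueezeAxis(const std::vector<int64_t> &shape, int64_t item) {
  const int64_t len = static_cast<int64_t>(shape.size());
  const int64_t idx = item >= 0 ? item : len + item;
  if (idx < 0 || idx >= len) {
    throw std::out_of_range("axis is out of range");
  }
  if (shape[static_cast<size_t>(idx)] != 1) {
    throw std::invalid_argument("cannot squeeze an axis whose size is not 1");
  }
  return static_cast<size_t>(idx);
}

int main() {
  assert(CheckSqueezeAxis({3, 1, 5}, -2) == 1);  // -2 normalizes to axis 1
  return 0;
}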
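
strided_slice.cc counts the bits of ellipsis_mask through std::bitset to reject slice expressions containing more than one '...'. The same validation (the 64-bit width is an assumption):

#include <bitset>
#include <cstdint>
#include <stdexcept>

// At most one ellipsis is allowed, so at most one mask bit may be set;
// std::bitset::count() gives the population count.
void ValidateEllipsisMask(int64_t mask) {
  if (mask < 0) {
    throw std::invalid_argument("ellipsis_mask must be non-negative");
  }
  const std::bitset<64> bs(static_cast<unsigned long long>(mask));
  if (bs.count() > 1) {
    throw std::invalid_argument("ellipsis_mask may have at most one bit set");
  }
}

int main() {
  ValidateEllipsisMask(0b0100);  // one '...': accepted
  try {
    ValidateEllipsisMask(0b0101);  // two: rejected
  } catch (const std::invalid_argument &) {
    return 0;
  }
  return 1;
}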
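
unpack.cc and unstack.cc both derive the per-output shape by deleting the unpacked axis from the input shape. In isolation:

#include <cassert>
#include <cstdint>
#include <vector>

// Unpacking along `axis` yields shape[axis] outputs whose common shape is the
// input shape with that axis removed.
std::vector<int64_t> UnpackedShape(const std::vector<int64_t> &x_shape, size_t axis) {
  std::vector<int64_t> out(x_shape.begin(), x_shape.begin() + axis);
  out.insert(out.end(), x_shape.begin() + axis + 1, x_shape.end());
  return out;
}

int main() {
  // Unpacking {4, 3, 5} along axis 1 produces 3 tensors of shape {4, 5}.
  assert((UnpackedShape({4, 3, 5}, 1) == std::vector<int64_t>{4, 5}));
  return 0;
}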
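
Finally, unsqueeze.cc builds the output shape in one pass, emitting a 1 at every requested axis (negative axes counting from the back of the output rank) and copying input extents elsewhere. A sketch assuming the axes arrive sorted:

#include <cassert>
#include <cstdint>
#include <vector>

// Walk the output rank once: positions named in `dims` become 1, everything
// else copies the next input extent.
std::vector<int64_t> UnsqueezeShape(const std::vector<int64_t> &in, const std::vector<int64_t> &dims) {
  const int64_t sz = static_cast<int64_t>(in.size() + dims.size());
  std::vector<int64_t> out;
  size_t ax_itr = 0;
  size_t in_itr = 0;
  for (int64_t i = 0; i < sz; ++i) {
    if (ax_itr < dims.size() && (dims[ax_itr] == i || dims[ax_itr] + sz == i)) {
      out.emplace_back(1);  // inserted axis (dims[ax_itr] may be negative)
      ++ax_itr;
    } else {
      out.emplace_back(in[in_itr]);
      ++in_itr;
    }
  }
  return out;
}

int main() {
  assert((UnsqueezeShape({2, 3}, {0}) == std::vector<int64_t>{1, 2, 3}));
  assert((UnsqueezeShape({2, 3}, {-1}) == std::vector<int64_t>{2, 3, 1}));
  return 0;
}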