diff --git a/mindspore/core/ops/broadcast.cc b/mindspore/core/ops/broadcast.cc
index e40d33df12a..322586d2cfc 100644
--- a/mindspore/core/ops/broadcast.cc
+++ b/mindspore/core/ops/broadcast.cc
@@ -35,7 +35,7 @@ void Broadcast::set_group(const std::string &group) {
 }
 int64_t Broadcast::get_root_rank() const {
   auto value_ptr = this->GetAttr(kRootRank);
-  return GetValue<float>(value_ptr);
+  return GetValue<int64_t>(value_ptr);
 }
 std::string Broadcast::get_group() const {
diff --git a/mindspore/core/ops/fake_quant_with_min_max_vars_per_channel.cc b/mindspore/core/ops/fake_quant_with_min_max_vars_per_channel.cc
index e7d8666c216..c712b5fc23b 100644
--- a/mindspore/core/ops/fake_quant_with_min_max_vars_per_channel.cc
+++ b/mindspore/core/ops/fake_quant_with_min_max_vars_per_channel.cc
@@ -57,7 +57,7 @@ AbstractBasePtr FakeQuantWithMinMaxVarsPerChannelInfer(const abstract::AnalysisE
   auto max_type = input_args[2]->BuildType();
   std::vector<std::string> type_name = {"x", "min", "max"};
   std::vector<TypePtr> type = {x_type, min_type, max_type};
-  for (int64_t i = 0; i < 3; i++) {
+  for (size_t i = 0; i < 3; i++) {
     (void)CheckAndConvertUtils::CheckTensorTypeValid(type_name[i], type[i], {kFloat16, kFloat32}, op_name);
   }
   auto tensor_type = x_type->cast<TensorTypePtr>();
diff --git a/mindspore/core/ops/fusion/max_pool_fusion.cc b/mindspore/core/ops/fusion/max_pool_fusion.cc
index d928d75ba91..863a13e6b50 100644
--- a/mindspore/core/ops/fusion/max_pool_fusion.cc
+++ b/mindspore/core/ops/fusion/max_pool_fusion.cc
@@ -74,11 +74,11 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A
   if (pad_mode == VALID) {
-    out_h = ceil((in_h - (kernel_h - 1)) / stride_h);
-    out_w = ceil((in_w - (kernel_w - 1)) / stride_w);
+    out_h = static_cast<int64_t>(ceil((in_h - (kernel_h - 1)) / static_cast<float>(stride_h)));
+    out_w = static_cast<int64_t>(ceil((in_w - (kernel_w - 1)) / static_cast<float>(stride_w)));
   } else if (pad_mode == SAME) {
-    out_h = ceil(in_h / stride_h);
-    out_w = ceil(in_w / stride_w);
+    out_h = static_cast<int64_t>(ceil(in_h / static_cast<float>(stride_h)));
+    out_w = static_cast<int64_t>(ceil(in_w / static_cast<float>(stride_w)));
   }
   std::vector<int64_t> out_shape = {batch, channel, out_h, out_w};
   if (format == NHWC) {
diff --git a/mindspore/core/ops/grad/pool_grad.cc b/mindspore/core/ops/grad/pool_grad.cc
index 7aabf5fcd6c..59499c45e79 100644
--- a/mindspore/core/ops/grad/pool_grad.cc
+++ b/mindspore/core/ops/grad/pool_grad.cc
@@ -19,8 +19,8 @@
 namespace mindspore {
 namespace ops {
-std::vector<int64_t> PoolGrad::_grad_check_vector(std::string arg_name, std::vector<int64_t> arg_val,
-                                                  std::string op_name) {
+std::vector<int64_t> PoolGrad::_grad_check_vector(const std::string &arg_name, std::vector<int64_t> arg_val,
+                                                  const std::string &op_name) {
   std::vector<int64_t> ret;
   std::string error_msg = "For '" + op_name + "'" + " the '" + arg_name + "' should be a vector of one or two or four "
diff --git a/mindspore/core/ops/grad/pool_grad.h b/mindspore/core/ops/grad/pool_grad.h
index b6bd52ae29e..0fcf7b08af7 100644
--- a/mindspore/core/ops/grad/pool_grad.h
+++ b/mindspore/core/ops/grad/pool_grad.h
@@ -46,8 +46,8 @@ class PoolGrad : public PrimitiveC {
   std::vector<int64_t> get_strides() const;
   PadMode get_pad_mode() const;
   Format get_format() const;
-  std::vector<int64_t> _grad_check_vector(const std::string arg_name, const std::vector<int64_t> arg_val,
-                                          const std::string op_name);
+  std::vector<int64_t> _grad_check_vector(const std::string &arg_name, const std::vector<int64_t> arg_val,
+                                          const std::string &op_name);
 };
 }  // namespace ops
 }  // namespace mindspore
diff --git a/mindspore/core/ops/grad/smooth_l1_loss_grad.cc b/mindspore/core/ops/grad/smooth_l1_loss_grad.cc
index bed8506b7ff..43405433dff 100644
--- a/mindspore/core/ops/grad/smooth_l1_loss_grad.cc
+++ b/mindspore/core/ops/grad/smooth_l1_loss_grad.cc
@@ -30,7 +30,7 @@ void SmoothL1LossGrad::set_beta(const float beta) { this->AddAttr(kBeta, MakeVal
 float SmoothL1LossGrad::get_beta() const {
   auto value_ptr = this->GetAttr(kBeta);
-  return GetValue<int64_t>(value_ptr);
+  return GetValue<float>(value_ptr);
 }
 
 AbstractBasePtr SmoothL1LossGradInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
diff --git a/mindspore/core/ops/sin.cc b/mindspore/core/ops/sin.cc
index ddb05f25369..2ff267f4f8b 100644
--- a/mindspore/core/ops/sin.cc
+++ b/mindspore/core/ops/sin.cc
@@ -28,6 +28,7 @@ namespace mindspore {
 namespace ops {
 namespace {
 abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
+  MS_EXCEPTION_IF_NULL(primitive);
   for (const auto &item : input_args) {
     MS_EXCEPTION_IF_NULL(item);
   }
diff --git a/mindspore/core/ops/space_to_batch.cc b/mindspore/core/ops/space_to_batch.cc
index b8300ec8bdd..cc62af3d281 100644
--- a/mindspore/core/ops/space_to_batch.cc
+++ b/mindspore/core/ops/space_to_batch.cc
@@ -35,7 +35,7 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A
   auto block_shape_vector = GetValue<std::vector<int64_t>>(primitive->GetAttr(kBlockSize));
   auto paddings = GetValue<std::vector<std::vector<int64_t>>>(primitive->GetAttr(kPaddings));
   for (size_t i = 0; i < 2; i++) {
-    auto padded = output_shape[i + 2] + paddings[i][0] + paddings[i][1];
+    auto padded = LongToSize(output_shape[i + 2] + paddings[i][0] + paddings[i][1]);
     CheckAndConvertUtils::CheckInteger("padded shape", padded % block_shape_vector.size(), kEqual, 0, prim_name);
     output_shape[i + 2] = padded / block_shape_vector.size();
   }
@@ -58,8 +58,8 @@ void SpaceToBatch::set_paddings(const std::vector<std::vector<int64_t>> &padding
   int64_t w = paddings[0].size();
   std::vector<int64_t> temp_w = {2, 2};
   CheckAndConvertUtils::Check(kPaddings, {h, w}, kEqual, "paddings_shape(2,2)", temp_w, this->name());
-  for (int64_t i = 0; i < h; i++) {
-    for (int64_t j = 0; j < w; j++) {
+  for (size_t i = 0; i < LongToSize(h); i++) {
+    for (size_t j = 0; j < LongToSize(w); j++) {
       CheckAndConvertUtils::CheckInteger(kPadding, paddings[i][j], kGreaterEqual, 0, this->name());
     }
   }
diff --git a/mindspore/core/ops/unstack.cc b/mindspore/core/ops/unstack.cc
index 66528a9870f..745a7599b05 100644
--- a/mindspore/core/ops/unstack.cc
+++ b/mindspore/core/ops/unstack.cc
@@ -33,9 +33,9 @@ AbstractBasePtr UnstackInfer(const abstract::AnalysisEnginePtr &, const Primitiv
   if (axis < 0) {
     axis = axis + dim;
   }
-  auto output_num = x_shape[axis];
+  auto output_num = x_shape[LongToSize(axis)];
   CheckAndConvertUtils::CheckInteger("output_num", output_num, kGreaterThan, 0, prim_name);
-  auto output_valid_check = x_shape[axis] - output_num;
+  auto output_valid_check = x_shape[LongToSize(axis)] - output_num;
   CheckAndConvertUtils::CheckInteger("The dimension which to unstack divides output_num", output_valid_check, kEqual,
                                      0, prim_name);
   std::vector<int64_t> infer_shape(x_shape.begin(), x_shape.begin() + axis);
diff --git a/mindspore/core/ops/where.cc b/mindspore/core/ops/where.cc
index b34d2d529c0..08dca023a06 100644
--- a/mindspore/core/ops/where.cc
+++ b/mindspore/core/ops/where.cc
@@ -39,10 +39,10 @@ AbstractBasePtr WhereInfer(const abstract::AnalysisEnginePtr &, const PrimitiveP
   auto num1 = input_args[1]->BuildValue()->cast<tensor::TensorPtr>()->ElementsNum();
   auto input2_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[2]->BuildShape())[kShape];
   auto num2 = input_args[2]->BuildValue()->cast<tensor::TensorPtr>()->ElementsNum();
-  int64_t nummax = num > num1 ? num : (num1 > num2 ? num1 : num2);
-  int64_t axisout = 0;
-  int64_t temp = 0;
-  for (int64_t j = 0; j < (int64_t)input0_shape.size(); j++) {
+  size_t nummax = num > num1 ? num : (num1 > num2 ? num1 : num2);
+  size_t axisout = 0;
+  size_t temp = 0;
+  for (size_t j = 0; j < input0_shape.size(); j++) {
     if (input0_shape[j] == input1_shape[j] && input0_shape[j] != input2_shape[j]) {
       axisout = j;
       break;
@@ -56,7 +56,7 @@ AbstractBasePtr WhereInfer(const abstract::AnalysisEnginePtr &, const PrimitiveP
       break;
     }
     temp += 1;
-    if (temp == (int64_t)input0_shape.size()) {
+    if (temp == input0_shape.size()) {
       return std::make_shared<abstract::AbstractTensor>(input0_type, input0_shape);
     }
   }
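
Taken together, the hunks repeat two patterns: convert a signed int64_t index exactly once before subscripting a container, and force floating-point division before ceil(). Below is a minimal standalone sketch of both, not part of the patch: LongToSize here is a hand-rolled stand-in modeled on the helper of the same name in MindSpore's conversion utilities, and the assert-based check plus all values in main() are illustrative assumptions, not library code.

// Sketch only; LongToSize and the shape values are illustrative, not the
// MindSpore implementation.
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Convert a signed index to size_t once, at the subscript boundary, so the
// rest of the expression stays free of signed/unsigned mixing.
size_t LongToSize(int64_t v) {
  assert(v >= 0 && "a negative value cannot index a container");
  return static_cast<size_t>(v);
}

int main() {
  // Pattern 1 (unstack.cc, where.cc, space_to_batch.cc): normalize a possibly
  // negative axis in signed arithmetic, then convert once for indexing.
  std::vector<int64_t> x_shape = {4, 3, 32, 32};
  int64_t axis = -1;
  int64_t dim = static_cast<int64_t>(x_shape.size());
  if (axis < 0) {
    axis = axis + dim;
  }
  int64_t output_num = x_shape[LongToSize(axis)];

  // Pattern 2 (max_pool_fusion.cc): without static_cast<float>, in_h / stride_h
  // is integer division and the surrounding ceil() silently does nothing.
  int64_t in_h = 7, kernel_h = 3, stride_h = 2;
  int64_t valid_h = static_cast<int64_t>(std::ceil((in_h - (kernel_h - 1)) / static_cast<float>(stride_h)));
  int64_t same_h = static_cast<int64_t>(std::ceil(in_h / static_cast<float>(stride_h)));

  std::cout << output_num << " " << valid_h << " " << same_h << std::endl;  // prints "32 3 4"
  return 0;
}

Routing every subscript through one named helper instead of scattering bare static_cast<size_t> keeps the non-negativity precondition checkable in a single place, which is presumably why the patch uses LongToSize throughout rather than inline casts.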