!23483 pclint warning clean

Merge pull request !23483 from liutongtong9/pclint_ma
This commit is contained in:
i-robot 2021-09-22 02:19:33 +00:00 committed by Gitee
commit 6b09551408
14 changed files with 21 additions and 31 deletions

View File

@@ -44,7 +44,7 @@ void BatchNorm::set_format(const Format &format) {
}
void BatchNorm::set_momentum(const float momentun) {
CheckAndConvertUtils::CheckInRange<int64_t>(kMomentum, SizeToLong(momentun), kIncludeBoth, {0.0, 1.0}, this->name());
CheckAndConvertUtils::CheckInRange<float>(kMomentum, momentun, kIncludeBoth, {0.0, 1.0}, this->name());
(void)this->AddAttr(kMomentum, MakeValue(momentun));
}

View File

@@ -49,7 +49,7 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A
MS_EXCEPTION(ValueError) << prim_name << " input_x dimension 0 " << out_shape[0]
<< " should be divisible by block_shape_prod " << block_shape_prod;
}
out_shape[0] = int64_t(floor(out_shape[0] / block_shape_prod));
out_shape[0] = int64_t(floor(out_shape[0] / static_cast<float>(block_shape_prod)));
return std::make_shared<abstract::Shape>(out_shape);
}

View File

@@ -40,9 +40,9 @@ int64_t CheckInputsAndGetShape(const AbstractBasePtr &input_arg, const string &p
if (max_shape.empty()) {
MS_LOG(EXCEPTION) << prim_name << " input shape is dynamic, but max shape is empty.";
}
return max_shape[0];
return static_cast<size_t>(max_shape[0]);
}
return input_shape[0];
return static_cast<size_t>(input_shape[0]);
} else if (input_arg->isa<abstract::AbstractTuple>()) {
auto x_shape = dyn_cast<abstract::AbstractTuple>(input_arg);
auto x_shape_data = x_shape->elements();

View File

@@ -26,7 +26,7 @@
namespace mindspore {
namespace ops {
namespace {
abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
abstract::ShapePtr InferShape(const std::vector<AbstractBasePtr> &input_args) {
auto in_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->GetShapeTrack())[kShape];
return std::make_shared<abstract::Shape>(in_shape);
}
@@ -43,8 +43,7 @@ AbstractBasePtr FloorInfer(const abstract::AnalysisEnginePtr &, const PrimitiveP
MS_EXCEPTION_IF_NULL(primitive);
const int64_t input_num = 1;
CheckAndConvertUtils::CheckInputArgs(input_args, kEqual, input_num, primitive->name());
return std::make_shared<abstract::AbstractTensor>(InferType(primitive, input_args),
InferShape(primitive, input_args));
return std::make_shared<abstract::AbstractTensor>(InferType(primitive, input_args), InferShape(input_args));
}
REGISTER_PRIMITIVE_C(kNameFloor, Floor);
} // namespace ops

View File

@@ -96,7 +96,7 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A
return std::make_shared<abstract::Shape>(out_shape);
}
TypePtr InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
TypePtr InferType(const std::vector<AbstractBasePtr> &input_args) {
for (auto item : input_args) {
MS_EXCEPTION_IF_NULL(item);
}
@@ -106,8 +106,7 @@ TypePtr InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &
AbstractBasePtr AvgPoolFusionInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) {
return std::make_shared<abstract::AbstractTensor>(InferType(primitive, input_args),
InferShape(primitive, input_args));
return std::make_shared<abstract::AbstractTensor>(InferType(input_args), InferShape(primitive, input_args));
}
REGISTER_PRIMITIVE_C(kNameAvgPoolFusion, AvgPoolFusion);
} // namespace ops

View File

@@ -56,8 +56,7 @@ bool IsDynamic(const std::vector<ShapeVector> &shape) {
return false;
}
abstract::AbstractBasePtr GetnextInferShape(const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) {
abstract::AbstractBasePtr GetnextInferShape(const PrimitivePtr &primitive) {
MS_EXCEPTION_IF_NULL(primitive);
auto types = GetValue<std::vector<TypePtr>>(primitive->GetAttr("types"));
ValuePtr shape_attr = primitive->GetAttr("shapes");
@@ -89,7 +88,7 @@ abstract::AbstractBasePtr GetnextInferShape(const PrimitivePtr &primitive,
AbstractBasePtr GetNextInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) {
return GetnextInferShape(primitive, input_args);
return GetnextInferShape(primitive);
}
REGISTER_PRIMITIVE_EVAL_IMPL(GetNext, prim::kPrimGetNext, GetNextInfer, nullptr, true);
} // namespace ops

View File

@@ -19,8 +19,7 @@
namespace mindspore {
namespace ops {
AbstractBasePtr MaxPoolGradInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) {
AbstractBasePtr MaxPoolGradInfer(const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(input_args[0]->BuildValue());
auto x1_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape];
auto tensor_type = input_args[0]->BuildType()->cast<TensorTypePtr>();

View File

@@ -35,8 +35,7 @@ class MS_CORE_API MaxPoolGrad : public PoolGrad {
MS_DECLARE_PARENT(MaxPoolGrad, PoolGrad);
};
AbstractBasePtr MaxPoolGradInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args);
AbstractBasePtr MaxPoolGradInfer(const std::vector<AbstractBasePtr> &input_args);
using PrimMaxPoolGradPtr = std::shared_ptr<MaxPoolGrad>;
} // namespace ops
} // namespace mindspore

View File

@@ -111,8 +111,8 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A
out_w = static_cast<int64_t>(ceil((in_w - (kernel_w - 1)) + static_cast<float>(stride_w) - 1) /
static_cast<float>(stride_w));
} else if (pad_mode == SAME) {
out_h = static_cast<int64_t>(ceil(in_h / static_cast<int64_t>(stride_h)));
out_w = static_cast<int64_t>(ceil(in_w / static_cast<int64_t>(stride_w)));
out_h = static_cast<int64_t>(ceil(in_h / static_cast<float>(stride_h)));
out_w = static_cast<int64_t>(ceil(in_w / static_cast<float>(stride_w)));
}
std::vector<int64_t> out_shape = {batch, channel, out_h, out_w};
if (format == NHWC) {

View File

@@ -29,8 +29,7 @@ int64_t NonMaxSuppression::get_center_point_box() const {
}
void NonMaxSuppression::Init(const int64_t center_point_box) { this->set_center_point_box(center_point_box); }
AbstractBasePtr NonMaxSuppressionInfer(const abstract::AnalysisEnginePtr &,
const std::vector<AbstractBasePtr> &input_args) {
AbstractBasePtr NonMaxSuppressionInfer(const abstract::AnalysisEnginePtr &) {
MS_LOG(INFO) << "NonMaxSuppression infer shape in runtime.";
return std::make_shared<abstract::AbstractTensor>(kInt32, std::vector<int64_t>{});
}

View File

@@ -40,8 +40,7 @@ class MS_CORE_API NonMaxSuppression : public PrimitiveC {
void set_center_point_box(const int64_t center_point_box);
int64_t get_center_point_box() const;
};
AbstractBasePtr NonMaxSuppressionInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args);
AbstractBasePtr NonMaxSuppressionInfer(const abstract::AnalysisEnginePtr &);
using PrimNonMaxSuppressionPtr = std::shared_ptr<NonMaxSuppression>;
} // namespace ops
} // namespace mindspore

View File

@@ -22,7 +22,7 @@
namespace mindspore {
namespace ops {
namespace {
abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
abstract::ShapePtr InferShape(const std::vector<AbstractBasePtr> &input_args) {
auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape];
return std::make_shared<abstract::Shape>(x_shape);
}
@@ -36,8 +36,7 @@ TypePtr InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &
AbstractBasePtr RoundInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) {
return std::make_shared<abstract::AbstractTensor>(InferType(primitive, input_args),
InferShape(primitive, input_args)->shape());
return std::make_shared<abstract::AbstractTensor>(InferType(primitive, input_args), InferShape(input_args)->shape());
}
REGISTER_PRIMITIVE_C(kNameRound, Round);
} // namespace ops

View File

@@ -44,7 +44,7 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A
MS_EXCEPTION(ValueError) << prim_name << " padded[" << i << "]" << padded << "should be divisible by block_shape["
<< i << "]" << block_shape[i];
}
out_shape[i + offset] = int64_t(floor(padded / block_shape[i]));
out_shape[i + offset] = int64_t(floor(padded / static_cast<float>(block_shape[i])));
block_shape_prod = block_shape_prod * block_shape[i];
}
out_shape[0] = out_shape[0] * block_shape_prod;

View File

@@ -40,9 +40,7 @@ abstract::ShapePtr TensorListFromTensorInferShape(const PrimitivePtr &primitive,
return std::make_shared<abstract::Shape>(infer_shape);
}
TypePtr TensorListFromTensorInferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
return kTensorType;
}
TypePtr TensorListFromTensorInferType() { return kTensorType; }
} // namespace
void TensorListFromTensor::Init(const int64_t element_dtype, const int64_t shape_type) {
@@ -70,7 +68,7 @@ void TensorListFromTensor::set_shape_type(const int64_t shape_type) {
AbstractBasePtr TensorListFromTensorInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) {
return std::make_shared<abstract::AbstractTensor>(TensorListFromTensorInferType(primitive, input_args),
return std::make_shared<abstract::AbstractTensor>(TensorListFromTensorInferType(),
TensorListFromTensorInferShape(primitive, input_args)->shape());
}
REGISTER_PRIMITIVE_C(kNameTensorListFromTensor, TensorListFromTensor);