!17838 code check for master

Merge pull request !17838 from liuyang/code_check_master
i-robot 2021-06-07 15:06:32 +08:00 (committed by Gitee)
commit 6587b5e5a4
10 changed files with 22 additions and 21 deletions

View File

@@ -35,7 +35,7 @@ void Broadcast::set_group(const std::string &group) {
 }
 int64_t Broadcast::get_root_rank() const {
   auto value_ptr = this->GetAttr(kRootRank);
-  return GetValue<float>(value_ptr);
+  return GetValue<int64_t>(value_ptr);
 }
 std::string Broadcast::get_group() const {
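
Note: GetValue<T> must be instantiated with the exact type the attribute was stored as; kRootRank holds an int64_t, so reading it back as float was wrong. A minimal standalone analogy using std::any (an illustrative simplification, not MindSpore's actual Value class):

#include <any>
#include <cstdint>
#include <iostream>

int main() {
  std::any attr = int64_t{0};  // attribute stored as int64_t
  try {
    (void)std::any_cast<float>(attr);  // wrong type, like GetValue<float>
  } catch (const std::bad_any_cast &) {
    std::cout << "reading an int64_t attribute as float fails" << std::endl;
  }
  std::cout << std::any_cast<int64_t>(attr) << std::endl;  // matching type succeeds
  return 0;
}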

View File

@@ -57,7 +57,7 @@ AbstractBasePtr FakeQuantWithMinMaxVarsPerChannelInfer(const abstract::AnalysisE
   auto max_type = input_args[2]->BuildType();
   std::vector<std::string> type_name = {"x", "min", "max"};
   std::vector<TypePtr> type = {x_type, min_type, max_type};
-  for (int64_t i = 0; i < 3; i++) {
+  for (size_t i = 0; i < 3; i++) {
     (void)CheckAndConvertUtils::CheckTensorTypeValid(type_name[i], type[i], {kFloat16, kFloat32}, op_name);
   }
   auto tensor_type = x_type->cast<TensorTypePtr>();
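
Note: switching the loop counter to size_t matches the type std::vector uses for sizes and indexing, so no implicit signed/unsigned conversion occurs. A minimal sketch of the pattern (hypothetical values; compile with -Wall -Wextra to confirm no sign-compare warning):

#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> type_name = {"x", "min", "max"};
  // size_t is the type of type_name.size() and of operator[], so the
  // comparison and the indexing involve no signed/unsigned conversion.
  for (size_t i = 0; i < type_name.size(); i++) {
    std::cout << type_name[i] << std::endl;
  }
  return 0;
}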

View File

@@ -74,11 +74,11 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A
   int64_t out_h = -1;
   int64_t out_w = -1;
   if (pad_mode == VALID) {
-    out_h = ceil((in_h - (kernel_h - 1)) / stride_h);
-    out_w = ceil((in_w - (kernel_w - 1)) / stride_w);
+    out_h = static_cast<int64_t>(ceil((in_h - (kernel_h - 1)) / static_cast<float>(stride_h)));
+    out_w = static_cast<int64_t>(ceil((in_w - (kernel_w - 1)) / static_cast<float>(stride_w)));
   } else if (pad_mode == SAME) {
-    out_h = ceil(in_h / stride_h);
-    out_w = ceil(in_w / stride_w);
+    out_h = static_cast<int64_t>(ceil(in_h / static_cast<float>(stride_h)));
+    out_w = static_cast<int64_t>(ceil(in_w / static_cast<float>(stride_w)));
   }
   std::vector<int64_t> out_shape = {batch, channel, out_h, out_w};
   if (format == NHWC) {
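
Note: the old code divided in integer arithmetic, so the quotient was already truncated before ceil ever saw it; casting one operand to float first is what makes the ceil take effect. A self-contained demonstration with made-up sizes (not the operator's actual inputs):

#include <cmath>
#include <cstdint>
#include <iostream>

int main() {
  int64_t in_h = 7, stride_h = 2;
  // Integer division truncates first: 7 / 2 == 3, and ceil(3) is still 3.
  auto truncated = static_cast<int64_t>(std::ceil(in_h / stride_h));
  // Casting first keeps the fraction: 7 / 2.0f == 3.5, and ceil rounds to 4.
  auto rounded_up = static_cast<int64_t>(std::ceil(in_h / static_cast<float>(stride_h)));
  std::cout << truncated << " vs " << rounded_up << std::endl;  // prints "3 vs 4"
  return 0;
}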

View File

@@ -19,8 +19,8 @@
 namespace mindspore {
 namespace ops {
-std::vector<int64_t> PoolGrad::_grad_check_vector(std::string arg_name, std::vector<int64_t> arg_val,
-                                                  std::string op_name) {
+std::vector<int64_t> PoolGrad::_grad_check_vector(const std::string &arg_name, std::vector<int64_t> arg_val,
+                                                  const std::string &op_name) {
   std::vector<int64_t> ret;
   std::string error_msg = "For '" + op_name + "'" + " the '" + arg_name +
                           "' should be a vector of one or two or four "

View File

@@ -46,8 +46,8 @@ class PoolGrad : public PrimitiveC {
   std::vector<int64_t> get_strides() const;
   PadMode get_pad_mode() const;
   Format get_format() const;
-  std::vector<int64_t> _grad_check_vector(const std::string arg_name, const std::vector<int64_t> arg_val,
-                                          const std::string op_name);
+  std::vector<int64_t> _grad_check_vector(const std::string &arg_name, const std::vector<int64_t> arg_val,
+                                          const std::string &op_name);
 };
 }  // namespace ops
 }  // namespace mindspore
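
Note: the old declaration here and the old definition in the previous file already named the same function, because top-level const on a by-value parameter is not part of a function's signature; only the change to a const reference is semantically meaningful. A two-line illustration:

#include <string>

void f(const std::string s);  // declares the same function as the line below:
void f(std::string s) {}      // top-level const on a by-value parameter
                              // does not participate in the signature.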

View File

@@ -30,7 +30,7 @@ void SmoothL1LossGrad::set_beta(const float beta) { this->AddAttr(kBeta, MakeVal
 float SmoothL1LossGrad::get_beta() const {
   auto value_ptr = this->GetAttr(kBeta);
-  return GetValue<int64_t>(value_ptr);
+  return GetValue<int32_t>(value_ptr);
 }
 AbstractBasePtr SmoothL1LossGradInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,

View File

@@ -28,6 +28,7 @@ namespace mindspore {
 namespace ops {
 namespace {
 abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
+  MS_EXCEPTION_IF_NULL(primitive);
   for (const auto &item : input_args) {
     MS_EXCEPTION_IF_NULL(item);
   }
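
Note: MS_EXCEPTION_IF_NULL makes a null primitive fail fast with a descriptive exception instead of being dereferenced later. A stripped-down sketch of the guard pattern (a hypothetical CHECK_NULL macro, not MindSpore's actual definition):

#include <iostream>
#include <stdexcept>
#include <string>

// Hypothetical stand-in for MS_EXCEPTION_IF_NULL: report the offending
// expression by name instead of dereferencing a null pointer.
#define CHECK_NULL(ptr)                                            \
  do {                                                             \
    if ((ptr) == nullptr) {                                        \
      throw std::runtime_error(std::string(#ptr) + " is nullptr"); \
    }                                                              \
  } while (false)

int main() {
  int *primitive = nullptr;
  try {
    CHECK_NULL(primitive);
  } catch (const std::runtime_error &e) {
    std::cout << e.what() << std::endl;  // prints "primitive is nullptr"
  }
  return 0;
}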

View File

@@ -35,7 +35,7 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A
   auto block_shape_vector = GetValue<std::vector<int64_t>>(primitive->GetAttr(kBlockSize));
   auto paddings = GetValue<std::vector<std::vector<int64_t>>>(primitive->GetAttr(kPaddings));
   for (size_t i = 0; i < 2; i++) {
-    auto padded = output_shape[i + 2] + paddings[i][0] + paddings[i][1];
+    auto padded = LongToSize(output_shape[i + 2] + paddings[i][0] + paddings[i][1]);
     CheckAndConvertUtils::CheckInteger("padded shape", padded % block_shape_vector.size(), kEqual, 0, prim_name);
     output_shape[i + 2] = padded / block_shape_vector.size();
   }
@@ -58,8 +58,8 @@ void SpaceToBatch::set_paddings(const std::vector<std::vector<int64_t>> &padding
   int64_t w = paddings[0].size();
   std::vector<int64_t> temp_w = {2, 2};
   CheckAndConvertUtils::Check(kPaddings, {h, w}, kEqual, "paddings_shape(2,2)", temp_w, this->name());
-  for (int64_t i = 0; i < h; i++) {
-    for (int64_t j = 0; j < w; j++) {
+  for (size_t i = 0; i < LongToSize(h); i++) {
+    for (size_t j = 0; j < LongToSize(w); j++) {
       CheckAndConvertUtils::CheckInteger(kPadding, paddings[i][j], kGreaterEqual, 0, this->name());
     }
   }
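
Note: LongToSize converts a signed int64_t into the size_t that container sizes and indices use, keeping the surrounding arithmetic within one signedness. A plausible sketch of such a helper (an assumption about its behavior, not MindSpore's exact implementation):

#include <cstddef>
#include <cstdint>
#include <stdexcept>

// LongToSize-style helper: reject negative values instead of letting the
// cast silently wrap them to a huge unsigned number.
size_t LongToSize(int64_t v) {
  if (v < 0) {
    throw std::out_of_range("LongToSize: negative value");
  }
  return static_cast<size_t>(v);
}

int main() {
  size_t ok = LongToSize(42);  // fine: 42
  (void)ok;
  // LongToSize(-1) would throw instead of becoming SIZE_MAX via the cast.
  return 0;
}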

View File

@@ -33,9 +33,9 @@ AbstractBasePtr UnstackInfer(const abstract::AnalysisEnginePtr &, const Primitiv
   if (axis < 0) {
     axis = axis + dim;
   }
-  auto output_num = x_shape[axis];
+  auto output_num = x_shape[LongToSize(axis)];
   CheckAndConvertUtils::CheckInteger("output_num", output_num, kGreaterThan, 0, prim_name);
-  auto output_valid_check = x_shape[axis] - output_num;
+  auto output_valid_check = x_shape[LongToSize(axis)] - output_num;
   CheckAndConvertUtils::CheckInteger("The dimension which to unstack divides output_num", output_valid_check, kEqual, 0,
                                      prim_name);
   std::vector<int64_t> infer_shape(x_shape.begin(), x_shape.begin() + axis);

View File

@@ -39,10 +39,10 @@ AbstractBasePtr WhereInfer(const abstract::AnalysisEnginePtr &, const PrimitiveP
   auto num1 = input_args[1]->BuildValue()->cast<tensor::TensorPtr>()->ElementsNum();
   auto input2_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[2]->BuildShape())[kShape];
   auto num2 = input_args[2]->BuildValue()->cast<tensor::TensorPtr>()->ElementsNum();
-  int64_t nummax = num > num1 ? num : (num1 > num2 ? num1 : num2);
-  int64_t axisout = 0;
-  int64_t temp = 0;
-  for (int64_t j = 0; j < (int64_t)input0_shape.size(); j++) {
+  size_t nummax = num > num1 ? num : (num1 > num2 ? num1 : num2);
+  size_t axisout = 0;
+  size_t temp = 0;
+  for (size_t j = 0; j < input0_shape.size(); j++) {
     if (input0_shape[j] == input1_shape[j] && input0_shape[j] != input2_shape[j]) {
       axisout = j;
       break;
@@ -56,7 +56,7 @@ AbstractBasePtr WhereInfer(const abstract::AnalysisEnginePtr &, const PrimitiveP
       break;
     }
     temp += 1;
-    if (temp == (int64_t)input0_shape.size()) {
+    if (temp == input0_shape.size()) {
       return std::make_shared<abstract::AbstractTensor>(input0_type, input0_shape);
     }
   }