!43271 fixing static code warnings

Merge pull request !43271 from Wangsong95/9_30_zx_clean
i-robot 2022-10-10 02:46:12 +00:00 committed by Gitee
commit 2ea5414a93
10 changed files with 52 additions and 48 deletions

View File

@@ -173,10 +173,10 @@ int EmbeddingLookUpCpuKernelMod::Resize(const BaseOperatorPtr &base_operator,
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the dimension of input must be 1-"
<< kEmbeddingLookUpInputParamsMaxDim << "D, but got " << input_params_shape.size() << "D.";
}
- first_dim_size_ = input_params_shape[0];
+ first_dim_size_ = LongToSize(input_params_shape[0]);
outer_dim_size_ = 1;
for (size_t i = 1; i < input_params_shape.size(); ++i) {
- outer_dim_size_ *= input_params_shape[i];
+ outer_dim_size_ *= LongToSize(input_params_shape[i]);
}
input_params_dtype_ = inputs[kIndex0]->GetDtype();
@@ -194,11 +194,11 @@ int EmbeddingLookUpCpuKernelMod::Resize(const BaseOperatorPtr &base_operator,
template <typename T, typename S, typename G>
bool EmbeddingLookUpCpuKernelMod::LaunchKernel(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
const std::vector<AddressPtr> &outputs) {
- T *input_params_addr = reinterpret_cast<T *>(inputs[0]->addr);
- S *input_indices_addr = reinterpret_cast<S *>(inputs[1]->addr);
- T *output_addr = reinterpret_cast<T *>(outputs[0]->addr);
+ T *input_params_addr = static_cast<T *>(inputs[0]->addr);
+ S *input_indices_addr = static_cast<S *>(inputs[1]->addr);
+ T *output_addr = static_cast<T *>(outputs[0]->addr);
if (inputs.size() == kEmbeddingLookupDynamicShapeInputsNum) {
- G *input_offset_addr = reinterpret_cast<G *>(inputs[2]->addr);
+ G *input_offset_addr = static_cast<G *>(inputs[2]->addr);
memcpy(&offset_, input_offset_addr, sizeof(G));
}
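Note: `inputs[i]->addr` is a `void *`, and `static_cast` is the idiomatic way to convert `void *` to a typed pointer; `reinterpret_cast` is only needed between unrelated object-pointer types, and flagging it is a common static-analysis rule. `LongToSize` likewise turns the signed `int64_t` shape dimensions into `size_t` loop bounds. A minimal sketch of both idioms, with an illustrative checked-conversion helper (not MindSpore's actual implementation):

#include <cstddef>
#include <cstdint>
#include <stdexcept>

// Illustrative checked narrowing; the real LongToSize may differ in detail.
inline size_t LongToSizeDemo(int64_t v) {
  if (v < 0) {
    throw std::out_of_range("negative value cannot be converted to size_t");
  }
  return static_cast<size_t>(v);
}

void LaunchDemo(void *raw_in, void *raw_out, int64_t dim0) {
  // static_cast is sufficient (and preferred) for void* -> T* conversions.
  auto *in = static_cast<float *>(raw_in);
  auto *out = static_cast<float *>(raw_out);
  const size_t n = LongToSizeDemo(dim0);  // signed shape value -> unsigned loop bound
  for (size_t i = 0; i < n; ++i) {
    out[i] = in[i];
  }
}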

View File

@@ -31,9 +31,9 @@ bool HSigmoidCpuKernelMod::LaunchKernel(const std::vector<kernel::AddressPtr> &i
const std::vector<kernel::AddressPtr> &outputs) {
CHECK_KERNEL_INPUTS_NUM(inputs.size(), kHSigmoidInputsNum, kernel_name_);
CHECK_KERNEL_OUTPUTS_NUM(outputs.size(), kHSigmoidOutputsNum, kernel_name_);
- T *x = reinterpret_cast<T *>(inputs[kIndex0]->addr);
+ T *x = static_cast<T *>(inputs[kIndex0]->addr);
MS_ERROR_IF_NULL_W_RET_VAL(x, false);
- T *y = reinterpret_cast<T *>(outputs[kIndex0]->addr);
+ T *y = static_cast<T *>(outputs[kIndex0]->addr);
MS_ERROR_IF_NULL_W_RET_VAL(y, false);
auto zero = static_cast<T>(0);
auto one = static_cast<T>(1);
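Note: the `zero`/`one` constants after the casts are consistent with the usual hard-sigmoid definition, hsigmoid(x) = max(0, min(1, x/6 + 1/2)). Assuming the kernel applies that element-wise (the full loop is not shown in this hunk), a self-contained sketch:

#include <algorithm>
#include <cstddef>

// Element-wise hard sigmoid: clamp(x / 6 + 0.5, 0, 1).
template <typename T>
void HSigmoidDemo(const T *x, T *y, size_t n) {
  const T zero = static_cast<T>(0);
  const T one = static_cast<T>(1);
  for (size_t i = 0; i < n; ++i) {
    const T v = x[i] / static_cast<T>(6) + static_cast<T>(0.5);
    y[i] = std::max(zero, std::min(one, v));
  }
}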

View File

@@ -110,8 +110,8 @@ bool LinSpaceCpuKernelMod::LaunchVmapKernel(const std::vector<AddressPtr> &input
auto stops = reinterpret_cast<T *>(inputs[kIndex1]->addr);
const int64_t num = *reinterpret_cast<int64_t *>(inputs[kIndex2]->addr);
- auto steps = reinterpret_cast<T *>(workspace[kIndex0]->addr);
- auto output = reinterpret_cast<T *>(outputs[kIndex0]->addr);
+ auto steps = static_cast<T *>(workspace[kIndex0]->addr);
+ auto output = static_cast<T *>(outputs[kIndex0]->addr);
for (int64_t i = 0; i < batch_num_; ++i) {
steps[i] = ((stops[i] - starts[i]) / (num - 1));
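Note: the last visible line is the standard linspace step, (stop - start) / (num - 1), computed once per vmapped batch. A standalone sketch of the same computation for a single batch:

#include <cstdint>
#include <vector>

// num evenly spaced values from start to stop inclusive (num >= 2).
std::vector<float> LinspaceDemo(float start, float stop, int64_t num) {
  std::vector<float> out(static_cast<size_t>(num));
  const float step = (stop - start) / static_cast<float>(num - 1);
  for (int64_t i = 0; i < num; ++i) {
    out[static_cast<size_t>(i)] = start + static_cast<float>(i) * step;
  }
  return out;
}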

View File

@@ -249,7 +249,7 @@ void ScaleAndTranslateCpuKernelMod::ComputeSpansCore(const Kernel &kernel, const
spans->weights = std::make_shared<Eigen::Tensor<float, dim1>>(spans->span_size * output_size);
Eigen::TensorMap<Eigen::Tensor<int64_t, dim1>> starts_vec(spans->starts->data(), spans->starts->dimensions());
Eigen::TensorMap<Eigen::Tensor<float, dim1>> weights_vec(spans->weights->data(), spans->weights->dimensions());
- weights_vec.setZero();
+ (void)weights_vec.setZero();
const float one_over_kernel_scale = 1.0f / kernel_scale;
int64_t max_span_size = 0;
std::vector<float> temp_weights;
@@ -308,7 +308,7 @@ void ScaleAndTranslateGradCpuKernelMod::ComputeGradSpansCore(const Spans *spans,
int64_t input_index = starts_vec(output_index);
for (int64_t j = 0; j < spans->span_size; ++j, ++input_index) {
const float weight = weights_vec(output_index * spans->span_size + j);
- if (weight != 0.0f && input_index < forward_input_size) {
+ if (weight != static_cast<float>(0.0f) && input_index < forward_input_size) {
grad_components[input_index].push_back(GradComponent{output_index, weight});
}
}
@@ -331,7 +331,7 @@ void ScaleAndTranslateGradCpuKernelMod::ComputeGradSpansCore(const Spans *spans,
grad_spans->starts->dimensions());
Eigen::TensorMap<Eigen::Tensor<float, dim1>> grad_weights_vec(grad_spans->weights->data(),
grad_spans->weights->dimensions());
- grad_weights_vec.setZero();
+ (void)grad_weights_vec.setZero();
auto shard_grad_input = [&grad_components, &grad_starts_vec, &grad_weights_vec, &grad_spans](int64_t start,
int64_t end) {
for (int64_t input_index = start; input_index < end; ++input_index) {
@@ -449,12 +449,12 @@ bool ScaleAndTranslateCpuKernelMod::LaunchKernel(const std::vector<kernel::Addre
typename TTypes<float, dim4>::Tensor output_data(outputTensor.tensor<float, dim4>());
Spans col_spans;
- ComputeSpans(sampling_kernel_type, output_width, input_width, col_scale, col_translation, antialias_, &col_spans,
- kernel_name_);
+ (void)ComputeSpans(sampling_kernel_type, output_width, input_width, col_scale, col_translation, antialias_,
+ &col_spans, kernel_name_);
Spans row_spans;
- ComputeSpans(sampling_kernel_type, output_height, input_height, row_scale, row_translation, antialias_, &row_spans,
- kernel_name_);
+ (void)ComputeSpans(sampling_kernel_type, output_height, input_height, row_scale, row_translation, antialias_,
+ &row_spans, kernel_name_);
Eigen::Tensor<float, dim4> intermediate_tensor_middle(batch_size, output_height, input_width, channels);
Eigen::TensorMap<Eigen::Tensor<float, dim4>> intermediate_data(intermediate_tensor_middle.data(),
@@ -501,11 +501,11 @@ bool ScaleAndTranslateGradCpuKernelMod::LaunchKernel(const std::vector<kernel::A
const int64_t forward_output_width = input_grad.dimension(2);
Spans col_spans;
- ComputeGradSpans(sampling_kernel_type, forward_output_width, forward_input_width, col_scale, col_translation,
- antialias_, &col_spans, kernel_name_);
+ (void)ComputeGradSpans(sampling_kernel_type, forward_output_width, forward_input_width, col_scale, col_translation,
+ antialias_, &col_spans, kernel_name_);
Spans row_spans;
- ComputeGradSpans(sampling_kernel_type, forward_output_height, forward_input_height, row_scale, row_translation,
- antialias_, &row_spans, kernel_name_);
+ (void)ComputeGradSpans(sampling_kernel_type, forward_output_height, forward_input_height, row_scale, row_translation,
+ antialias_, &row_spans, kernel_name_);
Eigen::Tensor<float, dim4> intermediate_tensor_middle(batch_size, forward_input_height, forward_output_width,
channels);
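Note: most edits in this file only prepend `(void)` to calls whose results are deliberately ignored (Eigen's `setZero` returns a reference to the expression; the `ComputeSpans`/`ComputeGradSpans` calls presumably return a status). The explicit discard silences unused-result diagnostics without changing behavior; a minimal illustration of the idiom:

// A function whose status the caller may deliberately ignore.
[[nodiscard]] bool ComputeSomethingDemo(int *out) {
  *out = 42;
  return true;
}

void CallerDemo() {
  int value = 0;
  // Casting to void documents the intentional discard and suppresses
  // unused-result warnings from compilers and static analyzers.
  (void)ComputeSomethingDemo(&value);
}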

View File

@@ -17,7 +17,6 @@
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_UTILS_SAMPLING_KERNELS_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_UTILS_SAMPLING_KERNELS_H_
- #include <stdio.h>
#include <cmath>
#include <string>
@@ -76,7 +75,7 @@ struct ComputerBoxKernel {
input = std::abs(input);
if (input < 0.5f) {
result = kRValue1;
- } else if (input == 0.5f) {
+ } else if (input == static_cast<float>(0.5f)) {
result = 0.5f;
} else {
result = kRValue0;
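Note: the added `static_cast<float>(0.5f)` is a no-op at runtime (the literal is already a `float`); it exists only to satisfy a checker rule about floating-point equality comparisons. The surrounding function reads as a box sampling kernel; a sketch of the same logic under that reading:

#include <cmath>

// Box kernel: 1 inside the window, 0.5 exactly on the boundary, 0 outside.
inline float BoxKernelDemo(float input) {
  input = std::abs(input);
  if (input < 0.5f) {
    return 1.0f;
  }
  if (input == 0.5f) {  // 0.5 is exactly representable, so == is well defined here
    return 0.5f;
  }
  return 0.0f;
}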

View File

@@ -32,29 +32,29 @@ abstract::ShapePtr CompareAndBitpackInferShape(const PrimitivePtr &primitive,
auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kInputIndex0]->BuildShape())[kShape];
auto threshold_shape =
CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kInputIndex1]->BuildShape())[kShape];
- auto x_rank = SizeToLong(x_shape.size());
+ auto x_rank = x_shape.size();
// threshold must be a scalar tensor
const size_t kShapeSize_ = 0;
const size_t divisible_num = 8;
auto threshold_shape_size = threshold_shape.size();
- (void)CheckAndConvertUtils::CheckInteger("threshold's rank'", threshold_shape_size, kEqual, kShapeSize_,
+ (void)CheckAndConvertUtils::CheckInteger("threshold's rank'", SizeToLong(threshold_shape_size), kEqual, kShapeSize_,
primitive->name());
// Input should be at least a vector
- (void)CheckAndConvertUtils::CheckInteger("x's rank'", x_rank, kNotEqual, kShapeSize_, primitive->name());
+ (void)CheckAndConvertUtils::CheckInteger("x's rank'", SizeToLong(x_rank), kNotEqual, kShapeSize_, primitive->name());
// check the innermost dimension of `x`'s shape is disvisible by 8.
if (x_shape[x_rank - 1] != -1) {
- (void)CheckAndConvertUtils::Check("x innermost dimension % 8", x_shape[x_rank - 1] % divisible_num, kEqual, 0,
- primitive->name());
+ (void)CheckAndConvertUtils::Check("x innermost dimension % 8", x_shape[x_rank - 1] % SizeToLong(divisible_num),
+ kEqual, 0, primitive->name());
}
std::vector<int64_t> out_shape;
- for (int dim = 0; dim < x_rank - 1; dim = dim + 1) {
- (void)out_shape.emplace_back(x_shape[dim]);
+ for (int dim = 0; dim < SizeToLong(x_rank - 1); dim = dim + 1) {
+ (void)out_shape.emplace_back(x_shape[IntToSize(dim)]);
}
- (void)out_shape.emplace_back(x_shape[x_rank - 1] / divisible_num);
+ (void)out_shape.emplace_back(x_shape[x_rank - 1] / SizeToLong(divisible_num));
auto return_shape = out_shape;
return std::make_shared<abstract::Shape>(return_shape);
}
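Note: the shape arithmetic above encodes CompareAndBitpack's contract: each run of 8 values along the innermost axis is packed into one byte, so the output keeps the leading dimensions and divides the last one by 8. A standalone sketch of that rule:

#include <cstdint>
#include <stdexcept>
#include <vector>

// [d0, ..., dk] -> [d0, ..., dk / 8], requiring dk % 8 == 0.
std::vector<int64_t> BitpackOutShapeDemo(const std::vector<int64_t> &x_shape) {
  constexpr int64_t kDivisor = 8;
  if (x_shape.empty() || x_shape.back() % kDivisor != 0) {
    throw std::invalid_argument("innermost dimension must be divisible by 8");
  }
  std::vector<int64_t> out(x_shape.begin(), x_shape.end() - 1);
  out.push_back(x_shape.back() / kDivisor);
  return out;
}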

View File

@@ -67,12 +67,13 @@ class EmbeddingLookupInfer : public abstract::OpInferBase {
MS_EXCEPTION_IF_NULL(params_shape_ptr);
auto params_shape = params_shape_ptr->shape();
constexpr int64_t kEmbeddingLookupInputParamsMaxDim = 2;
- CheckAndConvertUtils::CheckInRange<int64_t>("dimension of params", SizeToLong(params_shape.size()), kIncludeBoth,
- {1, kEmbeddingLookupInputParamsMaxDim}, op_name);
+ (void)CheckAndConvertUtils::CheckInRange<int64_t>("dimension of params", SizeToLong(params_shape.size()),
+ kIncludeBoth, {1, kEmbeddingLookupInputParamsMaxDim}, op_name);
auto indices_shape_ptr = CheckAndConvertUtils::GetTensorInputShape(op_name, input_args, kInputIndex1);
MS_EXCEPTION_IF_NULL(indices_shape_ptr);
auto indices_shape = indices_shape_ptr->shape();
- CheckAndConvertUtils::CheckValue<int64_t>("dimension of indices ", indices_shape.size(), kGreaterThan, 0, op_name);
+ (void)CheckAndConvertUtils::CheckValue<int64_t>("dimension of indices ", SizeToLong(indices_shape.size()),
+ kGreaterThan, 0, op_name);
ShapeVector out_shape;
if (!params_shape_ptr->IsDimUnknown() && !indices_shape_ptr->IsDimUnknown()) {
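Note: the `out_shape` this infer method goes on to build lies past the end of the hunk; assuming it follows the usual gather/embedding rule, the output shape is the indices shape followed by params' trailing dimensions. A hedged sketch of that rule (an assumption about the elided code, not a quote of it):

#include <cstdint>
#include <vector>

// Common embedding-lookup shape rule: output = indices_shape ++ params_shape[1:].
// params' rank is already checked to be in [1, 2] above, so begin() + 1 is valid.
std::vector<int64_t> LookupOutShapeDemo(const std::vector<int64_t> &params_shape,
                                        const std::vector<int64_t> &indices_shape) {
  std::vector<int64_t> out(indices_shape);
  out.insert(out.end(), params_shape.begin() + 1, params_shape.end());
  return out;
}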

View File

@@ -41,16 +41,18 @@ abstract::ShapePtr ScaleAndTranslateGradInferShape(const PrimitivePtr &primitive
const int64_t kShapeSize2 = 4;
const int64_t kElementsNumber = 2;
// check grads rank'4
- (void)CheckAndConvertUtils::CheckInteger("grads's rank'", grads_shape.size(), kEqual, kShapeSize2, prim_name);
- // check original_image's rank 4
- (void)CheckAndConvertUtils::CheckInteger("original_image's rank'", original_image_shape.size(), kEqual, kShapeSize2,
+ (void)CheckAndConvertUtils::CheckInteger("grads's rank'", SizeToLong(grads_shape.size()), kEqual, kShapeSize2,
prim_name);
+ // check original_image's rank 4
+ (void)CheckAndConvertUtils::CheckInteger("original_image's rank'", SizeToLong(original_image_shape.size()), kEqual,
+ kShapeSize2, prim_name);
// check scale' rank must be 1, must have 2 elements
- (void)CheckAndConvertUtils::CheckInteger("scale's rank'", scale_shape.size(), kEqual, kShapeSize1, prim_name);
+ (void)CheckAndConvertUtils::CheckInteger("scale's rank'", SizeToLong(scale_shape.size()), kEqual, kShapeSize1,
+ prim_name);
(void)CheckAndConvertUtils::CheckInteger("scale's elements'", scale_shape[0], kEqual, kElementsNumber, prim_name);
// check translation' rank must be 1, must have 2 elements
- (void)CheckAndConvertUtils::CheckInteger("translation's rank'", translation_shape.size(), kEqual, kShapeSize1,
- prim_name);
+ (void)CheckAndConvertUtils::CheckInteger("translation's rank'", SizeToLong(translation_shape.size()), kEqual,
+ kShapeSize1, prim_name);
(void)CheckAndConvertUtils::CheckInteger("translation's elements'", translation_shape[0], kEqual, kElementsNumber,
prim_name);
// infer output shape

View File

@@ -30,8 +30,8 @@ namespace {
#define IsNoneOrAnyValue(value_ptr) ((value_ptr->isa<None>()) || (value_ptr->isa<AnyValue>()))
TypePtr LinSpaceInferType(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
auto prim_name = primitive->name();
- CheckAndConvertUtils::CheckArgs<abstract::AbstractTensor>(prim_name, input_args, kInputIndex0);
- CheckAndConvertUtils::CheckArgs<abstract::AbstractTensor>(prim_name, input_args, kInputIndex1);
+ (void)CheckAndConvertUtils::CheckArgs<abstract::AbstractTensor>(prim_name, input_args, kInputIndex0);
+ (void)CheckAndConvertUtils::CheckArgs<abstract::AbstractTensor>(prim_name, input_args, kInputIndex1);
auto start_dtype = input_args[kInputIndex0]->BuildType();
auto stop_dtype = input_args[kInputIndex1]->BuildType();

View File

@@ -44,16 +44,18 @@ abstract::ShapePtr ScaleAndTranslateInferShape(const PrimitivePtr &primitive,
auto scale_shape_size = scale_shape.size();
auto translation_shape_size = translation_shape.size();
// check images' rank must be 4
- (void)CheckAndConvertUtils::CheckInteger("images's rank'", images_shape_size, kEqual, kImagesShapeSize, prim_name);
+ (void)CheckAndConvertUtils::CheckInteger("images's rank'", SizeToLong(images_shape_size), kEqual, kImagesShapeSize,
+ prim_name);
// check size' rank must be 1, must have 2 elements
- (void)CheckAndConvertUtils::CheckInteger("size's rank'", size_shape_size, kEqual, kShapeSize, prim_name);
+ (void)CheckAndConvertUtils::CheckInteger("size's rank'", SizeToLong(size_shape_size), kEqual, kShapeSize, prim_name);
(void)CheckAndConvertUtils::CheckInteger("size's elements'", size_shape[0], kEqual, kElementsNumber, prim_name);
// check scale' rank must be 1, must have 2 elements
- (void)CheckAndConvertUtils::CheckInteger("scale's rank'", scale_shape_size, kEqual, kShapeSize, prim_name);
+ (void)CheckAndConvertUtils::CheckInteger("scale's rank'", SizeToLong(scale_shape_size), kEqual, kShapeSize,
+ prim_name);
(void)CheckAndConvertUtils::CheckInteger("scale's elements'", scale_shape[0], kEqual, kElementsNumber, prim_name);
// check translation' rank must be 1, must have 2 elements
- (void)CheckAndConvertUtils::CheckInteger("translation's rank'", translation_shape_size, kEqual, kShapeSize,
- prim_name);
+ (void)CheckAndConvertUtils::CheckInteger("translation's rank'", SizeToLong(translation_shape_size), kEqual,
+ kShapeSize, prim_name);
(void)CheckAndConvertUtils::CheckInteger("translation's elements'", translation_shape[0], kEqual, kElementsNumber,
prim_name);
// check scale greater than zero
@@ -72,7 +74,7 @@ abstract::ShapePtr ScaleAndTranslateInferShape(const PrimitivePtr &primitive,
auto scale_tensor = scale_v->cast<tensor::TensorPtr>();
MS_EXCEPTION_IF_NULL(scale_tensor);
size_t data_size = scale_tensor->DataSize();
- auto data_c = reinterpret_cast<float *>(scale_tensor->data_c());
+ auto data_c = static_cast<float *>(scale_tensor->data_c());
MS_EXCEPTION_IF_NULL(data_c);
for (size_t i = 0; i < data_size; i++) {
scale_value.push_back(static_cast<float>(*data_c));