diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/maxpool_grad_grad_cpu_kernel.cc b/mindspore/ccsrc/plugin/device/cpu/kernel/maxpool_grad_grad_cpu_kernel.cc
index c949998c2d4..36a5fb03d46 100644
--- a/mindspore/ccsrc/plugin/device/cpu/kernel/maxpool_grad_grad_cpu_kernel.cc
+++ b/mindspore/ccsrc/plugin/device/cpu/kernel/maxpool_grad_grad_cpu_kernel.cc
@@ -146,7 +146,7 @@ int MaxPoolGradGradCpuKernelMod::Resize(const BaseOperatorPtr &base_operator,
   param_->output_channel_ = LongToInt(out_shapes_[kDim1]);
   param_->output_h_ = LongToInt(out_shapes_[height_index_]);
   param_->output_w_ = LongToInt(out_shapes_[width_index_]);
-  output_elements_ = std::accumulate(out_shapes_.begin(), out_shapes_.end(), 1, std::multiplies());
+  output_elements_ = LongToSize(std::accumulate(out_shapes_.begin(), out_shapes_.end(), 1, std::multiplies()));
 
   if (dim_ == kMaxPool3DGradGradDim) {
     reinterpret_cast(param_)->input_d_ = LongToInt(in_shapes_[depth_index_]);
diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/base/gather_d_base.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/base/gather_d_base.c
index d7424aad751..da2454ec22c 100644
--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/base/gather_d_base.c
+++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/base/gather_d_base.c
@@ -82,14 +82,14 @@ int InitCalVec(size_t *in_strides, size_t *out_strides, size_t *pos, const size_
 
 #define COPY_TASK_IMPL(type0, type1) \
   int CopyTask_Input_##type0##_Index_##type1( \
-    type0 *output, const type0 *input, const type1 *index, size_t cur_dim, size_t *pos, const int dim, \
+    type0 *output, const type0 *input, const type1 *index, size_t cur_dim, size_t *pos, const size_t dim, \
     const size_t *output_shape, const size_t output_shape_size, const size_t *in_strides, const size_t *out_strides) { \
     if (pos == NULL || out_strides == NULL || in_strides == NULL) { \
       return NNACL_NULL_PTR; \
     } \
     for (size_t i = 0; i < output_shape[cur_dim]; ++i) { \
       pos[cur_dim] = i; \
-      if (cur_dim == (int)output_shape_size - 1) { \
+      if (cur_dim == output_shape_size - 1) { \
        size_t input_offset = 0; \
        size_t out_offset = 0; \
        for (size_t j = 0; j < output_shape_size; ++j) { \
diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/base/gather_d_base.h b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/base/gather_d_base.h
index 6e1d125c881..28b9490b97e 100644
--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/base/gather_d_base.h
+++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/base/gather_d_base.h
@@ -28,10 +28,10 @@ extern "C" {
     GatherD_Input_##type0##_Index_##type1(output, input, index, input_shape, input_shape_size, output_shape, \
                                           output_shape_size, dim)
 
-#define GATHER_D_IMPL_DECLARATION(type0, type1) \
-  int GatherD_Input_##type0##_Index_##type1(type0 *output, const type0 *input, type1 *index, \
-                                            const size_t *input_shape, const size_t input_shape_size, \
-                                            const size_t *output_shape, const size_t output_shape_size, const int dim)
+#define GATHER_D_IMPL_DECLARATION(type0, type1) \
+  int GatherD_Input_##type0##_Index_##type1( \
+    type0 *output, const type0 *input, type1 *index, const size_t *input_shape, const size_t input_shape_size, \
+    const size_t *output_shape, const size_t output_shape_size, const size_t dim)
 
 GATHER_D_IMPL_DECLARATION(bool, int32_t);
 GATHER_D_IMPL_DECLARATION(bool, int64_t);
diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/base/zeroslike_base.h b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/base/zeroslike_base.h
index 9ba0d904a42..b9edb75e1bb 100644
--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/base/zeroslike_base.h
+++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/base/zeroslike_base.h
@@ -22,7 +22,7 @@
 extern "C" {
 #endif
 
-static inline void ApproximateZerosLike(void *output, int data_size) {
+static inline void ApproximateZerosLike(void *output, size_t data_size) {
   (void)memset(output, 0, data_size);
   return;
 }
diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/fp32/invert_permutation_fp32.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/fp32/invert_permutation_fp32.c
index 39f435d7354..32e2b87ce52 100644
--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/fp32/invert_permutation_fp32.c
+++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/fp32/invert_permutation_fp32.c
@@ -18,11 +18,11 @@
 #include "nnacl/errorcode.h"
 #include "nnacl/op_base.h"
 
-int InvertPermutation(const int *input, int *output, int num) {
+int InvertPermutation(const int *input, int *output, size_t num) {
   NNACL_CHECK_NULL_RETURN_ERR(input);
   NNACL_CHECK_NULL_RETURN_ERR(output);
-  for (int i = 0; i < num; i++) {
-    int index = input[i];
+  for (size_t i = 0; i < num; i++) {
+    size_t index = (size_t)input[i];
     if (index >= num) {
       return NNACL_ERR;
     }
diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/fp32/invert_permutation_fp32.h b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/fp32/invert_permutation_fp32.h
index 0b46009f50d..b0799b3f10d 100644
--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/fp32/invert_permutation_fp32.h
+++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/fp32/invert_permutation_fp32.h
@@ -16,10 +16,12 @@
 #ifndef MINDSPORE_NNACL_INVERT_PERMUTATION_FP32_H_
 #define MINDSPORE_NNACL_INVERT_PERMUTATION_FP32_H_
 
+#include 
+
 #ifdef __cplusplus
 extern "C" {
 #endif
-int InvertPermutation(const int *input, int *output, int num);
+int InvertPermutation(const int *input, int *output, size_t num);
 #ifdef __cplusplus
 }
 #endif
diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/fp32/winograd_transform.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/fp32/winograd_transform.c
index 0f196093737..9401ddddba5 100644
--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/fp32/winograd_transform.c
+++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/fp32/winograd_transform.c
@@ -155,7 +155,7 @@ void WinogradInputTransformOptStep(const float *input_data, float *trans_input,
       // input transform
       const int block_tile = C12NUM;
       int dst_ic8_offset = dst_plane_offset + ic * block_tile * input_unit * input_unit * channel_tile;
-      size_t dst_step = input_unit * block_tile * channel_tile;
+      size_t dst_step = (size_t)(input_unit * block_tile * channel_tile);
       float *trans_input_ptr = trans_input + dst_ic8_offset;
       func(tmp_data, trans_input_ptr, channel_tile, dst_step, block_tile * channel_tile);
     }
diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/exp.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/exp.c
index c9fc8204d62..3e748d404dc 100644
--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/exp.c
+++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/kernel/exp.c
@@ -69,7 +69,6 @@ int exp_do_compute(void *param, int task_id, float lhs_scale, float rhs_scale) {
   void *output_data = exp_stru->base.out[0].data_;
   NNACL_CHECK_NULL_RETURN_ERR(output_data);
   int ret = exp_stru->base.funcs->ExpFusion(input_data, output_data, exp_param, task_id);
-
   return ret;
 }
diff --git a/mindspore/lite/src/litert/cxx_api/tensor/tensor_impl.h b/mindspore/lite/src/litert/cxx_api/tensor/tensor_impl.h
index 254355359fb..7842f5a61d8 100644
--- a/mindspore/lite/src/litert/cxx_api/tensor/tensor_impl.h
+++ b/mindspore/lite/src/litert/cxx_api/tensor/tensor_impl.h
@@ -163,7 +163,7 @@ class LiteTensorImpl : public MutableTensorImpl {
     return lite_tensor_->format();
   }
 
-  void SetFormat(mindspore::Format format) override {
+  void SetFormat(const mindspore::Format format) override {
     if (lite_tensor_ == nullptr) {
       MS_LOG(ERROR) << "Invalid tensor.";
       return;
diff --git a/mindspore/lite/src/litert/kernel/cpu/base/group_convolution_creator.cc b/mindspore/lite/src/litert/kernel/cpu/base/group_convolution_creator.cc
index d93fd940a36..f654a73bcfb 100644
--- a/mindspore/lite/src/litert/kernel/cpu/base/group_convolution_creator.cc
+++ b/mindspore/lite/src/litert/kernel/cpu/base/group_convolution_creator.cc
@@ -87,7 +87,7 @@ lite::Tensor *CreateConstTensor(const lite::Tensor *tensor, const std::vector
-  memcpy(new_tensor->data(), reinterpret_cast(new_tensor_data), new_tensor->Size());
+  (void)memcpy(new_tensor->data(), reinterpret_cast(new_tensor_data), new_tensor->Size());
   return new_tensor;
 }
diff --git a/mindspore/lite/src/litert/kernel/cpu/base/reduce_base.cc b/mindspore/lite/src/litert/kernel/cpu/base/reduce_base.cc
index 0dbfe426363..fad32bb5f39 100644
--- a/mindspore/lite/src/litert/kernel/cpu/base/reduce_base.cc
+++ b/mindspore/lite/src/litert/kernel/cpu/base/reduce_base.cc
@@ -107,7 +107,7 @@ int ReduceBaseCPUKernel::Prepare() {
   MS_CHECK_TRUE_MSG(axes_tensor != nullptr, RET_ERROR, "axes-tensor is a nullptr.");
   MS_CHECK_FALSE_MSG((axes_tensor->data_type() != kNumberTypeInt && axes_tensor->data_type() != kNumberTypeInt32),
                      RET_ERROR, "The data type of axes tensor should be int32");
-  num_axes_ = axes_tensor->ElementsNum();
+  num_axes_ = static_cast(axes_tensor->ElementsNum());
   if (axes_tensor->data() != nullptr && (num_axes_ <= 0 || num_axes_ > MAX_SHAPE_SIZE)) {
     MS_LOG(ERROR) << "input axes invalid.";
     return RET_ERROR;
diff --git a/mindspore/lite/src/litert/kernel/cpu/fp32/affine_fp32.cc b/mindspore/lite/src/litert/kernel/cpu/fp32/affine_fp32.cc
index 24ac898b1b6..ad67b18abeb 100644
--- a/mindspore/lite/src/litert/kernel/cpu/fp32/affine_fp32.cc
+++ b/mindspore/lite/src/litert/kernel/cpu/fp32/affine_fp32.cc
@@ -348,7 +348,7 @@ OpParameter *AffineFp32CPUKernel::MatmulParameterCreate() {
   matmul_param->a_transpose_ = origin_matmul->a_transpose_;
   matmul_param->has_bias_ = origin_matmul->has_bias_;
   matmul_param->act_type_ = origin_matmul->act_type_;
-  matmul_param->op_parameter_.thread_num_ = this->context()->thread_num_;
+  matmul_param->op_parameter_.thread_num_ = op_parameter_->thread_num_;
 
   return reinterpret_cast(matmul_param);
 }
diff --git a/mindspore/lite/src/litert/kernel/cpu/fp32/batchnorm_fp32.h b/mindspore/lite/src/litert/kernel/cpu/fp32/batchnorm_fp32.h
index 6015c2e1fb7..56b0b1d7126 100644
--- a/mindspore/lite/src/litert/kernel/cpu/fp32/batchnorm_fp32.h
+++ b/mindspore/lite/src/litert/kernel/cpu/fp32/batchnorm_fp32.h
@@ -38,10 +38,7 @@ class BatchnormCPUKernel : public LiteKernel {
   int SetupVirtualBatch(int virtual_batch_multiplier, int param) override;
   virtual int InitConstTensor();
   virtual int DoExecute(int task_id);
-  virtual int Batchnorm2Scale(const void *scale_data, const void *bias_data, const void *mean_data,
-                              const void *var_data, float eps, int kernel_num) {
-    return RET_OK;
-  }
+  virtual int Batchnorm2Scale(const void *, const void *, const void *, const void *, float, int) { return RET_OK; }
   virtual int set_momentum(float momentum);
   virtual float get_momentum();
   virtual int RestoreDefaultMomentum();
diff --git a/mindspore/lite/src/litert/kernel/cpu/fp32/convolution_delegate_fp32.cc b/mindspore/lite/src/litert/kernel/cpu/fp32/convolution_delegate_fp32.cc
index 536adc41e75..b374ab29663 100644
--- a/mindspore/lite/src/litert/kernel/cpu/fp32/convolution_delegate_fp32.cc
+++ b/mindspore/lite/src/litert/kernel/cpu/fp32/convolution_delegate_fp32.cc
@@ -312,9 +312,8 @@ kernel::LiteKernel *ConvolutionDelegateCPUKernel::CpuConvFp32KernelSelect() {
       op_parameter_ = nullptr;
       return nullptr;
     }
+    kernel->set_name("act_" + name_);
   }
-
-  kernel->set_name("act_" + name_);
   return kernel;
 }
diff --git a/mindspore/lite/src/litert/kernel/cpu/fp32/fill_fp32.cc b/mindspore/lite/src/litert/kernel/cpu/fp32/fill_fp32.cc
index f68332acba0..95c053070b2 100644
--- a/mindspore/lite/src/litert/kernel/cpu/fp32/fill_fp32.cc
+++ b/mindspore/lite/src/litert/kernel/cpu/fp32/fill_fp32.cc
@@ -43,7 +43,7 @@ int FillCPUKernel::ReSize() {
   }
   auto output = out_tensors_.front();
   CHECK_NULL_RETURN(output);
-  data_size_ = output->ElementsNum();
+  data_size_ = static_cast(output->ElementsNum());
   thread_sz_count_ = MSMIN(thread_num_, data_size_);
   if (thread_sz_count_ != 0) {
     thread_sz_stride_ = UP_DIV(data_size_, thread_sz_count_);
diff --git a/mindspore/lite/src/litert/kernel/cpu/fp32/group_convolution_fp32.cc b/mindspore/lite/src/litert/kernel/cpu/fp32/group_convolution_fp32.cc
index 5443519312b..62347c45bfc 100644
--- a/mindspore/lite/src/litert/kernel/cpu/fp32/group_convolution_fp32.cc
+++ b/mindspore/lite/src/litert/kernel/cpu/fp32/group_convolution_fp32.cc
@@ -21,7 +21,7 @@ using mindspore::lite::RET_ERROR;
 using mindspore::lite::RET_OK;
 
 namespace mindspore::kernel {
-int GroupConvolutionFp32CPUKernel::Separate(int task_id) {
+int GroupConvolutionFp32CPUKernel::Separate(const int &task_id) const {
   auto plane_step = UP_DIV(in_plane_, in_thread_num_);
   MS_CHECK_INT_MUL_NOT_OVERFLOW(plane_step, task_id, RET_ERROR);
   auto begin_plane = plane_step * task_id;
@@ -31,7 +31,7 @@ int GroupConvolutionFp32CPUKernel::Separate(int task_id) {
   auto src_ptr = sub_in_src_ + begin_plane * ori_in_channel_;
   auto dst_ptr = sub_in_dst_ + begin_plane * sub_in_channel_;
   for (int i = begin_plane; i < end_plane; ++i) {
-    memcpy(dst_ptr, src_ptr, sub_in_channel_ * sizeof(float));
+    (void)memcpy(dst_ptr, src_ptr, sub_in_channel_ * sizeof(float));
     src_ptr += ori_in_channel_;
     dst_ptr += sub_in_channel_;
   }
@@ -63,7 +63,7 @@ int GroupConvolutionFp32CPUKernel::SeparateInput(int group_id) {
   return RET_OK;
 }
 
-int GroupConvolutionFp32CPUKernel::Concat(int task_id) {
+int GroupConvolutionFp32CPUKernel::Concat(const int &task_id) const {
   auto plane_step = UP_DIV(out_plane_, out_thread_num_);
   MS_CHECK_INT_MUL_NOT_OVERFLOW(plane_step, task_id, RET_ERROR);
   auto begin_plane = plane_step * task_id;
@@ -73,7 +73,7 @@ int GroupConvolutionFp32CPUKernel::Concat(int task_id) {
   auto src_ptr = sub_out_src_ + begin_plane * sub_out_channel_;
   auto dst_ptr = sub_out_dst_ + begin_plane * ori_out_channel_;
   for (int i = begin_plane; i < end_plane; ++i) {
-    memcpy(dst_ptr, src_ptr, sub_out_channel_ * sizeof(float));
+    (void)memcpy(dst_ptr, src_ptr, sub_out_channel_ * sizeof(float));
     src_ptr += sub_out_channel_;
     dst_ptr += ori_out_channel_;
   }
diff --git a/mindspore/lite/src/litert/kernel/cpu/fp32/group_convolution_fp32.h b/mindspore/lite/src/litert/kernel/cpu/fp32/group_convolution_fp32.h
index e41a2971a72..8c5c1fac682 100644
--- a/mindspore/lite/src/litert/kernel/cpu/fp32/group_convolution_fp32.h
+++ b/mindspore/lite/src/litert/kernel/cpu/fp32/group_convolution_fp32.h
@@ -37,8 +37,8 @@ class GroupConvolutionFp32CPUKernel : public GroupConvolutionBaseCPUKernel {
   int SeparateInput(int group_id) override;
   int PostConcat(int group_id) override;
 
-  int Separate(int task_id);
-  int Concat(int task_id);
+  int Separate(const int &task_id) const;
+  int Concat(const int &task_id) const;
 
  private:
  float *sub_in_src_ = nullptr;
diff --git a/mindspore/lite/src/litert/kernel/cpu/fp32/groupnorm_fp32.cc b/mindspore/lite/src/litert/kernel/cpu/fp32/groupnorm_fp32.cc
index cb7e68c4d38..5ae319fe1eb 100644
--- a/mindspore/lite/src/litert/kernel/cpu/fp32/groupnorm_fp32.cc
+++ b/mindspore/lite/src/litert/kernel/cpu/fp32/groupnorm_fp32.cc
@@ -36,9 +36,9 @@ GroupnormCPUKernel::GroupnormCPUKernel(OpParameter *parameter, const std::vector
   }
 
   for (size_t i = 0; i < in_tensors_.size(); i++) {
-    Tensor2TensorC(in_tensors_.at(i), &(in_[i]));
+    (void)Tensor2TensorC(in_tensors_.at(i), &(in_[i]));
   }
-  Tensor2TensorC(out_tensors_.at(0), &(out_[0]));
+  (void)Tensor2TensorC(out_tensors_.at(0), &(out_[0]));
 }
 
 GroupnormCPUKernel::~GroupnormCPUKernel() {
diff --git a/mindspore/lite/src/litert/kernel/cpu/fp32/instance_norm_fp32.h b/mindspore/lite/src/litert/kernel/cpu/fp32/instance_norm_fp32.h
index f5d65ccafcb..56926cec334 100644
--- a/mindspore/lite/src/litert/kernel/cpu/fp32/instance_norm_fp32.h
+++ b/mindspore/lite/src/litert/kernel/cpu/fp32/instance_norm_fp32.h
@@ -44,7 +44,6 @@ class InstanceNormCPUKernel : public LiteKernel {
     }
   }
 
- private:
  InstanceNormParameter *param_ = nullptr;
  float *src_data_ = nullptr;
  float *tmp_src_data_ = nullptr;
diff --git a/mindspore/lite/src/litert/kernel/cpu/fp32/invert_permutation_fp32.cc b/mindspore/lite/src/litert/kernel/cpu/fp32/invert_permutation_fp32.cc
index 43b07b6571d..71829467b89 100644
--- a/mindspore/lite/src/litert/kernel/cpu/fp32/invert_permutation_fp32.cc
+++ b/mindspore/lite/src/litert/kernel/cpu/fp32/invert_permutation_fp32.cc
@@ -59,7 +59,7 @@ int InvertPermutationCPUKernel::Run() {
   auto output_ptr = reinterpret_cast(out_tensor->data());
   CHECK_NULL_RETURN(input_ptr);
   CHECK_NULL_RETURN(output_ptr);
-  auto ret = InvertPermutation(input_ptr, output_ptr, in_tensors_[0]->ElementsNum());
+  auto ret = InvertPermutation(input_ptr, output_ptr, static_cast(in_tensors_[0]->ElementsNum()));
   if (ret != NNACL_OK) {
     MS_LOG(ERROR) << "null pointer dereferencing.";
     return RET_ERROR;
diff --git a/mindspore/lite/src/litert/kernel/cpu/fp32/layer_norm_fp32.cc b/mindspore/lite/src/litert/kernel/cpu/fp32/layer_norm_fp32.cc
index b5e3364591f..df3dfb0f7fb 100644
--- a/mindspore/lite/src/litert/kernel/cpu/fp32/layer_norm_fp32.cc
+++ b/mindspore/lite/src/litert/kernel/cpu/fp32/layer_norm_fp32.cc
@@ -40,10 +40,11 @@ int LayerNormCPUKernel::ReSize() {
   auto input = in_tensors_.front();
   CHECK_NULL_RETURN(input);
   auto shape = input->shape();
-  param_->begin_norm_axis_ =
-    param_->begin_norm_axis_ >= 0 ? param_->begin_norm_axis_ : param_->begin_norm_axis_ + shape.size();
-  param_->begin_params_axis_ =
-    param_->begin_params_axis_ >= 0 ? param_->begin_params_axis_ : param_->begin_params_axis_ + shape.size();
+  param_->begin_norm_axis_ = param_->begin_norm_axis_ >= 0 ? param_->begin_norm_axis_
+                                                           : param_->begin_norm_axis_ + static_cast(shape.size());
+  param_->begin_params_axis_ = param_->begin_params_axis_ >= 0
+                                 ? param_->begin_params_axis_
+                                 : param_->begin_params_axis_ + static_cast(shape.size());
   MS_CHECK_LT(param_->begin_norm_axis_, static_cast(shape.size()), RET_ERROR);
   MS_CHECK_LT(param_->begin_params_axis_, static_cast(shape.size()), RET_ERROR);
   param_->norm_outer_size_ = 1;
diff --git a/mindspore/lite/src/litert/kernel/cpu/int8/group_convolution_int8.cc b/mindspore/lite/src/litert/kernel/cpu/int8/group_convolution_int8.cc
index e26f2f23215..169439a8e90 100644
--- a/mindspore/lite/src/litert/kernel/cpu/int8/group_convolution_int8.cc
+++ b/mindspore/lite/src/litert/kernel/cpu/int8/group_convolution_int8.cc
@@ -21,10 +21,13 @@ using mindspore::lite::RET_ERROR;
 using mindspore::lite::RET_OK;
 
 namespace mindspore::kernel {
-int GroupConvolutionInt8CPUKernel::Separate(int task_id) {
+int GroupConvolutionInt8CPUKernel::Separate(const int &task_id) const {
   auto plane_step = UP_DIV(in_plane_, in_thread_num_);
+  MS_CHECK_INT_MUL_NOT_OVERFLOW(plane_step, task_id, RET_ERROR);
   auto begin_plane = plane_step * task_id;
   auto end_plane = MSMIN(in_plane_, plane_step * (task_id + 1));
+  MS_CHECK_INT_MUL_NOT_OVERFLOW(begin_plane, ori_in_channel_, RET_ERROR);
+  MS_CHECK_INT_MUL_NOT_OVERFLOW(begin_plane, sub_in_channel_, RET_ERROR);
   auto src_ptr = sub_in_src_ + begin_plane * ori_in_channel_;
   auto dst_ptr = sub_in_dst_ + begin_plane * sub_in_channel_;
   for (int i = begin_plane; i < end_plane; ++i) {
@@ -59,10 +62,13 @@ int GroupConvolutionInt8CPUKernel::SeparateInput(int group_id) {
   return RET_OK;
 }
 
-int GroupConvolutionInt8CPUKernel::Concat(int task_id) {
+int GroupConvolutionInt8CPUKernel::Concat(const int &task_id) const {
   auto plane_step = UP_DIV(out_plane_, out_thread_num_);
+  MS_CHECK_INT_MUL_NOT_OVERFLOW(plane_step, task_id, RET_ERROR);
   auto begin_plane = plane_step * task_id;
   auto end_plane = MSMIN(out_plane_, plane_step * (task_id + 1));
+  MS_CHECK_INT_MUL_NOT_OVERFLOW(begin_plane, sub_out_channel_, RET_ERROR);
+  MS_CHECK_INT_MUL_NOT_OVERFLOW(begin_plane, ori_out_channel_, RET_ERROR);
   auto src_ptr = sub_out_src_ + begin_plane * sub_out_channel_;
   auto dst_ptr = sub_out_dst_ + begin_plane * ori_out_channel_;
   for (int i = begin_plane; i < end_plane; ++i) {
diff --git a/mindspore/lite/src/litert/kernel/cpu/int8/group_convolution_int8.h b/mindspore/lite/src/litert/kernel/cpu/int8/group_convolution_int8.h
index 9b81489c018..58d9cf356da 100644
--- a/mindspore/lite/src/litert/kernel/cpu/int8/group_convolution_int8.h
+++ b/mindspore/lite/src/litert/kernel/cpu/int8/group_convolution_int8.h
@@ -37,8 +37,8 @@ class GroupConvolutionInt8CPUKernel : public GroupConvolutionBaseCPUKernel {
   int SeparateInput(int group_id) override;
   int PostConcat(int group_id) override;
 
-  int Separate(int task_id);
-  int Concat(int task_id);
+  int Separate(const int &task_id) const;
+  int Concat(const int &task_id) const;
 
  private:
  int8_t *sub_in_src_ = nullptr;
diff --git a/mindspore/lite/src/litert/kernel_exec.cc b/mindspore/lite/src/litert/kernel_exec.cc
index 101ac795e9c..4381eede94f 100644
--- a/mindspore/lite/src/litert/kernel_exec.cc
+++ b/mindspore/lite/src/litert/kernel_exec.cc
@@ -91,7 +91,7 @@ int KernelExec::DoExecute() {
   return ret;
 }
 
-void KernelExec::RepalceKernel(std::shared_ptr kernel) {
+void KernelExec::RepalceKernel(const std::shared_ptr kernel) {
   if (desc_.provider == kBuiltin) {
     std::static_pointer_cast(kernel_)->set_parameter(nullptr);  // set nullptr, don't release op_parameter
     kernel_.reset();
diff --git a/mindspore/lite/src/litert/kernel_exec.h b/mindspore/lite/src/litert/kernel_exec.h
index e94e47b5566..30d3703fe13 100644
--- a/mindspore/lite/src/litert/kernel_exec.h
+++ b/mindspore/lite/src/litert/kernel_exec.h
@@ -234,7 +234,7 @@ class KernelExec {
       MS_ASSERT(index < kernel_->inputs().size());
       auto impl = std::make_shared(in_tensor);
       auto tensor_in = mindspore::MSTensor(impl);
-      kernel_->set_input(tensor_in, index);
+      kernel_->set_input(tensor_in, static_cast(index));
     }
   }
 
@@ -261,7 +261,7 @@ class KernelExec {
       MS_ASSERT(index < kernel_->outputs().size());
       auto impl = std::make_shared(out_tensor);
       auto tensor_out = mindspore::MSTensor(impl);
-      kernel_->set_output(tensor_out, index);
+      kernel_->set_output(tensor_out, static_cast(index));
     }
   }
 
@@ -317,8 +317,8 @@ class KernelExec {
     }
   }
 
-  size_t FindInTensorIndex(lite::Tensor *tensor) {
-    int index = 0;
+  size_t FindInTensorIndex(const lite::Tensor *tensor) {
+    size_t index = 0;
     for (size_t i = 0; i < in_tensors().size(); i++) {
       if (tensor == in_tensors().at(i)) {
         index = i;
@@ -328,8 +328,8 @@ class KernelExec {
     return index;
   }
 
-  size_t FindOutTensorIndex(lite::Tensor *tensor) {
-    int index = 0;
+  size_t FindOutTensorIndex(const lite::Tensor *tensor) {
+    size_t index = 0;
     for (size_t i = 0; i < out_tensors().size(); i++) {
       if (tensor == out_tensors().at(i)) {
         index = i;
@@ -339,9 +339,9 @@ class KernelExec {
     return index;
   }
 
-  void RemoveInKernel(KernelExec *kernel) { lite::VectorErase(&(this->in_kernels_), kernel); }
+  void RemoveInKernel(KernelExec *kernel) { (void)lite::VectorErase(&(this->in_kernels_), kernel); }
 
-  void RemoveOutKernel(KernelExec *kernel) { lite::VectorErase(&(this->out_kernels_), kernel); }
+  void RemoveOutKernel(KernelExec *kernel) { (void)lite::VectorErase(&(this->out_kernels_), kernel); }
 
   void set_in_kernels(const std::vector &kernel) { this->in_kernels_ = kernel; }
diff --git a/mindspore/lite/src/litert/kernel_exec_util.cc b/mindspore/lite/src/litert/kernel_exec_util.cc
index 967dec20507..9b5db45524c 100644
--- a/mindspore/lite/src/litert/kernel_exec_util.cc
+++ b/mindspore/lite/src/litert/kernel_exec_util.cc
@@ -266,7 +266,7 @@ void KernelExecUtil::FindAllInoutKernelsInSubgraphKernel(const std::vector
-KernelExec *KernelExecUtil::FindInKernelForInTensor(KernelExec *kernel, lite::Tensor *tensor) {
+KernelExec *KernelExecUtil::FindInKernelForInTensor(const KernelExec *kernel, lite::Tensor *tensor) {
   for (auto in_kernel : kernel->in_kernels()) {
     if (lite::IsContain(in_kernel->out_tensors(), tensor)) {
       return in_kernel;
@@ -275,7 +275,7 @@ KernelExec *KernelExecUtil::FindInKernelForInTensor(KernelExec *kernel, lite::Te
   return nullptr;
 }
 
-std::vector KernelExecUtil::FindOutKernelsForOutTensor(KernelExec *kernel, lite::Tensor *tensor) {
+std::vector KernelExecUtil::FindOutKernelsForOutTensor(const KernelExec *kernel, lite::Tensor *tensor) {
   std::vector out_kernels;
   for (auto out_kernel : kernel->out_kernels()) {
     if (lite::IsContain(out_kernel->in_tensors(), tensor)) {
@@ -285,7 +285,7 @@ std::vector KernelExecUtil::FindOutKernelsForOutTensor(KernelExec
   return out_kernels;
 }
 
-int KernelExecUtil::SetKernelTensorDataType(kernel::KernelExec *kernel) {
+int KernelExecUtil::SetKernelTensorDataType(const kernel::KernelExec *kernel) {
   CHECK_NULL_RETURN(kernel);
   if (kernel->desc().arch != kernel::KERNEL_ARCH::kCPU) {
     return RET_OK;
diff --git a/mindspore/lite/src/litert/kernel_exec_util.h b/mindspore/lite/src/litert/kernel_exec_util.h
index 02fb4189cd2..7d7d02dcf44 100644
--- a/mindspore/lite/src/litert/kernel_exec_util.h
+++ b/mindspore/lite/src/litert/kernel_exec_util.h
@@ -45,9 +45,9 @@ class KernelExecUtil {
   // find in_kernels_ and out_kernels of kernel, sub_graph and nodes_ in sub_graph
   static void FindAllInoutKernels(const std::vector &kernels);
   static void FindAllInoutKernelsInSubgraphKernel(const std::vector &kernels);
-  static KernelExec *FindInKernelForInTensor(KernelExec *kernel, lite::Tensor *tensor);
-  static std::vector FindOutKernelsForOutTensor(KernelExec *kernel, lite::Tensor *tensor);
-  static int SetKernelTensorDataType(kernel::KernelExec *kernel);
+  static KernelExec *FindInKernelForInTensor(const KernelExec *kernel, lite::Tensor *tensor);
+  static std::vector FindOutKernelsForOutTensor(const KernelExec *kernel, lite::Tensor *tensor);
+  static int SetKernelTensorDataType(const kernel::KernelExec *kernel);
   static SubGraphKernel *CreateSubGraphKernel(const std::vector &kernels,
                                               const std::vector *in_tensors,
                                               const std::vector *out_tensors, SubGraphType type,
diff --git a/mindspore/lite/src/litert/lite_kernel.h b/mindspore/lite/src/litert/lite_kernel.h
index 91492cf1c7a..b7dd114b93a 100644
--- a/mindspore/lite/src/litert/lite_kernel.h
+++ b/mindspore/lite/src/litert/lite_kernel.h
@@ -164,11 +164,11 @@ class LiteKernel : public Abstractkernel {
     return mindspore::lite::RET_OK;
   }
 
-  virtual int SetupVirtualBatch(int virtual_batch_multiplier, int param) { return mindspore::lite::RET_OK; }
+  virtual int SetupVirtualBatch(int, int) { return mindspore::lite::RET_OK; }
 
   bool IsEval() const override { return !this->train_mode_; }
 
-  void SetTrainable(bool trainable = true) override { this->trainable_ = trainable; }
+  void SetTrainable(bool trainable) override { this->trainable_ = trainable; }
 
   bool IsTrainable() const override { return this->trainable_; }
 
@@ -186,7 +186,6 @@ class LiteKernel : public Abstractkernel {
       workspace_ = ws;
     }
   }
-  const lite::InnerContext *context() const { return this->ms_context_; }
   bool ws_allocated_ = false;
 
 protected:
diff --git a/mindspore/lite/src/litert/pass/decrease_transpose_algo.cc b/mindspore/lite/src/litert/pass/decrease_transpose_algo.cc
index 1f1b15b4415..851de0f90d9 100644
--- a/mindspore/lite/src/litert/pass/decrease_transpose_algo.cc
+++ b/mindspore/lite/src/litert/pass/decrease_transpose_algo.cc
@@ -25,16 +25,12 @@ int TransFullyFusion(kernel::SubGraphKernel *subgraph, kernel::KernelExec *trans
   CHECK_NULL_RETURN(trans_kernel0);
   CHECK_NULL_RETURN(trans_kernel1);
   auto in_tensor = trans_kernel0->in_tensors().at(0);
+  auto out_tensor = trans_kernel1->out_tensors().at(0);
   auto in_kernel = kernel::KernelExecUtil::FindInKernelForInTensor(trans_kernel0, in_tensor);
   auto out_kernels = kernel::KernelExecUtil::FindOutKernelsForOutTensor(trans_kernel1, out_tensor);
-  auto ret = subgraph->UpdateInOutKernels(in_kernel, out_kernels, trans_kernel0, trans_kernel1);
-  if (ret != RET_OK) {
-    MS_LOG(ERROR) << "Update kernel link failed when fusing kernel " << trans_kernel0->name() << " and "
-                  << trans_kernel1->name();
-    return RET_ERROR;
-  }
-  ret = subgraph->UpdateInOutTensors(in_kernel, out_kernels, in_tensor, out_tensor, true);
+  subgraph->UpdateInOutKernels(in_kernel, out_kernels, trans_kernel0, trans_kernel1);
+  auto ret = subgraph->UpdateInOutTensors(in_kernel, out_kernels, in_tensor, out_tensor, true);
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "Update tensor failed when fusing kernel " << trans_kernel0->name() << " and "
                   << trans_kernel1->name();
@@ -59,12 +55,7 @@ int TransHeadTailFusion(kernel::SubGraphKernel *subgraph, kernel::KernelExec *tr
   auto out_tensor = trans_kernel1->out_tensors().at(0);
   auto in_kernel = kernel::KernelExecUtil::FindInKernelForInTensor(trans_kernel0, in_tensor);
   auto out_kernels = kernel::KernelExecUtil::FindOutKernelsForOutTensor(trans_kernel1, out_tensor);
-  auto ret = subgraph->UpdateInOutKernels(in_kernel, out_kernels, trans_kernel0, trans_kernel1);
-  if (ret != RET_OK) {
-    MS_LOG(ERROR) << "Update kernel link failed when fusing kernel " << trans_kernel0->name() << " and "
-                  << trans_kernel1->name();
-    return RET_ERROR;
-  }
+  subgraph->UpdateInOutKernels(in_kernel, out_kernels, trans_kernel0, trans_kernel1);
   // new trans kernel: src_format -> dst_format
   auto trans_name = trans_kernel0->name() + "_and_" + trans_kernel1->name() + "_fusion";
   auto kernel = CreateFormatTranspose(in_tensor, out_tensor, trans_info, trans_name, ctx, desc);
@@ -98,7 +89,7 @@ int DecreaseTransposeAlgo::TransTransFusion(kernel::SubGraphKernel *subgraph) {
   while (kernel_iter != kernels->end()) {
     auto &kernel = *kernel_iter;
     CHECK_NULL_RETURN(kernel);
-    kernel_iter++;
+    (void)kernel_iter++;
 
     if (kernel->in_kernels().size() == 0 || !IsContain(subgraph->nodes(), kernel->in_kernels().at(0))) {
       continue;
@@ -227,11 +218,7 @@ int DoPreFusion(kernel::SubGraphKernel *subgraph, kernel::KernelExec *kernel, st
     }
   } else {
     auto pre_in_kernel = kernel::KernelExecUtil::FindInKernelForInTensor(in_kernel, in_kernel->in_tensors().at(0));
-    ret = subgraph->UpdateInOutKernels(pre_in_kernel, {kernel}, in_kernel, in_kernel);
-    if (ret != RET_OK) {
-      MS_LOG(ERROR) << "Update kernel link failed when removing kernel " << in_kernel->name();
-      return RET_ERROR;
-    }
+    subgraph->UpdateInOutKernels(pre_in_kernel, {kernel}, in_kernel, in_kernel);
     ret = subgraph->UpdateInOutTensors(pre_in_kernel, {kernel}, in_kernel->in_tensors().at(0), in_tensor, true);
     if (ret != RET_OK) {
       MS_LOG(ERROR) << "Update tensor failed when removing kernel " << in_kernel->name();
@@ -248,7 +235,7 @@ int DoPreFusion(kernel::SubGraphKernel *subgraph, kernel::KernelExec *kernel, st
   return RET_OK;
 }
 
-int DoPostFusion(kernel::SubGraphKernel *subgraph, kernel::KernelExec *kernel, std::vector *all_tensors,
+int DoPostFusion(kernel::SubGraphKernel *subgraph, const kernel::KernelExec *kernel, std::vector *all_tensors,
                  const TransInfoPair &post_trans) {
   for (size_t i = 0; i < kernel->out_tensors().size(); i++) {
     auto tensor = kernel->out_tensors().at(i);
@@ -259,13 +246,13 @@ int DoPostFusion(kernel::SubGraphKernel *subgraph, kernel::KernelExec *kernel, s
       TransInfoPair out_kernel_trans;
       auto ret = GetTransposeInfo(out_kernel, &out_kernel_trans);
       if (ret == RET_OK && IsSameTranspose(post_trans, out_kernel_trans)) {
-        to_deletes.emplace_back(out_kernel);
+        (void)to_deletes.emplace_back(out_kernel);
         continue;
       }
       auto in_tensor_of_out_kernel_idx = out_kernel->FindInTensorIndex(tensor);
-      ret = InsertPreTranspose(subgraph, out_kernel, all_tensors,
-                               TransInfoPair(post_trans.dst_format_, post_trans.src_format_),
-                               static_cast(in_tensor_of_out_kernel_idx));
+      ret =
+        InsertPreTranspose(subgraph, out_kernel, all_tensors,
+                           TransInfoPair(post_trans.dst_format_, post_trans.src_format_), in_tensor_of_out_kernel_idx);
       if (ret != RET_OK) {
         MS_LOG(ERROR) << "Insert pre transpose kernel for op: " << out_kernel->name() << " input tensor "
                       << in_tensor_of_out_kernel_idx << " failed.";
@@ -293,7 +280,7 @@ int DecreaseTransposeAlgo::DecreaseTransposeForSingleKernel(kernel::SubGraphKern
     TransInfoPair pre_trans;
     TransInfoPair post_trans;
     if (!transpose_strategy_.CheckFusion(kernel, &pre_trans, &post_trans)) {
-      kernel_iter++;
+      (void)kernel_iter++;
       continue;
     }
     auto ret = transpose_strategy_.ChangeKernelAxis(kernel, post_trans);
@@ -314,7 +301,7 @@ int DecreaseTransposeAlgo::DecreaseTransposeForSingleKernel(kernel::SubGraphKern
       return RET_ERROR;
     }
     kernel_iter = find(kernels->begin(), kernels->end(), kernel);
-    kernel_iter++;
+    (void)kernel_iter++;
   }
   return RET_OK;
 }
diff --git a/mindspore/lite/src/litert/pass/delete_isolated_kernel.cc b/mindspore/lite/src/litert/pass/delete_isolated_kernel.cc
index ecc7b131f36..cfabc5ef123 100644
--- a/mindspore/lite/src/litert/pass/delete_isolated_kernel.cc
+++ b/mindspore/lite/src/litert/pass/delete_isolated_kernel.cc
@@ -20,7 +20,7 @@
 #include "src/litert/kernel_exec_util.h"
 
 namespace mindspore::lite::pass {
-int DeleteIsolatedKernel::Run(kernel::SubGraphKernel *subgraph, std::vector *tensors) {
+int DeleteIsolatedKernel::Run(kernel::SubGraphKernel *subgraph, std::vector *) {
   subgraph->SetInNodes(kernel::KernelExecUtil::SubgraphInputNodes(subgraph->nodes()));
   std::set visited;  // record the kernel that will be executed
 
@@ -28,7 +28,7 @@ int DeleteIsolatedKernel::Run(kernel::SubGraphKernel *subgraph, std::vector
   for (auto in_kernel : subgraph->in_nodes()) {
     kernel_queue.push(in_kernel);
-    visited.insert(in_kernel);
+    (void)visited.insert(in_kernel);
   }
   while (!kernel_queue.empty()) {
     auto kernel = kernel_queue.front();
@@ -39,7 +39,7 @@ int DeleteIsolatedKernel::Run(kernel::SubGraphKernel *subgraph, std::vector
diff --git a/mindspore/lite/src/litert/pass/delete_isolated_kernel.h b/mindspore/lite/src/litert/pass/delete_isolated_kernel.h
--- a/mindspore/lite/src/litert/pass/delete_isolated_kernel.h
+++ b/mindspore/lite/src/litert/pass/delete_isolated_kernel.h
-  int Run(kernel::SubGraphKernel *subgraph, std::vector *tensors = nullptr) override;
+  int Run(kernel::SubGraphKernel *subgraph, std::vector *) override;
 };
 }  // namespace mindspore::lite::pass
 #endif  // MINDSPORE_LITE_SRC_RUNTIME_PASS_DELETE_ISOLATED_KERNEL_H_
diff --git a/mindspore/lite/src/litert/pass/infershape_pass.cc b/mindspore/lite/src/litert/pass/infershape_pass.cc
index 9e3724233de..e0c90cd4e09 100644
--- a/mindspore/lite/src/litert/pass/infershape_pass.cc
+++ b/mindspore/lite/src/litert/pass/infershape_pass.cc
@@ -19,7 +19,7 @@
 #include "src/litert/kernel_exec_util.h"
 
 namespace mindspore::lite::pass {
-int Infershape::Run(kernel::SubGraphKernel *subgraph, std::vector *tensors) {
+int Infershape::Run(kernel::SubGraphKernel *subgraph, std::vector *) {
   auto kernels = &(subgraph->nodes());
   for (const auto &kernel : *kernels) {
     CHECK_NULL_RETURN(kernel);
diff --git a/mindspore/lite/src/litert/pass/infershape_pass.h b/mindspore/lite/src/litert/pass/infershape_pass.h
index 2138316866b..e24a04372ed 100644
--- a/mindspore/lite/src/litert/pass/infershape_pass.h
+++ b/mindspore/lite/src/litert/pass/infershape_pass.h
@@ -25,7 +25,7 @@ class Infershape : public RuntimePass {
  public:
  Infershape() {}
  ~Infershape() override = default;
-  int Run(kernel::SubGraphKernel *subgraph, std::vector *tensors = nullptr) override;
+  int Run(kernel::SubGraphKernel *subgraph, std::vector *) override;
 };
 }  // namespace mindspore::lite::pass
 #endif  // MINDSPORE_LITE_SRC_RUNTIME_PASS_INFERSHAPE_PASS_H_
diff --git a/mindspore/lite/src/litert/pass/pass_utils.cc b/mindspore/lite/src/litert/pass/pass_utils.cc
index 735abf21055..88a48176d93 100644
--- a/mindspore/lite/src/litert/pass/pass_utils.cc
+++ b/mindspore/lite/src/litert/pass/pass_utils.cc
@@ -61,8 +61,8 @@ kernel::KernelExec *CreateFormatTranspose(Tensor *input, Tensor *output, const T
     MS_LOG(ERROR) << "Malloc FormatTransposeParameter failed.";
     return nullptr;
   }
-  memset(param, 0, sizeof(FormatTransposeParameter));
-  param->op_parameter_.type_ = schema::PrimitiveType_FormatTranspose;
+  (void)memset(param, 0, sizeof(FormatTransposeParameter));
+  param->op_parameter_.type_ = static_cast(schema::PrimitiveType_FormatTranspose);
   param->src_format_ = trans_info.src_format_;
   param->dst_format_ = trans_info.dst_format_;
   kernel::KernelKey format_transpose_key = desc;
@@ -80,7 +80,7 @@ kernel::KernelExec *CreateFormatTranspose(Tensor *input, Tensor *output, const T
   return kernel;
 }
 
-void SetShape(Tensor *src_tensor, Tensor *dst_tensor) {
+void SetShape(const Tensor *src_tensor, Tensor *dst_tensor) {
   auto shape = src_tensor->shape();
   auto invalid_shape = {-1};
   if (shape.size() != DIMENSION_4D) {
@@ -105,7 +105,7 @@ void SetShape(Tensor *src_tensor, Tensor *dst_tensor) {
 }
 
 int InsertPreTranspose(kernel::SubGraphKernel *subgraph, kernel::KernelExec *kernel, std::vector *all_tensors,
-                       const TransInfoPair &trans_info, const int &index) {
+                       const TransInfoPair &trans_info, const size_t &index) {
   auto trans_name = kernel->name() + "_pre_" + std::to_string(index);
   auto in_tensor = kernel->in_tensors().at(index);
   auto out_tensor = new (std::nothrow) Tensor(in_tensor->data_type(), {}, (Format)trans_info.dst_format_);
@@ -126,7 +126,7 @@ int InsertPreTranspose(kernel::SubGraphKernel *subgraph, kernel::KernelExec *ker
 }
 
 int InsertPostTranspose(kernel::SubGraphKernel *subgraph, kernel::KernelExec *kernel,
-                        std::vector *all_tensors, const TransInfoPair &trans_info, const int &index) {
+                        std::vector *all_tensors, const TransInfoPair &trans_info, const size_t &index) {
   auto trans_name = kernel->name() + "_post_" + std::to_string(index);
   auto out_tensor = kernel->out_tensors().at(index);
diff --git a/mindspore/lite/src/litert/pass/pass_utils.h b/mindspore/lite/src/litert/pass/pass_utils.h
index ad97a9c1625..99bb9a7746e 100644
--- a/mindspore/lite/src/litert/pass/pass_utils.h
+++ b/mindspore/lite/src/litert/pass/pass_utils.h
@@ -41,13 +41,13 @@ kernel::KernelExec *CreateFormatTranspose(Tensor *input, Tensor *output, const T
                                           const std::string &name, const lite::InnerContext *ctx,
                                           const kernel::KernelKey &desc);
 
-void SetShape(Tensor *src_tensor, Tensor *dst_tensor);
+void SetShape(const Tensor *src_tensor, Tensor *dst_tensor);
 
 int InsertPreTranspose(kernel::SubGraphKernel *subgraph, kernel::KernelExec *kernel, std::vector *all_tensors,
-                       const TransInfoPair &trans_info, const int &index);
+                       const TransInfoPair &trans_info, const size_t &index);
 
 int InsertPostTranspose(kernel::SubGraphKernel *subgraph, kernel::KernelExec *kernel,
-                        std::vector *all_tensors, const TransInfoPair &trans_info, const int &index);
+                        std::vector *all_tensors, const TransInfoPair &trans_info, const size_t &index);
 
 int GetTransposeInfo(const kernel::KernelExec *kernel, TransInfoPair *trans_info);
 }  // namespace mindspore::lite::pass
diff --git a/mindspore/lite/src/litert/pass/runtime_optimizer.cc b/mindspore/lite/src/litert/pass/runtime_optimizer.cc
index d1f73a900f0..19f9f661de6 100644
--- a/mindspore/lite/src/litert/pass/runtime_optimizer.cc
+++ b/mindspore/lite/src/litert/pass/runtime_optimizer.cc
@@ -21,7 +21,7 @@ RuntimeOptimizer::~RuntimeOptimizer() { passes_.clear(); }
 
 int RuntimeOptimizer::AddPass(RuntimePassPtr pass) {
   CHECK_NULL_RETURN(pass);
-  this->passes_.emplace_back(pass);
+  (void)this->passes_.emplace_back(pass);
   return RET_OK;
 }
diff --git a/mindspore/lite/src/litert/pass/to_nchw_format.h b/mindspore/lite/src/litert/pass/to_nchw_format.h
index 651ba75ae1e..a2584a422ca 100644
--- a/mindspore/lite/src/litert/pass/to_nchw_format.h
+++ b/mindspore/lite/src/litert/pass/to_nchw_format.h
@@ -36,7 +36,7 @@ namespace mindspore::lite::pass {
 class ToNCHWFormat : public RuntimePass {
  public:
-  ToNCHWFormat(Format src_format, Format dst_format, std::set to_trans_kernels)
+  ToNCHWFormat(const Format &src_format, const Format &dst_format, std::set to_trans_kernels)
     : src_format_(src_format), dst_format_(dst_format), to_trans_kernels_(to_trans_kernels) {}
   ~ToNCHWFormat() override = default;
   int Run(kernel::SubGraphKernel *subgraph, std::vector *tensors) override;
diff --git a/mindspore/lite/src/litert/pass/transpose_strategy.cc b/mindspore/lite/src/litert/pass/transpose_strategy.cc
index 61818192969..62ac16265fb 100644
--- a/mindspore/lite/src/litert/pass/transpose_strategy.cc
+++ b/mindspore/lite/src/litert/pass/transpose_strategy.cc
@@ -56,7 +56,7 @@ size_t TransposeStrategy::GetTransCount(const std::vector
   return count;
 }
 
-bool CheckInTensorsShape(kernel::KernelExec *kernel, const Format &runtime_format) {
+bool CheckInTensorsShape(const kernel::KernelExec *kernel, const Format &runtime_format) {
   // If teh fusion is valid, kernel will be executed in runtime_format.
   // Only check arithmetic (two input) kernel input tensors.
   // If broadcast for various formats is supported, this function can be deleted.
@@ -87,7 +87,8 @@ bool CheckInTensorsShape(kernel::KernelExec *kernel, const Format &runtime_forma
   return true;
 }
 
-bool TransposeStrategy::CheckFusion(kernel::KernelExec *kernel, TransInfoPair *pre_trans, TransInfoPair *post_trans) {
+bool TransposeStrategy::CheckFusion(const kernel::KernelExec *kernel, TransInfoPair *pre_trans,
+                                    TransInfoPair *post_trans) {
   if (dynamic_format_kernel_lists.find(kernel->type()) == dynamic_format_kernel_lists.end()) {
     return false;
   }
@@ -155,24 +156,24 @@ int TransFormAxis(int axis, const TransInfoPair &trans) {
   return axis;
 }
 
-int HandleArgMinMaxKernel(kernel::KernelExec *kernel, const TransInfoPair &trans) {
+int HandleArgMinMaxKernel(const kernel::KernelExec *kernel, const TransInfoPair &trans) {
   auto arg_min_max_param = reinterpret_cast(kernel->op_parameter());
   CHECK_NULL_RETURN(arg_min_max_param);
   arg_min_max_param->axis_ = TransFormAxis(arg_min_max_param->axis_, trans);
   return RET_OK;
 }
 
-int HandleConcatKernel(kernel::KernelExec *kernel, const TransInfoPair &trans) {
+int HandleConcatKernel(const kernel::KernelExec *kernel, const TransInfoPair &trans) {
   auto concat_param = reinterpret_cast(kernel->op_parameter());
   CHECK_NULL_RETURN(concat_param);
   concat_param->axis_ = TransFormAxis(concat_param->axis_, trans);
   return RET_OK;
 }
 
-int HandleCropKernel(kernel::KernelExec *kernel, const TransInfoPair &trans) {
+int HandleCropKernel(const kernel::KernelExec *kernel, const TransInfoPair &trans) {
   auto crop_param = reinterpret_cast(kernel->op_parameter());
   CHECK_NULL_RETURN(crop_param);
-  crop_param->axis_ = TransFormAxis(crop_param->axis_, trans);
+  crop_param->axis_ = TransFormAxis(static_cast(crop_param->axis_), trans);
   return RET_OK;
 }
diff --git a/mindspore/lite/src/litert/pass/transpose_strategy.h b/mindspore/lite/src/litert/pass/transpose_strategy.h
index 4b98ca06cab..cbaecea7938 100644
--- a/mindspore/lite/src/litert/pass/transpose_strategy.h
+++ b/mindspore/lite/src/litert/pass/transpose_strategy.h
@@ -91,7 +91,7 @@ class TransposeStrategy {
   ~TransposeStrategy() = default;
   size_t GetTransCount(const std::vector &kernels, TransInfoPair *trans_info);
-  bool CheckFusion(kernel::KernelExec *kernel, TransInfoPair *pre_trans, TransInfoPair *post_trans);
+  bool CheckFusion(const kernel::KernelExec *kernel, TransInfoPair *pre_trans, TransInfoPair *post_trans);
   int ChangeKernelAxis(kernel::KernelExec *kernel, const TransInfoPair &post_trans);
 };
 }  // namespace mindspore::lite::pass
diff --git a/mindspore/lite/src/litert/sub_graph_kernel.cc b/mindspore/lite/src/litert/sub_graph_kernel.cc
index beecc0a0e6a..a612d83bf36 100644
--- a/mindspore/lite/src/litert/sub_graph_kernel.cc
+++ b/mindspore/lite/src/litert/sub_graph_kernel.cc
@@ -190,7 +190,7 @@ int SubGraphKernel::TopologicalSortNodes() {
   while (!kernel_queue.empty()) {
     auto cur_kernel = kernel_queue.front();
-    nodes_.emplace_back(cur_kernel);
+    (void)nodes_.emplace_back(cur_kernel);
     kernel_queue.pop();
     CHECK_NULL_RETURN(cur_kernel);
     auto next_kernels = cur_kernel->out_kernels();
@@ -218,7 +218,7 @@ int SubGraphKernel::TopologicalSortNodes() {
   return RET_OK;
 }
 
-void SubGraphKernel::InsertInEdge(KernelExec *kernel, KernelExec *replace_kernel, const int &tensor_index) {
+void SubGraphKernel::InsertInEdge(KernelExec *kernel, KernelExec *replace_kernel, const size_t &tensor_index) {
   // replace_kernel is a kernel with ont input tensor and output tensor
   auto in_kernel = KernelExecUtil::FindInKernelForInTensor(kernel, kernel->in_tensors().at(tensor_index));
   if (in_kernel != nullptr) {
@@ -234,7 +234,7 @@ void SubGraphKernel::InsertInEdge(KernelExec *kernel, KernelExec *replace_kernel
   nodes_.push_back(replace_kernel);
 }
 
-void SubGraphKernel::InsertOutEdge(KernelExec *kernel, KernelExec *replace_kernel, const int &tensor_index) {
+void SubGraphKernel::InsertOutEdge(KernelExec *kernel, KernelExec *replace_kernel, const size_t &tensor_index) {
   // replace_kernel is a kernel with ont input tensor and output tensor
   auto out_kernels = KernelExecUtil::FindOutKernelsForOutTensor(kernel, kernel->out_tensors().at(tensor_index));
   for (const auto &post_kernel : out_kernels) {
@@ -253,8 +253,8 @@ void SubGraphKernel::InsertOutEdge(KernelExec *kernel, KernelExec *replace_kerne
 // in_kernel -> in_post_kernel -> out_pre_kernel -> out_kernels.
 // remove in_post_kernel and out_pre_kernel, link in_kernel and out_kernels.
 // in_post_kernel and out_pre_kernel can be the same kernel sometimes.
-int SubGraphKernel::UpdateInOutKernels(KernelExec *in_kernel, std::vector out_kernels,
-                                       KernelExec *in_post_kernel, KernelExec *out_pre_kernel) {
+void SubGraphKernel::UpdateInOutKernels(KernelExec *in_kernel, std::vector out_kernels,
+                                        KernelExec *in_post_kernel, KernelExec *out_pre_kernel) {
   for (const auto &out_kernel : out_kernels) {
     out_kernel->RemoveInKernel(out_pre_kernel);
     out_pre_kernel->RemoveOutKernel(out_kernel);
@@ -280,7 +280,7 @@ void SubGraphKernel::UpdateInOutKernels(KernelExec *in_kernel, std::vector
     if (in_post_kernel->out_kernels().empty() && !lite::IsContain(out_nodes_, in_post_kernel)) {
-      lite::VectorErase(&in_nodes_, in_post_kernel);
+      (void)lite::VectorErase(&in_nodes_, in_post_kernel);
     }
   }
 
@@ -288,10 +288,9 @@ void SubGraphKernel::UpdateInOutKernels(KernelExec *in_kernel, std::vector
     if (out_pre_kernel->in_kernels().empty() && !lite::IsContain(in_nodes_, out_pre_kernel)) {
-      lite::VectorErase(&out_nodes_, out_pre_kernel);
+      (void)lite::VectorErase(&out_nodes_, out_pre_kernel);
     }
   }
-  return RET_OK;
 }
 
 // Update tensor according to the subgraph.
@@ -340,14 +339,10 @@ int SubGraphKernel::DeleteSingleWayNode(KernelExec *kernel, bool keep_input) {
   }
 
   // update kernel link
-  auto ret = UpdateInOutKernels(in_kernel, out_kernels, kernel, kernel);
-  if (ret != RET_OK) {
-    MS_LOG(ERROR) << "Update kernel link failed when removing kernel " << kernel->name();
-    return RET_ERROR;
-  }
+  UpdateInOutKernels(in_kernel, out_kernels, kernel, kernel);
 
   // update tensor link
-  ret = UpdateInOutTensors(in_kernel, out_kernels, in_tensor, out_tensor, keep_input);
+  auto ret = UpdateInOutTensors(in_kernel, out_kernels, in_tensor, out_tensor, keep_input);
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "Update tensor failed when removing kernel " << kernel->name();
     return RET_ERROR;
diff --git a/mindspore/lite/src/litert/sub_graph_kernel.h b/mindspore/lite/src/litert/sub_graph_kernel.h
index e3567efe91a..d034d37aa60 100644
--- a/mindspore/lite/src/litert/sub_graph_kernel.h
+++ b/mindspore/lite/src/litert/sub_graph_kernel.h
@@ -112,20 +112,20 @@ class SubGraphKernel : public KernelExec {
   std::vector out_nodes() { return this->out_nodes_; }
 
-  void SetInNodes(std::vector in_nodes) { in_nodes_ = in_nodes; }
+  void SetInNodes(const std::vector &in_nodes) { in_nodes_ = in_nodes; }
 
-  void SetOutNodes(std::vector out_nodes) { out_nodes_ = out_nodes; }
+  void SetOutNodes(const std::vector &out_nodes) { out_nodes_ = out_nodes; }
 
   void SetSchemaVersion(int schema_version) { schema_version_ = schema_version; }
 
   int TopologicalSortNodes();
 
-  void InsertInEdge(KernelExec *kernel, KernelExec *replace_kernel, const int &tensor_index);
+  void InsertInEdge(KernelExec *kernel, KernelExec *replace_kernel, const size_t &tensor_index);
 
-  void InsertOutEdge(KernelExec *kernel, KernelExec *replace_kernel, const int &tensor_index);
+  void InsertOutEdge(KernelExec *kernel, KernelExec *replace_kernel, const size_t &tensor_index);
 
-  int UpdateInOutKernels(KernelExec *in_kernel, std::vector out_kernels, KernelExec *in_post_kernel,
-                         KernelExec *out_pre_kernel);
+  void UpdateInOutKernels(KernelExec *in_kernel, std::vector out_kernels, KernelExec *in_post_kernel,
+                          KernelExec *out_pre_kernel);
 
   int UpdateInOutTensors(KernelExec *in_kernel, std::vector out_kernels, lite::Tensor *in_tensor,
                          lite::Tensor *out_tensor, bool keep_input);
diff --git a/mindspore/lite/tools/common/meta_graph_serializer.cc b/mindspore/lite/tools/common/meta_graph_serializer.cc
index 8cbd032d8d3..c8b559cd7f9 100644
--- a/mindspore/lite/tools/common/meta_graph_serializer.cc
+++ b/mindspore/lite/tools/common/meta_graph_serializer.cc
@@ -38,7 +38,7 @@ constexpr size_t kFlatbuffersBuilderInitSize = 1024;
 void ChangeMod(const std::string &file_path) {
 #ifndef _MSC_VER
   if (access(file_path.c_str(), F_OK) == 0) {
-    chmod(file_path.c_str(), S_IWUSR | S_IRUSR);
+    (void)chmod(file_path.c_str(), S_IWUSR | S_IRUSR);
   }
 #endif
 }
diff --git a/mindspore/lite/tools/lite_exporter/fetch_content.cc b/mindspore/lite/tools/lite_exporter/fetch_content.cc
index 6bf8a13c1f2..0c36826e11c 100644
--- a/mindspore/lite/tools/lite_exporter/fetch_content.cc
+++ b/mindspore/lite/tools/lite_exporter/fetch_content.cc
@@ -246,7 +246,7 @@ int FetchFromSequenceValue(const ValueNodePtr &value_node, DataInfo *data_info)
   }
 }  // namespace
 
-int FetchFromDefaultParam(const ParameterPtr &param_node, const converter::FmkType &fmk_type, DataInfo *data_info,
+int FetchFromDefaultParam(const ParameterPtr &param_node, const converter::FmkType &, DataInfo *data_info,
                           bool copy_data) {
   MS_ASSERT(param_node != nullptr && data_info != nullptr);
   ShapeVector shape_vector;
diff --git a/mindspore/lite/tools/optimizer/const_fold/fold_utils.cc b/mindspore/lite/tools/optimizer/const_fold/fold_utils.cc
index ce7d1bed863..c8f13f42a71 100644
--- a/mindspore/lite/tools/optimizer/const_fold/fold_utils.cc
+++ b/mindspore/lite/tools/optimizer/const_fold/fold_utils.cc
@@ -72,8 +72,9 @@ ParameterPtr CreateNewParamter(const FuncGraphPtr &func_graph, Tensor *tensor) {
   }
   return parameter;
 }
+
 kernel::KernelExec *GetKernelExec(std::vector inputs, std::vector *outputs, const CNodePtr &cnode,
-                                  lite::InnerContext *context, mindspore::Context *ms_context) {
+                                  const lite::InnerContext *context, const mindspore::Context *ms_context) {
   MS_ASSERT(outputs != nullptr && cnode != nullptr && context != nullptr && ms_context != nullptr);
   OpParameter *parameter = nullptr;
   auto ret = lite::FetchOpParameterFromNode(cnode->input(0), &parameter);
@@ -92,8 +93,7 @@ kernel::KernelExec *GetKernelExec(std::vector inputs, std::vector
   auto data_type = inputs.front()->data_type();
-  kernel::KernelKey desc{kernel::KERNEL_ARCH::kCPU, data_type, NHWC,
-                         static_cast(parameter->type_)};
+  kernel::KernelKey desc{kernel::KERNEL_ARCH::kCPU, data_type, NHWC, parameter->type_};
   kernel::KernelExec *kernel_exec = nullptr;
   ret = lite::KernelRegistry::GetInstance()->GetKernelExec(inputs, *outputs, context, ms_context, desc, parameter,
                                                            &kernel_exec);
diff --git a/mindspore/lite/tools/optimizer/graph/decrease_transpose_algo.cc b/mindspore/lite/tools/optimizer/graph/decrease_transpose_algo.cc
index 26e7d95984b..c463ca1c59c 100644
--- a/mindspore/lite/tools/optimizer/graph/decrease_transpose_algo.cc
+++ b/mindspore/lite/tools/optimizer/graph/decrease_transpose_algo.cc
@@ -588,7 +588,7 @@ int DecreaseTransposeAlgo::SetSubGraphInput(const CNodePtr &cnode, const FuncGra
     auto last_underline = node_name.find_last_of("_");
     node_name = node_name.substr(0, last_underline);
     last_underline = node_name.find_last_of("_");
-    auto index = 0;
+    size_t index = 0;
     try {
       index = std::stoi(node_name.substr(last_underline + 1)) + static_cast(kInputSizeThree);
     } catch (const std::exception &e) {