!48644 [MS][LITE] code clean

Merge pull request !48644 from jianghui58/codex_fuzz_master
commit 5c1ded4640
Author: i-robot (committed by Gitee)
Date:   2023-02-11 06:14:09 +00:00
11 changed files with 29 additions and 12 deletions

View File

@@ -34,6 +34,9 @@ extern "C" {
 #endif
 #endif
+#define AVX_ACT_RELU 1
+#define AVX_ACT_RELU6 3
 // Signed saturating Add
 __m128i _mm_adds_epi32(__m128i a, __m128i b);
@@ -89,7 +92,7 @@ static inline void ActBlock8Avx(__m256 *v1, __m256 *v2, __m256 *v3, __m256 *v4,
   __m256 relu6 = _mm256_set1_ps(6.0);
   __m256 zero = _mm256_setzero_ps();
   switch (relu_type) {
-    case 3:
+    case AVX_ACT_RELU6:
       *v1 = _mm256_min_ps(*v1, relu6);
       *v2 = _mm256_min_ps(*v2, relu6);
       *v3 = _mm256_min_ps(*v3, relu6);
@@ -98,7 +101,7 @@ static inline void ActBlock8Avx(__m256 *v1, __m256 *v2, __m256 *v3, __m256 *v4,
       *v6 = _mm256_min_ps(*v6, relu6);
       *v7 = _mm256_min_ps(*v7, relu6);
       *v8 = _mm256_min_ps(*v8, relu6);
-    case 1:
+    case AVX_ACT_RELU:
       *v1 = _mm256_max_ps(*v1, zero);
       *v2 = _mm256_max_ps(*v2, zero);
       *v3 = _mm256_max_ps(*v3, zero);
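Note on the two ActBlock8Avx hunks: the switch relies on deliberate fall-through, so AVX_ACT_RELU6 first clamps the vectors to 6.0f and then falls into AVX_ACT_RELU, which clamps to 0.0f; renaming the bare 3 and 1 to the new macros preserves that behavior. A minimal scalar sketch of the same control flow (the function name and driver are illustrative, not from this file):

#include <algorithm>
#include <cstdio>

#define AVX_ACT_RELU 1
#define AVX_ACT_RELU6 3

// Scalar analogue of the ActBlock8Avx switch: the RELU6 case clamps the
// upper bound, then falls through so the RELU case clamps the lower bound.
static float ActScalar(float v, int relu_type) {
  switch (relu_type) {
    case AVX_ACT_RELU6:
      v = std::min(v, 6.0f);
      // intentional fall-through
    case AVX_ACT_RELU:
      v = std::max(v, 0.0f);
      break;
    default:
      break;  // no activation
  }
  return v;
}

int main() {
  std::printf("%.1f %.1f\n", ActScalar(7.5f, AVX_ACT_RELU6),  // prints 6.0
              ActScalar(-2.0f, AVX_ACT_RELU));                // prints 0.0
  return 0;
}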

View File

@@ -76,7 +76,9 @@ STATUS HuffmanDecode::DoHuffmanDecode(const std::string &input_str, void *decode
 }
 STATUS HuffmanDecode::RebuildHuffmanTree(std::string keys, std::string codes, const HuffmanNodePtr &root) {
-  HuffmanNodePtr cur_node, tmp_node, new_node;
+  HuffmanNodePtr cur_node;
+  HuffmanNodePtr tmp_node;
+  HuffmanNodePtr new_node;
   auto huffman_keys = Str2Vec(std::move(keys));
   auto huffman_codes = Str2Vec(std::move(codes));
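Several hunks below repeat this pattern: comma declarations (HuffmanNodePtr cur_node, tmp_node, new_node; and the int i, j; pairs) are split into one declaration per statement, the form readability checkers usually ask for. With pointer types the comma form is also a classic trap, as this small illustration (not from the PR) shows:

// In a comma declaration, only the first declarator gets the *:
int *a, b;   // a is int*, b is a plain int

// One declaration per statement keeps each type explicit:
int *p = nullptr;
int q = 0;

int main() { return 0; }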

View File

@@ -166,7 +166,8 @@ int StridedSliceCPUKernel::FastRunImpl(int task_id) {
   if (cal_axis_num > cal_num_per_thread_) {
     cal_axis_num = cal_num_per_thread_;
   }
-  FastStride(cur_in_ptr, cur_out_ptr, cal_axis_num, param_->strides_[split_axis_], 1, inner_size_, 0);
+  FastStride(cur_in_ptr, cur_out_ptr, static_cast<uint32_t>(cal_axis_num), param_->strides_[split_axis_], 1,
+             inner_size_, 0);
   }
   return RET_OK;
 }
@@ -271,7 +272,7 @@ int StridedSliceCPUKernel::SoftCopyInputToOutput() {
   auto out_start = output_data + task_id * block_size;
   auto copy_size = block_size;
   if (task_id == (thread_num_ - 1)) {
-    copy_size = size - task_id * block_size;
+    copy_size = size - static_cast<size_t>(task_id) * block_size;
   }
   (void)memcpy(out_start, in_start, copy_size);
   return RET_OK;
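The SoftCopyInputToOutput change makes the signed-to-unsigned conversion explicit: task_id is an int, and in mixed int/size_t arithmetic the int is converted to size_t, so a negative value would silently wrap to a huge offset; the cast documents that the multiply is intended to run in unsigned arithmetic. A small sketch of the wraparound (the negative id is made up for illustration):

#include <cstddef>
#include <cstdio>

int main() {
  int task_id = -1;              // illustrative bad thread id
  size_t block_size = 1024;
  // Usual arithmetic conversions turn the int into size_t before multiplying:
  size_t offset = task_id * block_size;
  std::printf("%zu\n", offset);  // enormous value on 64-bit: the id wrapped
  return 0;
}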

View File

@@ -85,7 +85,8 @@ int ConvolutionGradInputCPUKernelFp16::DoExecute(int task_id) {
   auto w_addr = reinterpret_cast<float16_t *>(input_w->data());
   auto dx_addr = reinterpret_cast<float16_t *>(out_dx->data());
-  int i, j;
+  int i;
+  int j;
   int in_ch = conv_param->input_channel_;
   int in_h = conv_param->input_h_;
   int nweights = input_w->ElementsNum();

View File

@@ -206,7 +206,8 @@ int ResizeCPUKernel::RunImpl(int task_id) {
                             calculate_, coordinate_transform_mode_, task_id, op_parameter_->thread_num_);
     }
     case static_cast<int>(schema::ResizeMethod_CUBIC): {
-      float *line_buffer = static_cast<float *>(line_buffer_) + new_width_ * c * sizeof(float) * task_id;
+      float *line_buffer = static_cast<float *>(line_buffer_) +
+                           static_cast<size_t>(new_width_ * c) * sizeof(float) * static_cast<size_t>(task_id);
       return ResizeBicubic(input_data, output_data, input_shape.data(), out_tensors_.at(0)->shape().data(),
                            coordinate_.y_tops_, coordinate_.x_lefts_, static_cast<float *>(y_weights_),
                            static_cast<float *>(x_weights_), line_buffer, h_begin, h_end);

View File

@@ -86,7 +86,8 @@ int ConvolutionGradInputCPUKernel::DoExecute(int task_id) {
   auto w_addr = reinterpret_cast<float *>(input_w->MutableData());
   auto dx_addr = reinterpret_cast<float *>(out_dx->MutableData());
-  int i, j;
+  int i;
+  int j;
   int batch = conv_param->output_batch_;
   int groups = conv_param->group_;
   int in_ch = conv_param->input_channel_;

View File

@@ -83,7 +83,8 @@ int DeConvolutionGradFilterCPUKernel::DoExecute(int task_id) {
   auto dw_addr = reinterpret_cast<float *>(out_dw->data());
   CHECK_NULL_RETURN(dw_addr);
-  int i, j;
+  int i;
+  int j;
   int out_w = conv_param->output_w_;
   int out_h = conv_param->output_h_;
   int out_ch = conv_param->output_channel_;

View File

@@ -1745,6 +1745,8 @@ const char *lite::LiteSession::LoadModelByPath(const std::string &file, mindspor
   char *lite_buf = nullptr;
   auto buf_model_type = LoadModelByBuff(model_buf, buf_size, &lite_buf, size, model_type);
   if (buf_model_type == mindspore::ModelType::kUnknownType || lite_buf == nullptr) {
+    delete[] model_buf;
+    model_buf = nullptr;
     return nullptr;
   }
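The LoadModelByPath hunk closes a leak: on the unknown-model-type early return, model_buf was never freed. An RAII alternative, shown only as a sketch with hypothetical stand-in types (not the MindSpore Lite API), is to hold the buffer in std::unique_ptr<char[]> so every return path releases it:

#include <cstddef>
#include <memory>

// Hypothetical stand-ins for this sketch; not the lite session's real types.
enum class ModelType { kUnknownType, kMindIR };
static ModelType LoadModelByBuffSketch(const char *buf, size_t buf_size) {
  return (buf != nullptr && buf_size > 0) ? ModelType::kMindIR : ModelType::kUnknownType;
}

static const char *LoadModelByPathSketch(size_t buf_size) {
  std::unique_ptr<char[]> model_buf(new char[buf_size]);
  if (LoadModelByBuffSketch(model_buf.get(), buf_size) == ModelType::kUnknownType) {
    return nullptr;  // unique_ptr calls delete[] here; no manual cleanup needed
  }
  return model_buf.release();  // caller takes ownership on success
}

int main() {
  const char *buf = LoadModelByPathSketch(16);
  delete[] buf;  // in this sketch the caller owns the returned buffer
  return 0;
}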

View File

@@ -84,7 +84,9 @@ std::vector<std::string> SplitStringToVector(const std::string &raw_str, const c
 }
 std::vector<std::string> SplitStringToVector(const std::string &raw_str, const std::string &delimiter) {
-  size_t pos_start = 0, pos_end, delim_len = delimiter.length();
+  size_t pos_start = 0;
+  size_t pos_end = 0;
+  size_t delim_len = delimiter.length();
   std::string token;
   std::vector<std::string> res;
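For context, a splitter with these declarations typically walks the string with std::string::find and substr; the loop body below is a reconstruction under that assumption, not the file's code:

#include <iostream>
#include <string>
#include <vector>

std::vector<std::string> SplitStringToVector(const std::string &raw_str, const std::string &delimiter) {
  size_t pos_start = 0;
  size_t pos_end = 0;
  size_t delim_len = delimiter.length();
  std::string token;
  std::vector<std::string> res;
  // Emit the token before each delimiter occurrence, then skip the delimiter.
  while ((pos_end = raw_str.find(delimiter, pos_start)) != std::string::npos) {
    token = raw_str.substr(pos_start, pos_end - pos_start);
    pos_start = pos_end + delim_len;
    res.push_back(token);
  }
  res.push_back(raw_str.substr(pos_start));  // trailing token after the last delimiter
  return res;
}

int main() {
  for (const auto &s : SplitStringToVector("a::b::c", "::")) std::cout << s << '\n';
  return 0;
}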

View File

@@ -179,7 +179,7 @@ MSKernelCallBack BiasCorrectionStrategy::GetCPUFloatBeforeCallBack() {
     MS_LOG(INFO) << "tensor type is " << tensor.DataType();
     return true;
   }
-  size_t elem_count = tensor.ElementNum();
+  size_t elem_count = static_cast<size_t>(tensor.ElementNum());
   MS_CHECK_GT(elem_count, 0, false);
   std::vector<float> fp32_op_input(elem_count);
   auto ret =
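The cast here makes a narrowing conversion visible: ElementNum() returns a signed count, and converting it to size_t wraps negative values to huge ones, which an unsigned greater-than-zero check no longer catches. A short demonstration of the conversion semantics (the negative count is made up):

#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
  int64_t element_num = -1;  // illustrative invalid count
  size_t elem_count = static_cast<size_t>(element_num);
  std::printf("%zu\n", elem_count);       // 18446744073709551615 on 64-bit
  if (elem_count > 0) std::puts("unsigned check passes despite the bad count");
  if (element_num <= 0) std::puts("checking the signed value first catches it");
  return 0;
}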

View File

@@ -60,7 +60,10 @@ std::vector<int64_t> GetSplitPadList(const api::SharedPtr<ops::Conv2DFusion> &or
     return {};
   }
   std::vector<int64_t> new_pad_list;
-  int64_t pad_up = 0, pad_down = 0, pad_left = 0, pad_right = 0;
+  int64_t pad_up = 0;
+  int64_t pad_down = 0;
+  int64_t pad_left = 0;
+  int64_t pad_right = 0;
   int64_t pad_h_all =
     (output_h - 1) * ori_conv_prim->get_stride().at(kIndexH) + (kernel_h - 1) * dilation_h + 1 - input_h;
   int64_t pad_w_all =
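The pad arithmetic in this hunk is the standard identity for total padding along a dimension: (out - 1) * stride + (kernel - 1) * dilation + 1 - in. A worked example with made-up sizes (the floor/ceil split between the two sides is a common convention, assumed here rather than taken from GetSplitPadList):

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  // Made-up conv shape: 224 -> 112 with stride 2, 3x3 kernel, dilation 1.
  const int64_t output_h = 112, stride_h = 2, kernel_h = 3, dilation_h = 1, input_h = 224;
  int64_t pad_h_all = (output_h - 1) * stride_h + (kernel_h - 1) * dilation_h + 1 - input_h;
  pad_h_all = std::max<int64_t>(pad_h_all, 0);  // negative means no padding is needed
  const int64_t pad_up = pad_h_all / 2;         // floor half on top...
  const int64_t pad_down = pad_h_all - pad_up;  // ...remainder on the bottom
  std::printf("pad_h_all=%lld up=%lld down=%lld\n", static_cast<long long>(pad_h_all),
              static_cast<long long>(pad_up), static_cast<long long>(pad_down));
  return 0;  // prints: pad_h_all=1 up=0 down=1
}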