From d2f6f2802e5d985e8eee3b5174fcbeac6f889210 Mon Sep 17 00:00:00 2001
From: gongdaguo
Date: Tue, 18 Aug 2020 13:28:39 +0800
Subject: [PATCH] ReviewBot Check

---
 mindspore/lite/c_ops/deconv2d.cc | 1 -
 mindspore/lite/c_ops/scatter_nd.cc | 1 -
 mindspore/lite/c_ops/shape.cc | 1 -
 mindspore/lite/src/common/file_utils.cc | 1 -
 mindspore/lite/src/common/file_utils_ext.h | 1 -
 mindspore/lite/src/common/graph_util.cc | 1 -
 mindspore/lite/src/common/graph_util.h | 4 +---
 mindspore/lite/src/common/graph_utils_extends.cc | 1 -
 mindspore/lite/src/common/log_adapter.cc | 2 +-
 mindspore/lite/src/ir/tensor.h | 1 -
 mindspore/lite/src/lite_session.cc | 1 -
 .../lite/src/runtime/kernel/arm/fp16/matrix_fp16.cc | 3 ---
 .../lite/src/runtime/kernel/arm/nnacl/fp16/split_fp16.c | 2 +-
 .../lite/src/runtime/kernel/arm/nnacl/fp16/split_fp16.h | 2 +-
 .../lite/src/runtime/kernel/arm/nnacl/int8/add_int8.c | 2 +-
 .../lite/src/runtime/kernel/arm/nnacl/int8/mul_int8.c | 2 +-
 mindspore/lite/src/runtime/kernel/arm/nnacl/int8/pad.c | 2 +-
 mindspore/lite/src/runtime/kernel/arm/nnacl/int8/pad.h | 2 +-
 .../lite/src/runtime/kernel/arm/nnacl/int8/slice_int8.c | 4 ++--
 .../lite/src/runtime/kernel/arm/nnacl/int8/slice_int8.h | 4 ++--
 .../lite/src/runtime/kernel/arm/nnacl/int8/sub_int8.c | 2 +-
 .../src/runtime/kernel/arm/nnacl/quantization/quantize.c | 6 +++---
 .../src/runtime/kernel/arm/nnacl/winograd_transform.c | 1 -
 mindspore/lite/src/runtime/kernel/opencl/image_format.h | 1 -
 mindspore/lite/src/runtime/kernel/opencl/utils.cc | 1 -
 .../anf_populater/anf_node_populater_registry.cc | 1 -
 mindspore/lite/tools/common/graph_util.h | 1 -
 mindspore/lite/tools/common/tensor_util.h | 4 ++--
 .../legacy_optimizer/const_fold/add_const_fold_pass.cc | 1 -
 .../const_fold/concat_v2_const_fold_pass.cc | 1 -
 .../legacy_optimizer/const_fold/rsqrt_const_fold_pass.cc | 1 -
 .../legacy_optimizer/const_fold/sub_const_fold_pass.cc | 1 -
 .../const_fold/transpose_const_fold_pass.cc | 1 -
 .../legacy_optimizer/fusion/format_trans_fusion_pass.cc | 2 --
 .../legacy_optimizer/graph/eltwise_format_trans_pass.h | 1 -
 .../legacy_optimizer/graph/format_trans_pass.cc | 1 -
 .../tools/converter/parser/caffe/caffe_concat_parser.cc | 2 --
 .../tools/converter/parser/caffe/caffe_crop_parser.cc | 1 -
 .../tools/converter/parser/caffe/caffe_eltwise_parser.cc | 1 -
 .../tools/converter/parser/caffe/caffe_flatten_parser.cc | 2 +-
 .../tools/converter/parser/caffe/caffe_interp_parser.cc | 1 -
 .../tools/converter/parser/caffe/caffe_node_parser.h | 1 -
 .../tools/converter/parser/caffe/caffe_permute_parser.cc | 6 +++---
 .../tools/converter/parser/caffe/caffe_prelu_parser.cc | 1 -
 .../tools/converter/parser/caffe/caffe_reshape_parser.cc | 1 -
 .../converter/parser/tflite/tflite_activation_parser.cc | 1 -
 .../converter/parser/tflite/tflite_activation_parser.h | 2 --
 .../converter/parser/tflite/tflite_arithmetic_parser.cc | 1 -
 .../converter/parser/tflite/tflite_arithmetic_parser.h | 2 --
 .../parser/tflite/tflite_batch_to_space_parser.cc | 1 -
 .../parser/tflite/tflite_batch_to_space_parser.h | 1 -
 .../parser/tflite/tflite_depthwise_conv_parser.cc | 2 --
 .../parser/tflite/tflite_fullyconnected_parser.cc | 2 +-
 .../converter/parser/tflite/tflite_logical_parser.cc | 1 -
 .../converter/parser/tflite/tflite_logical_parser.h | 1 -
 .../tools/converter/parser/tflite/tflite_pad_parser.cc | 2 +-
 .../converter/parser/tflite/tflite_reduce_parser.cc | 1 -
 .../tools/converter/parser/tflite/tflite_reduce_parser.h | 1 -
 .../converter/parser/tflite/tflite_reshape_parser.cc | 2 +-
 .../converter/parser/tflite/tflite_resize_parser.cc | 6 +++---
 .../tools/converter/parser/tflite/tflite_resize_parser.h | 1 -
 .../converter/parser/tflite/tflite_scatter_nd_parser.cc | 2 +-
 .../lite/tools/converter/parser/tflite/tflite_util.cc | 9 ++++-----
 .../lite/tools/converter/quantizer/general_bitpacking.cc | 5 ++---
 .../tools/converter/quantizer/post_training_quantizer.cc | 1 -
 mindspore/lite/tools/converter/quantizer/quantize_util.h | 2 --
 .../lite/tools/converter/quantizer/weight_quantizer.cc | 1 -
 mindspore/lite/tools/optimizer/common/gllo_utils.cc | 2 +-
 mindspore/lite/tools/optimizer/common/gllo_utils.h | 2 +-
 .../lite/tools/optimizer/fusion/conv_activation_fusion.h | 4 ++--
 mindspore/lite/tools/time_profile/time_profile.cc | 1 -
 mindspore/lite/tools/time_profile/time_profile.h | 1 -
 72 files changed, 39 insertions(+), 98 deletions(-)

diff --git a/mindspore/lite/c_ops/deconv2d.cc b/mindspore/lite/c_ops/deconv2d.cc index eb8777e4b56..f65ad24326e 100644 --- a/mindspore/lite/c_ops/deconv2d.cc +++ b/mindspore/lite/c_ops/deconv2d.cc @@ -122,7 +122,6 @@ int DeConv2D::InferShape(std::vector inputs_, std::vecto pad_d_ = GetPadDown(); pad_r_ = GetPadRight(); auto pad_mode = (schema::PadMode)GetPadMode(); - if (pad_mode == schema::PadMode_CAFFE) { output_h = (input_h - 1) * stride_h + ((kernel_h - 1) * dilate_h + 1) - pad_u_ - pad_d_; output_w = (input_w - 1) * stride_w + ((kernel_w - 1) * dilate_w + 1) - pad_l_ - pad_r_;
diff --git a/mindspore/lite/c_ops/scatter_nd.cc b/mindspore/lite/c_ops/scatter_nd.cc index 6573fec69aa..e5f09fe496e 100644 --- a/mindspore/lite/c_ops/scatter_nd.cc +++ b/mindspore/lite/c_ops/scatter_nd.cc @@ -58,5 +58,4 @@ int ScatterND::InferShape(std::vector inputs_, std::vect return 0; } - } // namespace mindspore
diff --git a/mindspore/lite/c_ops/shape.cc b/mindspore/lite/c_ops/shape.cc index 544e71f117e..377a03ccf48 100644 --- a/mindspore/lite/c_ops/shape.cc +++ b/mindspore/lite/c_ops/shape.cc @@ -20,7 +20,6 @@ namespace mindspore { namespace { constexpr int kShapeInputNum = 1; constexpr int kShapeOutputNum = 1; - } // namespace int Shape::InferShape(std::vector inputs_, std::vector outputs_) { if (inputs_.size() != kShapeInputNum) {
diff --git a/mindspore/lite/src/common/file_utils.cc b/mindspore/lite/src/common/file_utils.cc index a221afaeda3..62e1d293e88 100644 --- a/mindspore/lite/src/common/file_utils.cc +++ b/mindspore/lite/src/common/file_utils.cc @@ -161,6 +161,5 @@ void CompareOutput(float *output_data, std::string file_path) { // } // return "/data/data/" + packageName + '/'; //} - } // namespace lite } // namespace mindspore
diff --git a/mindspore/lite/src/common/file_utils_ext.h b/mindspore/lite/src/common/file_utils_ext.h index 28eea02e41a..f5441cbdf95 100644 --- a/mindspore/lite/src/common/file_utils_ext.h +++ b/mindspore/lite/src/common/file_utils_ext.h @@ -22,7 +22,6 @@ namespace mindspore { namespace lite { int CompareRelativeOutput(float *output_data, std::string file_path); - } } // namespace mindspore #endif // MINDSPORE_LITE_COMMON_FILE_UTILS_EXT_H_
diff --git a/mindspore/lite/src/common/graph_util.cc b/mindspore/lite/src/common/graph_util.cc index e094c744ab9..e6f2ae5dfd3 100755 --- a/mindspore/lite/src/common/graph_util.cc +++ b/mindspore/lite/src/common/graph_util.cc @@ -75,7 +75,6 @@ std::vector GetGraphOutputNodes(const schema::MetaGraph *meta_graph) { // std::unordered_set OpNode::GetAllInEdges() { return inEdges; } // // std::unordered_set OpNode::GetAllOutEdges() { return outEdges; } - } // namespace lite } // namespace mindspore
diff
--git a/mindspore/lite/src/common/graph_util.h b/mindspore/lite/src/common/graph_util.h index e9a9e994fe8..7b1abf36b77 100755 --- a/mindspore/lite/src/common/graph_util.h +++ b/mindspore/lite/src/common/graph_util.h @@ -82,7 +82,6 @@ int OpGraph::Build(const schema::MetaGraph *subGraphDef) { return RET_ERROR; } - auto opDefs = subGraphDef->nodes(); uint32_t opCount = opDefs->size(); @@ -104,7 +103,7 @@ int OpGraph::Build(const schema::MetaGraph *subGraphDef) { } template int OpGraph::AddEdge(const schema::CNode *srcNodeDef, - const flatbuffers::Vector> *nodeDefs) { + const flatbuffers::Vector> *nodeDefs) { MS_ASSERT(srcNodeDef != nullptr); MS_ASSERT(nodeDefs != nullptr); NODE_ID srcId = std::string(srcNodeDef->name()->c_str()); @@ -242,7 +241,6 @@ OpGraph::~OpGraph() { } nodes.clear(); } - } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/common/graph_utils_extends.cc b/mindspore/lite/src/common/graph_utils_extends.cc index 7bf2993de95..2f741a98b8a 100644 --- a/mindspore/lite/src/common/graph_utils_extends.cc +++ b/mindspore/lite/src/common/graph_utils_extends.cc @@ -146,6 +146,5 @@ std::vector DeepUsedGraphSearch(const AnfNodePtr &root, const Includ std::vector DeepLinkedGraphSearch(const AnfNodePtr &root, const IncludeFunc &include) { return DeepLinkedGraphSearcher(include).Search(root); } - } // namespace mindspore diff --git a/mindspore/lite/src/common/log_adapter.cc b/mindspore/lite/src/common/log_adapter.cc index f14f42bd754..c37e14b1604 100644 --- a/mindspore/lite/src/common/log_adapter.cc +++ b/mindspore/lite/src/common/log_adapter.cc @@ -118,7 +118,7 @@ if (IsPrint(log_level_)) { // #ifdef USE_ANDROID_LOG #ifdef ENABLE_ARM __android_log_print(GetAndroidLogLevel(log_level_), ANDROID_LOG_TAG, "[%s:%d] %s] %s", location_.file_, - location_.line_, location_.func_, msg.str().c_str()); + location_.line_, location_.func_, msg.str().c_str()); #else printf("%s [%s:%d] %s] %s\n:", EnumStrForMsLogLevel(log_level_), location_.file_, location_.line_, location_.func_, msg.str().c_str()); diff --git a/mindspore/lite/src/ir/tensor.h b/mindspore/lite/src/ir/tensor.h index cc1ccafc33b..74bf07ec986 100644 --- a/mindspore/lite/src/ir/tensor.h +++ b/mindspore/lite/src/ir/tensor.h @@ -29,7 +29,6 @@ namespace mindspore { namespace lite { namespace tensor { - struct QuantArg { double scale; int32_t zeroPoint; diff --git a/mindspore/lite/src/lite_session.cc b/mindspore/lite/src/lite_session.cc index fa1c9eb8b9d..425308e4d61 100644 --- a/mindspore/lite/src/lite_session.cc +++ b/mindspore/lite/src/lite_session.cc @@ -362,5 +362,4 @@ session::LiteSession *session::LiteSession::CreateSession(lite::Context *context } return session; } - } // namespace mindspore diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/matrix_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/matrix_fp16.cc index ce7814f285e..3048b74527a 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/matrix_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/matrix_fp16.cc @@ -33,7 +33,4 @@ void MatrixMultiplyFp16(const float16_t *matrix_a, const float16_t *matrix_b, fl } } } - - - } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/nnacl/fp16/split_fp16.c b/mindspore/lite/src/runtime/kernel/arm/nnacl/fp16/split_fp16.c index 6f57d384c6d..fd099a10a48 100644 --- a/mindspore/lite/src/runtime/kernel/arm/nnacl/fp16/split_fp16.c +++ b/mindspore/lite/src/runtime/kernel/arm/nnacl/fp16/split_fp16.c @@ -20,7 +20,7 @@ #include "nnacl/errorcode.h" int DoSplitFp16(float16_t *in_data, 
float16_t **out_data, const int *input_shape, int offset, int num_unit, - SplitParameter *split_param) { + SplitParameter *split_param) { if (in_data == NULL || out_data == NULL) { return NNACL_ERR; } diff --git a/mindspore/lite/src/runtime/kernel/arm/nnacl/fp16/split_fp16.h b/mindspore/lite/src/runtime/kernel/arm/nnacl/fp16/split_fp16.h index 780737c0c5d..fc27fdfbb7c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/nnacl/fp16/split_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/nnacl/fp16/split_fp16.h @@ -25,7 +25,7 @@ extern "C" { #endif int DoSplitFp16(float16_t *in_data, float16_t **out_data, const int *input_shape, int offset, int num_unit, - SplitParameter *split_param); + SplitParameter *split_param); #ifdef __cplusplus } #endif diff --git a/mindspore/lite/src/runtime/kernel/arm/nnacl/int8/add_int8.c b/mindspore/lite/src/runtime/kernel/arm/nnacl/int8/add_int8.c index 30599bed983..19bd1f2dd95 100644 --- a/mindspore/lite/src/runtime/kernel/arm/nnacl/int8/add_int8.c +++ b/mindspore/lite/src/runtime/kernel/arm/nnacl/int8/add_int8.c @@ -36,7 +36,7 @@ int32x4_t ClacScaledInput(int32x4_t input, int32x4_t left_shift_result_vec, int3 } int16x4_t AddClacSumHalfWord(int32x4_t scaled_input0, int32x4_t scaled_input1, int32x4_t left_shift_out_vec, - int32x4_t output_multiplier_vec, AddQuantParameter *para) { + int32x4_t output_multiplier_vec, AddQuantParameter *para) { int32x4_t raw_sum = vaddq_s32(scaled_input0, scaled_input1); raw_sum = RoundingDivideByPOTInt32x4(vqrdmulhq_s32(vmulq_s32(raw_sum, left_shift_out_vec), output_multiplier_vec), diff --git a/mindspore/lite/src/runtime/kernel/arm/nnacl/int8/mul_int8.c b/mindspore/lite/src/runtime/kernel/arm/nnacl/int8/mul_int8.c index 872ee83f684..3e519ba2870 100644 --- a/mindspore/lite/src/runtime/kernel/arm/nnacl/int8/mul_int8.c +++ b/mindspore/lite/src/runtime/kernel/arm/nnacl/int8/mul_int8.c @@ -25,7 +25,7 @@ #ifdef ENABLE_NEON int16x4_t ClacSumHalfWordMul(int32x4_t scaled_input0, int32x4_t scaled_input1, int32x4_t left_shift_out_vec, - int32x4_t output_multiplier_vec, MulQuantArg para) { + int32x4_t output_multiplier_vec, MulQuantArg para) { int32x4_t input_scale = vmulq_s32(scaled_input0, scaled_input1); int32x4_t raw_sum = RoundingDivideByPOTInt32x4( SaturatingRoundingDoublingHighMulInt32x4(vmulq_s32(input_scale, left_shift_out_vec), output_multiplier_vec), diff --git a/mindspore/lite/src/runtime/kernel/arm/nnacl/int8/pad.c b/mindspore/lite/src/runtime/kernel/arm/nnacl/int8/pad.c index 90d7bf56241..6353ebbb147 100644 --- a/mindspore/lite/src/runtime/kernel/arm/nnacl/int8/pad.c +++ b/mindspore/lite/src/runtime/kernel/arm/nnacl/int8/pad.c @@ -19,7 +19,7 @@ #include "nnacl/errorcode.h" int PadConstant4D(const int8_t *in_data, int8_t *out_data, const int32_t *in_dims, const int32_t *out_dims, - const int32_t *paddings, const int tid, const int thread_num) { + const int32_t *paddings, const int tid, const int thread_num) { int32_t copy_size = in_dims[3]; for (int n = 0; n < in_dims[0]; n++) { for (int h = tid; h < in_dims[1]; h += thread_num) { diff --git a/mindspore/lite/src/runtime/kernel/arm/nnacl/int8/pad.h b/mindspore/lite/src/runtime/kernel/arm/nnacl/int8/pad.h index 5f8a422d876..f34011181e7 100644 --- a/mindspore/lite/src/runtime/kernel/arm/nnacl/int8/pad.h +++ b/mindspore/lite/src/runtime/kernel/arm/nnacl/int8/pad.h @@ -25,7 +25,7 @@ extern "C" { #endif int PadConstant4D(const int8_t *in_data, int8_t *out_data, const int32_t *in_dims, const int32_t *out_dims, - const int32_t *paddings, const int tid, const int thread_num); + const 
int32_t *paddings, const int tid, const int thread_num); #ifdef __cplusplus } #endif diff --git a/mindspore/lite/src/runtime/kernel/arm/nnacl/int8/slice_int8.c b/mindspore/lite/src/runtime/kernel/arm/nnacl/int8/slice_int8.c index a0e085f45a6..166d23a4746 100644 --- a/mindspore/lite/src/runtime/kernel/arm/nnacl/int8/slice_int8.c +++ b/mindspore/lite/src/runtime/kernel/arm/nnacl/int8/slice_int8.c @@ -99,9 +99,9 @@ int SliceInt8(const int8_t *input, int8_t *output, SliceParameter *param) { multiplier = input_scale / output_scale; } - for (n = 0; n< param->size_[0]; ++n) { + for (n = 0; n < param->size_[0]; ++n) { size_t out_offset0 = n * out_stride0; - size_t in_offset0 = (n+ param->begin_[0]) * in_stride0 + param->begin_[3]; + size_t in_offset0 = (n + param->begin_[0]) * in_stride0 + param->begin_[3]; for (h = 0; h < count_per_thread; ++h) { size_t k = h + thread_stride; if (k >= out_dim1) { diff --git a/mindspore/lite/src/runtime/kernel/arm/nnacl/int8/slice_int8.h b/mindspore/lite/src/runtime/kernel/arm/nnacl/int8/slice_int8.h index cc1bf78f76c..2e77447d958 100644 --- a/mindspore/lite/src/runtime/kernel/arm/nnacl/int8/slice_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/nnacl/int8/slice_int8.h @@ -22,8 +22,8 @@ #ifdef __cplusplus extern "C" { #endif -int SliceInt8NoParallel(const int8_t*input, int8_t *output, SliceParameter *param); -int SliceInt8(const int8_t*input, int8_t *output, SliceParameter *param); +int SliceInt8NoParallel(const int8_t *input, int8_t *output, SliceParameter *param); +int SliceInt8(const int8_t *input, int8_t *output, SliceParameter *param); #ifdef __cplusplus } #endif diff --git a/mindspore/lite/src/runtime/kernel/arm/nnacl/int8/sub_int8.c b/mindspore/lite/src/runtime/kernel/arm/nnacl/int8/sub_int8.c index 3a822ac1ded..c7b4faf2afc 100644 --- a/mindspore/lite/src/runtime/kernel/arm/nnacl/int8/sub_int8.c +++ b/mindspore/lite/src/runtime/kernel/arm/nnacl/int8/sub_int8.c @@ -24,7 +24,7 @@ #ifdef ENABLE_NEON int16x4_t DoClacSumHalfWord(int32x4_t scaled_input0, int32x4_t scaled_input1, int32x4_t left_shift_out_vec, - int32x4_t output_multiplier_vec, SubQuantArg *para) { + int32x4_t output_multiplier_vec, SubQuantArg *para) { int32x4_t raw_data = vsubq_s32(scaled_input0, scaled_input1); raw_data = RoundingDivideByPOTInt32x4(vqrdmulhq_s32(vmulq_s32(raw_data, left_shift_out_vec), output_multiplier_vec), diff --git a/mindspore/lite/src/runtime/kernel/arm/nnacl/quantization/quantize.c b/mindspore/lite/src/runtime/kernel/arm/nnacl/quantization/quantize.c index 3602d40c614..169d0647788 100644 --- a/mindspore/lite/src/runtime/kernel/arm/nnacl/quantization/quantize.c +++ b/mindspore/lite/src/runtime/kernel/arm/nnacl/quantization/quantize.c @@ -28,7 +28,7 @@ const int iMantissaBits = 31; void QuantizeMultiplierSmallerThanOne(double double_multiplier, int32_t *quantized_multiplier, - int *right_shift) { + int *right_shift) { if (quantized_multiplier == NULL || right_shift == NULL) { return; } @@ -38,7 +38,7 @@ void QuantizeMultiplierSmallerThanOne(double double_multiplier, int32_t *quantiz } void QuantizeRoundParameter(double double_multiplier, int32_t *quantized_multiplier, int *left_shift, - int *right_shift) { + int *right_shift) { int shift; QuantizeMultiplierSmallerThanOne(double_multiplier, quantized_multiplier, &shift); shift = -shift; @@ -56,7 +56,7 @@ uint8_t QuantizeToUint8(float real_value, float scale, int32_t zp) { return roun int32_t QuantizeToInt8(float real_value, float scale, int32_t zp) { return round(real_value / scale + zp); } void 
CalculateActivationRangeQuantized(bool is_relu, bool is_relu6, int32_t zp, float scale, int *mini, - int *maxi) { + int *maxi) { int32_t min = CHAR_MIN; int32_t max = CHAR_MAX; int32_t quantized_zero = QuantizeToInt8(0, scale, zp); diff --git a/mindspore/lite/src/runtime/kernel/arm/nnacl/winograd_transform.c b/mindspore/lite/src/runtime/kernel/arm/nnacl/winograd_transform.c index 3420c208bc2..780b69622ee 100644 --- a/mindspore/lite/src/runtime/kernel/arm/nnacl/winograd_transform.c +++ b/mindspore/lite/src/runtime/kernel/arm/nnacl/winograd_transform.c @@ -1364,7 +1364,6 @@ void Conv3x3Uint8OutputUnit(const int32_t *gemm_out, const int32_t *bias_data, i } } } - } else { for (int i = 0; i < C4NUM; i++) { const int32_t *local_ptr = gemm_out + i; diff --git a/mindspore/lite/src/runtime/kernel/opencl/image_format.h b/mindspore/lite/src/runtime/kernel/opencl/image_format.h index 4987afe49da..bbf4bacb5ad 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/image_format.h +++ b/mindspore/lite/src/runtime/kernel/opencl/image_format.h @@ -21,7 +21,6 @@ namespace mindspore { namespace kernel { - /** * MindSpore to OpenCL channel order. * @param num_channels diff --git a/mindspore/lite/src/runtime/kernel/opencl/utils.cc b/mindspore/lite/src/runtime/kernel/opencl/utils.cc index 41ed6df6cf1..a72ad23c0bd 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/utils.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/utils.cc @@ -37,7 +37,6 @@ kernel::LiteKernel *GetOpenCLKernel(const std::vector &in_tens namespace mindspore { namespace kernel { - std::vector GetCommonGlobalSize(const std::vector &local, const std::vector &global) { std::vector result(3, 1); for (int i = 0; i < 3; ++i) { diff --git a/mindspore/lite/tools/anf_importer/anf_populater/anf_node_populater_registry.cc b/mindspore/lite/tools/anf_importer/anf_populater/anf_node_populater_registry.cc index d877e48c972..9cf794e4a40 100644 --- a/mindspore/lite/tools/anf_importer/anf_populater/anf_node_populater_registry.cc +++ b/mindspore/lite/tools/anf_importer/anf_populater/anf_node_populater_registry.cc @@ -31,6 +31,5 @@ AnfNodePopulater *AnfNodePopulaterRegistry::GetNodePopulater(const std::string & void AnfNodePopulaterRegistry::SetNodePopulater(const std::string &name, AnfNodePopulater *populater) { populaters[name] = populater; } - } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/common/graph_util.h b/mindspore/lite/tools/common/graph_util.h index f3b4b97e3ad..ce40fd1ac76 100644 --- a/mindspore/lite/tools/common/graph_util.h +++ b/mindspore/lite/tools/common/graph_util.h @@ -99,7 +99,6 @@ class OpGraphT : public OpGraph { int AddEdge(NODE_ID srcId, NODE_ID dstId); int AddEdge(const schema::CNodeT *srcNodeDef, const std::vector> *nodeDefs); }; - } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/common/tensor_util.h b/mindspore/lite/tools/common/tensor_util.h index ef3b58a8423..dddb34d1b3c 100644 --- a/mindspore/lite/tools/common/tensor_util.h +++ b/mindspore/lite/tools/common/tensor_util.h @@ -55,8 +55,8 @@ size_t GetRefCount(schema::MetaGraphT *graphT, uint32_t tensorIdx); std::unique_ptr CopyQuantParamT(const std::unique_ptr &srcQuantParam); -std::unique_ptr \ - CopyQuantParamArrayT(const std::unique_ptr &srcQuantParamArray); +std::unique_ptr CopyQuantParamArrayT( + const std::unique_ptr &srcQuantParamArray); std::unique_ptr GetInTensorQuantParamArray(const schema::MetaGraphT &graphT, size_t tensorIdx); diff --git 
a/mindspore/lite/tools/converter/legacy_optimizer/const_fold/add_const_fold_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/const_fold/add_const_fold_pass.cc index b4df8f946f6..e7c2f4ead94 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/const_fold/add_const_fold_pass.cc +++ b/mindspore/lite/tools/converter/legacy_optimizer/const_fold/add_const_fold_pass.cc @@ -20,7 +20,6 @@ namespace mindspore { namespace lite { - STATUS AddConstFoldPass::Run(GraphNode *graphNode) { return ConstFoldPass::Run(graphNode); } STATUS AddConstFoldPass::CreateOp(SubGraphDefT *subGraph, OpDefT *node) { diff --git a/mindspore/lite/tools/converter/legacy_optimizer/const_fold/concat_v2_const_fold_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/const_fold/concat_v2_const_fold_pass.cc index 17cc102a506..2c21a83a88a 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/const_fold/concat_v2_const_fold_pass.cc +++ b/mindspore/lite/tools/converter/legacy_optimizer/const_fold/concat_v2_const_fold_pass.cc @@ -19,7 +19,6 @@ namespace mindspore { namespace lite { - STATUS ConcatV2ConstFoldPass::Run(GraphNode *graphNode) { return ConstFoldPass::Run(graphNode); } STATUS ConcatV2ConstFoldPass::CreateOp(SubGraphDefT *subGraph, OpDefT *node) { diff --git a/mindspore/lite/tools/converter/legacy_optimizer/const_fold/rsqrt_const_fold_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/const_fold/rsqrt_const_fold_pass.cc index 849eca570eb..832e42279df 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/const_fold/rsqrt_const_fold_pass.cc +++ b/mindspore/lite/tools/converter/legacy_optimizer/const_fold/rsqrt_const_fold_pass.cc @@ -20,7 +20,6 @@ namespace mindspore { namespace lite { - STATUS RsqrtConstFoldPass::Run(GraphNode *graphNode) { return ConstFoldPass::Run(graphNode); } STATUS RsqrtConstFoldPass::CreateOp(SubGraphDefT *subGraph, OpDefT *node) { diff --git a/mindspore/lite/tools/converter/legacy_optimizer/const_fold/sub_const_fold_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/const_fold/sub_const_fold_pass.cc index 8575c8d4ffb..c114318b045 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/const_fold/sub_const_fold_pass.cc +++ b/mindspore/lite/tools/converter/legacy_optimizer/const_fold/sub_const_fold_pass.cc @@ -23,7 +23,6 @@ namespace mindspore { namespace lite { - STATUS SubConstFoldPass::Run(GraphNode *graphNode) { return ConstFoldPass::Run(graphNode); } STATUS SubConstFoldPass::CreateOp(SubGraphDefT *subGraph, OpDefT *node) { diff --git a/mindspore/lite/tools/converter/legacy_optimizer/const_fold/transpose_const_fold_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/const_fold/transpose_const_fold_pass.cc index d76a2ca5813..848279e0810 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/const_fold/transpose_const_fold_pass.cc +++ b/mindspore/lite/tools/converter/legacy_optimizer/const_fold/transpose_const_fold_pass.cc @@ -20,7 +20,6 @@ namespace mindspore { namespace lite { - STATUS TransposeConstFoldPass::Run(GraphNode *graphNode) { return ConstFoldPass::Run(graphNode); } STATUS TransposeConstFoldPass::CreateOp(SubGraphDefT *subGraph, OpDefT *node) { diff --git a/mindspore/lite/tools/converter/legacy_optimizer/fusion/format_trans_fusion_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/fusion/format_trans_fusion_pass.cc index edad265bff5..70c425aa48d 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/fusion/format_trans_fusion_pass.cc +++ 
b/mindspore/lite/tools/converter/legacy_optimizer/fusion/format_trans_fusion_pass.cc @@ -187,5 +187,3 @@ STATUS FormatTransFusionPass::DoFusion(schema::MetaGraphT *graph, const std::str } } // namespace lite } // namespace mindspore - - diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/eltwise_format_trans_pass.h b/mindspore/lite/tools/converter/legacy_optimizer/graph/eltwise_format_trans_pass.h index 3580aa92601..1f62883d797 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/eltwise_format_trans_pass.h +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/eltwise_format_trans_pass.h @@ -24,7 +24,6 @@ namespace mindspore { namespace lite { - class EltwiseFormatTransPass : public FormatTransPass { public: EltwiseFormatTransPass() : FormatTransPass() {} diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/format_trans_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/format_trans_pass.cc index 58ed5cc2ce2..00d7e207195 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/format_trans_pass.cc +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/format_trans_pass.cc @@ -200,6 +200,5 @@ NodeIter FormatTransPass::InsertFormatTransNode(schema::MetaGraphT *graph, NodeI void FormatTransPass::SetQuantType(QuantType quantType) { this->quantType = quantType; } void FormatTransPass::SetFmk(converter::FmkType fmkType) { this->fmkType = fmkType; } - } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_concat_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_concat_parser.cc index cd4ce95c7aa..ce9590f9812 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_concat_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_concat_parser.cc @@ -28,7 +28,6 @@ STATUS CaffeConcatParser::Parse(const caffe::LayerParameter &proto, op->name = proto.name(); std::unique_ptr attr(new schema::ConcatT()); const caffe::ConcatParameter concatParam = proto.concat_param(); - if (concatParam.has_axis() && concatParam.has_concat_dim()) { // MS_LOGE("Concat param in caffe have concat_dim and axis simultaneously,return fail"); return RET_ERROR; @@ -37,7 +36,6 @@ STATUS CaffeConcatParser::Parse(const caffe::LayerParameter &proto, if (concatParam.has_concat_dim()) { // MS_LOGD("Concat dim , set axis:%d", concatParam.concat_dim()); int32_t concat_dim_value = (int32_t)concatParam.concat_dim(); - if (concat_dim_value < 0) { // MS_LOGE("concat_dim value in model is smaller than 0:%d", concat_dim_value); return RET_ERROR; diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_crop_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_crop_parser.cc index 106fb3ad71e..71d894d1480 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_crop_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_crop_parser.cc @@ -32,7 +32,6 @@ STATUS CaffeCropParser::Parse(const caffe::LayerParameter &proto, attr->offsets = offsets; } else { const caffe::CropParameter cropParam = proto.crop_param(); - if (cropParam.has_axis()) { if (cropParam.axis() == -1) { // MS_LOGW("axis with -1 may lead to calculation errors when input less than 4 dims."); diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_eltwise_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_eltwise_parser.cc index 61df34e039e..0880dba8788 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_eltwise_parser.cc +++ 
b/mindspore/lite/tools/converter/parser/caffe/caffe_eltwise_parser.cc @@ -34,7 +34,6 @@ STATUS CaffeEltwiseParser::Parse(const caffe::LayerParameter &proto, const caffe } const caffe::EltwiseParameter eltwiseParam = proto.eltwise_param(); - if (eltwiseParam.coeff_size() != 0 && eltwiseParam.coeff_size() != proto.bottom_size()) { MS_LOG(ERROR) << "Coeff size(" << eltwiseParam.coeff_size() << ") check fail, Eltwise Layer takes one coefficient per bottom blob."; diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_flatten_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_flatten_parser.cc index f24264cd584..7f78c42b98c 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_flatten_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_flatten_parser.cc @@ -19,7 +19,7 @@ namespace mindspore { namespace lite { STATUS CaffeFlattenParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, - schema::CNodeT *op, std::vector *weightVec) { + schema::CNodeT *op, std::vector *weightVec) { if (op == nullptr) { // MS_LOG(ERROR) << "null pointer dereferencing."; return RET_NULL_PTR; diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_interp_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_interp_parser.cc index 2ade9ec54db..e2459061c35 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_interp_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_interp_parser.cc @@ -23,7 +23,6 @@ STATUS CaffeInterpParser::Parse(const caffe::LayerParameter &proto, const caffe: schema::CNodeT *op, std::vector *weightVec) { std::unique_ptr attr(new schema::ResizeT()); const caffe::InterpParameter interpParam = proto.interp_param(); - if (interpParam.has_height()) { int64_t height = interpParam.height(); if (height < 0) { diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h index e7b4f3d82b8..c410b21e271 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h @@ -27,7 +27,6 @@ namespace mindspore { namespace lite { - class CaffeNodeParser { public: explicit CaffeNodeParser(const std::string &nodeName) : name(nodeName) {} diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_permute_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_permute_parser.cc index 3e42c03ad6f..a4c548cd0e7 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_permute_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_permute_parser.cc @@ -20,9 +20,9 @@ namespace mindspore { namespace lite { STATUS CaffePermuteParser::Parse(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight, - schema::CNodeT *op, - std::vector *weightVec) { + const caffe::LayerParameter &weight, + schema::CNodeT *op, + std::vector *weightVec) { op->name = proto.name(); std::unique_ptr attr(new schema::TransposeT()); const caffe::PermuteParameter permuteParam = proto.permute_param(); diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_prelu_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_prelu_parser.cc index 1458d9415d7..2f07077e649 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_prelu_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_prelu_parser.cc @@ -25,7 +25,6 @@ STATUS CaffePReluParser::Parse(const caffe::LayerParameter &proto, std::vector *weightVec) { 
std::unique_ptr attr(new schema::CaffePReLUT()); const caffe::PReLUParameter pReluParam = proto.prelu_param(); - if (pReluParam.has_channel_shared()) { attr->channelShared = pReluParam.channel_shared(); } else { diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_reshape_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_reshape_parser.cc index ee0e461e984..d59036c89bc 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_reshape_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_reshape_parser.cc @@ -27,7 +27,6 @@ STATUS CaffeReshapeParser::Parse(const caffe::LayerParameter &proto, attr->format = schema::Format_NCHW; const caffe::ReshapeParameter reshapeParam = proto.reshape_param(); - if (!reshapeParam.has_shape()) { // MS_LOGE("Reshape has no shape info, ret fail"); return RET_ERROR; diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_activation_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_activation_parser.cc index 50fb64f71d4..b5babf04ce9 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_activation_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_activation_parser.cc @@ -150,6 +150,5 @@ TfliteNodeRegister g_TfliteHardSwishParser("HardSwish", new TfliteHardSwishParse TfliteNodeRegister g_tfliteLogisticParser("Logistic", new TfliteLogisticParser()); TfliteNodeRegister g_tflitePreluParser("Prelu", new TflitePreluParser()); TfliteNodeRegister g_TfliteLeakyReluParser("LeakyRelu", new TfliteLeakyReluParser()); - } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_activation_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_activation_parser.h index 0223bcffac6..3f4ae27ad40 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_activation_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_activation_parser.h @@ -25,7 +25,6 @@ namespace mindspore { namespace lite { - class TfliteActivationParser : public TfliteNodeParser { public: TfliteActivationParser() : TfliteNodeParser("node_name") {} @@ -89,7 +88,6 @@ class TfliteLeakyReluParser : public TfliteNodeParser { std::vector *tensors_format, std::map *tensors_id_map) override; }; - } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_arithmetic_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_arithmetic_parser.cc index 8a7eb162366..2ebaa61131d 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_arithmetic_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_arithmetic_parser.cc @@ -311,7 +311,6 @@ TfliteNodeRegister g_tfliteGreaterEParser("Greater", new TfliteGreaterParser()); TfliteNodeRegister g_tfliteGreaterEqualParser("GreaterEqual", new TfliteGreaterEqualParser()); TfliteNodeRegister g_tfliteLessParser("Less", new TfliteLessParser()); TfliteNodeRegister g_tfliteLessEqualParser("LessEqual", new TfliteLessEqualParser()); - } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_arithmetic_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_arithmetic_parser.h index d79da7a58ac..d61b742b19b 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_arithmetic_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_arithmetic_parser.h @@ -25,7 +25,6 @@ namespace mindspore { namespace lite { - class TfliteDoubleInputOpParser : public TfliteNodeParser { public: 
TfliteDoubleInputOpParser() : TfliteNodeParser("node_name") {} @@ -206,7 +205,6 @@ class TfliteLessEqualParser : public TfliteCompareOpParser { public: TfliteLessEqualParser() : TfliteCompareOpParser() {} }; - } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_batch_to_space_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_batch_to_space_parser.cc index b1b4d5fad61..3c490ac4e44 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_batch_to_space_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_batch_to_space_parser.cc @@ -72,6 +72,5 @@ STATUS TfliteBatchToSpaceParser::Parse(const std::unique_ptr TfliteNodeRegister g_tfliteBatchToSpaceParser("BatchToSpace", new TfliteBatchToSpaceParser()); TfliteNodeRegister g_TfliteBatchToSpaceNDParser("BatchToSpaceND", new TfliteBatchToSpaceNDParser()); - } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_batch_to_space_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_batch_to_space_parser.h index 8e28f3b4cfb..2e3723b04fd 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_batch_to_space_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_batch_to_space_parser.h @@ -42,7 +42,6 @@ class TfliteBatchToSpaceNDParser : public TfliteBatchToSpaceParser { public: TfliteBatchToSpaceNDParser() : TfliteBatchToSpaceParser() {} }; - } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_depthwise_conv_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_depthwise_conv_parser.cc index 13354b61d1a..5ebe2583730 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_depthwise_conv_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_depthwise_conv_parser.cc @@ -22,8 +22,6 @@ namespace mindspore { namespace lite { - - STATUS TfliteDepthwiseConv2DParser::Parse(const std::unique_ptr &tflite_op, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_fullyconnected_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_fullyconnected_parser.cc index ba02dc35ef9..f84bff6bdf7 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_fullyconnected_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_fullyconnected_parser.cc @@ -68,7 +68,7 @@ STATUS TfliteFullyConnectedParser::Parse(const std::unique_ptrinputs[1], tensors_id->size(), tflite_tensors.size(), schema::Format_KHWC); AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[2], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + tflite_op->inputs[2], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); return RET_OK; diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_logical_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_logical_parser.cc index 5f4a4d18353..c6677327664 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_logical_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_logical_parser.cc @@ -71,6 +71,5 @@ STATUS TfliteLogicalParser::Parse(const std::unique_ptr &tfli TfliteNodeRegister g_TfliteLogicalAndParser("LogicalAnd", new TfliteLogicalAndParser()); 
TfliteNodeRegister g_TfliteLogicalNotParser("LogicalNot", new TfliteLogicalNotParser()); TfliteNodeRegister g_TfliteLogicalOrParser("LogicalOr", new TfliteLogicalOrParser()); - } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_logical_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_logical_parser.h index a56de847e6c..377b8717907 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_logical_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_logical_parser.h @@ -25,7 +25,6 @@ namespace mindspore { namespace lite { - class TfliteLogicalParser : public TfliteNodeParser { public: TfliteLogicalParser() : TfliteNodeParser("node_name") {} diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_pad_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_pad_parser.cc index 8f4535b7459..db828d1fcf1 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_pad_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_pad_parser.cc @@ -59,7 +59,7 @@ STATUS TflitePadParser::Parse(const std::unique_ptr &tflite_o op->primitive->value.value = attr.release(); AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); return RET_OK; diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_reduce_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_reduce_parser.cc index 9cfbceed32a..690ec75fd34 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_reduce_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_reduce_parser.cc @@ -96,6 +96,5 @@ TfliteNodeRegister g_TfliteReduceMaxParser("ReduceMax", new TfliteReduceMaxParse TfliteNodeRegister g_TfliteReduceMinParser("ReduceMin", new TfliteReduceMinParser()); TfliteNodeRegister g_TfliteReduceProdParser("ReduceProd", new TfliteReduceProdParser()); TfliteNodeRegister g_TfliteReduceAnyParser("ReduceAny", new TfliteReduceAnyParser()); - } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_reduce_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_reduce_parser.h index 7960143948f..35c49fb25a1 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_reduce_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_reduce_parser.h @@ -67,7 +67,6 @@ class TfliteReduceAnyParser : public TfliteReduceParser { public: TfliteReduceAnyParser() : TfliteReduceParser() {} }; - } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_reshape_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_reshape_parser.cc index 7bdc2d83184..3aa06063dc6 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_reshape_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_reshape_parser.cc @@ -49,7 +49,7 @@ STATUS TfliteReshapeParser::Parse(const std::unique_ptr &tfli return RET_ERROR; } auto shape_tensor_index = tflite_op->inputs[1]; - const auto & shape_tensor = tflite_tensors[shape_tensor_index]; + const auto &shape_tensor = tflite_tensors[shape_tensor_index]; if (shape_tensor == nullptr) { MS_LOG(ERROR) << "shape_tensor is 
null"; return RET_NULL_PTR; diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_resize_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_resize_parser.cc index 4bb37bb93c4..3c5516b4974 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_resize_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_resize_parser.cc @@ -71,13 +71,13 @@ STATUS TfliteResizeParser::Parse(const std::unique_ptr &tflit attr->preserveAspectRatio = false; auto tfliteResizeTensorIndex = tflite_op->inputs[1]; - const auto & shape_tensor = tflite_tensors[tfliteResizeTensorIndex]; + const auto &shape_tensor = tflite_tensors[tfliteResizeTensorIndex]; if (shape_tensor == nullptr) { MS_LOG(ERROR) << "shape_tensor is null"; return RET_NULL_PTR; } auto resizeTensorBufferIndex = shape_tensor->buffer; - const auto & buff = tflite_model_buffer.at(resizeTensorBufferIndex); + const auto &buff = tflite_model_buffer.at(resizeTensorBufferIndex); if (buff == nullptr) { MS_LOG(ERROR) << "buff_data is null"; return RET_NULL_PTR; @@ -92,7 +92,7 @@ STATUS TfliteResizeParser::Parse(const std::unique_ptr &tflit op->primitive->value.value = attr.release(); AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); AddOpOutput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); return RET_OK; diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_resize_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_resize_parser.h index 14245ba3e9e..1b9b5d91aee 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_resize_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_resize_parser.h @@ -47,7 +47,6 @@ class TfliteResizeNearestNeighborParser : public TfliteResizeParser { public: TfliteResizeNearestNeighborParser() : TfliteResizeParser() {} }; - } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_scatter_nd_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_scatter_nd_parser.cc index e86e7c7a758..39568e47105 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_scatter_nd_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_scatter_nd_parser.cc @@ -54,7 +54,7 @@ STATUS TfliteScatterNdParser::Parse(const std::unique_ptr &tf // in tflite, kIndices = 0, kUpdates = 1, kShape = 2 // in mslite, kScatterShapeIndex = 0, kScatterIndicesIndex = 1, kScatterUpdateIndex = 2; AddOpInput(op, tensors_id, tensors_format, tensors_id_map, - tflite_op->inputs[2], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); + tflite_op->inputs[2], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC); AddOpInput(op, tensors_id, tensors_format, tensors_id_map, diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_util.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_util.cc index 3a4f1b8a9ed..06311b37975 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_util.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_util.cc @@ -192,10 +192,10 @@ size_t GetDataTypeSize(const TypeId &data_type) { } STATUS getPaddingParam(const 
std::unique_ptr &tensor, - schema::PadMode pad_mode, - int strideH, int strideW, - int windowH, int windowW, - std::vector *params) { + schema::PadMode pad_mode, + int strideH, int strideW, + int windowH, int windowW, + std::vector *params) { if (tensor == nullptr) { MS_LOG(ERROR) << "the input tensor is null"; return RET_ERROR; @@ -239,6 +239,5 @@ void Split(const std::string &src_str, std::vector *dst_str, const dst_str->push_back(src_str.substr(p1)); } } - } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/quantizer/general_bitpacking.cc b/mindspore/lite/tools/converter/quantizer/general_bitpacking.cc index 3893b21c766..6e1e59cab23 100644 --- a/mindspore/lite/tools/converter/quantizer/general_bitpacking.cc +++ b/mindspore/lite/tools/converter/quantizer/general_bitpacking.cc @@ -73,14 +73,13 @@ void BitPack::BitPacking(const std::vector& originDataVec, std::vector< } size_t remainBitData = bitDataVec.size(); - if ( 8 > remainBitData && remainBitData > 0 ) { - for ( int i = 0; i < 8 - remainBitData; i++ ) { + if (8 > remainBitData && remainBitData > 0) { + for (int i = 0; i < 8 - remainBitData; i++) { bitDataVec.push(0); } PackFromOriginToUint8(bitDataVec, packedDataVec); } } - } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/quantizer/post_training_quantizer.cc b/mindspore/lite/tools/converter/quantizer/post_training_quantizer.cc index d9eba3480e7..83431dc609e 100644 --- a/mindspore/lite/tools/converter/quantizer/post_training_quantizer.cc +++ b/mindspore/lite/tools/converter/quantizer/post_training_quantizer.cc @@ -42,7 +42,6 @@ using std::vector; namespace mindspore { namespace lite { namespace quant { - struct DivergInfo { std::vector histogram; CNodePtr cnode; diff --git a/mindspore/lite/tools/converter/quantizer/quantize_util.h b/mindspore/lite/tools/converter/quantizer/quantize_util.h index ceb88227794..5277c7124d3 100644 --- a/mindspore/lite/tools/converter/quantizer/quantize_util.h +++ b/mindspore/lite/tools/converter/quantizer/quantize_util.h @@ -33,7 +33,6 @@ namespace mindspore { namespace lite { namespace quant { - static constexpr size_t UINT8_QUANTIZATION = 8; /** @@ -124,7 +123,6 @@ STATUS QuantFilter(ParamValueLitePtr &weightPtr, QuantType quantType, int quant_ size_t bitNum = UINT8_QUANTIZATION, bool per_channel = false); STATUS PostBitPack(float *weights, size_t shapeSize, size_t bitNum = UINT8_QUANTIZATION); - } // namespace quant } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/quantizer/weight_quantizer.cc b/mindspore/lite/tools/converter/quantizer/weight_quantizer.cc index 362ff693f5d..54d796f6670 100644 --- a/mindspore/lite/tools/converter/quantizer/weight_quantizer.cc +++ b/mindspore/lite/tools/converter/quantizer/weight_quantizer.cc @@ -26,7 +26,6 @@ using std::vector; namespace mindspore { namespace lite { namespace quant { - WeightQuantizer::WeightQuantizer(FuncGraphPtr graph, const string &weightSize, const std::string &convWeightChannelThreshold, const std::string &bitNum) : Quantizer(graph) { diff --git a/mindspore/lite/tools/optimizer/common/gllo_utils.cc b/mindspore/lite/tools/optimizer/common/gllo_utils.cc index f665f63ee05..ecbc441a7a9 100644 --- a/mindspore/lite/tools/optimizer/common/gllo_utils.cc +++ b/mindspore/lite/tools/optimizer/common/gllo_utils.cc @@ -284,7 +284,7 @@ void CheckLeastInputSize(const CNodePtr &node, const int size) { } ParameterPtr AddNewBiasNode(float *bias_data, const FuncGraphPtr &func_graph, int kernel_num, - 
const ParamValueLitePtr &weight_tensor) { + const ParamValueLitePtr &weight_tensor) { auto bias_parameter = func_graph->add_parameter(); MS_ASSERT(bias_parameter != nullptr); std::vector shape = {kernel_num}; diff --git a/mindspore/lite/tools/optimizer/common/gllo_utils.h b/mindspore/lite/tools/optimizer/common/gllo_utils.h index def65d0537c..e970c0babd8 100644 --- a/mindspore/lite/tools/optimizer/common/gllo_utils.h +++ b/mindspore/lite/tools/optimizer/common/gllo_utils.h @@ -48,7 +48,7 @@ void CheckIfNodeIsParam(const AnfNodePtr &node); void CheckLeastInputSize(const CNodePtr &node, int size); ParameterPtr AddNewBiasNode(float *bias_data, const FuncGraphPtr &func_graph, int kernel_num, - const ParamValueLitePtr &weight_tensor); + const ParamValueLitePtr &weight_tensor); schema::PrimitiveType GetCNodeType(const BaseRef &node); diff --git a/mindspore/lite/tools/optimizer/fusion/conv_activation_fusion.h b/mindspore/lite/tools/optimizer/fusion/conv_activation_fusion.h index 8be5afc403c..bc211ca48f7 100644 --- a/mindspore/lite/tools/optimizer/fusion/conv_activation_fusion.h +++ b/mindspore/lite/tools/optimizer/fusion/conv_activation_fusion.h @@ -27,8 +27,8 @@ class ConvActivationFusion : public PatternProcessPass { public: explicit ConvActivationFusion(bool multigraph = true, const std::string &name = "conv_activation_fusion", schema::PrimitiveType primitive = schema::PrimitiveType_LeakyReLU, - schema::ActivationType activation = schema::ActivationType_LEAKY_RELU) : primitive_type( - primitive), activation_type(activation), PatternProcessPass(name, multigraph) {} + schema::ActivationType activation = schema::ActivationType_LEAKY_RELU) + : primitive_type(primitive), activation_type(activation), PatternProcessPass(name, multigraph) {} ~ConvActivationFusion() override = default; const BaseRef DefinePattern() const override; const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override; diff --git a/mindspore/lite/tools/time_profile/time_profile.cc b/mindspore/lite/tools/time_profile/time_profile.cc index 1f5f8593933..4fe3c60644e 100644 --- a/mindspore/lite/tools/time_profile/time_profile.cc +++ b/mindspore/lite/tools/time_profile/time_profile.cc @@ -394,6 +394,5 @@ int RunTimeProfile(int argc, const char **argv) { return RET_OK; } - } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/time_profile/time_profile.h b/mindspore/lite/tools/time_profile/time_profile.h index eaad720d349..6668c5edd60 100644 --- a/mindspore/lite/tools/time_profile/time_profile.h +++ b/mindspore/lite/tools/time_profile/time_profile.h @@ -34,7 +34,6 @@ namespace mindspore { namespace lite { - class MS_API TimeProfileFlags : public virtual FlagParser { public: TimeProfileFlags() {