diff --git a/mindspore/core/utils/log_adapter.h b/mindspore/core/utils/log_adapter.h
index 7aba9194e95..4b50be12b8b 100644
--- a/mindspore/core/utils/log_adapter.h
+++ b/mindspore/core/utils/log_adapter.h
@@ -147,7 +147,7 @@ class LogWriter {
   LogWriter(const LocationInfo &location, MsLogLevel log_level, SubModuleId submodule,
             ExceptionType excp_type = NoExceptionType)
-      : location_(location), log_level_(log_level), submodule_(submodule), exception_type_(excp_type) {}
+      : location_(location), log_level_(log_level), exception_type_(excp_type) {}
   ~LogWriter() = default;
 
   void operator<(const LogStream &stream) const noexcept __attribute__((visibility("default")));
@@ -161,7 +161,6 @@ class LogWriter {
 
   LocationInfo location_;
   MsLogLevel log_level_;
-  SubModuleId submodule_;
   ExceptionType exception_type_;
 
   inline static ExceptionHandler exception_handler_ = nullptr;
diff --git a/mindspore/lite/CMakeLists.txt b/mindspore/lite/CMakeLists.txt
index cca41cd96f5..42acb906bdc 100644
--- a/mindspore/lite/CMakeLists.txt
+++ b/mindspore/lite/CMakeLists.txt
@@ -65,22 +65,21 @@ set(CMAKE_VERBOSE_MAKEFILE on)
 add_compile_definitions(USE_ANDROID_LOG)
 add_compile_definitions(NO_DLIB)
 add_compile_options(-fPIC)
-if (NOT PLATFORM_ARM64 AND NOT PLATFORM_ARM32)
-    if ("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
-        set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DDebug -g")
-        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DDebug -g")
-        set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=default")
-        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=default")
-    else ()
-        ## enable for binscope for release
-        set(CMAKE_C_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -fstack-protector-strong -Wno-attributes -Wno-deprecated-declarations ${CMAKE_C_FLAGS}")
-        set(CMAKE_CXX_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -fstack-protector-strong -Wno-attributes -Wno-deprecated-declarations ${CMAKE_CXX_FLAGS}")
-        if (NOT WIN32)
-            set(CMAKE_SHARED_LINKER_FLAGS "-Wl,-z,relro,-z,now -Wl,-z,noexecstack ${CMAKE_SHARED_LINKER_FLAGS}")
-            set(CMAKE_EXE_LINKER_FLAGS "-Wl,-z,relro,-z,now -Wl,-z,noexecstack ${CMAKE_EXE_LINKER_FLAGS}")
-        endif()
-        string(REPLACE " -g " " " CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
-    endif ()
+
+if ("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
+    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DDebug -g")
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DDebug -g")
+    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=default")
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=default")
+else ()
+    ## enable for binscope for release
+    set(CMAKE_C_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -fstack-protector-strong -Wno-attributes -Wno-deprecated-declarations -Wno-missing-braces -Wno-overloaded-virtual ${CMAKE_C_FLAGS}")
+    set(CMAKE_CXX_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -fstack-protector-strong -Wno-attributes -Wno-deprecated-declarations -Wno-missing-braces -Wno-overloaded-virtual ${CMAKE_CXX_FLAGS}")
+    if (NOT WIN32)
+        set(CMAKE_SHARED_LINKER_FLAGS "-Wl,-z,relro,-z,now -Wl,-z,noexecstack ${CMAKE_SHARED_LINKER_FLAGS}")
+        set(CMAKE_EXE_LINKER_FLAGS "-Wl,-z,relro,-z,now -Wl,-z,noexecstack ${CMAKE_EXE_LINKER_FLAGS}")
+    endif()
+    string(REPLACE " -g " " " CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
 endif ()
 
 if (BUILD_DEVICE)
diff --git a/mindspore/lite/nnacl/arithmetic_common.h b/mindspore/lite/nnacl/arithmetic_common.h
index 34aab3e4a56..cedb59cd711 100644
--- a/mindspore/lite/nnacl/arithmetic_common.h
+++ b/mindspore/lite/nnacl/arithmetic_common.h
@@ -51,6 +51,8 @@ void TileOneDimension(float *inData, float *outData, int dim, size_t ndim, int *inStrides,
                       int *outStrides, int *multiple);
 void ComputeStrides(int *shape, int *strides, int ndim);
 
+void CalcMultiplesAndStrides(ArithmeticParameter *param);
+
 void TileDimensions(float *data0, float *data1, float *tile_data0, float *tile_data1, ArithmeticParameter *param);
 void TileDimensionsUint8(uint8_t *data0, uint8_t *data1, uint8_t *tile_data0, uint8_t *tile_data1,
                          ArithmeticParameter *param);
diff --git a/mindspore/lite/nnacl/fp16/conv_fp16.c b/mindspore/lite/nnacl/fp16/conv_fp16.c
index 91bac43931e..3a93351aed8 100644
--- a/mindspore/lite/nnacl/fp16/conv_fp16.c
+++ b/mindspore/lite/nnacl/fp16/conv_fp16.c
@@ -395,7 +395,6 @@ void Conv3x3Fp16(float16_t *input_data, float16_t *transed_weight, const float16_t *bias_data,
   int input_batch = conv_param->input_batch_;
   for (int batch = 0; batch < input_batch; batch++) {
-    int in_batch_offset = batch * ic4 * C4NUM * conv_param->input_h_ * conv_param->input_w_;
     int tmp_out_batch_offset = batch * oc8 * C8NUM * out_w_block * out_h_block * output_unit * output_unit;
     for (int thread_id = task_id; thread_id < output_tile_count; thread_id += thread_count) {
       int start_index = thread_id * tile_num;
diff --git a/mindspore/lite/nnacl/fp16/pack_fp16.c b/mindspore/lite/nnacl/fp16/pack_fp16.c
index 6039528fdcb..e4d2f863f2a 100644
--- a/mindspore/lite/nnacl/fp16/pack_fp16.c
+++ b/mindspore/lite/nnacl/fp16/pack_fp16.c
@@ -55,7 +55,6 @@ void Im2ColPackUnitFp16(float16_t *input_data, ConvParameter *conv_param, float16_t *packed_input,
   int in_w = conv_param->input_w_;
   int out_w = conv_param->output_w_;
   int channel_block = UP_DIV(in_channel, 4);
-  int kernel_plane = kernel_h * kernel_w;
 
   for (int i = 0; i < real_cal_num; i++) {
     int block_start = block_index + i;
diff --git a/mindspore/lite/nnacl/fp16/winograd_transform_fp16.c b/mindspore/lite/nnacl/fp16/winograd_transform_fp16.c
index 4e683efe0cb..38f402c2747 100644
--- a/mindspore/lite/nnacl/fp16/winograd_transform_fp16.c
+++ b/mindspore/lite/nnacl/fp16/winograd_transform_fp16.c
@@ -607,7 +607,7 @@ void WinogradInputTransformFp16(const float16_t *input_data, float16_t *trans_input,
       for (int j = 0; j < (interval_x_e - interval_x_s); j++) {
         int src_x_offset = src_y_offset + j * ic8 * C8NUM;
         int dst_x_offset = dst_y_offset + j * C8NUM;
-        float16_t *src_addr = input_data + src_x_offset;
+        const float16_t *src_addr = input_data + src_x_offset;
         float16_t *dst_addr = tmp_data + dst_x_offset;
 #ifdef ENABLE_NEON
         vst1q_f16(dst_addr, vld1q_f16(src_addr));
diff --git a/mindspore/lite/nnacl/int8/conv_int8.c b/mindspore/lite/nnacl/int8/conv_int8.c
index 93b71bd0c95..068864181f5 100644
--- a/mindspore/lite/nnacl/int8/conv_int8.c
+++ b/mindspore/lite/nnacl/int8/conv_int8.c
@@ -28,7 +28,7 @@ void IndirectGemmInt8(int8_t *dst, int32_t *tmp_dst, const int8_t *src, const int8_t *weight,
   int32_t out_zp = conv_param->conv_quant_arg_.output_quant_args_[0].zp_;
   int32_t act_min = conv_param->conv_quant_arg_.out_act_min_[0];
   int32_t act_max = conv_param->conv_quant_arg_.out_act_max_[0];
-  int oc4 = UP_DIV(output_channel, C4NUM);
+
 #ifdef ENABLE_ARM64
   size_t asymmetric = conv_param->conv_quant_arg_.asymmetric_ & FILTER_ASYMMETRIC;
   size_t per_channel = conv_param->conv_quant_arg_.per_channel_ & FILTER_PER_CHANNEL;
@@ -36,6 +36,7 @@ void IndirectGemmInt8(int8_t *dst, int32_t *tmp_dst, const int8_t *src, const int8_t *weight,
                     output_channel * sizeof(int8_t), input_sum, act_min, act_max, out_zp, out_multiplier,
                     shift_before, shift_after, asymmetric, per_channel);
 #else
+  int oc4 = UP_DIV(output_channel, C4NUM);
   int tile_num = conv_param->tile_num_;
   int plane_c4 = UP_DIV(kernel_plane, C4NUM);
   for (int oc = 0; oc < output_channel; oc++) {
diff --git a/mindspore/lite/nnacl/int8/matmul_int8.c b/mindspore/lite/nnacl/int8/matmul_int8.c
index 467b03bfd9d..18d1c8c2800 100644
--- a/mindspore/lite/nnacl/int8/matmul_int8.c
+++ b/mindspore/lite/nnacl/int8/matmul_int8.c
@@ -63,16 +63,17 @@ void RowMajor2Row16x4MajorInt8(void *src_ptr, void *dst_ptr, int row, int col) {
   for (int ri = 0; ri < row_4div; ri += C4NUM) {
     for (int ci = 0; ci < col_16div; ci += C16NUM) {
 #ifdef ENABLE_ARM64
+      size_t col_offset = col;
       int8_t *src_c = src_r + ci;
       int8_t *dst_c = dst_r + ci * C4NUM;
       asm volatile(
         "mov x10, %[src_c] \n"
         "mov x11, %[dst_c] \n"
-        "ld1 {v0.16b}, [x10], %[col]\n"
-        "ld1 {v1.16b}, [x10], %[col]\n"
-        "ld1 {v2.16b}, [x10], %[col]\n"
-        "ld1 {v3.16b}, [x10], %[col]\n"
+        "ld1 {v0.16b}, [x10], %[col_offset]\n"
+        "ld1 {v1.16b}, [x10], %[col_offset]\n"
+        "ld1 {v2.16b}, [x10], %[col_offset]\n"
+        "ld1 {v3.16b}, [x10], %[col_offset]\n"
 
         "st1 {v0.16b}, [x11], #16\n"
         "st1 {v1.16b}, [x11], #16\n"
@@ -80,7 +81,7 @@ void RowMajor2Row16x4MajorInt8(void *src_ptr, void *dst_ptr, int row, int col) {
         "st1 {v3.16b}, [x11], #16\n"
 
         :
-        : [ dst_c ] "r"(dst_c), [ src_c ] "r"(src_c), [ col ] "r"(col)
+        : [ dst_c ] "r"(dst_c), [ src_c ] "r"(src_c), [ col_offset ] "r"(col_offset)
         : "x10", "x11", "v0", "v1", "v2", "v3");
 #else
       MatrixPack4x16UnitInt8(src_r + ci, dst_r + ci * C4NUM, C4NUM, C16NUM, col);
diff --git a/mindspore/lite/nnacl/winograd_transform.c b/mindspore/lite/nnacl/winograd_transform.c
index dbadf3155ad..3ff4b449463 100644
--- a/mindspore/lite/nnacl/winograd_transform.c
+++ b/mindspore/lite/nnacl/winograd_transform.c
@@ -1225,9 +1225,9 @@ void Conv3x3Uint8OutputUnit(const int32_t *gemm_out, const int32_t *bias_data, int8_t *output_data,
       ls = vld1q_s32(left_shift);
       rs = vld1q_s32(right_shift);
     } else {
-      out_multiplier = vdupq_n_s32(quant_multiplier);
-      ls = vdupq_n_s32(left_shift);
-      rs = vdupq_n_s32(right_shift);
+      out_multiplier = vdupq_n_s32(quant_multiplier[0]);
+      ls = vdupq_n_s32(left_shift[0]);
+      rs = vdupq_n_s32(right_shift[0]);
     }
     int32x4_t out_zp = vdupq_n_s32(output_zp);
     int32x4_t output_min = vdupq_n_s32(out_min);
diff --git a/mindspore/lite/src/common/graph_util.cc b/mindspore/lite/src/common/graph_util.cc
index 1ff45db63d2..ad1e4841052 100755
--- a/mindspore/lite/src/common/graph_util.cc
+++ b/mindspore/lite/src/common/graph_util.cc
@@ -43,7 +43,7 @@ std::vector<size_t> GetGraphInputNodes(const schema::MetaGraph *meta_graph) {
       }
     }
   }
-  return std::move(ret);
+  return ret;
 }
 
 std::vector<size_t> GetGraphOutputNodes(const schema::MetaGraph *meta_graph) {
@@ -64,7 +64,7 @@ std::vector<size_t> GetGraphOutputNodes(const schema::MetaGraph *meta_graph) {
       }
     }
   }
-  return std::move(ret);
+  return ret;
 }
 
 // NODE_ID OpNode::ID() { return id; }
diff --git a/mindspore/lite/src/ops/resize.cc b/mindspore/lite/src/ops/resize.cc
index 22a50774168..d9973a3bd82 100644
--- a/mindspore/lite/src/ops/resize.cc
+++ b/mindspore/lite/src/ops/resize.cc
@@ -54,7 +54,10 @@ int Resize::InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) {
   if (input == nullptr) {
     return 1;
   }
-  MS_ASSERT(input->shape().size() == kInputRank);
+  if (input->shape().size() != kInputRank) {
+    MS_LOG(ERROR) << "Size of input shape is wrong.";
+    return RET_ERROR;
+  }
 
   auto output = outputs_.front();
   if (output == nullptr) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/base/prior_box.cc b/mindspore/lite/src/runtime/kernel/arm/base/prior_box.cc
index e354183e5b8..518a7896b9a 100644
--- a/mindspore/lite/src/runtime/kernel/arm/base/prior_box.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/base/prior_box.cc
@@ -40,8 +40,14 @@ int PriorBoxCPUKernel::Init() {
     return RET_NULL_PTR;
   }
-  MS_ASSERT(in_tensors_.size() == kInputNum);
-  MS_ASSERT(out_tensors_.size() == kOutputNum);
+  if (in_tensors_.size() != kInputNum) {
+    MS_LOG(ERROR) << "Size of input tensors is wrong.";
+    return RET_ERROR;
+  }
+  if (out_tensors_.size() != kOutputNum) {
+    MS_LOG(ERROR) << "Size of output tensors is wrong.";
+    return RET_ERROR;
+  }
 
   if (!InferShapeDone()) {
     return RET_OK;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.h
index e16dd03e2b4..75a8f7b32aa 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.h
@@ -46,7 +46,6 @@ class ArithmeticFP16CPUKernel : public LiteKernel {
  private:
   void FreeTmpBuffer();
   int break_pos_;
-  int outside_;
   int out_thread_stride_;
   int out_count_;
   float16_t *tile_data0_ = nullptr;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.h
index 9c824947b1b..f94102f5aec 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.h
@@ -44,7 +44,6 @@ class ReduceFp16CPUKernel : public ReduceBaseCPUKernel {
  private:
   Reducer reducer_ = nullptr;
   std::vector<float16_t *> data_buffers_;
-  const float *src_data_ = nullptr;
   float *dst_data_ = nullptr;
   float16_t *fp16_input_ = nullptr;
   const float16_t *fp16_src_data_ = nullptr;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/split_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/split_fp16.cc
index 5174e10bafb..8182d75f289 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/split_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/split_fp16.cc
@@ -111,8 +111,8 @@ int SplitFp16CPUKernel::Run() {
       context_->allocator->Free(output_ptr_[i]);
       output_ptr_[i] = nullptr;
     }
-    return RET_OK;
   }
+  return RET_OK;
 }
 
 kernel::LiteKernel *CpuSplitFp16KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/transpose_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/transpose_fp16.cc
index 0333c368701..20e15f0fbf2 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/transpose_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/transpose_fp16.cc
@@ -30,10 +30,6 @@ using mindspore::lite::RET_OP_EXECUTE_FAILURE;
 using mindspore::schema::PrimitiveType_Transpose;
 
 namespace mindspore::kernel {
-namespace {
-constexpr int kTransposeInputNum = 1;
-constexpr int kTransposeOutputNum = 1;
-}  // namespace
 int TransposeFp16CPUKernel::Init() {
   TransposeParameter *param = reinterpret_cast<TransposeParameter *>(this->op_parameter_);
   num_unit_ = static_cast<int>(in_tensors_[kInputIndex]->shape().at(param->perm_[kNHWC_H]));
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self.h b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self.h
index 4fb1b5ff97b..18fbd93d6d7 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self.h
@@ -46,7 +46,7 @@ class ArithmeticSelfCPUKernel : public LiteKernel {
   explicit ArithmeticSelfCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
                                    const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
                                    const mindspore::lite::PrimitiveC *primitive)
-      : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) {
+      : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) {
     switch (parameter->type_) {
       case PrimitiveType_Abs:
         arithmeticSelf_run_ = ElementAbs;
@@ -102,7 +102,6 @@ class ArithmeticSelfCPUKernel : public LiteKernel {
   size_t data_size_;
   ArithmeticSelfParameter *arithmeticSelfParameter_;
   ArithmeticSelfRun arithmeticSelf_run_;
-  const Context *ctx_;
   int thread_count_;
   float *in_ptr_;
   float *out_ptr_;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/constant_of_shape.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/constant_of_shape.cc
index 5e353cb7a89..54d49ef0177 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/constant_of_shape.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/constant_of_shape.cc
@@ -28,12 +28,6 @@ using mindspore::lite::RET_OK;
 using mindspore::schema::PrimitiveType_ConstantOfShape;
 
 namespace mindspore::kernel {
-
-namespace {
-constexpr int kInputNum = 1;
-constexpr int kOutputNum = 1;
-}  // namespace
-
 int ConstantOfShapeCPUKernel::Init() { return RET_OK; }
 
 int ConstantOfShapeCPUKernel::ReSize() { return RET_OK; }
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/expandDims.h b/mindspore/lite/src/runtime/kernel/arm/fp32/expandDims.h
index 85f1cddeb45..eb545601d45 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/expandDims.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/expandDims.h
@@ -32,7 +32,7 @@ class ExpandDimsCPUKernel : public LiteKernel {
   ExpandDimsCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
                       const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
                       const mindspore::lite::PrimitiveC *primitive)
-      : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) {}
+      : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) {}
   ~ExpandDimsCPUKernel() override = default;
 
   int Init() override;
@@ -46,7 +46,6 @@ class ExpandDimsCPUKernel : public LiteKernel {
   size_t data_size_;
   float *in_ptr_;
   float *out_ptr_;
-  const Context *ctx_;
   int thread_count_;
 };
 }  // namespace mindspore::kernel
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/fill.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/fill.cc
index 359ecf7d2a5..561d92b6ee4 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/fill.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/fill.cc
@@ -28,12 +28,6 @@ using mindspore::lite::RET_OK;
 using mindspore::schema::PrimitiveType_Fill;
 
 namespace mindspore::kernel {
-
-namespace {
-constexpr int kInputNum = 1;
-constexpr int kOutputNum = 1;
-}  // namespace
-
 int FillCPUKernel::Init() {
   if (!InferShapeDone()) {
     return RET_OK;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/fill.h b/mindspore/lite/src/runtime/kernel/arm/fp32/fill.h
index bc73bbbbbdf..b92948a453c 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/fill.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/fill.h
@@ -30,7 +30,7 @@ class FillCPUKernel : public LiteKernel {
   FillCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
                 const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
                 const mindspore::lite::PrimitiveC *primitive)
-      : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) {}
+      : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) {}
   ~FillCPUKernel() override = default;
 
   int Init() override;
@@ -44,7 +44,6 @@ class FillCPUKernel : public LiteKernel {
   int data_size_;
   float src_data_;
   float *out_ptr_;
-  const Context *ctx_;
   int thread_count_;
 };
 }  // namespace mindspore::kernel
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/gatherNd.h b/mindspore/lite/src/runtime/kernel/arm/fp32/gatherNd.h
index 4b8f016d04b..22261d14936 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/gatherNd.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/gatherNd.h
@@ -32,7 +32,7 @@ class GatherNdCPUKernel : public LiteKernel {
   GatherNdCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
                     const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx,
                     const mindspore::lite::PrimitiveC *primitive)
-      : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) {}
+      : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) {}
   ~GatherNdCPUKernel() override;
 
   int Init() override;
@@ -48,7 +48,6 @@ class GatherNdCPUKernel : public LiteKernel {
   int *in_offset_ = nullptr;
   float *in_ptr_;
   float *out_ptr_;
-  const Context *ctx_;
   int thread_count_;
 };
 }  // namespace mindspore::kernel
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/power.h b/mindspore/lite/src/runtime/kernel/arm/fp32/power.h
index c5bc0fde7c7..c08a06d1bda 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/power.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/power.h
@@ -30,7 +30,6 @@ class PowerCPUKernel : public PowerBaseCPUKernel {
                  const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
                  const mindspore::lite::PrimitiveC *primitive)
       : PowerBaseCPUKernel(param, inputs, outputs, ctx, primitive),
-        ctx_(ctx),
         thread_count_(ctx->thread_num_),
         power_(reinterpret_cast<PowerParameter *>(op_parameter_)->power_),
         scale_(reinterpret_cast<PowerParameter *>(op_parameter_)->scale_),
@@ -43,7 +42,6 @@ class PowerCPUKernel : public PowerBaseCPUKernel {
   int RunImpl(int task_id);
 
  private:
-  const lite::Context *ctx_;
   int thread_count_;
   float power_;
   float scale_;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/range.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/range.cc
index 2b2c0e8feae..5d19341a0a4 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/range.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/range.cc
@@ -27,12 +27,6 @@ using mindspore::lite::RET_OK;
 using mindspore::schema::PrimitiveType_Range;
 
 namespace mindspore::kernel {
-
-namespace {
-constexpr int kInputNum = 0;
-constexpr int kOutputNum = 1;
-}  // namespace
-
 int RangeCPUKernel::Init() { return RET_OK; }
 
 int RangeCPUKernel::ReSize() { return RET_OK; }
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/rank.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/rank.cc
index ae350e150c8..5c23b278458 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/rank.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/rank.cc
@@ -27,12 +27,6 @@ using mindspore::lite::RET_OK;
 using mindspore::schema::PrimitiveType_Rank;
 
 namespace mindspore::kernel {
-
-namespace {
-constexpr int kInputNum = 1;
-constexpr int kOutputNum = 1;
-}  // namespace
-
 int RankCPUKernel::Init() { return RET_OK; }
 
 int RankCPUKernel::ReSize() { return RET_OK; }
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/reverse.h b/mindspore/lite/src/runtime/kernel/arm/fp32/reverse.h
index 0051afde7bc..c4016199388 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/reverse.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/reverse.h
@@ -31,7 +31,7 @@ class ReverseCPUKernel : public LiteKernel {
   ReverseCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
                    const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
                    const mindspore::lite::PrimitiveC *primitive)
-      : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) {}
+      : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) {}
   ~ReverseCPUKernel() {
     if (tmp_ != nullptr) {
       free(tmp_);
@@ -52,7 +52,6 @@ class ReverseCPUKernel : public LiteKernel {
   int strides_[REVERSE_STRIDE_MAX_SIZE];
   int inCount_[REVERSE_STRIDE_MAX_SIZE];
   int outCount_[REVERSE_STRIDE_MAX_SIZE];
-  const Context *ctx_;
   int thread_count_;
   int *tmp_ = nullptr;
   float *in_ptr_;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd.cc
index f62acfe4aff..28b7a3816db 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd.cc
@@ -30,8 +30,6 @@ using mindspore::schema::PrimitiveType_ScatterND;
 
 namespace mindspore::kernel {
 namespace {
-constexpr int kScatterNDInputNum = 3;
-constexpr int kScatterNDOutputNum = 1;
 constexpr int kScatterShapeIndex = 0;
 constexpr int kScatterIndicesIndex = 1;
 constexpr int kScatterUpdateIndex = 2;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/shape.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/shape.cc
index 266f0bd5797..5c67b293b0a 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/shape.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/shape.cc
@@ -26,10 +26,6 @@ using mindspore::lite::RET_OK;
 using mindspore::schema::PrimitiveType_Shape;
 
 namespace mindspore::kernel {
-namespace {
-constexpr int kShapeInputNum = 1;
-constexpr int kShapeOutputNum = 1;
-}  // namespace
 int ShapeCPUKernel::Init() { return RET_OK; }
 
 int ShapeCPUKernel::ReSize() { return RET_OK; }
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/squeeze.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/squeeze.cc
index b334b723666..f666a4db298 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/squeeze.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/squeeze.cc
@@ -27,11 +27,6 @@ using mindspore::lite::RET_OK;
 using mindspore::schema::PrimitiveType_Squeeze;
 
 namespace mindspore::kernel {
-namespace {
-constexpr int kSqueezeInputNum = 1;
-constexpr int kSqueezeOutputNum = 1;
-}  // namespace
-
 int SqueezeCPUKernel::Init() { return RET_OK; }
 
 int SqueezeCPUKernel::ReSize() { return RET_OK; }
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/transpose.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/transpose.cc
index a8fff82ddf4..4985aa6d5a0 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/transpose.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/transpose.cc
@@ -29,10 +29,6 @@ using mindspore::lite::RET_OP_EXECUTE_FAILURE;
 using mindspore::schema::PrimitiveType_Transpose;
 
 namespace mindspore::kernel {
-namespace {
-constexpr int kTransposeInputNum = 1;
-constexpr int kTransposeOutputNum = 1;
-}  // namespace
 int TransposeCPUKernel::Init() {
   TransposeParameter *param = reinterpret_cast<TransposeParameter *>(this->op_parameter_);
   num_unit_ = static_cast<int>(in_tensors_[kInputIndex]->shape().at(param->perm_[kNHWC_H]));
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/zeroslike.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/zeroslike.cc
index 9a2fe6a11f6..a11f9ab864b 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/zeroslike.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/zeroslike.cc
@@ -27,9 +27,6 @@ using mindspore::lite::RET_OK;
 using mindspore::schema::PrimitiveType_ZerosLike;
 
 namespace mindspore::kernel {
-constexpr int kInputNum = 1;
-constexpr int kOutputNum = 1;
-
 int ZerosLikeCPUKernel::Init() { return RET_OK; }
 
 int ZerosLikeCPUKernel::Run() {
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/add_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/add_int8.cc
index b4a8a341b1c..8b03aaa9e38 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/add_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/add_int8.cc
@@ -92,7 +92,7 @@ int QuantizedAddCPUKernel::Run() {
   input0_data_ = static_cast<int8_t *>(ctx_->allocator->Malloc(out_tensors_.at(0)->Size()));
   input1_data_ = static_cast<int8_t *>(ctx_->allocator->Malloc(out_tensors_.at(0)->Size()));
 
-  ArithmeticParameter tile_para = {0};
+  ArithmeticParameter tile_para;
   tile_para.ndim_ = out_tensors_.at(0)->shape().size();
   for (size_t i = 0; i < tile_para.ndim_; i++) {
     tile_para.in_shape0_[i] = in_tensors_.at(0)->DimensionSize(i);
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.h
index d9e7ee75158..79e15be2055 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.h
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.h
@@ -45,7 +45,7 @@ class ArithmeticSelfInt8CPUKernel : public LiteKernel {
   explicit ArithmeticSelfInt8CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
                                        const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx,
                                        const mindspore::lite::PrimitiveC *primitive)
-      : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) {
+      : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) {
     switch (parameter->type_) {
       case PrimitiveType_Round:
         arithmeticSelf_run_ = Int8ElementRound;
@@ -98,7 +98,6 @@ class ArithmeticSelfInt8CPUKernel : public LiteKernel {
   size_t data_size_;
   ArithmeticSelfParameter *para_;
   ArithmeticSelfInt8Run arithmeticSelf_run_;
-  const Context *ctx_;
   int thread_count_;
   int8_t *in_ptr_;
   int8_t *out_ptr_;
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/div_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/div_int8.cc
index 1c8754b5b4e..3550bace2f5 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/div_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/div_int8.cc
@@ -104,7 +104,7 @@ int DivInt8CPUKernel::Run() {
   }
 
   if (broadcast_) {
-    ArithmeticParameter tile_para = {0};
+    ArithmeticParameter tile_para;
     tile_para.ndim_ = out_tensors_.at(0)->shape().size();
     for (size_t i = 0; i < tile_para.ndim_; i++) {
       tile_para.in_shape0_[i] = in_tensors_.at(0)->DimensionSize(i);
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/mul_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/mul_int8.cc
index e8c67c4c79c..42ddfe3d6f3 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/mul_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/mul_int8.cc
@@ -77,7 +77,7 @@ int MulInt8CPUKernel::Run() {
   input0_data_ = static_cast<int8_t *>(ctx_->allocator->Malloc(out_tensors_.at(0)->Size()));
   input1_data_ = static_cast<int8_t *>(ctx_->allocator->Malloc(out_tensors_.at(0)->Size()));
 
-  ArithmeticParameter tile_para = {0};
+  ArithmeticParameter tile_para;
   tile_para.ndim_ = out_tensors_.at(0)->shape().size();
   for (size_t i = 0; i < tile_para.ndim_; i++) {
     tile_para.in_shape0_[i] = in_tensors_.at(0)->DimensionSize(i);
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/resize_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/resize_int8.cc
index fd1581b56c5..d824e7f3695 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/resize_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/resize_int8.cc
@@ -30,12 +30,6 @@ using mindspore::lite::RET_NULL_PTR;
 using mindspore::lite::RET_OK;
 
 namespace mindspore::kernel {
-namespace {
-constexpr int kInputNum = 1;
-constexpr int kOutputNum = 1;
-constexpr size_t kRank = 4;
-}  // namespace
-
 int ResizeInt8CPUKernel::Init() {
   auto ret = ResizeBaseCPUKernel::Init();
   if (ret != RET_OK) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/sub_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/sub_int8.cc
index 4a68c8034fe..9ffca499acf 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/sub_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/sub_int8.cc
@@ -128,7 +128,7 @@ int SubInt8CPUKernel::Run() {
   }
 
   if (broadcast_) {
-    ArithmeticParameter tile_para = {0};
+    ArithmeticParameter tile_para;
     tile_para.ndim_ = out_tensors_.at(0)->shape().size();
     for (size_t i = 0; i < tile_para.ndim_; i++) {
       tile_para.in_shape0_[i] = in_tensors_.at(0)->DimensionSize(i);
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/unsqueeze_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/unsqueeze_int8.h
index 9e127f3d82d..0e3a580cd74 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/unsqueeze_int8.h
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/unsqueeze_int8.h
@@ -30,7 +30,7 @@ class Unsqueezeint8CPUKernel : public LiteKernel {
   Unsqueezeint8CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
                          const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx,
                          const mindspore::lite::PrimitiveC *primitive)
-      : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) {
+      : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) {
     Unsq_para_ = reinterpret_cast<UnSqueezeParameter *>(op_parameter_);
     Unsq_para_->thread_count_ = op_parameter_->thread_num_;
   }
@@ -42,14 +42,12 @@ class Unsqueezeint8CPUKernel : public LiteKernel {
   int DoUnsqueeze(int task_id);
 
  private:
-  UnSqueezeQuantArg *quant_Unsqueeze_parm_;
   UnSqueezeParameter *Unsq_para_;
   int thread_sz_count_;
   int thread_sz_stride_;
   int data_size_;
   float *in_ptr_;
   float *out_ptr_;
-  const Context *ctx_;
   int thread_count_;
 };
 }  // namespace mindspore::kernel
diff --git a/mindspore/lite/src/runtime/kernel/opencl/subgraph_opencl_kernel.h b/mindspore/lite/src/runtime/kernel/opencl/subgraph_opencl_kernel.h
index 32d0f701255..59e41af9458 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/subgraph_opencl_kernel.h
+++ b/mindspore/lite/src/runtime/kernel/opencl/subgraph_opencl_kernel.h
@@ -57,7 +57,6 @@ class SubGraphOpenCLKernel : public SubGraphKernel {
                         std::vector<std::vector<kernel::LiteKernel *>> *out_kernels, bool is_from);
 
  private:
-  SubGraphOpenCLParameter *subgraph_ocl_parameter_;
   lite::opencl::OpenCLAllocator *allocator_;
   std::vector<lite::tensor::Tensor *> in_convert_tensors_;
   std::vector<lite::tensor::Tensor *> out_convert_tensors_;
diff --git a/mindspore/lite/tools/time_profile/CMakeLists.txt b/mindspore/lite/tools/time_profile/CMakeLists.txt
index fce3655d3a1..dd494ae01cb 100644
--- a/mindspore/lite/tools/time_profile/CMakeLists.txt
+++ b/mindspore/lite/tools/time_profile/CMakeLists.txt
@@ -17,5 +17,10 @@ else()
     target_link_libraries(timeprofile mindspore-lite pthread)
 endif()
 
-install(TARGETS timeprofile
-        RUNTIME DESTINATION ${MAIN_DIR}/time_profile COMPONENT ${COMPONENT_NAME})
\ No newline at end of file
+if (PLATFORM_ARM32 OR PLATFORM_ARM64)
+    install(TARGETS timeprofile
+            RUNTIME DESTINATION ${MAIN_DIR}/time_profile COMPONENT ${COMPONENT_NAME})
+else()
+    install(TARGETS timeprofile
+            RUNTIME DESTINATION ${MAIN_DIR}/time_profile COMPONENT ${RUN_X86_COMPONENT_NAME})
+endif()