diff --git a/mindspore/lite/src/inner_context.cc b/mindspore/lite/src/inner_context.cc
index a136aa11cb6..96b35468c53 100644
--- a/mindspore/lite/src/inner_context.cc
+++ b/mindspore/lite/src/inner_context.cc
@@ -305,4 +305,15 @@ NpuDeviceInfo InnerContext::GetNpuInfo() const {
 
 // Support CPU backend to judge whether it supports Float16.
 bool InnerContext::IsSupportFloat16() const { return fp16_flag_; }
+
+ActorThreadPool *InnerContext::thread_pool() const { return thread_pool_; }
+
+int ParallelLaunch(const Context *context, const Func &func, Content content, int task_num) {
+  ActorThreadPool *pool = static_cast<const InnerContext *>(context)->thread_pool();
+  if (pool == nullptr) {
+    MS_LOG(ERROR) << "thread pool is nullptr";
+    return RET_NULL_PTR;
+  }
+  return pool->ParallelLaunch(func, content, task_num);
+}
 }  // namespace mindspore::lite
diff --git a/mindspore/lite/src/inner_context.h b/mindspore/lite/src/inner_context.h
index 412d81e01c5..dc7bb3e902e 100644
--- a/mindspore/lite/src/inner_context.h
+++ b/mindspore/lite/src/inner_context.h
@@ -30,9 +30,6 @@ namespace mindspore::lite {
 struct InnerContext : public Context {
- public:
-  ActorThreadPool *thread_pool_{nullptr};
-
  public:
   InnerContext() = default;
@@ -64,6 +61,8 @@ struct InnerContext : public Context {
   int IsValid() const;
 
+  ActorThreadPool *thread_pool() const;
+
   virtual ~InnerContext();
 
  private:
@@ -83,6 +82,8 @@ struct InnerContext : public Context {
   bool fp16_flag_ = false;
 
+  ActorThreadPool *thread_pool_{nullptr};
+
 #ifdef ENABLE_ARM
 #ifndef MS_COMPILE_IOS
   CpuInfo *cpu_info_ = nullptr;
@@ -95,6 +96,9 @@ struct InnerContext : public Context {
 #endif
 #endif
 };
+
+int ParallelLaunch(const Context *context, const Func &func, Content content, int task_num);
+
 }  // namespace mindspore::lite
 #endif  // MINDSPORE_LITE_SRC_INNER_CONTEXT_H
diff --git a/mindspore/lite/src/lite_mindrt.cc b/mindspore/lite/src/lite_mindrt.cc
index 79da692dc6b..2cfa50f16e5 100644
--- a/mindspore/lite/src/lite_mindrt.cc
+++ b/mindspore/lite/src/lite_mindrt.cc
@@ -323,7 +323,7 @@ std::vector> CreateOpActor(const std::vector
   std::vector> actors;
   std::unordered_map partial_map{};
-  auto thread_pool = ctx->thread_pool_;
+  auto thread_pool = ctx->thread_pool();
   if (thread_pool == nullptr) {
     MS_LOG(ERROR) << "thread pool is nullptr";
     return actors;
diff --git a/mindspore/lite/src/lite_session.cc b/mindspore/lite/src/lite_session.cc
index de20753d0bb..b13d5bc571e 100644
--- a/mindspore/lite/src/lite_session.cc
+++ b/mindspore/lite/src/lite_session.cc
@@ -881,7 +881,7 @@ int LiteSession::Resize(const std::vector &inputs
 }
 
 int LiteSession::InitGPURuntime() {
-  ActorThreadPool *thread_pool = this->context_->thread_pool_;
+  ActorThreadPool *thread_pool = this->context_->thread_pool();
   if (thread_pool == nullptr) {
     MS_LOG(ERROR) << "thread pool is nullptr";
     is_running_.store(false);
diff --git a/mindspore/lite/src/runtime/kernel/arm/base/constant_of_shape.cc b/mindspore/lite/src/runtime/kernel/arm/base/constant_of_shape.cc
index 455367c4017..4ad13b7c7f6 100644
--- a/mindspore/lite/src/runtime/kernel/arm/base/constant_of_shape.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/base/constant_of_shape.cc
@@ -76,8 +76,7 @@ int ConstantOfShapeCPUKernel::Run() {
   }
   thread_stride_ = UP_DIV(param_->element_size_, thread_count);
-  auto ret = static_cast<const lite::InnerContext *>(this->context_)
-               ->thread_pool_->ParallelLaunch(ConstantOfShapeRun, this, thread_count);
+  auto ret = ParallelLaunch(this->context_, ConstantOfShapeRun, this, thread_count);
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "ConstantOfShapeRun error error_code[" << ret << "]";
"]"; return ret; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/detection_post_process_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/detection_post_process_base.cc index 7121c9e829b..4b321b83b1f 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/detection_post_process_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/detection_post_process_base.cc @@ -236,8 +236,7 @@ int DetectionPostProcessBaseCPUKernel::Run() { return status; } } else { - status = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(NmsMultiClassesFastCoreRun, this, op_parameter_->thread_num_); + status = ParallelLaunch(this->context_, NmsMultiClassesFastCoreRun, this, op_parameter_->thread_num_); if (status != RET_OK) { MS_LOG(ERROR) << "NmsMultiClassesFastCoreRun error error_code[" << status << "]"; FreeAllocatedBuffer(); diff --git a/mindspore/lite/src/runtime/kernel/arm/base/prior_box.cc b/mindspore/lite/src/runtime/kernel/arm/base/prior_box.cc index 1dc64f051f7..ccdb6289c4b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/prior_box.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/prior_box.cc @@ -165,8 +165,7 @@ int RunPriorBox(void *cdata, int task_id, float lhs_scale, float rhs_scale) { } int PriorBoxCPUKernel::Run() { - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(RunPriorBox, this, thread_count_); + int error_code = ParallelLaunch(this->context_, RunPriorBox, this, thread_count_); if (error_code != RET_OK) { MS_LOG(ERROR) << "PriorBox run error, error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/quant_dtype_cast.cc b/mindspore/lite/src/runtime/kernel/arm/base/quant_dtype_cast.cc index a51e5571fcc..07875c463ce 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/quant_dtype_cast.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/quant_dtype_cast.cc @@ -175,8 +175,7 @@ int QuantDTypeCastCPUKernel::Run() { uint8_ptr_ = reinterpret_cast(out_tensors_[0]->data_c()); } - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(QuantDTypeCastRun, this, thread_n_num_); + auto ret = ParallelLaunch(this->context_, QuantDTypeCastRun, this, thread_n_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "Scale error error_code[" << ret << "]"; if (in_tensors_[0]->data_type() == TypeId::kNumberTypeInt8 && diff --git a/mindspore/lite/src/runtime/kernel/arm/base/reshape_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/reshape_base.cc index 76ec897a6b3..8b1a27ff5b6 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/reshape_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/reshape_base.cc @@ -70,8 +70,7 @@ int ReshapeRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) { int ReshapeBaseCPUKernel::Run() { input_ptr_ = reinterpret_cast(in_tensors_.at(kInputIndex)->data_c()); output_ptr_ = reinterpret_cast(out_tensors_.at(kOutputIndex)->data_c()); - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ReshapeRun, this, op_parameter_->thread_num_); + auto ret = ParallelLaunch(this->context_, ReshapeRun, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "Reshape run error error_code[" << ret << "]"; return ret; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/slice_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/slice_base.cc index 53055eba035..f3100f6fb98 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/slice_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/slice_base.cc @@ -82,8 +82,7 @@ int 
SliceCPUKernel::Run() { lite::DataTypeSize(in_tensors_.at(0)->data_type())); return RET_OK; } - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(SliceLaunch, this, op_parameter_->thread_num_); + auto ret = ParallelLaunch(this->context_, SliceLaunch, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "slice launch fail!ret: " << ret; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/split_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/split_base.cc index 528f724ef72..21c081f98f9 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/split_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/split_base.cc @@ -125,8 +125,7 @@ int SplitBaseCPUKernel::Run() { output_ptr_.at(i) = output_tensor->data_c(); } - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(SplitRun, this, thread_n_num_); + auto ret = ParallelLaunch(this->context_, SplitRun, this, thread_n_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "split error error_code[" << ret << "]"; } diff --git a/mindspore/lite/src/runtime/kernel/arm/base/split_with_over_lap_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/split_with_over_lap_base.cc index 29de5ef0d4f..53fca6db096 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/split_with_over_lap_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/split_with_over_lap_base.cc @@ -117,8 +117,7 @@ int SplitWithOverlapBaseCPUKernel::Run() { inner_stride_ *= input_shape[i]; } - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(SplitWithOverlapRun, this, context_->thread_num_); + auto ret = ParallelLaunch(this->context_, SplitWithOverlapRun, this, context_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "ParallelLaunch for SplitWIthOverlapRun run fail. 
errorcode:[" << ret << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/stack_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/stack_base.cc index eade79b41b3..58c3e610160 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/stack_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/stack_base.cc @@ -100,8 +100,7 @@ int StackBaseCPUKernel::Run() { } // run stack num_threads_ = MSMIN(UP_DIV(outer_size_, 64), op_parameter_->thread_num_); - auto ret = - static_cast(this->context_)->thread_pool_->ParallelLaunch(StackRun, this, num_threads_); + auto ret = ParallelLaunch(this->context_, StackRun, this, num_threads_); if (ret != RET_OK) { MS_LOG(ERROR) << "StackBaseCPUKernel Run error: error_code[" << ret << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/strided_slice.cc b/mindspore/lite/src/runtime/kernel/arm/base/strided_slice.cc index b13b37ddbb7..24aa6c7ba4b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/strided_slice.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/strided_slice.cc @@ -162,8 +162,7 @@ int StridedSliceCPUKernel::FastRun() { } input_ptr_ = reinterpret_cast(in_tensors_.front()->data_c()); output_ptr_ = reinterpret_cast(out_tensors_.front()->data_c()); - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(StrideRun, this, op_parameter_->thread_num_); + auto ret = ParallelLaunch(this->context_, StrideRun, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "Stride run error error_code[" << ret << "]"; return ret; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/tile_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/tile_base.cc index 36530647e2a..157fe846854 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/tile_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/tile_base.cc @@ -128,8 +128,7 @@ int TileCPUKernel::SimpleTileImpl(int task_id) { } int TileCPUKernel::RunSimpleTile() { - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(SimpleTile, this, op_parameter_->thread_num_); + auto ret = ParallelLaunch(this->context_, SimpleTile, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "RunSimpleTile error code[" << ret << "]"; return ret; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/activation_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/activation_fp16.cc index 3425d5e0dab..4aaf0b2eb18 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/activation_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/activation_fp16.cc @@ -103,8 +103,7 @@ int ActivationFp16CPUKernel::Run() { fp16_input_ = reinterpret_cast(input_tensor->data_c()); fp16_output_ = reinterpret_cast(output_tensor->data_c()); - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ActivationFp16Run, this, thread_count_); + int error_code = ParallelLaunch(this->context_, ActivationFp16Run, this, thread_count_); if (error_code != RET_OK) { MS_LOG(ERROR) << "Activation function error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/addn_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/addn_fp16.cc index 0cadaece402..d5e53c25ea0 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/addn_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/addn_fp16.cc @@ -88,8 +88,7 @@ int AddNFp16CPUKernel::Run() { in1_addr_ = input0_data; in2_addr_ = input1_data; out_addr_ = out_data; - auto ret = static_cast(this->context_) - 
->thread_pool_->ParallelLaunch(AddNLaunch, this, op_parameter_->thread_num_); + auto ret = ParallelLaunch(this->context_, AddNLaunch, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "addn launch fail!ret: " << ret; return RET_ERROR; @@ -97,8 +96,7 @@ int AddNFp16CPUKernel::Run() { for (size_t i = 2; i < in_tensors_.size(); ++i) { in1_addr_ = reinterpret_cast(in_tensors_[i]->MutableData()); in2_addr_ = out_data; - ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(AddNLaunch, this, op_parameter_->thread_num_); + ret = ParallelLaunch(this->context_, AddNLaunch, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "addn launch fail!ret: " << ret << ", input index: " << i; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_compare_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_compare_fp16.cc index 682d1760110..2c9f7c19d2c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_compare_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_compare_fp16.cc @@ -168,8 +168,7 @@ int ArithmeticCompareFP16CPUKernel::Run() { FreeTmpBuffer(); return RET_ERROR; } - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ArithmeticsRunFp16, this, op_parameter_->thread_num_); + auto ret = ParallelLaunch(this->context_, ArithmeticsRunFp16, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "ArithmeticsRunFp16 run error error_code[" << ret << "]"; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.cc index 64bb50be966..b87b65acf4b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.cc @@ -182,8 +182,7 @@ int ArithmeticFP16CPUKernel::Run() { FreeFp16Buffer(); return RET_ERROR; } - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ArithmeticsRun, this, op_parameter_->thread_num_); + auto ret = ParallelLaunch(this->context_, ArithmeticsRun, this, op_parameter_->thread_num_); if (out_tensors_.at(0)->data_type() == kNumberTypeFloat32) { Float16ToFloat32(static_cast(output_ptr_), reinterpret_cast(output_tensor->MutableData()), output_tensor->ElementsNum()); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_self_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_self_fp16.cc index 7f1d203f13f..26e32cfa7dc 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_self_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_self_fp16.cc @@ -84,8 +84,7 @@ int ArithmeticSelfFp16CPUKernel::Run() { } output_fp16_ptr_ = reinterpret_cast(output_tensor->data_c()); - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ArithmeticSelfRun, this, op_parameter_->thread_num_); + auto ret = ParallelLaunch(this->context_, ArithmeticSelfRun, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "ArithmeticSelfRun error error_code[" << ret << "]"; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/batchnorm_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/batchnorm_fp16.cc index 17224780809..5952bd4d76a 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/batchnorm_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/batchnorm_fp16.cc @@ -63,8 +63,7 @@ int BatchnormFp16CPUKernel::Run() { return RET_ERROR; } - auto ret = static_cast(this->context_) - 
->thread_pool_->ParallelLaunch(BatchNormRun, this, op_parameter_->thread_num_); + auto ret = ParallelLaunch(this->context_, BatchNormRun, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "BatchnormRun error error_code[" << ret << "]"; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/cast_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/cast_fp16.cc index c4b0dd21b60..749f09c2dc2 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/cast_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/cast_fp16.cc @@ -131,8 +131,7 @@ int CastFp16CPUKernel::Run() { if (data_num_ == 0) { return RET_OK; } - return static_cast(this->context_) - ->thread_pool_->ParallelLaunch(CastFp16Run, this, op_parameter_->thread_num_); + return ParallelLaunch(this->context_, CastFp16Run, this, op_parameter_->thread_num_); } REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Cast, LiteKernelCreator) diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_1x1_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_1x1_fp16.cc index 2c74cfef369..605482cadf1 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_1x1_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_1x1_fp16.cc @@ -261,16 +261,14 @@ int Convolution1x1FP16CPUKernel::Run() { int ret = RET_ERROR; if (multi_thread_by_hw_) { - ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(Convolution1x1Fp16RunHw, this, thread_count_); + ret = ParallelLaunch(this->context_, Convolution1x1Fp16RunHw, this, thread_count_); } else { #ifdef ENABLE_ARM64 RowMajor2Col16MajorFp16Opt(input_ptr_, pack_input_, matmul_param_->row_, matmul_param_->deep_); #else RowMajor2Col12MajorFp16Opt(input_ptr_, pack_input_, matmul_param_->row_, matmul_param_->deep_); #endif - ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(Convolution1x1Fp16RunOc, this, thread_count_); + ret = ParallelLaunch(this->context_, Convolution1x1Fp16RunOc, this, thread_count_); } if (ret != RET_OK) { MS_LOG(ERROR) << "ParallelLaunch failed."; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.cc index 1c6ad7b9a26..1754f3ec4dd 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.cc @@ -117,8 +117,7 @@ int ConvolutionDepthwiseFp16CPUKernel::Run() { } is_repack_ = false; } - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ConvDwFp16Run, this, conv_param_->thread_num_); + auto ret = ParallelLaunch(this->context_, ConvDwFp16Run, this, conv_param_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "ConvDwFp16Run error: error_code[" << ret << "]"; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_slidewindow_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_slidewindow_fp16.cc index 8f0218b5375..a71ea54f331 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_slidewindow_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_slidewindow_fp16.cc @@ -169,8 +169,7 @@ int ConvolutionDepthwiseSWFp16CPUKernel::Run() { } is_repack_ = false; } - ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ConvDwSWFp16Run, this, conv_param_->thread_num_); + ret = ParallelLaunch(this->context_, ConvDwSWFp16Run, this, conv_param_->thread_num_); if (ret != RET_OK) { 
MS_LOG(ERROR) << "ConvDwSWFp16Run error: error_code[" << ret << "]"; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.cc index f01c9eca977..b55e9bc0939 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.cc @@ -160,8 +160,7 @@ int ConvolutionFP16CPUKernel::Run() { } is_repack_ = false; } - ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ConvolutionFp16Impl, this, thread_count_); + ret = ParallelLaunch(this->context_, ConvolutionFp16Impl, this, thread_count_); if (ret != RET_OK) { MS_LOG(ERROR) << "conv fp16 error ret[" << ret << "]"; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_winograd_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_winograd_fp16.cc index 2467e44c1f4..d4f48a777e4 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_winograd_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_winograd_fp16.cc @@ -237,8 +237,7 @@ int ConvolutionWinogradFP16CPUKernel::Run() { } is_repack_ = false; } - ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ConvolutionWinogradFp16Impl, this, thread_count_); + ret = ParallelLaunch(this->context_, ConvolutionWinogradFp16Impl, this, thread_count_); if (ret != RET_OK) { MS_LOG(ERROR) << "conv winograd error error_code[" << ret << "]"; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/crop_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/crop_fp16.cc index a3c9c54d09e..27c25cae54e 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/crop_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/crop_fp16.cc @@ -52,8 +52,7 @@ int CropFp16CPUKernel::Run() { input_ptr_ = reinterpret_cast(input_tensor->data_c()); output_ptr_ = reinterpret_cast(output_tensor->data_c()); - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(CropFp16Run, this, crop_para_->thread_count_); + auto ret = ParallelLaunch(this->context_, CropFp16Run, this, crop_para_->thread_count_); if (ret != RET_OK) { MS_LOG(ERROR) << "ParallelLaunch failed: " << ret; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.cc index 5eaa9c6f518..36a1562ec7a 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.cc @@ -179,8 +179,7 @@ int DeconvolutionDepthwiseFp16CPUKernel::Run() { memset(output_ptr, 0, out_tensors_.at(kOutputIndex)->ElementsNum() * sizeof(float16_t)); packed_output_ = output_ptr; } - ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(DeconvDwFp16Run, this, conv_param_->thread_num_); + ret = ParallelLaunch(this->context_, DeconvDwFp16Run, this, conv_param_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "DeconvDwFp16Run error: error_code[" << ret << "]"; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_fp16.cc index a5fa02eae0a..626f1ae631f 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_fp16.cc @@ -222,8 +222,7 @@ int DeConvolutionFp16CPUKernel::Run() { RowMajor2Col16MajorFp16Opt(batch_input_, pack_input_, input_plane_, 
conv_param_->input_channel_); - error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(DeConvFp16Run, this, thread_count_); + error_code = ParallelLaunch(this->context_, DeConvFp16Run, this, thread_count_); if (error_code != RET_OK) { MS_LOG(ERROR) << "deconv fp16 run error! error_code[" << error_code << "]"; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_winograd_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_winograd_fp16.cc index 7079a787c00..416bf515a94 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_winograd_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_winograd_fp16.cc @@ -399,15 +399,13 @@ int DeConvWinogradFp16CPUKernel::Run() { nhwc_output_ = output_ptr + batch_index * deconv_param_->output_plane_ * conv_param_->output_channel_; ::memset(nc4hw4_output_, 0, deconv_param_->output_plane_ * deconv_param_->oc_div4_ * C4NUM * sizeof(float16_t)); - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(DeConvWgFp16Run, this, deconv_param_->thread_num_); + auto ret = ParallelLaunch(this->context_, DeConvWgFp16Run, this, deconv_param_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "DeConvWgFp16Run failed!"; return ret; } // post bias activate and nhwc - ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(DeConvWgPostFp16Run, this, thread_num_hw_); + ret = ParallelLaunch(this->context_, DeConvWgPostFp16Run, this, thread_num_hw_); if (ret != RET_OK) { MS_LOG(ERROR) << "DeConvWgPostFp16Run failed!"; return ret; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/gather_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/gather_fp16.cc index e75eec93fb3..e4ca5b7f37c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/gather_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/gather_fp16.cc @@ -147,8 +147,7 @@ int GatherFp16CPUKernel::Run() { Float32ToFloat16(reinterpret_cast(input_tensor->data_c()), input_data_, input_tensor->ElementsNum()); } } - ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(GatherRunFp16, this, op_parameter_->thread_num_); + ret = ParallelLaunch(this->context_, GatherRunFp16, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "Gather function error error_code[" << ret << "]"; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/instance_norm_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/instance_norm_fp16.cc index 0b0980760fe..8f4ab8e9d44 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/instance_norm_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/instance_norm_fp16.cc @@ -108,8 +108,7 @@ int InstanceNormFp16Run(void *cdata, int task_id, float lhs_scale, float rhs_sca int InstanceNormFp16CPUKernel::Run() { src_data_ = reinterpret_cast(in_tensors_[0]->data_c()); dst_data_ = reinterpret_cast(out_tensors_[0]->data_c()); - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(InstanceNormFp16Run, this, op_parameter_->thread_num_); + auto ret = ParallelLaunch(this->context_, InstanceNormFp16Run, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "InstanceNormFp16Run error error_code[" << ret << "]"; return ret; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/layer_norm_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/layer_norm_fp16.cc index d39e424727a..548599a0c65 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/layer_norm_fp16.cc +++ 
b/mindspore/lite/src/runtime/kernel/arm/fp16/layer_norm_fp16.cc @@ -95,8 +95,7 @@ int LayerNormFp16CPUKernel::Run() { var_data_ = reinterpret_cast(context_->allocator->Malloc(param_->norm_outer_size_ * sizeof(float16_t))); } - ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(LayerNormFp16Run, this, op_parameter_->thread_num_); + ret = ParallelLaunch(this->context_, LayerNormFp16Run, this, op_parameter_->thread_num_); if (out_tensors_.size() != 3) { context_->allocator->Free(mean_data_); context_->allocator->Free(var_data_); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/log_softmax_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/log_softmax_fp16.cc index 80f055bf1b5..e5baf4143a5 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/log_softmax_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/log_softmax_fp16.cc @@ -95,8 +95,7 @@ int LogSoftmaxLastAxisFp16Run(void *cdata, int task_id, float lhs_scale, float r int LogSoftmaxFp16CPUKernel::Run() { if (in_plane_size_ == 1) { - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(LogSoftmaxLastAxisFp16Run, this, op_parameter_->thread_num_); + auto ret = ParallelLaunch(this->context_, LogSoftmaxLastAxisFp16Run, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "LogSoftmaxFp16CPUKernel ParallelLaunch failed, ret: " << ret; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/matmul_base_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/matmul_base_fp16.cc index 175ee9587b9..8cf61adbfd6 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/matmul_base_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/matmul_base_fp16.cc @@ -295,8 +295,7 @@ int MatmulBaseFP16CPUKernel::Run() { batch_b_ptr_ = b_pack_ptr_ + i * params_->deep_ * params_->col_align_; batch_c_ptr_ = c_ptr + i * params_->row_ * params_->col_; } - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(MatmulBaseFP16Run, this, thread_count_); + auto ret = ParallelLaunch(this->context_, MatmulBaseFP16Run, this, thread_count_); if (ret != RET_OK) { MS_LOG(ERROR) << "MatmulBaseFloatRun failed"; return ret; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/pad_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/pad_fp16.cc index 455048d6ade..758e2c80248 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/pad_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/pad_fp16.cc @@ -101,8 +101,7 @@ int PadFp16CPUKernel::Run() { output_[i] = pad_param_->constant_value_; } } - ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(PadImpl, this, op_parameter_->thread_num_); + ret = ParallelLaunch(this->context_, PadImpl, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "BatchnormRun error error_code[" << ret << "]"; } @@ -114,8 +113,7 @@ int PadFp16CPUKernel::Run() { return ret; } - ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(MirrorPadImpl, this, op_parameter_->thread_num_); + ret = ParallelLaunch(this->context_, MirrorPadImpl, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "Pad Reflect or Symmetric mode run error, error_code[" << ret << "]"; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/pooling_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/pooling_fp16.cc index 915c502cf96..37ae3857414 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/pooling_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/pooling_fp16.cc @@ -89,8 +89,7 @@ int PoolingFp16CPUKernel::Run() 
{ fp16_input_ = reinterpret_cast(input_tensor->data_c()); fp16_output_ = reinterpret_cast(output_tensor->data_c()); - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(PoolingFp16Impl, this, thread_count_); + int error_code = ParallelLaunch(this->context_, PoolingFp16Impl, this, thread_count_); if (error_code != RET_OK) { MS_LOG(ERROR) << "pooling error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/power_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/power_fp16.cc index 176bcb101af..9663e1c0ce0 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/power_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/power_fp16.cc @@ -86,8 +86,7 @@ int PowerFp16CPUKernel::Run() { return ret; } } - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(PowerImplFp16, this, thread_count_); + auto ret = ParallelLaunch(this->context_, PowerImplFp16, this, thread_count_); if (ret != RET_OK) { MS_LOG(ERROR) << "PowerFp16CPUKernel error: " << ret; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/quant_dtype_cast_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/quant_dtype_cast_fp16.cc index c2dc7adbc7f..445c43b078b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/quant_dtype_cast_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/quant_dtype_cast_fp16.cc @@ -163,8 +163,7 @@ int QuantDTypeCastFp16CPUKernel::Run() { return RET_ERROR; } - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(QuantDTypeCastFP16Run, this, thread_n_num_); + auto ret = ParallelLaunch(this->context_, QuantDTypeCastFP16Run, this, thread_n_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "Scale error error_code[" << ret << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.cc index 89c11ae4df1..ace7716a413 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.cc @@ -91,8 +91,7 @@ int ReduceFp16CPUKernel::Run() { outer_size_ = outer_sizes_.at(i); inner_size_ = inner_sizes_.at(i); axis_size_ = axis_sizes_.at(i); - auto error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ReduceFp16Impl, this, op_parameter_->thread_num_); + auto error_code = ParallelLaunch(this->context_, ReduceFp16Impl, this, op_parameter_->thread_num_); if (error_code != RET_OK) { FreeTmpBuffer(); MS_LOG(ERROR) << "Reduce run error, error_code[" << error_code << "]"; @@ -107,8 +106,7 @@ int ReduceFp16CPUKernel::Run() { outer_size_ = outer_sizes_.back(); inner_size_ = inner_sizes_.back(); axis_size_ = axis_sizes_.back(); - auto error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ReduceFp16Impl, this, op_parameter_->thread_num_); + auto error_code = ParallelLaunch(this->context_, ReduceFp16Impl, this, op_parameter_->thread_num_); if (error_code != RET_OK) { FreeTmpBuffer(); MS_LOG(ERROR) << "Reduce run error, error_code[" << error_code << "]"; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/scale_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/scale_fp16.cc index 2a317b23d32..355833caf07 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/scale_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/scale_fp16.cc @@ -117,8 +117,7 @@ int ScaleFp16CPUKernel::Run() { return ret; } - ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ScaleFp16Run, 
this, op_parameter_->thread_num_); + ret = ParallelLaunch(this->context_, ScaleFp16Run, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "Scale error error_code[" << ret << "]"; FreeTmpBuffer(); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/slice_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/slice_fp16.cc index 28e5f1b668a..59affdb76fc 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/slice_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/slice_fp16.cc @@ -63,8 +63,7 @@ int SliceFp16CPUKernel::Run() { DoSliceNoParallel(input_data, out_tensors_.at(0)->data_c(), param_, lite::DataTypeSize(kNumberTypeFloat16)); return RET_OK; } - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(SliceFp16Launch, this, op_parameter_->thread_num_); + auto ret = ParallelLaunch(this->context_, SliceFp16Launch, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "fp16 slice launch fail!ret: " << ret; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/softmax_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/softmax_fp16.cc index e193ad85544..66842446296 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/softmax_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/softmax_fp16.cc @@ -95,8 +95,7 @@ int SoftmaxLastAxisFp16Run(void *cdata, int task_id, float lhs_scale, float rhs_ int SoftmaxFp16CPUKernel::Run() { if (in_plane_size_ == 1) { - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(SoftmaxLastAxisFp16Run, this, op_parameter_->thread_num_); + auto ret = ParallelLaunch(this->context_, SoftmaxLastAxisFp16Run, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "SoftmaxFp16CPUKernel ParallelLaunch failed, ret: " << ret; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/stack_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/stack_fp16.cc index 45f99bfcfc0..80cb15b325f 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/stack_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/stack_fp16.cc @@ -101,8 +101,7 @@ int StackFp16CPUKernel::Run() { } // run stack num_threads_ = MSMIN(UP_DIV(outer_size_, 64), this->op_parameter_->thread_num_); - ret = - static_cast(this->context_)->thread_pool_->ParallelLaunch(StackRun, this, num_threads_); + ret = ParallelLaunch(this->context_, StackRun, this, num_threads_); if (ret != RET_OK) { MS_LOG(ERROR) << "StackBaseCPUKernel Run error: error_code[" << ret << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/activation_fp16_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/activation_fp16_grad.cc index 5091d5b1a55..95ff91477b2 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/activation_fp16_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/activation_fp16_grad.cc @@ -94,8 +94,7 @@ int ActivationGradRunFp16(void *cdata, int task_id, float lhs_scale, float rhs_s } int ActivationGradCPUKernelFp16::Run() { - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ActivationGradRunFp16, this, thread_count_); + int error_code = ParallelLaunch(this->context_, ActivationGradRunFp16, this, thread_count_); if (error_code != RET_OK) { MS_LOG(ERROR) << "Activation Grad function error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/arithmetic_fp16_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/arithmetic_fp16_grad.cc index 
4a23150b3bf..2d398aded54 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/arithmetic_fp16_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/arithmetic_fp16_grad.cc @@ -75,8 +75,7 @@ int ArithmeticGradRunFp16(void *cdata, int task_id, float lhs_scale, float rhs_s } int ArithmeticGradCPUKernelFp16::Run() { - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ArithmeticGradRunFp16, this, 1); + int error_code = ParallelLaunch(this->context_, ArithmeticGradRunFp16, this, 1); if (error_code != RET_OK) { MS_LOG(ERROR) << "Arithmetic Grad function error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/arithmetic_fp16_self_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/arithmetic_fp16_self_grad.cc index 4ff31aec04a..1b3d7bb013f 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/arithmetic_fp16_self_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/arithmetic_fp16_self_grad.cc @@ -72,8 +72,7 @@ int ArithmeticSelfGradFp16Run(void *cdata, int task_id, float lhs_scale, float r } int ArithmeticSelfGradFp16CPUKernel::Run() { - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ArithmeticSelfGradFp16Run, this, thread_count_); + int error_code = ParallelLaunch(this->context_, ArithmeticSelfGradFp16Run, this, thread_count_); if (error_code != RET_OK) { MS_LOG(ERROR) << "Activation Grad function error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/bias_fp16_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/bias_fp16_grad.cc index 7b74f607e63..8586e069e3a 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/bias_fp16_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/bias_fp16_grad.cc @@ -83,8 +83,7 @@ int BiasGradFp16Run(void *cdata, int task_id, float lhs_scale, float rhs_scale) } int BiasGradCPUKernelFp16::Run() { - int error_code = - static_cast(this->context_)->thread_pool_->ParallelLaunch(BiasGradFp16Run, this, 1); + int error_code = ParallelLaunch(this->context_, BiasGradFp16Run, this, 1); if (error_code != RET_OK) { MS_LOG(ERROR) << "bias function error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/bn_fp16_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/bn_fp16_grad.cc index 85d74646295..251290ee5db 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/bn_fp16_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/bn_fp16_grad.cc @@ -141,8 +141,7 @@ int BNGradCPUKernelFp16::Run() { stage_ = 0; thread_num_ = context_->thread_num_; if (thread_num_ == 1) { - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(BNGradFp16Run, this, thread_num_); + int error_code = ParallelLaunch(this->context_, BNGradFp16Run, this, thread_num_); if (error_code != RET_OK) { MS_LOG(ERROR) << "BN function error error_code[" << error_code << "]"; return RET_ERROR; @@ -151,8 +150,7 @@ int BNGradCPUKernelFp16::Run() { const std::vector threads = {thread_num_, 1, thread_num_}; for (size_t stage = 0; stage < threads.size(); stage++) { stage_ = static_cast(stage); - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(BNGradFp16Run, this, threads.at(stage)); + int error_code = ParallelLaunch(this->context_, BNGradFp16Run, this, threads.at(stage)); if (error_code != RET_OK) { MS_LOG(ERROR) << "BN function error error_code[" << 
error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/convolution_fp16_grad_filter.cc b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/convolution_fp16_grad_filter.cc index 6b9f47874fd..587b6719743 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/convolution_fp16_grad_filter.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/convolution_fp16_grad_filter.cc @@ -191,8 +191,7 @@ int ConvolutionGradFilterCPUKernelFp16::Run() { auto *out_dw = out_tensors_.at(0); auto dw_addr = reinterpret_cast(out_dw->data_c()); memset(dw_addr, 0, out_dw->Size()); - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ConvolutionGradFilterFp16Run, this, context_->thread_num_); + int error_code = ParallelLaunch(this->context_, ConvolutionGradFilterFp16Run, this, context_->thread_num_); if (error_code != RET_OK) { MS_LOG(ERROR) << "conv filter function error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/convolution_fp16_grad_input.cc b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/convolution_fp16_grad_input.cc index 213c92955b2..7c6be63e49c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/convolution_fp16_grad_input.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/convolution_fp16_grad_input.cc @@ -176,8 +176,7 @@ int ConvolutionGradInputCPUKernelFp16::Run() { auto *out_dx = out_tensors_.at(0); auto dx_addr = reinterpret_cast(out_dx->data_c()); memset(dx_addr, 0, sizeof(float16_t) * batch * in_ch * in_h * in_w); - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ConvolutionGradInputFp16Run, this, context_->thread_num_); + int error_code = ParallelLaunch(this->context_, ConvolutionGradInputFp16Run, this, context_->thread_num_); if (error_code != RET_OK) { MS_LOG(ERROR) << "bias function error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/dropout_fp16_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/dropout_fp16_grad.cc index 67648d59450..ac8e8b1f5f6 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/dropout_fp16_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/dropout_fp16_grad.cc @@ -82,8 +82,7 @@ int RunDropoutFp16Grad(void *cdata, int task_id, float lhs_scale, float rhs_scal } int DropoutGradCPUKernelFp16::Run() { - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(RunDropoutFp16Grad, this, thread_count_); + int error_code = ParallelLaunch(this->context_, RunDropoutFp16Grad, this, thread_count_); if (error_code != RET_OK) { MS_LOG(ERROR) << "Dropout Grad function error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/layernorm_fp16_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/layernorm_fp16_grad.cc index d56d94500e4..f5a44b6fb4d 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/layernorm_fp16_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/layernorm_fp16_grad.cc @@ -96,8 +96,7 @@ int LayerNormF16GradRun(void *cdata, int task_id, float lhs_scale, float rhs_sca } int LayerNormGradCPUKernelFp16::Run() { - int error_code = - static_cast(this->context_)->thread_pool_->ParallelLaunch(LayerNormF16GradRun, this, 1); + int error_code = ParallelLaunch(this->context_, LayerNormF16GradRun, this, 1); if (error_code != RET_OK) { MS_LOG(ERROR) << "LayerNorm function error error_code[" << 
error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/neg_fp16_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/neg_fp16_grad.cc index a04b6ff02b7..df227e93fe3 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/neg_fp16_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/neg_fp16_grad.cc @@ -55,8 +55,7 @@ int NegGradCPUKernelFp16::DoNegGrad(int task_id) { int NegGradCPUKernelFp16::ReSize() { return RET_OK; } int NegGradCPUKernelFp16::Run() { - int ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(NegGradRun, this, thread_count_); + int ret = ParallelLaunch(this->context_, NegGradRun, this, thread_count_); if (ret != RET_OK) { MS_LOG(ERROR) << "parallel launch fail!ret: " << ret; return ret; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/pooling_fp16_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/pooling_fp16_grad.cc index 367aa8dfe80..9ec45e6ffdc 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/pooling_fp16_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/pooling_fp16_grad.cc @@ -99,8 +99,7 @@ int PoolingFp16GradImpl(void *cdata, int task_id, float lhs_scale, float rhs_sca int PoolingGradCPUKernelFp16::Run() { thread_num_ = context_->thread_num_; - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(PoolingFp16GradImpl, this, thread_num_); + int error_code = ParallelLaunch(this->context_, PoolingFp16GradImpl, this, thread_num_); if (error_code != RET_OK) { MS_LOG(ERROR) << "pooling error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/resize_fp16_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/resize_fp16_grad.cc index 63d554326be..ab5aeaa51fc 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/resize_fp16_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/resize_fp16_grad.cc @@ -91,8 +91,7 @@ int ResizeGradCPUKernelFp16::Run() { auto out_addr = reinterpret_cast(out_tensors_.at(0)->data_c()); size_t elem_number = out_tensors_.at(0)->ElementsNum(); std::fill(out_addr, out_addr + elem_number, 0.f); - int error_code = - static_cast(this->context_)->thread_pool_->ParallelLaunch(ResizeFp16GradRun, this, 1); + int error_code = ParallelLaunch(this->context_, ResizeFp16GradRun, this, 1); if (error_code != RET_OK) { MS_LOG(ERROR) << "ResizeGradCPUKernelFp16 function error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/strided_slice_fp16_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/strided_slice_fp16_grad.cc index 213ca109a36..a4b7b69565d 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/strided_slice_fp16_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/strided_slice_fp16_grad.cc @@ -123,8 +123,7 @@ int StridedSliceFp16GradImpl(void *cdata, int task_id, float lhs_scale, float rh } int StridedSliceGradCPUKernelFp16::Run() { - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(StridedSliceFp16GradImpl, this, 1); + int error_code = ParallelLaunch(this->context_, StridedSliceFp16GradImpl, this, 1); if (error_code != RET_OK) { MS_LOG(ERROR) << "Strided slice error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/unsorted_segment_sum_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/unsorted_segment_sum_fp16.cc index 95a26470efb..14410d9616a 
100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/unsorted_segment_sum_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/unsorted_segment_sum_fp16.cc @@ -67,8 +67,7 @@ int UnsortedSegmentSumFp16Run(void *cdata, int task_id, float lhs_scale, float r } int UnsortedSegmentSumCPUKernelFp16::Run() { - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(UnsortedSegmentSumFp16Run, this, 1); + int error_code = ParallelLaunch(this->context_, UnsortedSegmentSumFp16Run, this, 1); if (error_code != RET_OK) { MS_LOG(ERROR) << "Strided slice error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/activation_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/activation_fp32.cc index 0663876550d..94f10a606f3 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/activation_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/activation_fp32.cc @@ -107,8 +107,7 @@ int ActivationRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) { } int ActivationCPUKernel::Run() { - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ActivationRun, this, thread_count_); + int error_code = ParallelLaunch(this->context_, ActivationRun, this, thread_count_); if (error_code != RET_OK) { MS_LOG(ERROR) << "Activation function error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/adder_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/adder_fp32.cc index ac417c91811..a5405830dec 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/adder_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/adder_fp32.cc @@ -121,8 +121,7 @@ int AdderCPUKernel::Run() { return RET_ERROR; } - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(AdderImpl, this, thread_count_); + int error_code = ParallelLaunch(this->context_, AdderImpl, this, thread_count_); if (error_code != RET_OK) { MS_LOG(ERROR) << "adder error error_code[" << error_code << "]"; FreeTmpBuffer(); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/addn_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/addn_fp32.cc index 8ab98a9448e..7d3e90e72fd 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/addn_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/addn_fp32.cc @@ -88,8 +88,7 @@ int AddNCPUKernel::Run() { in1_addr_ = input0_data; in2_addr_ = input1_data; out_addr_ = output_data; - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(AddNLaunch, this, op_parameter_->thread_num_); + auto ret = ParallelLaunch(this->context_, AddNLaunch, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "addn launch fail!ret: " << ret; return RET_ERROR; @@ -97,8 +96,7 @@ int AddNCPUKernel::Run() { for (size_t i = 2; i < in_tensors_.size(); ++i) { in1_addr_ = reinterpret_cast(in_tensors_[i]->MutableData()); in2_addr_ = output_data; - ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(AddNLaunch, this, op_parameter_->thread_num_); + ret = ParallelLaunch(this->context_, AddNLaunch, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "addn launch fail!ret: " << ret << ", input index: " << i; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_fp32.cc index 3a17f34deaf..65ead5f2b38 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_fp32.cc +++ 
b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_fp32.cc
@@ -419,8 +419,7 @@ int ArithmeticCPUKernel::Run() {
     input1_ptr_ = in_tensors_[1]->data_c();
   }
   output_ptr_ = out_tensors_[0]->data_c();
-  return static_cast<const lite::InnerContext *>(this->context_)
-    ->thread_pool_->ParallelLaunch(ArithmeticsRun, this, op_parameter_->thread_num_);
+  return ParallelLaunch(this->context_, ArithmeticsRun, this, op_parameter_->thread_num_);
 }
 
 REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_MulFusion, LiteKernelCreator<ArithmeticCPUKernel>)
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self_fp32.cc
index 48325ae48fb..5ffb795608f 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self_fp32.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self_fp32.cc
@@ -114,8 +114,7 @@ int ArithmeticSelfRun(void *cdata, int task_id, float lhs_scale, float rhs_scale
 }
 
 int ArithmeticSelfCPUKernel::Run() {
-  auto ret = static_cast<const lite::InnerContext *>(this->context_)
-               ->thread_pool_->ParallelLaunch(ArithmeticSelfRun, this, op_parameter_->thread_num_);
+  auto ret = ParallelLaunch(this->context_, ArithmeticSelfRun, this, op_parameter_->thread_num_);
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "ArithmeticSelfRun error error_code[" << ret << "]";
   }
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/batchnorm_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/batchnorm_fp32.cc
index 308f0ec732b..ff12822e551 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/batchnorm_fp32.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/batchnorm_fp32.cc
@@ -75,8 +75,7 @@ int BatchnormCPUKernel::InitConstTensor() {
 }
 
 int BatchnormCPUKernel::Run() {
-  auto ret = static_cast<const lite::InnerContext *>(this->context_)
-               ->thread_pool_->ParallelLaunch(BatchNormRun, this, op_parameter_->thread_num_);
+  auto ret = ParallelLaunch(this->context_, BatchNormRun, this, op_parameter_->thread_num_);
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "BatchnormRun error error_code[" << ret << "]";
   }
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/cast_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/cast_fp32.cc
index b5185ba6082..52c2b12a118 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/cast_fp32.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/cast_fp32.cc
@@ -140,8 +140,7 @@ int CastCPUKernel::Run() {
   if (data_num_ == 0) {
     return RET_OK;
   }
-  return static_cast<const lite::InnerContext *>(this->context_)
-    ->thread_pool_->ParallelLaunch(CastRun, this, op_parameter_->thread_num_);
+  return ParallelLaunch(this->context_, CastRun, this, op_parameter_->thread_num_);
 }
 
 REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Cast, LiteKernelCreator<CastCPUKernel>)
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/concat_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/concat_fp32.cc
index dbd7c29d949..74d93bf14f2 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/concat_fp32.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/concat_fp32.cc
@@ -69,8 +69,7 @@ int ConcatRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
 }
 
 int ConcatCPUKernel::Run() {
-  int error_code = static_cast<const lite::InnerContext *>(this->context_)
-                     ->thread_pool_->ParallelLaunch(ConcatRun, this, op_parameter_->thread_num_);
+  int error_code = ParallelLaunch(this->context_, ConcatRun, this, op_parameter_->thread_num_);
   return error_code;
 }
 
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_1x1_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_1x1_fp32.cc
index d742df813ed..ec62d0aef66 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_1x1_fp32.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_1x1_fp32.cc
@@ -256,12 +256,10 @@ int Convolution1x1CPUKernel::Run() {
     }
 
     if (multi_thread_by_hw_) {
-      static_cast<const lite::InnerContext *>(this->context_)
-        ->thread_pool_->ParallelLaunch(Convolution1x1RunHw, this, thread_count_);
+      ParallelLaunch(this->context_, Convolution1x1RunHw, this, thread_count_);
     } else {
       PackMatmulInput(input_ptr_, pack_input_, matmul_param_->row_, matmul_param_->deep_);
-      static_cast<const lite::InnerContext *>(this->context_)
-        ->thread_pool_->ParallelLaunch(Convolution1x1Run, this, thread_count_);
+      ParallelLaunch(this->context_, Convolution1x1Run, this, thread_count_);
     }
   }
 
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_3x3_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_3x3_fp32.cc
index 5f1a86d1338..6fd0d644204 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_3x3_fp32.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_3x3_fp32.cc
@@ -133,8 +133,7 @@ int ConvolutionDepthwise3x3CPUKernel::Run() {
   auto output_tensor = out_tensors_.at(kOutputIndex);
   output_ptr_ = reinterpret_cast<float *>(output_tensor->data_c());
   MS_ASSERT(output_ptr_ != nullptr);
-  auto ret = static_cast<const lite::InnerContext *>(this->context_)
-               ->thread_pool_->ParallelLaunch(ConvDw3x3Run, this, conv_param_->thread_num_);
+  auto ret = ParallelLaunch(this->context_, ConvDw3x3Run, this, conv_param_->thread_num_);
   ctx_->allocator->Free(buffer_);
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "ConvDw3x3Run error: error_code[" << ret << "]";
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32.cc
index 3fe2a93af5a..cb5549fa8b8 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32.cc
@@ -116,8 +116,7 @@ int ConvolutionDepthwiseCPUKernel::Run() {
   output_ptr_ = reinterpret_cast<float *>(output_tensor->data_c());
   MS_ASSERT(output_ptr_ != nullptr);
 
-  auto ret = static_cast<const lite::InnerContext *>(this->context_)
-               ->thread_pool_->ParallelLaunch(ConvDwRun, this, conv_param_->thread_num_);
+  auto ret = ParallelLaunch(this->context_, ConvDwRun, this, conv_param_->thread_num_);
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "ConvDwRun error: error_code[" << ret << "]";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_indirect_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_indirect_fp32.cc
index c24a637c467..91c0b56da37 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_indirect_fp32.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_indirect_fp32.cc
@@ -203,8 +203,7 @@ int ConvolutionDepthwiseIndirectCPUKernel::Run() {
   MS_ASSERT(output_ptr_ != nullptr);
 
   ConvDwInitIndirection(indirect_buffer_, packed_input_, zero_ptr_, conv_param_, step_h, step_w);
-  auto ret = static_cast<const lite::InnerContext *>(this->context_)
-               ->thread_pool_->ParallelLaunch(ConvDwIndirectRun, this, conv_param_->thread_num_);
+  auto ret = ParallelLaunch(this->context_, ConvDwIndirectRun, this, conv_param_->thread_num_);
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "ConvDwIndirectRun error: error_code[" << ret << "]";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_slidewindow_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_slidewindow_fp32.cc
index 68c1d5f2496..148b304762e 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_slidewindow_fp32.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_slidewindow_fp32.cc
@@ -171,8 +171,7 @@ int ConvolutionDepthwiseSWCPUKernel::Run() {
     packed_output_ = output_ptr;
   }
 
-  ret = static_cast<const lite::InnerContext *>(this->context_)
-          ->thread_pool_->ParallelLaunch(ConvDwSWRun, this, conv_param_->thread_num_);
+  ret = ParallelLaunch(this->context_, ConvDwSWRun, this, conv_param_->thread_num_);
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "ConvDwSWRun error: error_code[" << ret << "]";
   }
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_slidewindow_x86_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_slidewindow_x86_fp32.cc
index 94821a6cbcf..a451eef3cb8 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_slidewindow_x86_fp32.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_slidewindow_x86_fp32.cc
@@ -168,8 +168,7 @@ int ConvolutionDepthwiseSWCPUKernelX86::Run() {
     packed_output_ = output_ptr;
   }
 
-  ret = static_cast<const lite::InnerContext *>(this->context_)
-          ->thread_pool_->ParallelLaunch(ConvDwSWAvxRun, this, conv_param_->thread_num_);
+  ret = ParallelLaunch(this->context_, ConvDwSWAvxRun, this, conv_param_->thread_num_);
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "ConvDwSWAvxRun error: error_code[" << ret << "]";
   }
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_fp32.cc
index 44377df3e73..8ee3571370e 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_fp32.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_fp32.cc
@@ -151,8 +151,7 @@ int ConvolutionCPUKernel::Run() {
     PackWeight();
   }
 
-  ret = static_cast<const lite::InnerContext *>(this->context_)
-          ->thread_pool_->ParallelLaunch(ConvolutionImpl, this, thread_count_);
+  ret = ParallelLaunch(this->context_, ConvolutionImpl, this, thread_count_);
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "conv error error_code[" << ret << "]";
   }
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_slidewindow_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_slidewindow_fp32.cc
index 9543d1de170..4cc85beb9b9 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_slidewindow_fp32.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_slidewindow_fp32.cc
@@ -183,8 +183,7 @@ int ConvolutionSWCPUKernel::Run() {
     FreeTmpBuffer();
     return ret;
   }
-  int error_code = static_cast<const lite::InnerContext *>(this->context_)
-                     ->thread_pool_->ParallelLaunch(ConvolutionSWImpl, this, thread_count_);
+  int error_code = ParallelLaunch(this->context_, ConvolutionSWImpl, this, thread_count_);
   if (error_code != RET_OK) {
     MS_LOG(ERROR) << "conv error error_code[" << error_code << "]";
     FreeTmpBuffer();
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_winograd_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_winograd_fp32.cc
index afe32c8d49b..0edcb1de17f 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_winograd_fp32.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_winograd_fp32.cc
@@ -224,8 +224,7 @@ int ConvolutionWinogradCPUKernel::Run() {
     }
   }
 
-  ret = static_cast<const lite::InnerContext *>(this->context_)
-          ->thread_pool_->ParallelLaunch(ConvolutionWinogradImpl, this, thread_count_);
+  ret = ParallelLaunch(this->context_, ConvolutionWinogradImpl, this, thread_count_);
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "conv winograd error error_code[" << ret << "]";
   }
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/crop_and_resize_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/crop_and_resize_fp32.cc index 095d1b9abb0..7a3adbcf965 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/crop_and_resize_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/crop_and_resize_fp32.cc @@ -158,8 +158,7 @@ int CropAndResizeCPUKernel::Run() { return ret; } - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(CropAndResizeImpl, this, op_parameter_->thread_num_); + int error_code = ParallelLaunch(this->context_, CropAndResizeImpl, this, op_parameter_->thread_num_); if (error_code != RET_OK) { MS_LOG(ERROR) << "CropAndResize run error, error_code[" << error_code << "]"; FreeTmpBuffer(); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/crop_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/crop_fp32.cc index dbc4b166ad6..12cd93c4981 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/crop_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/crop_fp32.cc @@ -61,8 +61,7 @@ int CropCPUKernel::Run() { return RET_OK; } - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(CropLaunch, this, crop_para_->thread_count_); + auto ret = ParallelLaunch(this->context_, CropLaunch, this, crop_para_->thread_count_); if (ret != RET_OK) { MS_LOG(ERROR) << "Crop launch fail!ret: " << ret; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/cumsum_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/cumsum_fp32.cc index 496b93b1599..3081f6c62d5 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/cumsum_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/cumsum_fp32.cc @@ -136,8 +136,7 @@ int CumSumCPUKernel::DoCumsumInt(int task_id) { } int CumSumCPUKernel::Run() { - int ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(CumsumLaunch, this, op_parameter_->thread_num_); + int ret = ParallelLaunch(this->context_, CumsumLaunch, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "Crop launch fail!ret: " << ret; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise_fp32.cc index 2bf4e4b56e8..21173f1bc50 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise_fp32.cc @@ -177,8 +177,7 @@ int DeconvolutionDepthwiseCPUKernel::Run() { packed_output_ = output_addr; } - ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(DeconvDwRun, this, conv_param_->thread_num_); + ret = ParallelLaunch(this->context_, DeconvDwRun, this, conv_param_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "DeconvDwRun error: error_code[" << ret << "]"; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_fp32.cc index 066a57a9019..1e657d9d6c3 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_fp32.cc @@ -233,8 +233,7 @@ int DeConvolutionCPUKernel::Run() { RowMajor2Col12Major(input_ptr_, pack_input_, matmul_param_->row_, matmul_param_->deep_); #endif - error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(DeConvFp32Run, this, thread_count_); + error_code = ParallelLaunch(this->context_, DeConvFp32Run, this, thread_count_); if (error_code != RET_OK) { 
MS_LOG(ERROR) << "deconv fp32 run error! error_code[" << error_code << "]"; FreeRunBuf(); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_winograd_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_winograd_fp32.cc index afce3809c05..789a0613f75 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_winograd_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_winograd_fp32.cc @@ -411,8 +411,7 @@ int DeConvolutionWinogradCPUKernel::Run() { nhwc_output_ = src_out + batch_index * deconv_param_->output_plane_ * conv_param_->output_channel_; ::memset(nc4hw4_output_, 0, deconv_param_->output_plane_ * deconv_param_->oc_div4_ * C4NUM * sizeof(float)); - ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(DeConvWgFp32Run, this, deconv_param_->thread_num_); + ret = ParallelLaunch(this->context_, DeConvWgFp32Run, this, deconv_param_->thread_num_); if (ret != RET_OK) { FreeRunBuf(); MS_LOG(ERROR) << "DeConvWgFp32Run failed!"; @@ -420,8 +419,7 @@ int DeConvolutionWinogradCPUKernel::Run() { } /* post bias activate and nhwc */ - ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(DeConvWgPostFp32Run, this, thread_num_hw_); + ret = ParallelLaunch(this->context_, DeConvWgPostFp32Run, this, thread_num_hw_); if (ret != RET_OK) { FreeRunBuf(); MS_LOG(ERROR) << "DeConvWgPostFp32Run failed!"; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/elu_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/elu_fp32.cc index b82345fad72..b84d6512e7e 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/elu_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/elu_fp32.cc @@ -58,8 +58,7 @@ int EluRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) { } int EluCPUKernel::Run() { - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(EluRun, this, op_parameter_->thread_num_); + auto ret = ParallelLaunch(this->context_, EluRun, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "Elu error: error_code[" << ret << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/embedding_lookup_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/embedding_lookup_fp32.cc index 1a2032ca29e..ade17026072 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/embedding_lookup_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/embedding_lookup_fp32.cc @@ -86,8 +86,7 @@ int EmbeddingLookupCPUKernel::Run() { memcpy(input_addr_ + dest_loc, input_t, sizeof(float) * in_tensors_.at(i)->ElementsNum()); dest_loc += in_tensors_.at(i)->ElementsNum(); } - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(EmbeddingLookupRun, this, op_parameter_->thread_num_); + auto ret = ParallelLaunch(this->context_, EmbeddingLookupRun, this, op_parameter_->thread_num_); FreeRunBuff(); if (ret != RET_OK) { MS_LOG(ERROR) << "EmbeddingLookup error: error_code[" << ret << "]"; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/exp_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/exp_fp32.cc index 524b861513e..cd2281a94ab 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/exp_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/exp_fp32.cc @@ -72,8 +72,7 @@ int ExpCPUKernel::Run() { output_addr_ = reinterpret_cast(out_tensors_.front()->MutableData()); exp_parameter_->element_num_ = in_tensors_.front()->ElementsNum(); - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ExpRun, this, exp_parameter_->thread_num_); + auto 
ret = ParallelLaunch(this->context_, ExpRun, this, exp_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "Exp error: error_code[" << ret << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/fill_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/fill_fp32.cc index dccb9f48ad7..9158d89ce5b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/fill_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/fill_fp32.cc @@ -90,8 +90,7 @@ int FillCPUKernel::Run() { MS_LOG(ERROR) << "unsupported fill data type " << fill_input->data_type(); return RET_ERROR; } - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(FillRun, this, thread_sz_count_); + auto ret = ParallelLaunch(this->context_, FillRun, this, thread_sz_count_); if (ret != RET_OK) { MS_LOG(ERROR) << "FillRun error error_code[" << ret << "]"; return ret; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/fused_batchnorm_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/fused_batchnorm_fp32.cc index 3cdf260e328..85bd3887740 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/fused_batchnorm_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/fused_batchnorm_fp32.cc @@ -91,8 +91,7 @@ int FusedBatchnormCPUKernel::Run() { trained_ = true; // trained at least once } - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(BatchNormRun, this, op_parameter_->thread_num_); + auto ret = ParallelLaunch(this->context_, BatchNormRun, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "BatchnormRun error error_code[" << ret << "]"; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/gatherNd_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/gatherNd_fp32.cc index 337595f93e2..f2522f23087 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/gatherNd_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/gatherNd_fp32.cc @@ -127,8 +127,7 @@ int GatherNdCPUKernel::Run() { in_ptr_ = reinterpret_cast(in_tensors_.front()->MutableData()); out_ptr_ = reinterpret_cast(out_tensors_.front()->MutableData()); InitOffset(); - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(GatherNdRun, this, thread_sz_count_); + auto ret = ParallelLaunch(this->context_, GatherNdRun, this, thread_sz_count_); if (ret != RET_OK) { MS_LOG(ERROR) << "gatherNd error error_code[" << ret << "]"; return ret; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/gather_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/gather_fp32.cc index cb439862599..8d3bfb494f2 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/gather_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/gather_fp32.cc @@ -91,8 +91,7 @@ int GatherCPUKernel::Run() { return ret; } - ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(GatherRun, this, op_parameter_->thread_num_); + ret = ParallelLaunch(this->context_, GatherRun, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "Gather function error error_code[" << ret << "]"; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/instance_norm_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/instance_norm_fp32.cc index 9706ac50ef4..daa3e176cef 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/instance_norm_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/instance_norm_fp32.cc @@ -66,8 +66,7 @@ int InstanceNormCPUKernel::Run() { gamma_data_ = reinterpret_cast(in_tensors_.at(1)->data_c()); beta_data_ = 
reinterpret_cast(in_tensors_.at(2)->data_c()); dst_data_ = reinterpret_cast(out_tensors_.at(0)->data_c()); - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(InstanceNormRun, this, op_parameter_->thread_num_); + auto ret = ParallelLaunch(this->context_, InstanceNormRun, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "InstanceNormRun error error_code[" << ret << "]"; return ret; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/l2_norm_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/l2_norm_fp32.cc index b1d43f2fd28..6addfde2a40 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/l2_norm_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/l2_norm_fp32.cc @@ -146,8 +146,7 @@ int L2NormCPUKernel::Run() { int ret; if (l2_norm_param_->axis_num_ == 0 || l2_norm_param_->axis_num_ == input_shape.size()) { // all axis - ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(SquareSumRun, this, op_parameter_->thread_num_); + ret = ParallelLaunch(this->context_, SquareSumRun, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "L2Norm error: error_code[" << ret << "]"; return RET_ERROR; @@ -157,15 +156,13 @@ int L2NormCPUKernel::Run() { sum += tmp_sum_[i]; } sqrt_sum_ = sqrt(sum > l2_norm_param_->epsilon_ ? sum : l2_norm_param_->epsilon_); - ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(L2NormRun, this, op_parameter_->thread_num_); + ret = ParallelLaunch(this->context_, L2NormRun, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "L2Norm error: error_code[" << ret << "]"; return RET_ERROR; } } else if (l2_norm_param_->axis_num_ == 1 && l2_norm_param_->axis_[0] == static_cast(input_shape.size()) - 1) { - ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(L2NormTrailingAxisRun, this, op_parameter_->thread_num_); + ret = ParallelLaunch(this->context_, L2NormTrailingAxisRun, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "L2Norm error: error_code[" << ret << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/layer_norm_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/layer_norm_fp32.cc index 69bc4f47fc8..3c787fe5601 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/layer_norm_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/layer_norm_fp32.cc @@ -92,8 +92,7 @@ int LayerNormCPUKernel::Run() { mean_data_ = reinterpret_cast(context_->allocator->Malloc(param_->norm_outer_size_ * sizeof(float))); var_data_ = reinterpret_cast(context_->allocator->Malloc(param_->norm_outer_size_ * sizeof(float))); } - ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(LayerNormRun, this, op_parameter_->thread_num_); + ret = ParallelLaunch(this->context_, LayerNormRun, this, op_parameter_->thread_num_); if (out_tensors_.size() != 3) { context_->allocator->Free(mean_data_); context_->allocator->Free(var_data_); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/local_response_norm_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/local_response_norm_fp32.cc index 539bea5fe49..30b6e7e631b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/local_response_norm_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/local_response_norm_fp32.cc @@ -72,8 +72,7 @@ int LocalResponseNormRun(void *cdata, int task_id, float lhs_scale, float rhs_sc } int LocalResponseNormCPUKernel::Run() { - int error_code = static_cast(this->context_) - 
->thread_pool_->ParallelLaunch(LocalResponseNormRun, this, thread_count_); + int error_code = ParallelLaunch(this->context_, LocalResponseNormRun, this, thread_count_); if (error_code != RET_OK) { MS_LOG(ERROR) << "LocalResponseNorm function error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/log_softmax_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/log_softmax_fp32.cc index 9752bb56fd5..0af4c872e72 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/log_softmax_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/log_softmax_fp32.cc @@ -96,8 +96,7 @@ int LogSoftmaxLastAxisRun(void *cdata, int task_id, float lhs_scale, float rhs_s int LogSoftmaxCPUKernel::Run() { int ret = RET_OK; if (in_plane_size_ == 1) { - ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(LogSoftmaxLastAxisRun, this, op_parameter_->thread_num_); + ret = ParallelLaunch(this->context_, LogSoftmaxLastAxisRun, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "LogSoftmaxCPUKernel ParallelLaunch failed, ret: " << ret; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/lsh_projection_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/lsh_projection_fp32.cc index e8814a74e5d..69bdbef9aa6 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/lsh_projection_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/lsh_projection_fp32.cc @@ -60,8 +60,7 @@ int LshProjectionCPUKernel::Run() { if (ret != RET_OK) { return ret; } - ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(LshProjectionRun, this, op_parameter_->thread_num_); + ret = ParallelLaunch(this->context_, LshProjectionRun, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "LshProjection kernel parallel launch failed"; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/matmul_fp32_base.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/matmul_fp32_base.cc index 8ea899c6b11..c5f7d907c74 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/matmul_fp32_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/matmul_fp32_base.cc @@ -426,8 +426,7 @@ int MatmulFp32BaseCPUKernel::Run() { // need not aligned batch_c_ptr_ = output_data_ + i * params_->row_ * params_->col_; } - ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(MatmulBaseFloatRun, this, thread_count_); + ret = ParallelLaunch(this->context_, MatmulBaseFloatRun, this, thread_count_); if (ret != RET_OK) { MS_LOG(ERROR) << "MatmulBaseFloatRun failed"; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/one_hot_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/one_hot_fp32.cc index 24dc785f610..e3c0e07a6e2 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/one_hot_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/one_hot_fp32.cc @@ -180,8 +180,7 @@ int OneHotCPUKernel::GetParams() { } int OneHotCPUKernel::Run() { - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(RunOneHot, this, op_parameter_->thread_num_); + int error_code = ParallelLaunch(this->context_, RunOneHot, this, op_parameter_->thread_num_); if (error_code != RET_OK) { MS_LOG(ERROR) << "OneHot function error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/pad_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/pad_fp32.cc index fc1b1eef1d9..d8936677b6c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/pad_fp32.cc +++ 
b/mindspore/lite/src/runtime/kernel/arm/fp32/pad_fp32.cc @@ -408,8 +408,7 @@ int PadCPUKernel::Run() { output_data[i] = pad_param_->constant_value_; } } - error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(PadImpl, this, op_parameter_->thread_num_); + error_code = ParallelLaunch(this->context_, PadImpl, this, op_parameter_->thread_num_); if (error_code != RET_OK) { MS_LOG(ERROR) << "Pad run error, error_code[" << error_code << "]"; return RET_ERROR; @@ -422,8 +421,7 @@ int PadCPUKernel::Run() { return error_code; } - error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(MirrorPadImpl, this, op_parameter_->thread_num_); + error_code = ParallelLaunch(this->context_, MirrorPadImpl, this, op_parameter_->thread_num_); if (error_code != RET_OK) { MS_LOG(ERROR) << "Pad Reflect or Symmetric mode run error, error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/pooling_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/pooling_fp32.cc index b32f3c1ad1c..10f690fe328 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/pooling_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/pooling_fp32.cc @@ -84,8 +84,7 @@ int PoolingImpl(void *cdata, int task_id, float lhs_scale, float rhs_scale) { } int PoolingCPUKernel::Run() { - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(PoolingImpl, this, thread_count_); + int error_code = ParallelLaunch(this->context_, PoolingImpl, this, thread_count_); if (error_code != RET_OK) { MS_LOG(ERROR) << "pooling error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/power_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/power_fp32.cc index 0d40c7c1b2f..d9453501621 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/power_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/power_fp32.cc @@ -40,8 +40,7 @@ int PowerImpl(void *cdata, int task_id, float lhs_scale, float rhs_scale) { } int PowerCPUKernel::Run() { - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(PowerImpl, this, thread_count_); + auto ret = ParallelLaunch(this->context_, PowerImpl, this, thread_count_); if (ret != RET_OK) { MS_LOG(ERROR) << "PowerCPUKernel error: " << ret; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/prelu_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/prelu_fp32.cc index 96d439d24d5..f919b8065ae 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/prelu_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/prelu_fp32.cc @@ -92,8 +92,7 @@ int PReluCPUKernel::Run() { auto negative_slope_tensor = in_tensors_.at(1); prelu_param_->slope_ = reinterpret_cast(negative_slope_tensor->data_c()); - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(PReluRun, this, prelu_param_->op_parameter_.thread_num_); + auto ret = ParallelLaunch(this->context_, PReluRun, this, prelu_param_->op_parameter_.thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "PRelu Run error: error_code[" << ret << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/reduce_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/reduce_fp32.cc index 9ffd7c149ad..e143ad83479 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/reduce_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/reduce_fp32.cc @@ -120,8 +120,7 @@ int ReduceCPUKernel::Run() { MS_LOG(ERROR) << "axis_size_ is must not be zero!"; return RET_ERROR; } - 
auto error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ReduceImpl, this, op_parameter_->thread_num_); + auto error_code = ParallelLaunch(this->context_, ReduceImpl, this, op_parameter_->thread_num_); if (error_code != RET_OK) { MS_LOG(ERROR) << "Reduce run error, error_code[" << error_code << "]"; FreeTmpBuffer(); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/resize_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/resize_fp32.cc index 6b586b8104c..f5c25655781 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/resize_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/resize_fp32.cc @@ -204,8 +204,7 @@ int ResizeCPUKernel::RunImpl(int task_id) { } int ResizeCPUKernel::Run() { - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ResizeImpl, this, op_parameter_->thread_num_); + int error_code = ParallelLaunch(this->context_, ResizeImpl, this, op_parameter_->thread_num_); if (error_code != RET_OK) { MS_LOG(ERROR) << "Resize run error, error_code[" << error_code << "]"; FreeTmpBuffer(); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/reverse_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/reverse_fp32.cc index 24e14eae3a1..157d08184b2 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/reverse_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/reverse_fp32.cc @@ -132,8 +132,7 @@ int ReverseCPUKernel::DoReverse(int task_id) { int ReverseCPUKernel::Run() { in_ptr_ = reinterpret_cast(in_tensors_[0]->MutableData()); out_ptr_ = reinterpret_cast(out_tensors_[0]->MutableData()); - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ReverseRun, this, thread_sz_count_); + auto ret = ParallelLaunch(this->context_, ReverseRun, this, thread_sz_count_); if (ret != RET_OK) { MS_LOG(ERROR) << "Reverse run error error_code[" << ret << "]"; return ret; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/roi_pooling_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/roi_pooling_fp32.cc index 935af9a97f1..adc96aeada2 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/roi_pooling_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/roi_pooling_fp32.cc @@ -99,8 +99,7 @@ int ROIPoolingCPUKernel::Run() { in_ptr_ = reinterpret_cast(in_tensors_.front()->MutableData()); out_ptr_ = reinterpret_cast(out_tensors_.front()->MutableData()); roi_ptr_ = reinterpret_cast(in_tensors_.at(1)->MutableData()); - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ROIPoolingRun, this, param_->thread_num_); + auto ret = ParallelLaunch(this->context_, ROIPoolingRun, this, param_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "ROIPooling error: error_code[" << ret << "]"; return ret; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/scale_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/scale_fp32.cc index 0b73219c68d..b4ec360d5bf 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/scale_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/scale_fp32.cc @@ -191,8 +191,7 @@ int ScaleCPUKernel::Run() { auto out_tensor = out_tensors_.front(); output_ptr_ = reinterpret_cast(out_tensor->MutableData()); - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ScaleRun, this, op_parameter_->thread_num_); + auto ret = ParallelLaunch(this->context_, ScaleRun, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "Scale error error_code[" << ret << "]"; return RET_ERROR; diff --git 
a/mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd_fp32.cc index 68af60ad8a4..1aa03632a42 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd_fp32.cc @@ -155,8 +155,7 @@ int ScatterNDRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) { } int ScatterNDCPUKernel::Run() { - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ScatterNDRun, this, thread_n_num_); + auto ret = ParallelLaunch(this->context_, ScatterNDRun, this, thread_n_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "ScatterND error error_code[" << ret << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/softmax_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/softmax_fp32.cc index 1e00dae12f9..bf96c2f915f 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/softmax_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/softmax_fp32.cc @@ -96,8 +96,7 @@ int SoftmaxLastAxisRun(void *cdata, int task_id, float lhs_scale, float rhs_scal int SoftmaxCPUKernel::Run() { int ret = RET_OK; if (in_plane_size_ == 1) { - ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(SoftmaxLastAxisRun, this, op_parameter_->thread_num_); + ret = ParallelLaunch(this->context_, SoftmaxLastAxisRun, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "SoftmaxCPUKernel ParallelLaunch failed, ret: " << ret; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_batch_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_batch_fp32.cc index 868722434f3..5245bf8d2f8 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_batch_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_batch_fp32.cc @@ -102,8 +102,7 @@ int SpaceToBatchCPUKernel::Run() { } } - static_cast(this->context_) - ->thread_pool_->ParallelLaunch(SpaceToBatchFp32Run, this, op_parameter_->thread_num_); + ParallelLaunch(this->context_, SpaceToBatchFp32Run, this, op_parameter_->thread_num_); return RET_OK; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth_fp32.cc index 46dc6eece66..66fe7a8a49c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth_fp32.cc @@ -94,8 +94,7 @@ int SpaceToDepthCPUKernel::Run() { input_ptr_ = reinterpret_cast(in_tensors_.at(0)->data_c()); output_ptr_ = reinterpret_cast(out_tensors_.at(0)->data_c()); if (in_tensors_.at(0)->format() == mindspore::NHWC) { - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(SpaceToDepthRun, this, thread_h_num_); + auto ret = ParallelLaunch(this->context_, SpaceToDepthRun, this, thread_h_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "SpaceToDepth error error_code[" << ret << "]"; return ret; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense_fp32.cc index 89651a0d9dd..10a5c667687 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense_fp32.cc @@ -174,8 +174,7 @@ int SparseToDenseCPUKernel::Run() { } output_data = reinterpret_cast(out_tensors_.at(0)->MutableData()); count_unit_ = thread_count_ > 1 ? 
UP_DIV(index_num, thread_count_) : index_num; - ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(SparseToDenseRun, this, s2d_param->thread_num_); + ret = ParallelLaunch(this->context_, SparseToDenseRun, this, s2d_param->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "SparseToDenseRun error: error_code[" << ret << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/transpose_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/transpose_fp32.cc index 9808178ea53..08488e09519 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/transpose_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/transpose_fp32.cc @@ -167,14 +167,12 @@ int TransposeCPUKernel::Run() { } GetNHNCTransposeFunc(in_tensor, out_tensor); if (NHNCTransposeFunc_ != nullptr) { - return static_cast(this->context_) - ->thread_pool_->ParallelLaunch(TransposeImpl, this, op_parameter_->thread_num_); + return ParallelLaunch(this->context_, TransposeImpl, this, op_parameter_->thread_num_); } if (out_tensor->shape().size() <= DIMENSION_6D) { return TransposeDim2to6(); } else { - return static_cast(this->context_) - ->thread_pool_->ParallelLaunch(TransposeImpl, this, op_parameter_->thread_num_); + return ParallelLaunch(this->context_, TransposeImpl, this, op_parameter_->thread_num_); } } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/where_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/where_fp32.cc index cf9d2cc6de8..313933b3a2f 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/where_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/where_fp32.cc @@ -131,8 +131,7 @@ int WhereCPUKernel::RunWithTripleInputs() { MS_LOG(ERROR) << "Error, inputs' length are zero !!!"; return RET_ERROR; } - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(WhereRun, this, where_param_->thread_num_); + auto ret = ParallelLaunch(this->context_, WhereRun, this, where_param_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "WhereDwRun error: error_code[" << ret << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.cc index c704b859d96..faa95f09cef 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.cc @@ -97,8 +97,7 @@ int ActivationGradRun(void *cdata, int task_id, float lhs_scale, float rhs_scale } int ActivationGradCPUKernel::Run() { - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ActivationGradRun, this, thread_count_); + int error_code = ParallelLaunch(this->context_, ActivationGradRun, this, thread_count_); if (error_code != RET_OK) { MS_LOG(ERROR) << "Activation Grad function error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/adam.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/adam.cc index f6a9cddacd7..1cd4671d997 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/adam.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/adam.cc @@ -100,8 +100,7 @@ int AdamRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) { } int AdamCPUKernel::Run() { - int error_code = - static_cast(this->context_)->thread_pool_->ParallelLaunch(AdamRun, this, thread_count_); + int error_code = ParallelLaunch(this->context_, AdamRun, this, thread_count_); if (error_code != RET_OK) { MS_LOG(ERROR) << "Adam function error 
error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/apply_momentum.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/apply_momentum.cc index 7ce8099cf19..c22ce6cb7c4 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/apply_momentum.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/apply_momentum.cc @@ -81,8 +81,7 @@ int ApplyMomentumRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) } int ApplyMomentumCPUKernel::Run() { - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ApplyMomentumRun, this, thread_count_); + int error_code = ParallelLaunch(this->context_, ApplyMomentumRun, this, thread_count_); if (error_code != RET_OK) { MS_LOG(ERROR) << "Apply Momentum function error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.cc index c27c5672c75..3c6906f2a64 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.cc @@ -225,8 +225,7 @@ int ArithmeticGradRun(void *cdata, int task_id, float lhs_scale, float rhs_scale } int ArithmeticGradCPUKernel::Run() { - int error_code = - static_cast(this->context_)->thread_pool_->ParallelLaunch(ArithmeticGradRun, this, 1); + int error_code = ParallelLaunch(this->context_, ArithmeticGradRun, this, 1); if (error_code != RET_OK) { MS_LOG(ERROR) << "Arithmetic Grad function error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_self_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_self_grad.cc index 95a231f84d1..c2b03fe6086 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_self_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_self_grad.cc @@ -80,8 +80,7 @@ int ArithmeticSelfGradCPUKernel::DoArithmeticSelfGrad(int task_id) { int ArithmeticSelfGradCPUKernel::ReSize() { return RET_OK; } int ArithmeticSelfGradCPUKernel::Run() { - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ArithmeticSelfGradRun, this, thread_count_); + auto ret = ParallelLaunch(this->context_, ArithmeticSelfGradRun, this, thread_count_); if (ret != RET_OK) { MS_LOG(ERROR) << "parallel launch fail!ret: " << ret; return ret; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/assign.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/assign.cc index d6d05741940..cd3376872ac 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/assign.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/assign.cc @@ -57,8 +57,7 @@ int AssignRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) { } int AssignCPUKernel::Run() { - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(AssignRun, this, thread_count_); + int error_code = ParallelLaunch(this->context_, AssignRun, this, thread_count_); if (error_code != RET_OK) { MS_LOG(ERROR) << "Assign function error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.cc index f20f443d767..3d0dd27aee3 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.cc @@ -81,8 +81,7 @@ int 
BiasGradRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) { } int BiasGradCPUKernel::Run() { - int error_code = - static_cast(this->context_)->thread_pool_->ParallelLaunch(BiasGradRun, this, 1); + int error_code = ParallelLaunch(this->context_, BiasGradRun, this, 1); if (error_code != RET_OK) { MS_LOG(ERROR) << "bias function error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.cc index b9a51815727..887bce605bc 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.cc @@ -140,8 +140,7 @@ int BNGradCPUKernel::Run() { thread_num_ = op_parameter_->thread_num_; int error_code; if (thread_num_ == 1) { - error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(BNGradRun, this, thread_num_); + error_code = ParallelLaunch(this->context_, BNGradRun, this, thread_num_); if (error_code != RET_OK) { MS_LOG(ERROR) << "BN function error error_code[" << error_code << "]"; return RET_ERROR; @@ -150,8 +149,7 @@ int BNGradCPUKernel::Run() { const std::vector threads = {thread_num_, 1, thread_num_}; for (size_t stage = 0; stage < threads.size(); stage++) { stage_ = static_cast(stage); - error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(BNGradRun, this, threads.at(stage)); + error_code = ParallelLaunch(this->context_, BNGradRun, this, threads.at(stage)); if (error_code != RET_OK) { MS_LOG(ERROR) << "BN function error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution.cc index c011bf62231..f00372c57de 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution.cc @@ -167,8 +167,7 @@ int ConvolutionTrainRun(void *cdata, int task_id, float lhs_scale, float rhs_sca } int ConvolutionTrainCPUKernel::Run() { - int error_code = - static_cast(this->context_)->thread_pool_->ParallelLaunch(ConvolutionTrainRun, this, 1); + int error_code = ParallelLaunch(this->context_, ConvolutionTrainRun, this, 1); if (error_code != RET_OK) { MS_LOG(ERROR) << "conv train function error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.cc index 7ab83bd52a9..f453d0376d1 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.cc @@ -195,8 +195,7 @@ int ConvolutionGradFilterCPUKernel::Run() { auto *out_dw = out_tensors_.at(0); auto dw_addr = reinterpret_cast(out_dw->MutableData()); memset(dw_addr, 0, out_dw->Size()); - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ConvolutionGradFilterRun, this, op_parameter_->thread_num_); + int error_code = ParallelLaunch(this->context_, ConvolutionGradFilterRun, this, op_parameter_->thread_num_); if (error_code != RET_OK) { MS_LOG(ERROR) << "conv filter function error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.cc index 1da08eb6387..dee68f63238 100644 
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.cc @@ -176,8 +176,7 @@ int ConvolutionGradInputCPUKernel::Run() { auto *out_dx = out_tensors_.at(0); auto dx_addr = reinterpret_cast(out_dx->MutableData()); memset(dx_addr, 0, sizeof(float) * batch * in_ch * in_h * in_w); - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ConvolutionGradInputRun, this, op_parameter_->thread_num_); + int error_code = ParallelLaunch(this->context_, ConvolutionGradInputRun, this, op_parameter_->thread_num_); if (error_code != RET_OK) { MS_LOG(ERROR) << "bias function error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_filter.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_filter.cc index 0922916a5ef..8b836e5be15 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_filter.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_filter.cc @@ -121,8 +121,7 @@ int DeConvolutionGradFilterRun(void *cdata, int task_id, float lhs_scale, float } int DeConvolutionGradFilterCPUKernel::Run() { - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(DeConvolutionGradFilterRun, this, 1); + int error_code = ParallelLaunch(this->context_, DeConvolutionGradFilterRun, this, 1); if (error_code != RET_OK) { MS_LOG(ERROR) << "conv filter function error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout.cc index 630b7d98e35..298102b4b3b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout.cc @@ -99,8 +99,7 @@ int RunDropout(void *cdata, int task_id, float lhs_scale, float rhs_scale) { } int DropoutCPUKernel::Run() { - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(RunDropout, this, thread_count_); + int error_code = ParallelLaunch(this->context_, RunDropout, this, thread_count_); if (error_code != RET_OK) { MS_LOG(ERROR) << "Dropout function error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout_grad.cc index c1a721139c9..3d3bb238f80 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout_grad.cc @@ -80,8 +80,7 @@ int RunDropoutGrad(void *cdata, int task_id, float lhs_scale, float rhs_scale) { } int DropoutGradCPUKernel::Run() { - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(RunDropoutGrad, this, thread_count_); + int error_code = ParallelLaunch(this->context_, RunDropoutGrad, this, thread_count_); if (error_code != RET_OK) { MS_LOG(ERROR) << "Dropout Grad function error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/layernorm_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/layernorm_grad.cc index 864022f8bc9..100a13b5681 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/layernorm_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/layernorm_grad.cc @@ -101,8 +101,7 @@ int LayerNormGradRun(void *cdata, int task_id, float lhs_scale, 
float rhs_scale) } int LayerNormGradCPUKernel::Run() { - int error_code = - static_cast(this->context_)->thread_pool_->ParallelLaunch(LayerNormGradRun, this, 1); + int error_code = ParallelLaunch(this->context_, LayerNormGradRun, this, 1); if (error_code != RET_OK) { MS_LOG(ERROR) << "LayerNorm function error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/neg_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/neg_grad.cc index 104591c206b..61f715590fd 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/neg_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/neg_grad.cc @@ -55,8 +55,7 @@ int NegGradCPUKernel::DoNegGrad(int task_id) { int NegGradCPUKernel::ReSize() { return RET_OK; } int NegGradCPUKernel::Run() { - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(NegGradRun, this, thread_count_); + auto ret = ParallelLaunch(this->context_, NegGradRun, this, thread_count_); if (ret != RET_OK) { MS_LOG(ERROR) << "parallel launch fail!ret: " << ret; return ret; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.cc index bba43f91b10..1c2dfe1ff40 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.cc @@ -98,8 +98,7 @@ int PoolingGradImpl(void *cdata, int task_id, float lhs_scale, float rhs_scale) int PoolingGradCPUKernel::Run() { thread_num_ = op_parameter_->thread_num_; - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(PoolingGradImpl, this, thread_num_); + int error_code = ParallelLaunch(this->context_, PoolingGradImpl, this, thread_num_); if (error_code != RET_OK) { MS_LOG(ERROR) << "pooling error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.cc index d69dfacdaaf..732c341ed12 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.cc @@ -75,8 +75,7 @@ int PowerGradRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) { } int PowerGradCPUKernel::Run() { - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(PowerGradRun, this, thread_count_); + int error_code = ParallelLaunch(this->context_, PowerGradRun, this, thread_count_); if (error_code != RET_OK) { MS_LOG(ERROR) << "power grad function error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/resize_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/resize_grad.cc index ee81102e057..eeefc2276f4 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/resize_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/resize_grad.cc @@ -90,8 +90,7 @@ int ResizeGradCPUKernel::Run() { auto out_addr = reinterpret_cast(out_tensors_.at(0)->MutableData()); size_t elem_number = out_tensors_.at(0)->ElementsNum(); std::fill(out_addr, out_addr + elem_number, 0.f); - int error_code = - static_cast(this->context_)->thread_pool_->ParallelLaunch(ResizeGradRun, this, 1); + int error_code = ParallelLaunch(this->context_, ResizeGradRun, this, 1); if (error_code != RET_OK) { MS_LOG(ERROR) << "ResizeGradCPUKernel function error error_code[" << error_code << "]"; return RET_ERROR; diff --git 
a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc index 1192bd15f3a..6cd3235d385 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc @@ -143,11 +143,9 @@ int SgdCPUKernel::Run() { auto stat = reinterpret_cast(in_tensors_.at(5)->MutableData()); auto error_code = RET_OK; if (*stat > 0.0f) { - error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(SgdRunInit, this, thread_count_); + error_code = ParallelLaunch(this->context_, SgdRunInit, this, thread_count_); } else { - error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(SgdRun, this, thread_count_); + error_code = ParallelLaunch(this->context_, SgdRun, this, thread_count_); } if (error_code != RET_OK) { MS_LOG(ERROR) << "SGD function error error_code[" << error_code << "]"; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sigmoid_cross_entropy_with_logits.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sigmoid_cross_entropy_with_logits.cc index 297d7096c0b..9bb88668fda 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sigmoid_cross_entropy_with_logits.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sigmoid_cross_entropy_with_logits.cc @@ -59,8 +59,7 @@ int SigmoidCrossEntropyWithLogitsRun(void *cdata, int task_id, float lhs_scale, } int SigmoidCrossEntropyWithLogitsCPUKernel::Run() { - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(SigmoidCrossEntropyWithLogitsRun, this, 1); + int error_code = ParallelLaunch(this->context_, SigmoidCrossEntropyWithLogitsRun, this, 1); if (error_code != RET_OK) { MS_LOG(ERROR) << "SigmoidCrossEntropyWithLogits function error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sigmoid_cross_entropy_with_logits_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sigmoid_cross_entropy_with_logits_grad.cc index 97510a57c19..13ae09409b8 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sigmoid_cross_entropy_with_logits_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sigmoid_cross_entropy_with_logits_grad.cc @@ -59,8 +59,7 @@ int SigmoidCrossEntropyWithLogitsGradRun(void *cdata, int task_id, float lhs_sca } int SigmoidCrossEntropyWithLogitsGradCPUKernel::Run() { - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(SigmoidCrossEntropyWithLogitsGradRun, this, 1); + int error_code = ParallelLaunch(this->context_, SigmoidCrossEntropyWithLogitsGradRun, this, 1); if (error_code != RET_OK) { MS_LOG(ERROR) << "SigmoidCrossEntropyWithLogitsGrad function error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/smooth_l1_loss.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/smooth_l1_loss.cc index 9d2e6f25472..4064a50ede0 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/smooth_l1_loss.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/smooth_l1_loss.cc @@ -71,8 +71,7 @@ int SmoothL1LossRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) } int SmoothL1LossCPUKernel::Run() { - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(SmoothL1LossRun, this, thread_count_); + int error_code = ParallelLaunch(this->context_, SmoothL1LossRun, this, thread_count_); if (error_code != RET_OK) { MS_LOG(ERROR) << "SmoothL1Loss function error error_code[" << 
error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/smooth_l1_loss_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/smooth_l1_loss_grad.cc index f031f752acb..d60e6030d39 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/smooth_l1_loss_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/smooth_l1_loss_grad.cc @@ -68,8 +68,7 @@ int SmoothL1LossGradRun(void *cdata, int task_id, float lhs_scale, float rhs_sca } int SmoothL1LossGradCPUKernel::Run() { - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(SmoothL1LossGradRun, this, thread_count_); + int error_code = ParallelLaunch(this->context_, SmoothL1LossGradRun, this, thread_count_); if (error_code != RET_OK) { MS_LOG(ERROR) << "SmoothL1LossGrad function error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_cross_entropy_with_logits.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_cross_entropy_with_logits.cc index dac58f8d855..d2795352724 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_cross_entropy_with_logits.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_cross_entropy_with_logits.cc @@ -90,8 +90,7 @@ int SoftmaxCrossEntropyWithLogitsRun(void *cdata, int task_id, float lhs_scale, } int SoftmaxCrossEntropyWithLogitsCPUKernel::Run() { - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(SoftmaxCrossEntropyWithLogitsRun, this, 1); + int error_code = ParallelLaunch(this->context_, SoftmaxCrossEntropyWithLogitsRun, this, 1); if (error_code != RET_OK) { MS_LOG(ERROR) << "SoftmaxCrossEntropy function error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_grad.cc index 3e0e7bfc60d..ba2e8cf65c6 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_grad.cc @@ -79,8 +79,7 @@ int SoftmaxGradRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) { } int SoftmaxGradCPUKernel::Run() { - int error_code = - static_cast(this->context_)->thread_pool_->ParallelLaunch(SoftmaxGradRun, this, 1); + int error_code = ParallelLaunch(this->context_, SoftmaxGradRun, this, 1); if (error_code != RET_OK) { MS_LOG(ERROR) << "SoftmaxGradRun function error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.cc index 3b92c101ec3..d24d4ff756e 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.cc @@ -144,8 +144,7 @@ int SparseSoftmaxCrossEntropyWithLogitsCPUKernel::Run() { for (int stage = 0; stage < static_cast(threads.size()); stage++) { stage_ = stage; threads_ = threads.at(stage); - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(SparseSoftmaxCrossEntropyWithLogitsRun, this, threads_); + int error_code = ParallelLaunch(this->context_, SparseSoftmaxCrossEntropyWithLogitsRun, this, threads_); if (error_code != RET_OK) { MS_LOG(ERROR) << "SparseSoftmaxCrossEntropyWithLogits function error error_code[" << error_code << "]"; 
return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/strided_slice_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/strided_slice_grad.cc index 78e57e4d9db..2c88b50d053 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/strided_slice_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/strided_slice_grad.cc @@ -122,8 +122,7 @@ int StridedSliceGradImpl(void *cdata, int task_id, float lhs_scale, float rhs_sc } int StridedSliceGradCPUKernel::Run() { - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(StridedSliceGradImpl, this, 1); + int error_code = ParallelLaunch(this->context_, StridedSliceGradImpl, this, 1); if (error_code != RET_OK) { MS_LOG(ERROR) << "Strided slice error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/unsorted_segment_sum.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/unsorted_segment_sum.cc index c1f1a842fdd..fdb124ed1ab 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/unsorted_segment_sum.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/unsorted_segment_sum.cc @@ -66,8 +66,7 @@ int UnsortedSegmentSumRun(void *cdata, int task_id, float lhs_scale, float rhs_s } int UnsortedSegmentSumCPUKernel::Run() { - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(UnsortedSegmentSumRun, this, 1); + int error_code = ParallelLaunch(this->context_, UnsortedSegmentSumRun, this, 1); if (error_code != RET_OK) { MS_LOG(ERROR) << "Strided slice error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/add_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/add_int8.cc index 679e9743068..08d0579a336 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/add_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/add_int8.cc @@ -227,8 +227,7 @@ int QuantizedAddCPUKernel::Run() { input1_data_ = static_cast(in_tensors_.at(1)->data_c()); output_data_ = static_cast(out_tensors_.at(0)->data_c()); - static_cast(this->context_) - ->thread_pool_->ParallelLaunch(AddInt8Run, this, thread_count_); + ParallelLaunch(this->context_, AddInt8Run, this, thread_count_); return RET_OK; } diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_int8.cc index 33f3a4e5400..754de679d0b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_int8.cc @@ -149,8 +149,7 @@ int ArithmeticInt8CPUKernel::Run() { } TileDimensionsInt8(input_data0, input_data1, tile_data0_, tile_data1_, param); } - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ArithmeticsInt8Launch, this, op_parameter_->thread_num_); + auto ret = ParallelLaunch(this->context_, ArithmeticsInt8Launch, this, op_parameter_->thread_num_); if (param->broadcasting_) { context_->allocator->Free(tile_data0_); context_->allocator->Free(tile_data1_); diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.cc index 793e3a2b2b2..2647bc6f1f4 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.cc @@ -102,8 +102,7 @@ int ArithmeticSelfInt8CPUKernel::Run() { auto out_tensor = out_tensors_.at(0); in_ptr_ = reinterpret_cast(input_tensor->MutableData()); 
out_ptr_ = reinterpret_cast(out_tensor->MutableData()); - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ArithmeticSelfInt8Runs, this, thread_sz_count_); + auto ret = ParallelLaunch(this->context_, ArithmeticSelfInt8Runs, this, thread_sz_count_); if (ret != RET_OK) { MS_LOG(ERROR) << "ArithmeticSelfRun error error_code[" << ret << "]"; return ret; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/batchnorm_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/batchnorm_int8.cc index a8056847112..476dd39daef 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/batchnorm_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/batchnorm_int8.cc @@ -191,8 +191,7 @@ int BatchnormInt8CPUKernel::Run() { in_addr_ = reinterpret_cast(in_tensors_.at(0)->MutableData()); out_addr_ = reinterpret_cast(out_tensors_.at(0)->MutableData()); - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(BatchNormInt8Run, this, batchnorm_param_->op_parameter_.thread_num_); + auto ret = ParallelLaunch(this->context_, BatchNormInt8Run, this, batchnorm_param_->op_parameter_.thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "BatchnormRun error error_code[" << ret << "]"; return ret; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/concat_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/concat_int8.cc index 155b3bf3f40..7fcb7299ffa 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/concat_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/concat_int8.cc @@ -115,8 +115,7 @@ int ConcatInt8CPUKernel::Run() { } output_data_ = reinterpret_cast(out_tensors_.at(0)->MutableData()); - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ConcatInt8Run, this, op_parameter_->thread_num_); + auto ret = ParallelLaunch(this->context_, ConcatInt8Run, this, op_parameter_->thread_num_); return ret; } diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_1x1_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_1x1_int8.cc index 538aa1fd891..64acc8efd12 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_1x1_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_1x1_int8.cc @@ -531,8 +531,7 @@ int Convolution1x1Int8CPUKernel::Run() { if (parallel_by_oc_) { /* input transpose and input sum */ if (support_optimize_) { - static_cast(this->context_) - ->thread_pool_->ParallelLaunch(Convolution1x1Int8OcOptPre, this, thread_count_hw_); + ParallelLaunch(this->context_, Convolution1x1Int8OcOptPre, this, thread_count_hw_); } else { RowMajor2Row16x4MajorInt8(input_ptr_, packed_input_, matmul_param_->row_, matmul_param_->deep_); if (filter_peroc_) { @@ -543,12 +542,10 @@ int Convolution1x1Int8CPUKernel::Run() { } } /* matmul parallel by oc */ - error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(Convolution1x1Int8OcRun, this, thread_count_oc_); + error_code = ParallelLaunch(this->context_, Convolution1x1Int8OcRun, this, thread_count_oc_); } else { /* matmul parallel by hw */ - error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(Convolution1x1Int8HwRun, this, thread_count_hw_); + error_code = ParallelLaunch(this->context_, Convolution1x1Int8HwRun, this, thread_count_hw_); } if (error_code != RET_OK) { MS_LOG(ERROR) << "ParallelLaunch run error error_code[" << error_code << "]"; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_3x3_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_3x3_int8.cc index b1fcd08aef5..82f65956f35 100644 
--- a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_3x3_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_3x3_int8.cc @@ -221,8 +221,7 @@ int Convolution3x3Int8CPUKernel::Run() { auto input_addr = reinterpret_cast(in_tensors_.at(kInputIndex)->MutableData()); PackInputToC8Int8(input_addr, input_data_, conv_param_); - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(Convolution3x3Int8Impl, this, thread_count_); + int error_code = ParallelLaunch(this->context_, Convolution3x3Int8Impl, this, thread_count_); if (error_code != RET_OK) { MS_LOG(ERROR) << "conv3x3 int8 error error_code[" << error_code << "]"; FreeTmpBuffer(); diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_3x3_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_3x3_int8.cc index f92e9e813fa..9979fe23049 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_3x3_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_3x3_int8.cc @@ -166,8 +166,7 @@ int ConvolutionDepthwise3x3Int8CPUKernel::Run() { ConvDw3x3Int8Pad(output_ptr_, input_ptr_, packed_weight_, reinterpret_cast(bias_data_), conv_param_, sliding_); } - ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ConvDw3x3Int8Run, this, conv_param_->thread_num_); + ret = ParallelLaunch(this->context_, ConvDw3x3Int8Run, this, conv_param_->thread_num_); if (ret != RET_OK) { context_->allocator->Free(buffer_); MS_LOG(ERROR) << "ConvDwInt8Run error: error_code[" << ret << "]"; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_int8.cc index 4ad4369383c..f18a817238d 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_int8.cc @@ -147,8 +147,7 @@ int ConvolutionDepthwiseInt8CPUKernel::Run() { auto output_tensor = out_tensors_.at(kOutputIndex); output_ptr_ = reinterpret_cast(output_tensor->MutableData()); - ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ConvDwInt8Run, this, conv_param_->thread_num_); + ret = ParallelLaunch(this->context_, ConvDwInt8Run, this, conv_param_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "ConvDwInt8Run error: error_code[" << ret << "]"; } diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_slidewindow_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_slidewindow_int8.cc index 69ce4b34c94..1c8d64feaf8 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_slidewindow_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_slidewindow_int8.cc @@ -311,8 +311,7 @@ int ConvolutionDepthwiseSWInt8CPUKernel::Run() { packed_output_ = output_addr; } - ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ConvDwSWInt8Run, this, conv_param_->thread_num_); + ret = ParallelLaunch(this->context_, ConvDwSWInt8Run, this, conv_param_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "ConvDwSWInt8Run error: error_code[" << ret << "]"; } diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_int8.cc index 26fda639ff5..e19dd4b2271 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_int8.cc @@ -222,8 +222,7 @@ int 
ConvolutionInt8CPUKernel::Run() { return RET_ERROR; } - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ConvolutionInt8Impl, this, thread_count_); + int error_code = ParallelLaunch(this->context_, ConvolutionInt8Impl, this, thread_count_); if (error_code != RET_OK) { MS_LOG(ERROR) << "conv int8 error error_code[" << error_code << "]"; FreeTmpBuffer(); diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/crop_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/crop_int8.cc index ff33d3338cf..861862d9729 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/crop_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/crop_int8.cc @@ -51,8 +51,7 @@ int CropInt8CPUKernel::Init() { int CropInt8CPUKernel::ReSize() { return CropBaseCPUKernel::ReSize(); } int CropInt8CPUKernel::Run() { - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(CropInt8Run, this, crop_para_->thread_count_); + auto ret = ParallelLaunch(this->context_, CropInt8Run, this, crop_para_->thread_count_); return ret; } diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_depthwise_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_depthwise_int8.cc index 3fd5a44d642..fc3f5406919 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_depthwise_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_depthwise_int8.cc @@ -188,8 +188,7 @@ int DeconvolutionDepthwiseInt8CPUKernel::Run() { packed_output_ = output_addr; } - ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(DeconvDwInt8Run, this, conv_param_->thread_num_); + ret = ParallelLaunch(this->context_, DeconvDwInt8Run, this, conv_param_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "DeconvDwInt8Run error: error_code[" << ret << "]"; } diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_int8.cc index 10c1de57cae..5862b22a715 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_int8.cc @@ -268,8 +268,7 @@ int DeConvInt8CPUKernel::Run() { DeConvPackInputSum(input_ptr_, input_sum_, conv_param_->conv_quant_arg_.filter_quant_args_[0].zp_, UP_ROUND(matmul_param_->row_, C4NUM), UP_ROUND(matmul_param_->deep_, C16NUM), support_optimize_); - error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(DeConvInt8Run, this, thread_count_); + error_code = ParallelLaunch(this->context_, DeConvInt8Run, this, thread_count_); if (error_code != RET_OK) { MS_LOG(ERROR) << "deconv int8 run error! 
error_code[" << error_code << "]"; } diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/detection_post_process_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/detection_post_process_int8.cc index b45d30a5169..5ce9f999793 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/detection_post_process_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/detection_post_process_int8.cc @@ -64,8 +64,7 @@ int DetectionPostProcessInt8CPUKernel::Dequantize(lite::Tensor *tensor, float ** quant_size_ = tensor->ElementsNum(); thread_n_stride_ = UP_DIV(quant_size_, op_parameter_->thread_num_); - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(DequantizeInt8ToFp32Run, this, op_parameter_->thread_num_); + auto ret = ParallelLaunch(this->context_, DequantizeInt8ToFp32Run, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "QuantDTypeCastRun error error_code[" << ret << "]"; context_->allocator->Free(*data); diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/div_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/div_int8.cc index 4d71c5ade79..b50a202e5b5 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/div_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/div_int8.cc @@ -128,8 +128,7 @@ int DivInt8CPUKernel::Run() { static_cast(in_tensors_.at(1)->MutableData()), reinterpret_cast(tile0_data_), reinterpret_cast(tile1_data_), &tile_para); } - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(DivInt8Run, this, op_parameter_->thread_num_); + auto ret = ParallelLaunch(this->context_, DivInt8Run, this, op_parameter_->thread_num_); if (broadcast_) { context_->allocator->Free(tile0_data_); context_->allocator->Free(tile1_data_); diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/gatherNd_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/gatherNd_int8.cc index 563074a4383..f0ee0ca75cd 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/gatherNd_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/gatherNd_int8.cc @@ -146,8 +146,7 @@ int GatherNdInt8CPUKernel::Run() { if (ret != RET_OK) { return ret; } - ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(GatherNdInt8Run, this, thread_sz_count_); + ret = ParallelLaunch(this->context_, GatherNdInt8Run, this, thread_sz_count_); if (ret != RET_OK) { MS_LOG(ERROR) << "gatherNd error error_code[" << ret << "]"; return ret; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/gather_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/gather_int8.cc index 17a11f1f534..2091f64576f 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/gather_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/gather_int8.cc @@ -96,8 +96,7 @@ int GatherInt8Run(void *cdata, int task_id, float lhs_scale, float rhs_scale) { } int GatherInt8CPUKernel::Run() { - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(GatherInt8Run, this, thread_count_); + int error_code = ParallelLaunch(this->context_, GatherInt8Run, this, thread_count_); if (error_code != RET_OK) { MS_LOG(ERROR) << "Gather function error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/hswish_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/hswish_int8.cc index d9a1fc2cb1f..8786a62cb9b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/hswish_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/hswish_int8.cc @@ -88,8 +88,7 @@ int HswishInt8Run(void *cdata, int task_id, float lhs_scale, float 
rhs_scale) { } int HswishInt8CPUKernel::Run() { - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(HswishInt8Run, this, thread_count_); + int error_code = ParallelLaunch(this->context_, HswishInt8Run, this, thread_count_); if (error_code != RET_OK) { MS_LOG(ERROR) << "HswishInt8Run function error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/l2_norm_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/l2_norm_int8.cc index 11cb77a7b5c..d56e6fd910b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/l2_norm_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/l2_norm_int8.cc @@ -59,8 +59,7 @@ int L2NormInt8CPUKernel::Run() { MS_LOG(ERROR) << "L2Norm only support reduce on all axis and trailing axis with trailing axis"; return RET_ERROR; } - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(L2NormInt8Run, this, op_parameter_->thread_num_); + auto ret = ParallelLaunch(this->context_, L2NormInt8Run, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "L2Norm error: error_code[" << ret << "]"; } diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/layer_norm_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/layer_norm_int8.cc index b246a72ce23..00eab79c1d3 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/layer_norm_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/layer_norm_int8.cc @@ -131,8 +131,7 @@ int LayerNormInt8CPUKernel::Run() { src_ptr_ = reinterpret_cast(in_tensors_.at(0)->data_c()); dst_ptr_ = reinterpret_cast(out_tensors_.at(0)->data_c()); - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(LayerNormInt8Run, this, op_parameter_->thread_num_); + auto ret = ParallelLaunch(this->context_, LayerNormInt8Run, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "LayerNormInt8Run error error_code[" << ret << "]"; return ret; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/leaky_relu_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/leaky_relu_int8.cc index c8fdf31c48d..646f46b907c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/leaky_relu_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/leaky_relu_int8.cc @@ -107,8 +107,7 @@ int LeakyReluInt8CPUKernel::ReSize() { } int LeakyReluInt8CPUKernel::Run() { - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(LeakyReluInt8Run, this, op_parameter_->thread_num_); + auto ret = ParallelLaunch(this->context_, LeakyReluInt8Run, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "RunPreluParam failed. 
errorcode: "; } diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/matmul_base_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/matmul_base_int8.cc index 77e4ce24dce..2757eac1749 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/matmul_base_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/matmul_base_int8.cc @@ -334,8 +334,7 @@ int MatmulBaseInt8CPUKernel::Run() { batch_sums_ = weight_bias_sums_ + i * param_->col_align_; batch_c_ptr_ = c_ptr + i * param_->row_ * param_->col_; - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(MatmulBaseInt8Run, this, thread_count_); + auto ret = ParallelLaunch(this->context_, MatmulBaseInt8Run, this, thread_count_); if (ret != RET_OK) { MS_LOG(ERROR) << "MatmulInt8Run error: [" << ret << "]"; return ret; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/mul_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/mul_int8.cc index 42934912492..1af7c3b765f 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/mul_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/mul_int8.cc @@ -155,8 +155,7 @@ int MulInt8CPUKernel::Run() { if (fast_hw_broadcast_) { elements_num_ = out_tensors_.front()->Batch() * out_tensors_.front()->Height() * out_tensors_.front()->Width(); count_unit_ = thread_count_ > 1 ? UP_DIV(elements_num_, thread_count_) : elements_num_; - return static_cast(this->context_) - ->thread_pool_->ParallelLaunch(FastHWBroadcatMulInt8Run, this, thread_count_); + return ParallelLaunch(this->context_, FastHWBroadcatMulInt8Run, this, thread_count_); } elements_num_ = out_tensors_.at(0)->ElementsNum(); @@ -176,15 +175,13 @@ int MulInt8CPUKernel::Run() { } TileDimensionsInt8(static_cast(in_tensors_.at(0)->MutableData()), static_cast(in_tensors_.at(1)->MutableData()), input0_data_, input1_data_, tile_para); - ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(MulInt8Run, this, thread_count_); + ret = ParallelLaunch(this->context_, MulInt8Run, this, thread_count_); ctx_->allocator->Free(input0_data_); ctx_->allocator->Free(input1_data_); return ret; } - ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(MulInt8Run, this, thread_count_); + ret = ParallelLaunch(this->context_, MulInt8Run, this, thread_count_); return ret; } diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.cc index 5c89224d82e..5c9659b69fa 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.cc @@ -266,8 +266,7 @@ int PadInt8CPUKernel::Run() { int error_code; if (pad_param_->pad_mode_ == static_cast(schema::PaddingMode_CONSTANT)) { memset(out_data_, pad_param_->pad_quant_arg_.constant_value_[0], out_tensors_[0]->ElementsNum() * sizeof(int8_t)); - error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(PadInt8Impl, this, op_parameter_->thread_num_); + error_code = ParallelLaunch(this->context_, PadInt8Impl, this, op_parameter_->thread_num_); if (error_code != RET_OK) { MS_LOG(ERROR) << "Resize run error, error_code[" << error_code << "]"; return RET_ERROR; @@ -280,8 +279,7 @@ int PadInt8CPUKernel::Run() { return error_code; } - error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(MirrorPadImplInt8, this, op_parameter_->thread_num_); + error_code = ParallelLaunch(this->context_, MirrorPadImplInt8, this, op_parameter_->thread_num_); if (error_code != RET_OK) { MS_LOG(ERROR) << "Pad Reflect or Symmetric mode run error, error_code[" << 
error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/pooling_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/pooling_int8.cc index ab8e1134fcb..1468e05397d 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/pooling_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/pooling_int8.cc @@ -95,8 +95,7 @@ int PoolingInt8Impl(void *cdata, int task_id, float lhs_scale, float rhs_scale) } int PoolingInt8CPUKernel::Run() { - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(PoolingInt8Impl, this, thread_count_); + int error_code = ParallelLaunch(this->context_, PoolingInt8Impl, this, thread_count_); if (error_code != RET_OK) { MS_LOG(ERROR) << "poolingInt8 error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/power_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/power_int8.cc index 3ab2e7e0de5..1009a97ad53 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/power_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/power_int8.cc @@ -98,8 +98,7 @@ int PowerInt8Run(void *cdata, int task_id, float lhs_scale, float rhs_scale) { } int PowerInt8CPUKernel::Run() { - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(PowerInt8Run, this, op_parameter_->thread_num_); + auto ret = ParallelLaunch(this->context_, PowerInt8Run, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "PowerInt8Run error, error_code[" << ret << "]"; } diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/reduce_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/reduce_int8.cc index 29ee2a12f0e..bbd041c7f99 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/reduce_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/reduce_int8.cc @@ -457,8 +457,7 @@ int ReduceInt8CPUKernel::Fast4DReduceMeanHWImpl() { } PackNHWCToNCHWInt8(reinterpret_cast(input_data), reinterpret_cast(nchw_in_data_), input->Batch(), input->Height() * input->Width(), input->Channel()); - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ReduceMeanPatternInt8Impl, this, op_parameter_->thread_num_); + auto ret = ParallelLaunch(this->context_, ReduceMeanPatternInt8Impl, this, op_parameter_->thread_num_); if (ret != RET_OK) { ctx_->allocator->Free(nchw_in_data_); MS_LOG(ERROR) << "Reduce run error, error_code[" << ret << "]"; @@ -502,8 +501,7 @@ int ReduceInt8CPUKernel::Run() { outer_size_ = outer_sizes_[i]; inner_size_ = inner_sizes_[i]; axis_size_ = axis_sizes_[i]; - error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ReduceInt8Impl, this, op_parameter_->thread_num_); + error_code = ParallelLaunch(this->context_, ReduceInt8Impl, this, op_parameter_->thread_num_); if (error_code != RET_OK) { FreeTmpBuffer(); MS_LOG(ERROR) << "Reduce run error, error_code[" << error_code << "]"; @@ -518,8 +516,7 @@ int ReduceInt8CPUKernel::Run() { axis_size_ = axis_sizes_.back(); last_dst_data_ = reinterpret_cast(out_tensors_.at(0)->MutableData()); is_last_axis_ = true; - error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ReduceInt8Impl, this, op_parameter_->thread_num_); + error_code = ParallelLaunch(this->context_, ReduceInt8Impl, this, op_parameter_->thread_num_); if (error_code != RET_OK) { MS_LOG(ERROR) << "Reduce run error, error_code[" << error_code << "]"; FreeTmpBuffer(); diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/relux_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/relux_int8.cc index 
50b92df6ebc..efbd6ef289b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/relux_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/relux_int8.cc @@ -71,8 +71,7 @@ int ReluXInt8Run(void *cdata, int task_id, float lhs_scale, float rhs_scale) { } int ReluXInt8CPUKernel::Run() { - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ReluXInt8Run, this, op_parameter_->thread_num_); + int error_code = ParallelLaunch(this->context_, ReluXInt8Run, this, op_parameter_->thread_num_); if (error_code != RET_OK) { MS_LOG(ERROR) << "ReluXInt8Run function error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/reshape_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/reshape_int8.cc index 62baf2a62b8..174ad854f64 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/reshape_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/reshape_int8.cc @@ -57,8 +57,7 @@ int ReshapeInt8CPUKernel::Run() { elements_num_ = in_tensors_.at(kInputIndex)->ElementsNum(); count_unit_ = op_parameter_->thread_num_ > 1 ? UP_DIV(elements_num_, op_parameter_->thread_num_) : elements_num_; - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ReshapeInt8Run, this, op_parameter_->thread_num_); + auto ret = ParallelLaunch(this->context_, ReshapeInt8Run, this, op_parameter_->thread_num_); return ret; } diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/resize_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/resize_int8.cc index 276572bd83f..8a3658c2e44 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/resize_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/resize_int8.cc @@ -363,8 +363,7 @@ int ResizeInt8CPUKernel::RunImpl(int task_id) { } int ResizeInt8CPUKernel::Run() { - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ResizeInt8Impl, this, op_parameter_->thread_num_); + int error_code = ParallelLaunch(this->context_, ResizeInt8Impl, this, op_parameter_->thread_num_); if (error_code != RET_OK) { MS_LOG(ERROR) << "Resize run error, error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/scale_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/scale_int8.cc index 7fb7bec320e..4e46bba3504 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/scale_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/scale_int8.cc @@ -319,8 +319,7 @@ int ScaleInt8CPUKernel::Run() { tile_para->in_strides1_, tile_para->out_strides_, tile_para->multiples1_); } - ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ScaleRunInt8, this, op_parameter_->thread_num_); + ret = ParallelLaunch(this->context_, ScaleRunInt8, this, op_parameter_->thread_num_); // free memory malloced from memory pool if (!scale_param_->const_scale_) { ctx_->allocator->Free(input1_data_); @@ -340,8 +339,7 @@ int ScaleInt8CPUKernel::Run() { if (has_bias_ && !scale_param_->const_offset_) { input2_data_ = reinterpret_cast(in_tensors_.at(kOffsetIndex)->data_c()); } - ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(ScaleRunInt8, this, op_parameter_->thread_num_); + ret = ParallelLaunch(this->context_, ScaleRunInt8, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "Scale error error_code[" << ret << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/sigmoid_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/sigmoid_int8.cc index 9ced691c3cc..cd4371e7184 100644 --- 
a/mindspore/lite/src/runtime/kernel/arm/int8/sigmoid_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/sigmoid_int8.cc @@ -88,8 +88,7 @@ int SigmoidInt8Run(void *cdata, int task_id, float lhs_scale, float rhs_scale) { } int SigmoidInt8CPUKernel::Run() { - int error_code = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(SigmoidInt8Run, this, op_parameter_->thread_num_); + int error_code = ParallelLaunch(this->context_, SigmoidInt8Run, this, op_parameter_->thread_num_); if (error_code != RET_OK) { MS_LOG(ERROR) << "SigmoidInt8Run function error error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/slice_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/slice_int8.cc index 996f86e8222..8c0b6686123 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/slice_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/slice_int8.cc @@ -77,8 +77,7 @@ int SliceInt8Run(void *cdata, int task_id, float lhs_scale, float rhs_scale) { int SliceInt8CPUKernel::Run() { // param_ shape info has already been extended to 8d - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(SliceInt8Run, this, op_parameter_->thread_num_); + auto ret = ParallelLaunch(this->context_, SliceInt8Run, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "SliceInt8Run error, error_code[" << ret << "]"; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/softmax_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/softmax_int8.cc index dd901004fc2..80653b4cc0d 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/softmax_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/softmax_int8.cc @@ -132,8 +132,7 @@ int SoftmaxInt8CPUKernel::Run() { context_->allocator->Free(sum_data_); return RET_ERROR; } - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(SoftmaxRun, this, thread_count_); + auto ret = ParallelLaunch(this->context_, SoftmaxRun, this, thread_count_); context_->allocator->Free(exp_data_); context_->allocator->Free(sum_data_); if (ret != RET_OK) { diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/split_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/split_int8.cc index 36c95d5f806..62e72742224 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/split_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/split_int8.cc @@ -96,8 +96,7 @@ int SplitInt8CPUKernel::Run() { output_ptr_[i] = reinterpret_cast(out_tensors_.at(i)->data_c()); } - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(SplitInt8Run, this, thread_n_num_); + auto ret = ParallelLaunch(this->context_, SplitInt8Run, this, thread_n_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "Scale error error_code[" << ret << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/squeeze_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/squeeze_int8.cc index d35f0570175..d4f7ab0bf5c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/squeeze_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/squeeze_int8.cc @@ -88,8 +88,7 @@ int SqueezeInt8CPUKernel::Init() { int SqueezeInt8CPUKernel::ReSize() { return RET_OK; } int SqueezeInt8CPUKernel::Run() { - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(SqueezeInt8Run, this, op_parameter_->thread_num_); + auto ret = ParallelLaunch(this->context_, SqueezeInt8Run, this, op_parameter_->thread_num_); if (ret != RET_OK) { MS_LOG(ERROR) << "RunSqueezeParam failed. 
errorcode: "; } diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/sub_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/sub_int8.cc index 052b10c1e24..789c7f76d1c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/sub_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/sub_int8.cc @@ -155,8 +155,7 @@ int SubInt8CPUKernel::Run() { static_cast(in_tensors_.at(1)->data_c()), reinterpret_cast(tile0_data_), reinterpret_cast(tile1_data_), &tile_para); } - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(SubInt8Run, this, op_parameter_->thread_num_); + auto ret = ParallelLaunch(this->context_, SubInt8Run, this, op_parameter_->thread_num_); if (broadcast_) { context_->allocator->Free(tile0_data_); context_->allocator->Free(tile1_data_); diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/tanh_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/tanh_int8.cc index 0eb603589ea..2cdcd9003b5 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/tanh_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/tanh_int8.cc @@ -70,8 +70,7 @@ int TanhInt8CPUKernel::Run() { in_ptr_ = reinterpret_cast(in_tensors_.at(0)->data_c()); out_ptr_ = reinterpret_cast(out_tensors_.at(0)->data_c()); - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(TanhInt8Run, this, thread_count_); + auto ret = ParallelLaunch(this->context_, TanhInt8Run, this, thread_count_); if (ret != RET_OK) { MS_LOG(ERROR) << "TanhInt8 Run failed"; return ret; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/transpose_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/transpose_int8.cc index 27a3e3f9a58..b682b7f9889 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/transpose_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/transpose_int8.cc @@ -113,8 +113,7 @@ int TransposeInt8CPUKernel::Run() { memcpy(out_shape_, out_dims.data(), out_dims.size() * sizeof(int)); if (out_tensor->shape().size() > DIMENSION_6D) { - return static_cast(this->context_) - ->thread_pool_->ParallelLaunch(TransposeInt8Run, this, op_parameter_->thread_num_); + return ParallelLaunch(this->context_, TransposeInt8Run, this, op_parameter_->thread_num_); } else { return DoTransposeInt8(in_ptr_, out_ptr_, out_shape_, transpose_param_); } diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/unsqueeze_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/unsqueeze_int8.cc index 3ce7e490461..b263e3e30a3 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/unsqueeze_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/unsqueeze_int8.cc @@ -88,8 +88,7 @@ int UnsqueezeIn8Run(void *cdata, int task_id, float lhs_scale, float rhs_scale) int Unsqueezeint8CPUKernel::Run() { in_ptr_ = reinterpret_cast(in_tensors_.at(0)->MutableData()); out_ptr_ = reinterpret_cast(out_tensors_.at(0)->MutableData()); - auto ret = static_cast(this->context_) - ->thread_pool_->ParallelLaunch(UnsqueezeIn8Run, this, thread_sz_count_); + auto ret = ParallelLaunch(this->context_, UnsqueezeIn8Run, this, thread_sz_count_); if (ret != RET_OK) { MS_LOG(ERROR) << "UnsqueezeRun error error_code[" << ret << "]"; return ret;