diff --git a/mindspore/lite/cmake/compile_link_option.cmake b/mindspore/lite/cmake/compile_link_option.cmake
index 00489c7c23e..3ab0cef1ddd 100644
--- a/mindspore/lite/cmake/compile_link_option.cmake
+++ b/mindspore/lite/cmake/compile_link_option.cmake
@@ -13,7 +13,7 @@ else()
     set(CMAKE_C_FLAGS "-D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -Wno-attributes -Wno-deprecated-declarations \
         -Wno-missing-braces ${SECURE_C_FLAGS} ${CMAKE_C_FLAGS}")
     set(CMAKE_CXX_FLAGS "-D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -Wno-attributes -Wno-deprecated-declarations \
-        -Wno-missing-braces -Wno-overloaded-virtual -std=c++17 ${SECURE_CXX_FLAGS} ${CMAKE_CXX_FLAGS}")
+        -Wno-missing-braces -std=c++17 ${SECURE_CXX_FLAGS} ${CMAKE_CXX_FLAGS}")

     set(CMAKE_C_FLAGS_DEBUG "-DDebug -g -fvisibility=default")
     set(CMAKE_CXX_FLAGS_DEBUG "-DDebug -g -fvisibility=default")
diff --git a/mindspore/lite/src/cxx_api/train/train_support.cc b/mindspore/lite/src/cxx_api/train/train_support.cc
index 633ca613e66..3fe026c3343 100644
--- a/mindspore/lite/src/cxx_api/train/train_support.cc
+++ b/mindspore/lite/src/cxx_api/train/train_support.cc
@@ -67,7 +67,7 @@ std::shared_ptr CreateTrainSession(std::shared_ptr
-  auto ret = session->Init(context, &train_cfg);
+  auto ret = session->TrainInit(context, &train_cfg);
   if (ret != mindspore::lite::RET_OK) {
     MS_LOG(ERROR) << "init session failed";
     return nullptr;
diff --git a/mindspore/lite/src/inner_kernel.h b/mindspore/lite/src/inner_kernel.h
index dc56c32f9d4..0349c8d0066 100644
--- a/mindspore/lite/src/inner_kernel.h
+++ b/mindspore/lite/src/inner_kernel.h
@@ -95,18 +95,6 @@ class InnerKernel : public Kernel {
              : schema::PrimitiveType_NONE;
   }

-  void set_inputs(const std::vector<mindspore::tensor::MSTensor *> &in_tensors) {
-    this->in_tensors_.resize(in_tensors.size());
-    (void)std::transform(in_tensors.begin(), in_tensors.end(), in_tensors_.begin(),
-                         [](mindspore::tensor::MSTensor *tensor) { return static_cast<lite::Tensor *>(tensor); });
-  }
-
-  void set_outputs(const std::vector<mindspore::tensor::MSTensor *> &out_tensors) {
-    this->out_tensors_.resize(out_tensors.size());
-    (void)std::transform(out_tensors.begin(), out_tensors.end(), out_tensors_.begin(),
-                         [](mindspore::tensor::MSTensor *tensor) { return static_cast<lite::Tensor *>(tensor); });
-  }
-
   const std::vector<mindspore::tensor::MSTensor *> &inputs() override {
     if (inputs_.empty()) {
       std::transform(in_tensors_.begin(), in_tensors_.end(), std::back_inserter(inputs_), [](lite::Tensor *tensor) {
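The flag removal above is only safe because of the renames throughout the rest of this patch: once no kernel declares an Execute overload that hides a base-class virtual Execute, -Woverloaded-virtual has nothing to report and the blanket suppression can go. A minimal sketch of the diagnostic the old flag was hiding, with hypothetical class names rather than code from this repository:

// Derived::Execute(int) does not override Base::Execute(); it hides it,
// which is exactly what -Woverloaded-virtual warns about.
struct Base {
  virtual ~Base() = default;
  virtual int Execute() { return 0; }
};
struct Derived : Base {
  int Execute(int task_id) { return task_id; }  // warning: hides overloaded virtual function
};
// Renaming the per-task entry point to DoExecute(int), as every hunk below
// does, leaves Base::Execute() visible and the warning never fires.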
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.cc
index d4a143be5b0..7985d091512 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.cc
@@ -122,7 +122,7 @@ void ArithmeticFP16CPUKernel::TileConstTensor(const void *in_data, void *out_dat
                  in_shape, in_strides, out_strides, multiple);
 }

-int ArithmeticFP16CPUKernel::Execute(const void *input0, const void *input1, void *output, int size, bool is_opt) {
+int ArithmeticFP16CPUKernel::DoExecute(const void *input0, const void *input1, void *output, int size, bool is_opt) {
   int ret = RET_OK;
   if (is_opt) {
     CHECK_NULL_RETURN(arithmetic_opt_func_);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.h
index 629482a1965..1c314609f99 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.h
@@ -47,7 +47,7 @@ class ArithmeticFP16CPUKernel : public ArithmeticCPUKernel {
   int ConstTensorBroadCast() override;
   void TileConstTensor(const void *in_data, void *out_data, size_t ndim, const int *in_shape, const int *in_strides,
                        const int *out_strides, const int *multiple) override;
-  int Execute(const void *input0, const void *input1, void *output, int size, bool is_opt) override;
+  int DoExecute(const void *input0, const void *input1, void *output, int size, bool is_opt) override;
   void FreeFp16Buffer();
   ArithmeticFuncFp16 arithmetic_func_ = nullptr;
   ArithmeticOptFuncFp16 arithmetic_opt_func_ = nullptr;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_3x3_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_3x3_fp16.cc
index 4846164516a..b9f55fa5335 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_3x3_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_3x3_fp16.cc
@@ -94,7 +94,7 @@ int ConvolutionDepthwise3x3Fp16CPUKernel::ReSize() {
   return RET_OK;
 }

-int ConvolutionDepthwise3x3Fp16CPUKernel::Execute(int task_id) {
+int ConvolutionDepthwise3x3Fp16CPUKernel::DoExecute(int task_id) {
   int units = UP_DIV(conv_param_->output_w_, C2NUM);  // F(2, 3) contains 2 conv units
   int c8 = UP_ROUND(conv_param_->input_channel_, C8NUM);
   auto buffer = buffer_ + C12NUM * c8 * units * task_id;
@@ -108,7 +108,7 @@ int ConvolutionDepthwise3x3Fp16CPUKernel::Execute(int task_id) {

 int ConvDw3x3Fp16Run(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   auto conv_dw = reinterpret_cast<ConvolutionDepthwise3x3Fp16CPUKernel *>(cdata);
-  auto ret = conv_dw->Execute(task_id);
+  auto ret = conv_dw->DoExecute(task_id);
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "ConvolutionDepthwise3x3Run error task_id[" << task_id << "] error_code[" << ret << "]";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_3x3_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_3x3_fp16.h
index 0b77b16cc01..125b96930b3 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_3x3_fp16.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_3x3_fp16.h
@@ -36,7 +36,7 @@ class ConvolutionDepthwise3x3Fp16CPUKernel : public ConvolutionBaseCPUKernel {
   int ReSize() override;
   int Run() override;

-  int Execute(int task_id);
+  int DoExecute(int task_id);

  private:
   void PackWeight() override;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.cc
index b0fa1ef2c67..856e32f5a7a 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.cc
@@ -90,7 +90,7 @@ int ConvolutionDepthwiseFp16CPUKernel::ReSize() {
   return RET_OK;
 }

-int ConvolutionDepthwiseFp16CPUKernel::Execute(int task_id) {
+int ConvolutionDepthwiseFp16CPUKernel::DoExecute(int task_id) {
   auto input_ptr = reinterpret_cast<float16_t *>(in_tensors_.at(0)->data());
   auto output_ptr = reinterpret_cast<float16_t *>(out_tensors_.at(0)->data());
   MS_ASSERT(input_ptr != nullptr);
@@ -106,7 +106,7 @@ int ConvolutionDepthwiseFp16CPUKernel::Execute(int task_id) {

 static int ConvDwFp16Run(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   auto conv_dw_fp16 = reinterpret_cast<ConvolutionDepthwiseFp16CPUKernel *>(cdata);
-  auto ret = conv_dw_fp16->Execute(task_id);
+  auto ret = conv_dw_fp16->DoExecute(task_id);
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "ConvolutionDepthwiseFp16Run error task_id[" << task_id << "] error_code[" << ret << "]";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.h
index cdefc1456ba..f1bbf9fc5d3 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.h
@@ -44,7 +44,7 @@ class ConvolutionDepthwiseFp16CPUKernel : public ConvolutionBaseCPUKernel {
   int ReSize() override;
   int Run() override;

-  int Execute(int task_id);
+  int DoExecute(int task_id);

  private:
   void PackWeight() override;
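Every .cc hunk in this patch has the same shape as the depthwise hunks above: a C-style callback handed to the thread pool casts cdata back to the concrete kernel and forwards one task slice, so the callback body and the member function must be renamed together. A compilable sketch of the pattern with illustrative names (MyKernel, MyKernelRun; the real runner is ParallelLaunch, whose exact signature may differ):

#include <iostream>

struct MyKernel {  // stands in for e.g. ConvolutionDepthwiseFp16CPUKernel
  int DoExecute(int task_id) { return 0; }  // computes one thread's slice of the op
};

// Trampoline with the runner's callback signature: recover the kernel from
// the opaque pointer and forward the task id.
int MyKernelRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
  auto *kernel = static_cast<MyKernel *>(cdata);
  return kernel->DoExecute(task_id);
}

int main() {
  MyKernel kernel;
  // Run() would hand MyKernelRun and `this` to the thread pool; serially:
  for (int task_id = 0; task_id < 4; ++task_id) {
    if (MyKernelRun(&kernel, task_id, 0.0f, 1.0f) != 0) {
      std::cout << "task " << task_id << " failed\n";
    }
  }
  return 0;
}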
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_slidewindow_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_slidewindow_fp16.cc
index 9c68b451117..a429baad92b 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_slidewindow_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_slidewindow_fp16.cc
@@ -128,7 +128,7 @@ int ConvolutionDepthwiseSWFp16CPUKernel::ReSize() {
   return RET_OK;
 }

-int ConvolutionDepthwiseSWFp16CPUKernel::Execute(int task_id) {
+int ConvolutionDepthwiseSWFp16CPUKernel::DoExecute(int task_id) {
   ConvDwC8Fp16(packed_output_, packed_input_, reinterpret_cast<float16_t *>(packed_weight_),
                reinterpret_cast<float16_t *>(bias_data_), conv_param_, sliding_, task_id);
   return RET_OK;
@@ -136,7 +136,7 @@ int ConvolutionDepthwiseSWFp16CPUKernel::Execute(int task_id) {

 static int ConvDwSWFp16Run(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   auto conv_dw_fp16 = reinterpret_cast<ConvolutionDepthwiseSWFp16CPUKernel *>(cdata);
-  auto ret = conv_dw_fp16->Execute(task_id);
+  auto ret = conv_dw_fp16->DoExecute(task_id);
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "ConvolutionDepthwiseSWFp16Run error task_id[" << task_id << "] error_code[" << ret << "]";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_slidewindow_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_slidewindow_fp16.h
index f732061a11a..8657ad83ec3 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_slidewindow_fp16.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_slidewindow_fp16.h
@@ -46,7 +46,7 @@ class ConvolutionDepthwiseSWFp16CPUKernel : public ConvolutionBaseCPUKernel {
   int Run() override;
   int InitPackedInputOutput();

-  int Execute(int task_id);
+  int DoExecute(int task_id);

  private:
   void PackWeight() override;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.cc
index 9edf2ce18c0..630d1360ae8 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.cc
@@ -151,7 +151,7 @@ int DeconvolutionDepthwiseFp16CPUKernel::ReSize() {
   return RET_OK;
 }

-int DeconvolutionDepthwiseFp16CPUKernel::Execute(int task_id) {
+int DeconvolutionDepthwiseFp16CPUKernel::DoExecute(int task_id) {
   DeconvDwC8Fp16(packed_output_, packed_input_, reinterpret_cast<float16_t *>(packed_weight_),
                  reinterpret_cast<float16_t *>(bias_data_), conv_param_, sliding_, task_id);
   return RET_OK;
@@ -159,7 +159,7 @@ int DeconvolutionDepthwiseFp16CPUKernel::Execute(int task_id) {

 static int DeconvDwFp16Run(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   auto deconv_dw_fp16 = reinterpret_cast<DeconvolutionDepthwiseFp16CPUKernel *>(cdata);
-  auto ret = deconv_dw_fp16->Execute(task_id);
+  auto ret = deconv_dw_fp16->DoExecute(task_id);
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "DeconvolutionDepthwiseFp16Run error task_id[" << task_id << "] error_code[" << ret << "]";
     return RET_ERROR;
"DeconvolutionDepthwiseFp16Run error task_id[" << task_id << "] error_code[" << ret << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.h index 5f4e7cfc171..c3769730ad1 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.h @@ -48,7 +48,7 @@ class DeconvolutionDepthwiseFp16CPUKernel : public ConvolutionBaseCPUKernel { int InitPackedInputOutput(); int InitSlideParam(); - int Execute(int task_id); + int DoExecute(int task_id); private: int MallocWeightBiasData() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/stack_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/stack_fp16.cc index 7bacce70707..d927a6e9a14 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/stack_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/stack_fp16.cc @@ -83,7 +83,7 @@ int StackFp16CPUKernel::Prepare() { return ReSize(); } -int StackFp16CPUKernel::Execute(int task_id) { +int StackFp16CPUKernel::DoExecute(int task_id) { auto inputs = buffers_.data(); void *output_data = reinterpret_cast(out_buffer_); auto step = UP_DIV(outer_size_, num_threads_); @@ -99,7 +99,7 @@ int StackFp16CPUKernel::Execute(int task_id) { static int StackRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) { auto stack = reinterpret_cast(cdata); - if (stack->Execute(task_id) != RET_OK) { + if (stack->DoExecute(task_id) != RET_OK) { return RET_ERROR; } return RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/stack_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/stack_fp16.h index 2aa3d8278a1..37a5eb19010 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/stack_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/stack_fp16.h @@ -29,7 +29,7 @@ class StackFp16CPUKernel : public StackBaseCPUKernel { ~StackFp16CPUKernel() override = default; int Prepare() override; int Run() override; - int Execute(int task_id); + int DoExecute(int task_id); private: void InitMallocFlags(); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/arithmetic_fp16_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/arithmetic_fp16_grad.cc index f44461759d7..e1c35eed04b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/arithmetic_fp16_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/arithmetic_fp16_grad.cc @@ -72,7 +72,7 @@ int ArithmeticGradCPUKernelFp16::ArithmeticGradMinimum(float16_t *dy, int dy_siz int ArithmeticGradCPUKernelFp16::ReSize() { return RET_OK; } -int ArithmeticGradCPUKernelFp16::Execute(int task_id) { +int ArithmeticGradCPUKernelFp16::DoExecute(int task_id) { auto dy = reinterpret_cast(in_tensors_[0]->data()); auto dx1 = reinterpret_cast(out_tensors_[0]->data()); auto dx2 = reinterpret_cast(out_tensors_[1]->data()); @@ -89,7 +89,7 @@ int ArithmeticGradCPUKernelFp16::Execute(int task_id) { int ArithmeticGradRunFp16(void *cdata, int task_id, float lhs_scale, float rhs_scale) { auto Arithmetic_kernel = reinterpret_cast(cdata); CHECK_NULL_RETURN(Arithmetic_kernel); - auto error_code = Arithmetic_kernel->Execute(task_id); + auto error_code = Arithmetic_kernel->DoExecute(task_id); if (error_code != RET_OK) { MS_LOG(ERROR) << "ArithmeticGradRunFp16 error task_id[" << task_id << "] error_code[" << error_code << "]"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/arithmetic_fp16_grad.h 
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/arithmetic_fp16_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/arithmetic_fp16_grad.h
index 5f34ee358f3..4d8f0ef3f59 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/arithmetic_fp16_grad.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/arithmetic_fp16_grad.h
@@ -64,7 +64,7 @@ class ArithmeticGradCPUKernelFp16 : public InnerKernel {
   int InferShape();
   int ReSize() override;
   int Run() override;
-  int Execute(int task_id);
+  int DoExecute(int task_id);

  private:
   int ArithmeticGradMaximum(float16_t *dy, int dy_size, float16_t *dx1, int dx1_size, float16_t *dx2, int dx2_size);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/bias_fp16_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/bias_fp16_grad.cc
index b1795214b89..5577ee831e9 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/bias_fp16_grad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/bias_fp16_grad.cc
@@ -56,7 +56,7 @@ int BiasGradCPUKernelFp16::Prepare() {
   return ReSize();
 }

-int BiasGradCPUKernelFp16::Execute(int task_id) {
+int BiasGradCPUKernelFp16::DoExecute(int task_id) {
   auto in = reinterpret_cast<float16_t *>(in_tensors_.at(0)->data());
   auto out = reinterpret_cast<float16_t *>(out_tensors_.at(0)->data());
   CHECK_NULL_RETURN(in);
@@ -82,7 +82,7 @@ int BiasGradCPUKernelFp16::Execute(int task_id) {
 int BiasGradFp16Run(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   CHECK_NULL_RETURN(cdata);
   auto bias_kernel = reinterpret_cast<BiasGradCPUKernelFp16 *>(cdata);
-  auto error_code = bias_kernel->Execute(task_id);
+  auto error_code = bias_kernel->DoExecute(task_id);
   if (error_code != RET_OK) {
     MS_LOG(ERROR) << "bias error task_id[" << task_id << "] error_code[" << error_code << "]";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/bias_fp16_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/bias_fp16_grad.h
index c7a508fddd4..e3318944759 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/bias_fp16_grad.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/bias_fp16_grad.h
@@ -34,7 +34,7 @@ class BiasGradCPUKernelFp16 : public InnerKernel {
   int Prepare() override;
   int ReSize() override;
   int Run() override;
-  int Execute(int task_id);
+  int DoExecute(int task_id);

  private:
   ArithmeticParameter *bias_param;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/bn_fp16_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/bn_fp16_grad.cc
index 9b923ea2460..822e283d31b 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/bn_fp16_grad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/bn_fp16_grad.cc
@@ -72,7 +72,7 @@ int BNGradCPUKernelFp16::Prepare() {
   return ReSize();
 }

-int BNGradCPUKernelFp16::Execute(int task_id) {
+int BNGradCPUKernelFp16::DoExecute(int task_id) {
   auto *input_yt = in_tensors_.at(kNumInputDim_0);
   auto *input_x = in_tensors_.at(kNumInputDim_1);
   auto *input_scale = in_tensors_.at(kNumInputDim_2);
@@ -153,7 +153,7 @@ int BNGradCPUKernelFp16::Execute(int task_id) {
 int BNGradFp16Run(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   CHECK_NULL_RETURN(cdata);
   auto bn_kernel = reinterpret_cast<BNGradCPUKernelFp16 *>(cdata);
-  auto error_code = bn_kernel->Execute(task_id);
+  auto error_code = bn_kernel->DoExecute(task_id);
   if (error_code != RET_OK) {
     MS_LOG(ERROR) << "BNGradRun error task_id[" << task_id << "] error_code[" << error_code << "]";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/bn_fp16_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/bn_fp16_grad.h
index 40f81b38b7e..34cafb307f4 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/bn_fp16_grad.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/bn_fp16_grad.h
@@ -32,7 +32,7 @@ class BNGradCPUKernelFp16 : public InnerKernel {
   int Prepare() override;
   int ReSize() override;
   int Run() override;
-  int Execute(int task_id);
+  int DoExecute(int task_id);

  private:
   int thread_num_ = 1;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/convolution_fp16_grad_filter.cc b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/convolution_fp16_grad_filter.cc
index 1311b56dd22..625987e8489 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/convolution_fp16_grad_filter.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/convolution_fp16_grad_filter.cc
@@ -79,7 +79,7 @@ int ConvolutionGradFilterCPUKernelFp16::ReSize() {

 int ConvolutionGradFilterCPUKernelFp16::Prepare() { return ReSize(); }

-int ConvolutionGradFilterCPUKernelFp16::Execute(int task_id) {
+int ConvolutionGradFilterCPUKernelFp16::DoExecute(int task_id) {
   auto conv_param = reinterpret_cast<ConvParameter *>(op_parameter_);
   CHECK_NULL_RETURN(conv_param);
   auto *input_dy = in_tensors_.at(0);
@@ -185,7 +185,7 @@ int ConvolutionGradFilterCPUKernelFp16::Execute(int task_id) {
 int ConvolutionGradFilterFp16Run(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   auto convfilter_kernel = reinterpret_cast<ConvolutionGradFilterCPUKernelFp16 *>(cdata);
   CHECK_NULL_RETURN(convfilter_kernel);
-  auto error_code = convfilter_kernel->Execute(task_id);
+  auto error_code = convfilter_kernel->DoExecute(task_id);
   if (error_code != RET_OK) {
     MS_LOG(ERROR) << "ConvolutionGradFilterRun error task_id[" << task_id << "] error_code[" << error_code << "]";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/convolution_fp16_grad_filter.h b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/convolution_fp16_grad_filter.h
index 816f63cdf15..e748597f3b2 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/convolution_fp16_grad_filter.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/convolution_fp16_grad_filter.h
@@ -32,7 +32,7 @@ class ConvolutionGradFilterCPUKernelFp16 : public InnerKernel {
   int Prepare() override;
   int ReSize() override;
   int Run() override;
-  int Execute(int task_id);
+  int DoExecute(int task_id);

  private:
   size_t ws_size_ = 0;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/convolution_fp16_grad_input.cc b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/convolution_fp16_grad_input.cc
index d07a3bfe6b3..138bbbba795 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/convolution_fp16_grad_input.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/convolution_fp16_grad_input.cc
@@ -75,7 +75,7 @@ int ConvolutionGradInputCPUKernelFp16::ReSize() {

 int ConvolutionGradInputCPUKernelFp16::Prepare() { return ReSize(); }

-int ConvolutionGradInputCPUKernelFp16::Execute(int task_id) {
+int ConvolutionGradInputCPUKernelFp16::DoExecute(int task_id) {
   auto conv_param = reinterpret_cast<ConvParameter *>(op_parameter_);
   auto *input_dy = in_tensors_.at(0);
   auto *input_w = in_tensors_.at(1);
@@ -156,7 +156,7 @@ int ConvolutionGradInputCPUKernelFp16::Execute(int task_id) {
 int ConvolutionGradInputFp16Run(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   MS_ASSERT(cdata != nullptr);
   auto convinput_kernel = reinterpret_cast<ConvolutionGradInputCPUKernelFp16 *>(cdata);
-  auto error_code = convinput_kernel->Execute(task_id);
+  auto error_code = convinput_kernel->DoExecute(task_id);
   if (error_code != RET_OK) {
     MS_LOG(ERROR) << "conv input error task_id[" << task_id << "] error_code[" << error_code << "]";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/convolution_fp16_grad_input.h b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/convolution_fp16_grad_input.h
index 27adae8e0d1..5ba2297a548 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/convolution_fp16_grad_input.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/convolution_fp16_grad_input.h
@@ -31,7 +31,7 @@ class ConvolutionGradInputCPUKernelFp16 : public InnerKernel {
   int Prepare() override;
   int ReSize() override;
   int Run() override;
-  int Execute(int task_id);
+  int DoExecute(int task_id);

  private:
   size_t ws_size_ = 0;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/dropout_fp16_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/dropout_fp16_grad.cc
index 12a01b0e295..14bcfb42d50 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/dropout_fp16_grad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/dropout_fp16_grad.cc
@@ -61,7 +61,7 @@ int DropoutGradCPUKernelFp16::Prepare() {

 int DropoutGradCPUKernelFp16::ReSize() { return RET_OK; }

-int DropoutGradCPUKernelFp16::Execute(int task_id) {
+int DropoutGradCPUKernelFp16::DoExecute(int task_id) {
   auto yt_ptr = reinterpret_cast<float16_t *>(in_tensors_.at(kInputIndex)->data());
   auto mask_ptr = reinterpret_cast<float16_t *>(in_tensors_.at(1)->data());
   auto output_ptr = reinterpret_cast<float16_t *>(out_tensors_.at(kOutputIndex)->data());
@@ -81,7 +81,7 @@ int DropoutGradCPUKernelFp16::Execute(int task_id) {
 int RunDropoutFp16Grad(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   auto dropout = reinterpret_cast<DropoutGradCPUKernelFp16 *>(cdata);
   CHECK_NULL_RETURN(dropout);
-  auto error_code = dropout->Execute(task_id);
+  auto error_code = dropout->DoExecute(task_id);
   if (error_code != RET_OK) {
     MS_LOG(ERROR) << "Dropout Grad Run error task_id[" << task_id << "] error_code[" << error_code << "]";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/dropout_fp16_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/dropout_fp16_grad.h
index b3663e2e190..8c49e7cd898 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/dropout_fp16_grad.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/dropout_fp16_grad.h
@@ -31,7 +31,7 @@ class DropoutGradCPUKernelFp16 : public InnerKernel {
   int Prepare() override;
   int ReSize() override;
   int Run() override;
-  int Execute(int task_id);
+  int DoExecute(int task_id);

  private:
   float scale_;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/layernorm_fp16_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/layernorm_fp16_grad.cc
index d6636cab575..95b7cfa4160 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/layernorm_fp16_grad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/layernorm_fp16_grad.cc
@@ -83,7 +83,7 @@ int LayerNormGradCPUKernelFp16::Prepare() {
   return RET_OK;
 }

-int LayerNormGradCPUKernelFp16::Execute(int task_id) {
+int LayerNormGradCPUKernelFp16::DoExecute(int task_id) {
   auto input_x = in_tensors_.at(kNumInputDim_0);
   auto input_dy = in_tensors_.at(kNumInputDim_1);
   auto input_var = in_tensors_.at(kNumInputDim_2);
@@ -117,7 +117,7 @@ int LayerNormF16GradRun(void *cdata, int task_id, float lhs_scale, float rhs_sca
   CHECK_NULL_RETURN(cdata);
   auto ln_kernel = reinterpret_cast<LayerNormGradCPUKernelFp16 *>(cdata);
   CHECK_NULL_RETURN(ln_kernel);
-  auto error_code = ln_kernel->Execute(task_id);
+  auto error_code = ln_kernel->DoExecute(task_id);
   if (error_code != RET_OK) {
     MS_LOG(ERROR) << "LayerNormGradRun error task_id[" << task_id << "] error_code[" << error_code << "]";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/layernorm_fp16_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/layernorm_fp16_grad.h
index 71dbd7e7f93..8413f972f93 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/layernorm_fp16_grad.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/layernorm_fp16_grad.h
@@ -31,7 +31,7 @@ class LayerNormGradCPUKernelFp16 : public InnerKernel {
   int Prepare() override;
   int ReSize() override;
   int Run() override;
-  int Execute(int task_id);
+  int DoExecute(int task_id);

  private:
   int block_num_ = 1;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/pooling_fp16_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/pooling_fp16_grad.cc
index 399b6f0d118..1610259875b 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/pooling_fp16_grad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/pooling_fp16_grad.cc
@@ -72,7 +72,7 @@ int PoolingGradCPUKernelFp16::ReSize() {

 int PoolingGradCPUKernelFp16::Prepare() { return ReSize(); }

-int PoolingGradCPUKernelFp16::Execute(int task_id) {
+int PoolingGradCPUKernelFp16::DoExecute(int task_id) {
   PoolingParameter *pool_param = reinterpret_cast<PoolingParameter *>(op_parameter_);
   auto input_ptr = reinterpret_cast<float16_t *>(in_tensors_.at(0)->data());
   CHECK_NULL_RETURN(input_ptr);
@@ -102,7 +102,7 @@ int PoolingGradCPUKernelFp16::Execute(int task_id) {
 int PoolingFp16GradImpl(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   CHECK_NULL_RETURN(cdata);
   auto pooling = reinterpret_cast<PoolingGradCPUKernelFp16 *>(cdata);
-  auto error_code = pooling->Execute(task_id);
+  auto error_code = pooling->DoExecute(task_id);
   if (error_code != RET_OK) {
     MS_LOG(ERROR) << "Pooling Run error task_id[" << task_id << "] error_code[" << error_code << "]";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/pooling_fp16_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/pooling_fp16_grad.h
index 5f9ebf83472..ab76995e596 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/pooling_fp16_grad.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/pooling_fp16_grad.h
@@ -36,7 +36,7 @@ class PoolingGradCPUKernelFp16 : public InnerKernel {
   int Prepare() override;
   int ReSize() override;
   int Run() override;
-  int Execute(int task_id);
+  int DoExecute(int task_id);

  private:
   int thread_num_ = 1;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/resize_fp16_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/resize_fp16_grad.cc
index 5df6dc1289f..f7683002ef2 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/resize_fp16_grad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/resize_fp16_grad.cc
@@ -62,7 +62,7 @@ int ResizeGradCPUKernelFp16::Prepare() {
   return ReSize();
 }

-int ResizeGradCPUKernelFp16::Execute(int task_id) {
+int ResizeGradCPUKernelFp16::DoExecute(int task_id) {
   auto in_addr = reinterpret_cast<float16_t *>(in_tensors_.at(0)->data());
   auto out_addr = reinterpret_cast<float16_t *>(out_tensors_.at(0)->data());
   CHECK_NULL_RETURN(in_addr);
@@ -91,7 +91,7 @@ int ResizeGradCPUKernelFp16::Execute(int task_id) {
 int ResizeFp16GradRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   auto resize_grad_kernel = reinterpret_cast<ResizeGradCPUKernelFp16 *>(cdata);
   CHECK_NULL_RETURN(resize_grad_kernel);
-  auto error_code = resize_grad_kernel->Execute(task_id);
+  auto error_code = resize_grad_kernel->DoExecute(task_id);
   if (error_code != RET_OK) {
     MS_LOG(ERROR) << "resize grad error task_id[" << task_id << "] error_code[" << error_code << "]";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/resize_fp16_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/resize_fp16_grad.h
index a83e736e025..41ac9166add 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/resize_fp16_grad.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/resize_fp16_grad.h
@@ -31,7 +31,7 @@ class ResizeGradCPUKernelFp16 : public InnerKernel {
   int ReSize() override;
   int Run() override;
   int ExecuteInit(int task_id);
-  int Execute(int task_id);
+  int DoExecute(int task_id);
 };
 }  // namespace mindspore::kernel
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/strided_slice_fp16_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/strided_slice_fp16_grad.cc
index 011c5a7c705..ca10eb8e810 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/strided_slice_fp16_grad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/strided_slice_fp16_grad.cc
@@ -114,7 +114,7 @@ int StridedSliceGradCPUKernelFp16::ReSize() { return RET_OK; }
 int StridedSliceFp16GradImpl(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   CHECK_NULL_RETURN(cdata);
   auto slice = reinterpret_cast<StridedSliceGradCPUKernelFp16 *>(cdata);
-  auto error_code = slice->Execute(task_id);
+  auto error_code = slice->DoExecute(task_id);
   if (error_code != RET_OK) {
     MS_LOG(ERROR) << "StridedSliceGrad Run error task_id[" << task_id << "] error_code[" << error_code << "]";
     return RET_ERROR;
@@ -131,7 +131,7 @@ int StridedSliceGradCPUKernelFp16::Run() {
   return RET_OK;
 }

-int StridedSliceGradCPUKernelFp16::Execute(int task_id) {
+int StridedSliceGradCPUKernelFp16::DoExecute(int task_id) {
   auto input = in_tensors_.at(0);
   auto output = out_tensors_.at(0);
   int *po = output_shape_.data();
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/strided_slice_fp16_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/strided_slice_fp16_grad.h
index 1ee64e445e8..c77e21a20b5 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/strided_slice_fp16_grad.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/strided_slice_fp16_grad.h
@@ -34,7 +34,7 @@ class StridedSliceGradCPUKernelFp16 : public InnerKernel {
   int Prepare() override;
   int ReSize() override;
   int Run() override;
-  int Execute(int task_id);
+  int DoExecute(int task_id);

  private:
   void FillEmptyDims();
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/unsorted_segment_sum_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/unsorted_segment_sum_fp16.cc
index 9342688d503..3983d581a57 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/unsorted_segment_sum_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/unsorted_segment_sum_fp16.cc
@@ -63,7 +63,7 @@ int UnsortedSegmentSumFp16Run(void *cdata, int task_id, float lhs_scale, float r
   CHECK_NULL_RETURN(cdata);
   auto kernel = reinterpret_cast<UnsortedSegmentSumCPUKernelFp16 *>(cdata);
   CHECK_NULL_RETURN(kernel);
-  auto error_code = kernel->Execute(task_id);
+  auto error_code = kernel->DoExecute(task_id);
   if (error_code != RET_OK) {
     MS_LOG(ERROR) << "UnsortedSegmentSum Run error task_id[" << task_id << "] error_code[" << error_code << "]";
     return RET_ERROR;
@@ -80,7 +80,7 @@ int UnsortedSegmentSumCPUKernelFp16::Run() {
   return RET_OK;
 }

-int UnsortedSegmentSumCPUKernelFp16::Execute(int task_id) {
+int UnsortedSegmentSumCPUKernelFp16::DoExecute(int task_id) {
   auto input_tensor = in_tensors_.at(0);
   auto indices_tensor = in_tensors_.at(1);
   auto output_tensor = out_tensors_.at(0);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/unsorted_segment_sum_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/unsorted_segment_sum_fp16.h
index 6ebce829612..1ecc35d1f94 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/unsorted_segment_sum_fp16.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/unsorted_segment_sum_fp16.h
@@ -31,7 +31,7 @@ class UnsortedSegmentSumCPUKernelFp16 : public InnerKernel {
   int Prepare() override;
   int ReSize() override;
   int Run() override;
-  int Execute(int task_id);
+  int DoExecute(int task_id);
   size_t unit_num_ = 0;
   size_t input_dim1_ = 0;
   size_t output_dim0_ = 0;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_compare_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_compare_fp32.cc
index 81a2baf5cc1..834f7ec0f15 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_compare_fp32.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_compare_fp32.cc
@@ -51,7 +51,7 @@ void ArithmeticCompareCPUKernel::InitRunFunction(int primitive_type) {
   }
 }

-int ArithmeticCompareCPUKernel::Execute(const void *input0, const void *input1, void *output, int size, bool is_opt) {
+int ArithmeticCompareCPUKernel::DoExecute(const void *input0, const void *input1, void *output, int size, bool is_opt) {
   int ret = RET_OK;
   if (in_tensors_[0]->data_type() == kNumberTypeFloat32) {
     if (is_opt) {
@@ -95,9 +95,9 @@ int ArithmeticCompareCPUKernel::CalcArithmeticByBatch(int task_id) {
     batch_b_ptr_ = static_cast<uint8_t *>(input1_ptr_) + b_offset_[i] * b_stride_size_ * data_type_len_;
     batch_c_ptr_ = static_cast<uint8_t *>(output_ptr_) + i * c_stride_size_ * sizeof(uint8_t);
     if (batch_scalar_) {
-      ret = Execute(batch_a_ptr_, batch_b_ptr_, batch_c_ptr_, c_stride_size_, true);
+      ret = DoExecute(batch_a_ptr_, batch_b_ptr_, batch_c_ptr_, c_stride_size_, true);
     } else {
-      ret = Execute(batch_a_ptr_, batch_b_ptr_, batch_c_ptr_, c_stride_size_, false);
+      ret = DoExecute(batch_a_ptr_, batch_b_ptr_, batch_c_ptr_, c_stride_size_, false);
     }
     if (ret != RET_OK) {
       MS_LOG(ERROR) << "failed to calculate.";
@@ -124,12 +124,12 @@ int ArithmeticCompareCPUKernel::DoArithmetic(int task_id) {
   int out_offset = stride * task_id * sizeof(uint8_t);
   if (scalar_) {
     if (param_->in_elements_num0_ == 1) {
-      ret = Execute(batch_a_ptr_, batch_b_ptr_ + in_offset, batch_c_ptr_ + out_offset, count, true);
+      ret = DoExecute(batch_a_ptr_, batch_b_ptr_ + in_offset, batch_c_ptr_ + out_offset, count, true);
     } else {
-      ret = Execute(batch_a_ptr_ + in_offset, batch_b_ptr_, batch_c_ptr_ + out_offset, count, true);
+      ret = DoExecute(batch_a_ptr_ + in_offset, batch_b_ptr_, batch_c_ptr_ + out_offset, count, true);
     }
   } else {
-    ret = Execute(batch_a_ptr_ + in_offset, batch_b_ptr_ + in_offset, batch_c_ptr_ + out_offset, count, false);
+    ret = DoExecute(batch_a_ptr_ + in_offset, batch_b_ptr_ + in_offset, batch_c_ptr_ + out_offset, count, false);
   }
   return ret;
 }
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_compare_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_compare_fp32.h
index 84ddad3a22d..c7daa87df55 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_compare_fp32.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_compare_fp32.h
@@ -45,7 +45,7 @@ class ArithmeticCompareCPUKernel : public ArithmeticCPUKernel {

  protected:
   void InitRunFunction(int primitive_type) override;
-  int Execute(const void *input0, const void *input1, void *output, int size, bool is_opt) override;
+  int DoExecute(const void *input0, const void *input1, void *output, int size, bool is_opt) override;
   int CalcArithmeticByBatch(int task_id) override;

  private:
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_fp32.cc
index b24cde5b17b..bcc9b63e0a8 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_fp32.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_fp32.cc
@@ -328,7 +328,7 @@ void ArithmeticCPUKernel::InitRunFunction(int primitive_type) {
   }
 }

-int ArithmeticCPUKernel::Execute(const void *input0, const void *input1, void *output, int size, bool is_opt) {
+int ArithmeticCPUKernel::DoExecute(const void *input0, const void *input1, void *output, int size, bool is_opt) {
   int ret = RET_OK;
   if (in_tensors_[0]->data_type() == kNumberTypeFloat32) {
     if (is_opt) {
@@ -374,9 +374,9 @@
     batch_b_ptr_ = static_cast<uint8_t *>(input1_ptr_) + b_offset_[i] * b_stride_size_ * data_type_len_;
     batch_c_ptr_ = static_cast<uint8_t *>(output_ptr_) + i * c_stride_size_ * data_type_len_;
     if (batch_scalar_) {
-      ret = Execute(batch_a_ptr_, batch_b_ptr_, batch_c_ptr_, c_stride_size_, true);
+      ret = DoExecute(batch_a_ptr_, batch_b_ptr_, batch_c_ptr_, c_stride_size_, true);
     } else {
-      ret = Execute(batch_a_ptr_, batch_b_ptr_, batch_c_ptr_, c_stride_size_, false);
+      ret = DoExecute(batch_a_ptr_, batch_b_ptr_, batch_c_ptr_, c_stride_size_, false);
     }
     if (ret != RET_OK) {
       MS_LOG(ERROR) << "failed to calculate.";
@@ -402,12 +402,12 @@ int ArithmeticCPUKernel::DoArithmetic(int task_id) {
   int offset = stride * task_id * data_type_len_;
   if (scalar_) {
     if (param_->in_elements_num0_ == 1) {
-      ret = Execute(batch_a_ptr_, batch_b_ptr_ + offset, batch_c_ptr_ + offset, count, true);
+      ret = DoExecute(batch_a_ptr_, batch_b_ptr_ + offset, batch_c_ptr_ + offset, count, true);
     } else {
-      ret = Execute(batch_a_ptr_ + offset, batch_b_ptr_, batch_c_ptr_ + offset, count, true);
+      ret = DoExecute(batch_a_ptr_ + offset, batch_b_ptr_, batch_c_ptr_ + offset, count, true);
     }
   } else {
-    ret = Execute(batch_a_ptr_ + offset, batch_b_ptr_ + offset, batch_c_ptr_ + offset, count, false);
+    ret = DoExecute(batch_a_ptr_ + offset, batch_b_ptr_ + offset, batch_c_ptr_ + offset, count, false);
   }
   return ret;
 }
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_fp32.h
index 964eee6b3f8..eafd3564ee4 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_fp32.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_fp32.h
@@ -82,7 +82,7 @@ class ArithmeticCPUKernel : public InnerKernel {
   virtual int ConstTensorBroadCast();
   virtual void TileConstTensor(const void *in_data, void *out_data, size_t ndim, const int *in_shape,
                                const int *in_strides, const int *out_strides, const int *multiple);
-  virtual int Execute(const void *input0, const void *input1, void *output, int size, bool is_opt);
+  virtual int DoExecute(const void *input0, const void *input1, void *output, int size, bool is_opt);
   virtual bool IsBatchScalarCalc();
   virtual bool IsScalarClac();
   virtual int CalcArithmeticByBatch(int task_id);
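The CalcArithmeticByBatch and DoArithmetic hunks above compute their per-batch and per-thread pointers in bytes: the type-erased buffers are cast to uint8_t * and offsets are scaled by the element width (data_type_len_ here, sizeof(uint8_t) in the compare kernel, whose outputs are bool-sized). A self-contained illustration of that arithmetic; the helper name is mine, not the kernel's:

#include <cassert>
#include <cstdint>

// Byte-addressed offset into a type-erased buffer, as in batch_a_ptr_/
// batch_b_ptr_/batch_c_ptr_: index * stride elements of elem_size bytes each.
static void *OffsetPtr(void *base, int index, int stride, int elem_size) {
  return static_cast<uint8_t *>(base) + index * stride * elem_size;
}

int main() {
  float buf[8] = {};
  // The second "batch" of 4 floats starts 1 * 4 * sizeof(float) bytes in.
  assert(OffsetPtr(buf, 1, 4, sizeof(float)) == buf + 4);
  return 0;
}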
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_3x3_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_3x3_fp32.cc
index 142bf9b1198..c9fceb5c611 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_3x3_fp32.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_3x3_fp32.cc
@@ -56,7 +56,7 @@ int ConvolutionDepthwise3x3CPUKernel::ReSize() {
   return RET_OK;
 }

-int ConvolutionDepthwise3x3CPUKernel::Execute(int task_id) {
+int ConvolutionDepthwise3x3CPUKernel::DoExecute(int task_id) {
   int units = UP_DIV(conv_param_->output_w_, C2NUM);  // F(2, 3) contains 2 conv units
   int c4 = UP_ROUND(conv_param_->input_channel_, C4NUM);
   auto buffer = buffer_ + C12NUM * c4 * units * task_id;
@@ -74,7 +74,7 @@ int ConvolutionDepthwise3x3CPUKernel::Execute(int task_id) {

 int ConvDw3x3Run(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   auto conv_dw = reinterpret_cast<ConvolutionDepthwise3x3CPUKernel *>(cdata);
-  auto ret = conv_dw->Execute(task_id);
+  auto ret = conv_dw->DoExecute(task_id);
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "ConvolutionDepthwise3x3Run error task_id[" << task_id << "] error_code[" << ret << "]";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_3x3_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_3x3_fp32.h
index 9e79bdc52b8..40fb17ef4e3 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_3x3_fp32.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_3x3_fp32.h
@@ -36,7 +36,7 @@ class ConvolutionDepthwise3x3CPUKernel : public ConvolutionBaseCPUKernel {
   int ReSize() override;
   int Run() override;

-  int Execute(int task_id);
+  int DoExecute(int task_id);

  private:
   int MallocWeightBiasData() override;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32.cc
index 0a33f002e33..5c8c98527b4 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32.cc
@@ -62,7 +62,7 @@ int ConvolutionDepthwiseCPUKernel::ReSize() {
   return RET_OK;
 }

-int ConvolutionDepthwiseCPUKernel::Execute(int task_id) {
+int ConvolutionDepthwiseCPUKernel::DoExecute(int task_id) {
   auto ret = ConvDw(output_ptr_, input_ptr_, reinterpret_cast<float *>(packed_weight_),
                     reinterpret_cast<float *>(bias_data_), conv_param_, task_id);
   return ret;
@@ -70,7 +70,7 @@ int ConvolutionDepthwiseCPUKernel::Execute(int task_id) {

 int ConvDwRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   auto conv_dw = reinterpret_cast<ConvolutionDepthwiseCPUKernel *>(cdata);
-  auto ret = conv_dw->Execute(task_id);
+  auto ret = conv_dw->DoExecute(task_id);
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "ConvolutionDepthwiseRun error task_id[" << task_id << "] error_code[" << ret << "]";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32.h
index 59acac85120..3e4092063c5 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32.h
@@ -36,7 +36,7 @@ class ConvolutionDepthwiseCPUKernel : public ConvolutionBaseCPUKernel {
   int ReSize() override;
   int Run() override;

-  int Execute(int task_id);
+  int DoExecute(int task_id);

  private:
   int MallocWeightBiasData() override;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_indirect_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_indirect_fp32.cc
index add7cf2eb2d..91e96b15d07 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_indirect_fp32.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_indirect_fp32.cc
@@ -101,7 +101,7 @@ int ConvolutionDepthwiseIndirectCPUKernel::ReSize() {
   return RET_OK;
 }

-int ConvolutionDepthwiseIndirectCPUKernel::Execute(int task_id) {
+int ConvolutionDepthwiseIndirectCPUKernel::DoExecute(int task_id) {
   ConvDwIndirection(output_ptr_, indirect_buffer_, reinterpret_cast<float *>(packed_weight_),
                     reinterpret_cast<float *>(bias_data_), zero_ptr_, conv_param_, task_id);
   return RET_OK;
@@ -109,7 +109,7 @@ int ConvolutionDepthwiseIndirectCPUKernel::Execute(int task_id) {

 int ConvDwIndirectRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   auto conv_dw = reinterpret_cast<ConvolutionDepthwiseIndirectCPUKernel *>(cdata);
-  auto ret = conv_dw->Execute(task_id);
+  auto ret = conv_dw->DoExecute(task_id);
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "ConvolutionDepthwiseIndirectRun error task_id[" << task_id << "] error_code[" << ret << "]";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_indirect_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_indirect_fp32.h
index a92eda69e7f..ecccdd47eb0 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_indirect_fp32.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_indirect_fp32.h
@@ -35,7 +35,7 @@ class ConvolutionDepthwiseIndirectCPUKernel : public ConvolutionBaseCPUKernel {
   int ReSize() override;
   int Run() override;

-  int Execute(int task_id);
+  int DoExecute(int task_id);

  private:
   int MallocIndirectBuffer();
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_slidewindow_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_slidewindow_fp32.cc
index fe00545dadf..c66cfeb92bf 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_slidewindow_fp32.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_slidewindow_fp32.cc
@@ -92,7 +92,7 @@ int ConvolutionDepthwiseSWCPUKernel::ReSize() {
   return RET_OK;
 }

-int ConvolutionDepthwiseSWCPUKernel::Execute(int task_id) {
+int ConvolutionDepthwiseSWCPUKernel::DoExecute(int task_id) {
   ConvDwSWFp32(packed_output_, packed_input_, reinterpret_cast<float *>(packed_weight_),
                reinterpret_cast<float *>(bias_data_), conv_param_, sliding_, task_id);
   return RET_OK;
@@ -100,7 +100,7 @@ int ConvolutionDepthwiseSWCPUKernel::Execute(int task_id) {

 int ConvDwSWRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   auto conv_dw = reinterpret_cast<ConvolutionDepthwiseSWCPUKernel *>(cdata);
-  auto ret = conv_dw->Execute(task_id);
+  auto ret = conv_dw->DoExecute(task_id);
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "ConvolutionDepthwiseSWRun error task_id[" << task_id << "] error_code[" << ret << "]";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_slidewindow_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_slidewindow_fp32.h
index 1fabaa102cb..4c62e8cf83a 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_slidewindow_fp32.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_slidewindow_fp32.h
@@ -35,7 +35,7 @@ class ConvolutionDepthwiseSWCPUKernel : public ConvolutionBaseCPUKernel {
   int ReSize() override;
   int Run() override;

-  int Execute(int task_id);
+  int DoExecute(int task_id);

  private:
   int InitPackedInputOutput();
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise_fp32.cc
index 5a9ab1f3e56..260adf91c9b 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise_fp32.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise_fp32.cc
@@ -115,7 +115,7 @@ int DeconvolutionDepthwiseCPUKernel::ReSize() {
   return RET_OK;
 }

-int DeconvolutionDepthwiseCPUKernel::Execute(int task_id) {
+int DeconvolutionDepthwiseCPUKernel::DoExecute(int task_id) {
   DeconvDwSWFp32(packed_output_, packed_input_, reinterpret_cast<float *>(packed_weight_),
                  reinterpret_cast<float *>(bias_data_), conv_param_, sliding_, task_id);
   return RET_OK;
@@ -123,7 +123,7 @@ int DeconvolutionDepthwiseCPUKernel::Execute(int task_id) {

 int DeconvDwRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   auto deconv_dw = reinterpret_cast<DeconvolutionDepthwiseCPUKernel *>(cdata);
-  auto ret = deconv_dw->Execute(task_id);
+  auto ret = deconv_dw->DoExecute(task_id);
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "DeconvolutionDepthwiseRun error task_id[" << task_id << "] error_code[" << ret << "]";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise_fp32.h
index 8c2e76ba353..015226cfb34 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise_fp32.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise_fp32.h
@@ -35,7 +35,7 @@ class DeconvolutionDepthwiseCPUKernel : public ConvolutionBaseCPUKernel {
   int InitSlideParam();
   int ReSize() override;
   int Run() override;
-  int Execute(int task_id);
+  int DoExecute(int task_id);

  private:
   int InitPackedInputOutput();
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/adam.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/adam.cc
index 6b1194ae7de..f4a7f235d64 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/adam.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/adam.cc
@@ -71,7 +71,7 @@ static int DoAdam(float *m, float *v, const float *gradient, float *weight, floa
   return RET_OK;
 }

-int AdamCPUKernel::Execute(int task_id) {
+int AdamCPUKernel::DoExecute(int task_id) {
   CHECK_LESS_RETURN(in_tensors_.size(), DIMENSION_10D);
   auto weight = reinterpret_cast<float *>(in_tensors_.at(kWeightIdx)->MutableData());
   auto m = reinterpret_cast<float *>(in_tensors_.at(kMomentVector1stIdx)->MutableData());
@@ -107,7 +107,7 @@ int AdamRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   } else if (adam_kernel->get_optimizer_mode() == WeightUpdateMode::ACCUMULATE_GRADS) {
     error_code = adam_kernel->ExecuteVirtualBatch(task_id);
   } else {
-    error_code = adam_kernel->Execute(task_id);
+    error_code = adam_kernel->DoExecute(task_id);
   }

   if (error_code != RET_OK) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/adam.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/adam.h
index 707c2d595bd..bbd0c073471 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/adam.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/adam.h
@@ -38,7 +38,7 @@ class AdamCPUKernel : public OptimizerKernel {
   int Prepare() override;
   int ReSize() override;
   int Run() override;
-  int Execute(int task_id);
+  int DoExecute(int task_id);
   int OptimizerStep() override;
   std::vector<int> GetOptimizerParamsIdxs() const override;
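The optimizer kernels (Adam above, ApplyMomentum below) differ from the plain kernels: their run callbacks first check the weight-update mode, and only the default branch reaches the renamed method, which is why a single line of each dispatch changes. A compilable sketch of that dispatch; the kernel type is illustrative, while the enum and helper names follow the hunks:

enum class WeightUpdateMode { NORMAL, VIRTUAL_BATCH, ACCUMULATE_GRADS };

struct OptimizerLikeKernel {  // stands in for AdamCPUKernel / ApplyMomentumCPUKernel
  WeightUpdateMode mode = WeightUpdateMode::NORMAL;
  WeightUpdateMode get_optimizer_mode() const { return mode; }
  int ExecuteVirtualBatch(int /*task_id*/) { return 0; }  // gradient-accumulation path
  int DoExecute(int /*task_id*/) { return 0; }            // the renamed per-task path
};

int OptimizerRun(void *cdata, int task_id, float /*lhs_scale*/, float /*rhs_scale*/) {
  auto *kernel = static_cast<OptimizerLikeKernel *>(cdata);
  if (kernel->get_optimizer_mode() == WeightUpdateMode::ACCUMULATE_GRADS) {
    return kernel->ExecuteVirtualBatch(task_id);
  }
  return kernel->DoExecute(task_id);  // only this call changed in the patch
}

int main() {
  OptimizerLikeKernel kernel;
  return OptimizerRun(&kernel, 0, 0.0f, 0.0f);
}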
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/apply_momentum.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/apply_momentum.cc
index b2e9f0e2d4a..829a5949fc1 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/apply_momentum.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/apply_momentum.cc
@@ -45,7 +45,7 @@ static int DoApplyMomentum(float *weight, float *accumulate, float learning_rate
   return RET_OK;
 }

-int ApplyMomentumCPUKernel::Execute(int task_id) {
+int ApplyMomentumCPUKernel::DoExecute(int task_id) {
   CHECK_LESS_RETURN(in_tensors_.size(), DIMENSION_5D);
   auto weight = reinterpret_cast<float *>(in_tensors_.at(FIRST_INPUT)->data());
   CHECK_NULL_RETURN(weight);
@@ -79,7 +79,7 @@ int ApplyMomentumRun(void *cdata, int task_id, float lhs_scale, float rhs_scale)
   } else if (applyMomentum_kernel->get_optimizer_mode() == WeightUpdateMode::ACCUMULATE_GRADS) {
     error_code = applyMomentum_kernel->ExecuteVirtualBatch(task_id);
   } else {
-    error_code = applyMomentum_kernel->Execute(task_id);
+    error_code = applyMomentum_kernel->DoExecute(task_id);
   }
   if (error_code != RET_OK) {
     MS_LOG(ERROR) << "apply Momentum run error task_id[" << task_id << "] error_code[" << error_code << "]";
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/apply_momentum.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/apply_momentum.h
index c5f93b474e7..a231036f1fc 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/apply_momentum.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/apply_momentum.h
@@ -39,7 +39,7 @@ class ApplyMomentumCPUKernel : public OptimizerKernel {
   }
   int Prepare() override;
   int ReSize() override;
-  int Execute(int task_id);
+  int DoExecute(int task_id);
   int Run() override;
   int OptimizerStep() override;
   std::vector<int> GetOptimizerParamsIdxs() const override;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.cc
index 2ba07e5645b..6ffd7897cd8 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.cc
@@ -247,7 +247,7 @@ int ArithmeticGradCPUKernel::ArithmeticGradMinimum(float *dy, int dy_size, float

 int ArithmeticGradCPUKernel::ReSize() { return RET_OK; }

-int ArithmeticGradCPUKernel::Execute(int task_id) {
+int ArithmeticGradCPUKernel::DoExecute(int task_id) {
   auto dy = reinterpret_cast<float *>(in_tensors_[kDyIdx]->MutableData());
   auto dx1 = reinterpret_cast<float *>(out_tensors_[kDx1Idx]->MutableData());
   auto dx2 = reinterpret_cast<float *>(out_tensors_[kDx2Idx]->MutableData());
@@ -265,7 +265,7 @@ int ArithmeticGradCPUKernel::Execute(int task_id) {
 int ArithmeticGradRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   auto Arithmetic_kernel = reinterpret_cast<ArithmeticGradCPUKernel *>(cdata);
   CHECK_NULL_RETURN(Arithmetic_kernel);
-  auto error_code = Arithmetic_kernel->Execute(task_id);
+  auto error_code = Arithmetic_kernel->DoExecute(task_id);
   if (error_code != RET_OK) {
     MS_LOG(ERROR) << "ArithmeticGradRun error task_id[" << task_id << "] error_code[" << error_code << "]";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.h
index 5036a202003..34cd44ca2ea 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.h
@@ -79,7 +79,7 @@ class ArithmeticGradCPUKernel : public InnerKernel {
   int InferShape();
   int ReSize() override;
   int Run() override;
-  int Execute(int task_id);
+  int DoExecute(int task_id);

  private:
   int ArithmeticGradAdd(float *dy, int dy_size, float *dx1, int dx1_size, float *dx2, int dx2_size);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/assign.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/assign.cc
index 65d5aefda7b..859949cb114 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/assign.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/assign.cc
@@ -29,7 +29,7 @@ using mindspore::schema::PrimitiveType_Assign;
 namespace mindspore::kernel {
 int AssignCPUKernel::ReSize() { return RET_OK; }

-int AssignCPUKernel::Execute(int task_id) {
+int AssignCPUKernel::DoExecute(int task_id) {
   auto x = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
   CHECK_NULL_RETURN(x);
   auto y = reinterpret_cast<float *>(in_tensors_.at(1)->MutableData());
@@ -49,7 +49,7 @@ int AssignCPUKernel::Execute(int task_id) {
 int AssignRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   CHECK_NULL_RETURN(cdata);
   auto Assign_kernel = reinterpret_cast<AssignCPUKernel *>(cdata);
-  auto error_code = Assign_kernel->Execute(task_id);
+  auto error_code = Assign_kernel->DoExecute(task_id);
   if (error_code != RET_OK) {
     MS_LOG(ERROR) << "assign run error task_id[" << task_id << "] error_code[" << error_code << "]";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/assign.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/assign.h
index 030ec537fb3..77fc8161597 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/assign.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/assign.h
@@ -31,7 +31,7 @@ class AssignCPUKernel : public InnerKernel {
   int Prepare() override;
   int ReSize() override;
   int Run() override;
-  int Execute(int task_id);
+  int DoExecute(int task_id);

  protected:
   int thread_count_ = 1;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.cc
index 98dbb664590..f2dcb63b315 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.cc
@@ -54,7 +54,7 @@ int BiasGradCPUKernel::Prepare() {
   return ReSize();
 }

-int BiasGradCPUKernel::Execute(int task_id) {
+int BiasGradCPUKernel::DoExecute(int task_id) {
   auto in = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
   auto out = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
   CHECK_NULL_RETURN(in);
@@ -79,7 +79,7 @@ int BiasGradCPUKernel::Execute(int task_id) {
 int BiasGradRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   CHECK_NULL_RETURN(cdata);
   auto bias_kernel = reinterpret_cast<BiasGradCPUKernel *>(cdata);
-  auto error_code = bias_kernel->Execute(task_id);
+  auto error_code = bias_kernel->DoExecute(task_id);
   if (error_code != RET_OK) {
     MS_LOG(ERROR) << "bias error task_id[" << task_id << "] error_code[" << error_code << "]";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.h
index 92b4cb81372..8c5bad72951 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.h
@@ -34,7 +34,7 @@ class BiasGradCPUKernel : public InnerKernel {
   int Prepare() override;
   int ReSize() override;
   int Run() override;
-  int Execute(int task_id);
+  int DoExecute(int task_id);

  private:
   ArithmeticParameter *bias_param;
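These grad kernels guard every pointer with CHECK_NULL_RETURN (and tensor counts with CHECK_LESS_RETURN) before touching data, so the run callbacks surface an error code instead of crashing. Roughly, and this is an approximation rather than the exact nnacl definition, the guard expands to an early return:

// Approximate shape of the guard macro used throughout these kernels; the
// real definition and error codes (nnacl/op_base.h, include/errorcode.h)
// may differ.
#define RET_OK 0
#define RET_NULL_PTR (-3)  // illustrative value
#define CHECK_NULL_RETURN(ptr) \
  do {                         \
    if ((ptr) == nullptr) {    \
      return RET_NULL_PTR;     \
    }                          \
  } while (0)

int UseTensorData(const float *data) {
  CHECK_NULL_RETURN(data);  // early-return instead of dereferencing null
  return RET_OK;
}

int main() { return UseTensorData(nullptr) == RET_NULL_PTR ? 0 : 1; }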
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.cc
index 71696cec12d..98e3a4514dc 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.cc
@@ -57,7 +57,7 @@ int BNGradCPUKernel::Prepare() {
   return ReSize();
 }

-int BNGradCPUKernel::Execute(int task_id) {
+int BNGradCPUKernel::DoExecute(int task_id) {
   auto *input_yt = in_tensors_.at(0);
   auto *input_x = in_tensors_.at(1);
   auto *input_scale = in_tensors_.at(2);
@@ -150,7 +150,7 @@ int BNGradCPUKernel::Execute(int task_id) {
 int BNGradRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   CHECK_NULL_RETURN(cdata);
   auto bn_kernel = reinterpret_cast<BNGradCPUKernel *>(cdata);
-  auto error_code = bn_kernel->Execute(task_id);
+  auto error_code = bn_kernel->DoExecute(task_id);
   if (error_code != RET_OK) {
     MS_LOG(ERROR) << "BNGradRun error task_id[" << task_id << "] error_code[" << error_code << "]";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.h
index 7aa46d1a2be..232d5854cf0 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.h
@@ -31,7 +31,7 @@ class BNGradCPUKernel : public InnerKernel {
   int Prepare() override;
   int ReSize() override;
   int Run() override;
-  int Execute(int task_id);
+  int DoExecute(int task_id);

  private:
   int thread_num_ = 1;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution.cc
index 3bf70532c6b..d8561101c6b 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution.cc
@@ -72,7 +72,7 @@ int ConvolutionTrainCPUKernel::ReSize() {

 int ConvolutionTrainCPUKernel::Prepare() { return ReSize(); }

-int ConvolutionTrainCPUKernel::Execute(int task_id) {
+int ConvolutionTrainCPUKernel::DoExecute(int task_id) {
   auto conv_param_ = reinterpret_cast<ConvParameter *>(op_parameter_);
   auto *input_x = in_tensors_.at(kInputIndex);
   auto *input_w = in_tensors_.at(kWeightIndex);
@@ -154,7 +154,7 @@ int ConvolutionTrainCPUKernel::Execute(int task_id) {
 int ConvolutionTrainRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   MS_ASSERT(cdata != nullptr);
   auto conv_kernel = reinterpret_cast<ConvolutionTrainCPUKernel *>(cdata);
-  auto error_code = conv_kernel->Execute(task_id);
+  auto error_code = conv_kernel->DoExecute(task_id);
   if (error_code != RET_OK) {
     MS_LOG(ERROR) << "ConvolutionTrainRun error task_id[" << task_id << "] error_code[" << error_code << "]";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution.h
index ac9779341e7..d97f123b6a6 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution.h
@@ -31,7 +31,7 @@ class ConvolutionTrainCPUKernel : public InnerKernel {
   int Prepare() override;
   int ReSize() override;
   int Run() override;
-  int Execute(int task_id);
+  int DoExecute(int task_id);

  private:
   int ws_size_ = 0;
@@ -187,7 +187,7 @@ int ConvolutionGradFilterCPUKernel::Execute(int task_id) {
 int ConvolutionGradFilterRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   auto convfilter_kernel = reinterpret_cast<ConvolutionGradFilterCPUKernel *>(cdata);
   CHECK_NULL_RETURN(convfilter_kernel);
-  auto error_code = convfilter_kernel->Execute(task_id);
+  auto error_code = convfilter_kernel->DoExecute(task_id);
   if (error_code != RET_OK) {
     MS_LOG(ERROR) << "ConvolutionGradFilterRun error task_id[" << task_id << "] error_code[" << error_code << "]";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.h
index fd97a888c14..91491e7d8b3 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.h
@@ -32,7 +32,7 @@ class ConvolutionGradFilterCPUKernel : public InnerKernel {
   int Prepare() override;
   int ReSize() override;
   int Run() override;
-  int Execute(int task_id);
+  int DoExecute(int task_id);
 
  private:
   size_t ws_size_ = 0;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.cc
index 1568dff8c92..0957e4ba5a4 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.cc
@@ -76,7 +76,7 @@ int ConvolutionGradInputCPUKernel::ReSize() {
 
 int ConvolutionGradInputCPUKernel::Prepare() { return ReSize(); }
 
-int ConvolutionGradInputCPUKernel::Execute(int task_id) {
+int ConvolutionGradInputCPUKernel::DoExecute(int task_id) {
   auto conv_param = reinterpret_cast<ConvParameter *>(op_parameter_);
   auto *input_dy = in_tensors_.at(0);
   auto *input_w = in_tensors_.at(1);
@@ -160,7 +160,7 @@ int ConvolutionGradInputCPUKernel::Execute(int task_id) {
 int ConvolutionGradInputRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   MS_ASSERT(cdata != nullptr);
   auto convinput_kernel = reinterpret_cast<ConvolutionGradInputCPUKernel *>(cdata);
-  auto error_code = convinput_kernel->Execute(task_id);
+  auto error_code = convinput_kernel->DoExecute(task_id);
   if (error_code != RET_OK) {
     MS_LOG(ERROR) << "conv input error task_id[" << task_id << "] error_code[" << error_code << "]";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.h
index 610a7b20cd4..5f81d79612d 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.h
@@ -31,7 +31,7 @@ class ConvolutionGradInputCPUKernel : public InnerKernel {
   int Prepare() override;
   int ReSize() override;
   int Run() override;
-  int Execute(int task_id);
+  int DoExecute(int task_id);
 
  private:
   size_t ws_size_ = 0;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_filter.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_filter.cc
index a7b52616939..46ff4ceb6b8 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_filter.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_filter.cc
@@ -70,7 +70,7 @@ int DeConvolutionGradFilterCPUKernel::Prepare() {
 
 int DeConvolutionGradFilterCPUKernel::ReSize() { return RET_OK; }
 
-int DeConvolutionGradFilterCPUKernel::Execute(int task_id) {
+int DeConvolutionGradFilterCPUKernel::DoExecute(int task_id) {
   auto conv_param = reinterpret_cast<ConvParameter *>(op_parameter_);
   auto *input_dy = in_tensors_.at(0);
   auto *input_x = in_tensors_.at(1);
@@ -124,7 +124,7 @@ int DeConvolutionGradFilterCPUKernel::Execute(int task_id) {
 int DeConvolutionGradFilterRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   CHECK_NULL_RETURN(cdata);
   auto convfilter_kernel = reinterpret_cast<DeConvolutionGradFilterCPUKernel *>(cdata);
-  auto error_code = convfilter_kernel->Execute(task_id);
+  auto error_code = convfilter_kernel->DoExecute(task_id);
   if (error_code != RET_OK) {
     MS_LOG(ERROR) << "DeConvolutionGradFilterRun error task_id[" << task_id << "] error_code[" << error_code << "]";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_filter.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_filter.h
index c0f0498fa55..002439d7290 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_filter.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_filter.h
@@ -31,7 +31,7 @@ class DeConvolutionGradFilterCPUKernel : public InnerKernel {
   int Prepare() override;
   int ReSize() override;
   int Run() override;
-  int Execute(int task_id);
+  int DoExecute(int task_id);
 
  private:
   size_t ws_size = 0;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout.cc
index f0696ce1af5..42e3638aa37 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout.cc
@@ -55,7 +55,7 @@ int DropoutCPUKernel::Prepare() {
 
 int DropoutCPUKernel::ReSize() { return RET_OK; }
 
-int DropoutCPUKernel::Execute(int task_id) {
+int DropoutCPUKernel::DoExecute(int task_id) {
   auto input_ptr = reinterpret_cast<float *>(in_tensors_.at(kInputIndex)->MutableData());
   auto output_ptr = reinterpret_cast<float *>(out_tensors_.at(kOutputIndex)->MutableData());
   auto mask = reinterpret_cast<float *>(out_tensors_.at(1)->MutableData());
@@ -90,7 +90,7 @@ int DropoutCPUKernel::Execute(int task_id) {
 
 int RunDropout(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   auto dropout = reinterpret_cast<DropoutCPUKernel *>(cdata);
-  auto error_code = dropout->Execute(task_id);
+  auto error_code = dropout->DoExecute(task_id);
   if (error_code != RET_OK) {
     MS_LOG(ERROR) << "Dropout Run error task_id[" << task_id << "] error_code[" << error_code << "]";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout.h
index 79cf4546aec..c6d149f2da9 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout.h
@@ -31,7 +31,7 @@ class DropoutCPUKernel : public InnerKernel {
   int Prepare() override;
   int ReSize() override;
   int Run() override;
-  int Execute(int task_id);
+  int DoExecute(int task_id);
 
  private:
   float scale_ = 1.0;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout_grad.cc
index f8af58a737a..4f40397679b 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout_grad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout_grad.cc
@@ -65,7 +65,7 @@ int DropoutGradCPUKernel::Prepare() {
 
 int DropoutGradCPUKernel::ReSize() { return RET_OK; }
 
-int DropoutGradCPUKernel::Execute(int task_id) {
+int DropoutGradCPUKernel::DoExecute(int task_id) {
   auto yt_ptr = reinterpret_cast<float *>(in_tensors_.at(kInputIdx)->MutableData());
   auto mask_ptr = reinterpret_cast<float *>(in_tensors_.at(kMaskIdx)->MutableData());
   auto output_ptr = reinterpret_cast<float *>(out_tensors_.at(kOutputIdx)->MutableData());
@@ -85,7 +85,7 @@ int DropoutGradCPUKernel::Execute(int task_id) {
 int RunDropoutGrad(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   auto dropout = reinterpret_cast<DropoutGradCPUKernel *>(cdata);
   CHECK_NULL_RETURN(dropout);
-  auto error_code = dropout->Execute(task_id);
+  auto error_code = dropout->DoExecute(task_id);
   if (error_code != RET_OK) {
     MS_LOG(ERROR) << "Dropout Grad Run error task_id[" << task_id << "] error_code[" << error_code << "]";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout_grad.h
index 14d459ea8cc..5e8d3bcfbf5 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout_grad.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout_grad.h
@@ -31,7 +31,7 @@ class DropoutGradCPUKernel : public InnerKernel {
   int Prepare() override;
   int ReSize() override;
   int Run() override;
-  int Execute(int task_id);
+  int DoExecute(int task_id);
 
  private:
   float scale_ = 1.0f;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/layernorm_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/layernorm_grad.cc
index 76fe7fd1d1d..074e3605913 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/layernorm_grad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/layernorm_grad.cc
@@ -74,7 +74,7 @@ int LayerNormGradCPUKernel::Prepare() {
   return RET_OK;
 }
 
-int LayerNormGradCPUKernel::Execute(int task_id) {
+int LayerNormGradCPUKernel::DoExecute(int task_id) {
   auto input_x = in_tensors_.at(0);
   auto input_dy = in_tensors_.at(1);
   auto input_var = in_tensors_.at(2);
@@ -111,7 +111,7 @@ int LayerNormGradCPUKernel::Execute(int task_id) {
 int LayerNormGradRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   CHECK_NULL_RETURN(cdata);
   auto ln_kernel = reinterpret_cast<LayerNormGradCPUKernel *>(cdata);
-  auto error_code = ln_kernel->Execute(task_id);
+  auto error_code = ln_kernel->DoExecute(task_id);
   if (error_code != RET_OK) {
     MS_LOG(ERROR) << "LayerNormGradRun error task_id[" << task_id << "] error_code[" << error_code << "]";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/layernorm_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/layernorm_grad.h
index 835454ad195..9bab1747562 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/layernorm_grad.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/layernorm_grad.h
@@ -31,7 +31,7 @@ class LayerNormGradCPUKernel : public InnerKernel {
   int Prepare() override;
   int ReSize() override;
   int Run() override;
-  int Execute(int task_id);
+  int DoExecute(int task_id);
 
  private:
   int block_num_ = 1;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.cc
index 5dd1b29ed75..0c8df6fd1c8 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.cc
@@ -73,7 +73,7 @@ int PoolingGradCPUKernel::ReSize() {
 
 int PoolingGradCPUKernel::Prepare() { return ReSize(); }
 
-int PoolingGradCPUKernel::Execute(int task_id) {
+int PoolingGradCPUKernel::DoExecute(int task_id) {
   PoolingParameter *pool_param = reinterpret_cast<PoolingParameter *>(op_parameter_);
   auto input_ptr = reinterpret_cast<float *>(in_tensors_.at(0)->data());
   CHECK_NULL_RETURN(input_ptr);
@@ -103,7 +103,7 @@ int PoolingGradCPUKernel::Execute(int task_id) {
 int PoolingGradImpl(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   CHECK_NULL_RETURN(cdata);
   auto pooling = reinterpret_cast<PoolingGradCPUKernel *>(cdata);
-  auto error_code = pooling->Execute(task_id);
+  auto error_code = pooling->DoExecute(task_id);
   if (error_code != RET_OK) {
     MS_LOG(ERROR) << "Pooling Run error task_id[" << task_id << "] error_code[" << error_code << "]";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.h
index d1f63e10349..2b34e012b1a 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.h
@@ -36,7 +36,7 @@ class PoolingGradCPUKernel : public InnerKernel {
   int Prepare() override;
   int ReSize() override;
   int Run() override;
-  int Execute(int task_id);
+  int DoExecute(int task_id);
 
  private:
   int thread_num_ = 1;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.cc
index 9ac42971ca9..043e1504996 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.cc
@@ -44,7 +44,7 @@ int PowerGradCPUKernel::Prepare() {
 
 int PowerGradCPUKernel::ReSize() { return RET_OK; }
 
-int PowerGradCPUKernel::Execute(int task_id) {
+int PowerGradCPUKernel::DoExecute(int task_id) {
   auto dy_addr = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
   auto x_addr = reinterpret_cast<float *>(in_tensors_.at(1)->MutableData());
   auto dx_addr = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
@@ -73,7 +73,7 @@ int PowerGradCPUKernel::Execute(int task_id) {
 int PowerGradRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   auto power_kernel = reinterpret_cast<PowerGradCPUKernel *>(cdata);
   CHECK_NULL_RETURN(power_kernel);
-  auto error_code = power_kernel->Execute(task_id);
+  auto error_code = power_kernel->DoExecute(task_id);
   if (error_code != RET_OK) {
     MS_LOG(ERROR) << "power grad error task_id[" << task_id << "] error_code[" << error_code << "]";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.h
index 70601b23a7f..a014573bd7f 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.h
@@ -38,7 +38,7 @@ class PowerGradCPUKernel : public InnerKernel {
   int Prepare() override;
   int ReSize() override;
   int Run() override;
-  int Execute(int task_id);
+  int DoExecute(int task_id);
 
  private:
   int thread_count_;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/resize_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/resize_grad.cc
index d68333ff12e..2c01d11132e 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/resize_grad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/resize_grad.cc
@@ -64,7 +64,7 @@ int ResizeGradCPUKernel::Prepare() {
   return ReSize();
 }
 
-int ResizeGradCPUKernel::Execute(int task_id) {
+int ResizeGradCPUKernel::DoExecute(int task_id) {
   auto in_addr = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
   auto out_addr = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
   auto param = reinterpret_cast<ResizeGradParameter *>(op_parameter_);
@@ -89,7 +89,7 @@ int ResizeGradCPUKernel::Execute(int task_id) {
 int ResizeGradRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   auto resize_grad_kernel = reinterpret_cast<ResizeGradCPUKernel *>(cdata);
   CHECK_NULL_RETURN(resize_grad_kernel);
-  auto error_code = resize_grad_kernel->Execute(task_id);
+  auto error_code = resize_grad_kernel->DoExecute(task_id);
   if (error_code != RET_OK) {
     MS_LOG(ERROR) << "resize grad error task_id[" << task_id << "] error_code[" << error_code << "]";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/resize_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/resize_grad.h
index ba0fad8aacb..702cb8514b0 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/resize_grad.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/resize_grad.h
@@ -31,7 +31,7 @@ class ResizeGradCPUKernel : public InnerKernel {
   int ReSize() override;
   int Run() override;
   int ExecuteInit(int task_id);
-  int Execute(int task_id);
+  int DoExecute(int task_id);
 };
 } // namespace mindspore::kernel
 
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc
index d83bfc6f391..8cd0fdc4c2d 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc
@@ -68,7 +68,7 @@ int DoSgdInit(float *weight, float *accumulate, float *gradient, float *stat, fl
   return RET_OK;
 }
 
-int SgdCPUKernel::Execute(int task_id) {
+int SgdCPUKernel::DoExecute(int task_id) {
   auto weight = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
   CHECK_NULL_RETURN(weight);
   auto accumulate = reinterpret_cast<float *>(in_tensors_.at(3)->MutableData());
@@ -127,7 +127,7 @@ int SgdRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   } else if (sgd_kernel->get_optimizer_mode() == WeightUpdateMode::ACCUMULATE_GRADS) {
     error_code = sgd_kernel->ExecuteVirtualBatch(task_id);
   } else {
-    error_code = sgd_kernel->Execute(task_id);
+    error_code = sgd_kernel->DoExecute(task_id);
   }
   if (error_code != RET_OK) {
     MS_LOG(ERROR) << "SGD run error task_id[" << task_id << "] error_code[" << error_code << "]";
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.h
index f62c82b2efa..46aab2eed41 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.h
@@ -39,7 +39,7 @@ class SgdCPUKernel : public OptimizerKernel {
   int ReSize() override;
   int Run() override;
   int ExecuteInit(int task_id);
-  int Execute(int task_id);
+  int DoExecute(int task_id);
   int OptimizerStep() override;
   std::vector<int> GetOptimizerParamsIdxs() const override;
 
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sigmoid_cross_entropy_with_logits.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sigmoid_cross_entropy_with_logits.cc
index b4293276045..298017d4d7f 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sigmoid_cross_entropy_with_logits.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sigmoid_cross_entropy_with_logits.cc
@@ -34,7 +34,7 @@ int SigmoidCrossEntropyWithLogitsCPUKernel::ReSize() {
   return RET_OK;
 }
 
-int SigmoidCrossEntropyWithLogitsCPUKernel::Execute(int task_id) {
+int SigmoidCrossEntropyWithLogitsCPUKernel::DoExecute(int task_id) {
   auto logits = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
   CHECK_NULL_RETURN(logits);
   auto labels = reinterpret_cast<float *>(in_tensors_.at(1)->MutableData());
@@ -61,7 +61,7 @@ int SigmoidCrossEntropyWithLogitsCPUKernel::Execute(int task_id) {
 int SigmoidCrossEntropyWithLogitsRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   CHECK_NULL_RETURN(cdata);
   auto sig_crs_ent_kernel = reinterpret_cast<SigmoidCrossEntropyWithLogitsCPUKernel *>(cdata);
-  auto error_code = sig_crs_ent_kernel->Execute(task_id);
+  auto error_code = sig_crs_ent_kernel->DoExecute(task_id);
   if (error_code != RET_OK) {
     MS_LOG(ERROR) << "SigmoidCrossEntropyWithLogits error task_id[" << task_id << "] error_code[" << error_code << "]";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sigmoid_cross_entropy_with_logits.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sigmoid_cross_entropy_with_logits.h
index 505c292bd55..a4746254285 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sigmoid_cross_entropy_with_logits.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sigmoid_cross_entropy_with_logits.h
@@ -31,7 +31,7 @@ class SigmoidCrossEntropyWithLogitsCPUKernel : public InnerKernel {
   int Prepare() override;
   int ReSize() override;
   int Run() override;
-  int Execute(int task_id);
+  int DoExecute(int task_id);
 };
 } // namespace mindspore::kernel
 
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sigmoid_cross_entropy_with_logits_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sigmoid_cross_entropy_with_logits_grad.cc
index 822005ac81e..fb9caeefacb 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sigmoid_cross_entropy_with_logits_grad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sigmoid_cross_entropy_with_logits_grad.cc
@@ -35,7 +35,7 @@ int SigmoidCrossEntropyWithLogitsGradCPUKernel::ReSize() {
   return RET_OK;
 }
 
-int SigmoidCrossEntropyWithLogitsGradCPUKernel::Execute(int task_id) {
+int SigmoidCrossEntropyWithLogitsGradCPUKernel::DoExecute(int task_id) {
   auto logits = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
   CHECK_NULL_RETURN(logits);
   auto labels = reinterpret_cast<float *>(in_tensors_.at(1)->MutableData());
@@ -63,7 +63,7 @@ int SigmoidCrossEntropyWithLogitsGradRun(void *cdata, int task_id, float lhs_sca
   CHECK_NULL_RETURN(cdata);
   auto sig_crs_ent_kernel = reinterpret_cast<SigmoidCrossEntropyWithLogitsGradCPUKernel *>(cdata);
   CHECK_NULL_RETURN(sig_crs_ent_kernel);
-  auto error_code = sig_crs_ent_kernel->Execute(task_id);
+  auto error_code = sig_crs_ent_kernel->DoExecute(task_id);
   if (error_code != RET_OK) {
     MS_LOG(ERROR) << "SigmoidCrossEntropyWithLogitsGrad error task_id[" << task_id << "] error_code[" << error_code
                   << "]";
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sigmoid_cross_entropy_with_logits_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sigmoid_cross_entropy_with_logits_grad.h
index b5011545d1b..8e7ef0da8a7 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sigmoid_cross_entropy_with_logits_grad.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sigmoid_cross_entropy_with_logits_grad.h
@@ -31,7 +31,7 @@ class SigmoidCrossEntropyWithLogitsGradCPUKernel : public InnerKernel {
   int Prepare() override;
   int ReSize() override;
   int Run() override;
-  int Execute(int task_id);
+  int DoExecute(int task_id);
 };
 } // namespace mindspore::kernel
 
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/smooth_l1_loss.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/smooth_l1_loss.cc
index af6b77e8e8b..5b6e62236e6 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/smooth_l1_loss.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/smooth_l1_loss.cc
@@ -30,7 +30,7 @@ constexpr static int kOutputIdx = 0;
 
 int SmoothL1LossCPUKernel::ReSize() { return RET_OK; }
 
-int SmoothL1LossCPUKernel::Execute(size_t task_id) {
+int SmoothL1LossCPUKernel::DoExecute(size_t task_id) {
   SmoothL1LossParameter *smooth_l1_loss_param = reinterpret_cast<SmoothL1LossParameter *>(op_parameter_);
   CHECK_NULL_RETURN(smooth_l1_loss_param);
   auto predict = reinterpret_cast<float *>(in_tensors_.at(kPredictIdx)->MutableData());
@@ -69,7 +69,7 @@ int SmoothL1LossCPUKernel::Execute(size_t task_id) {
 int SmoothL1LossRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   auto smooth_l1_loss_kernel = reinterpret_cast<SmoothL1LossCPUKernel *>(cdata);
   CHECK_NULL_RETURN(smooth_l1_loss_kernel);
-  auto error_code = smooth_l1_loss_kernel->Execute(task_id);
+  auto error_code = smooth_l1_loss_kernel->DoExecute(task_id);
   if (error_code != RET_OK) {
     MS_LOG(ERROR) << "SmoothL1Loss error task_id[" << task_id << "] error_code[" << error_code << "]";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/smooth_l1_loss.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/smooth_l1_loss.h
index ae6da391d8c..12532b9ea58 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/smooth_l1_loss.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/smooth_l1_loss.h
@@ -33,7 +33,7 @@ class SmoothL1LossCPUKernel : public InnerKernel {
   int Prepare() override;
   int ReSize() override;
   int Run() override;
-  int Execute(size_t task_id);
+  int DoExecute(size_t task_id);
 
  private:
   SmoothL1LossParameter *smooth_l1_param_;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/smooth_l1_loss_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/smooth_l1_loss_grad.cc
index 595f64c9a56..27fba528475 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/smooth_l1_loss_grad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/smooth_l1_loss_grad.cc
@@ -41,7 +41,7 @@ int SmoothL1LossGradCPUKernel::ReSize() {
   return RET_OK;
 }
 
-int SmoothL1LossGradCPUKernel::Execute(int task_id) {
+int SmoothL1LossGradCPUKernel::DoExecute(int task_id) {
   SmoothL1LossParameter *smooth_l1_loss_param = reinterpret_cast<SmoothL1LossParameter *>(op_parameter_);
   auto predict = reinterpret_cast<float *>(in_tensors_.at(kPredictIdx)->MutableData());
   CHECK_NULL_RETURN(predict);
@@ -78,7 +78,7 @@ int SmoothL1LossGradCPUKernel::Execute(int task_id) {
 int SmoothL1LossGradRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   auto smooth_l1_loss_kernel = reinterpret_cast<SmoothL1LossGradCPUKernel *>(cdata);
   CHECK_NULL_RETURN(smooth_l1_loss_kernel);
-  auto error_code = smooth_l1_loss_kernel->Execute(task_id);
+  auto error_code = smooth_l1_loss_kernel->DoExecute(task_id);
   if (error_code != RET_OK) {
     MS_LOG(ERROR) << "SmoothL1LossGrad error task_id[" << task_id << "] error_code[" << error_code << "]";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/smooth_l1_loss_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/smooth_l1_loss_grad.h
index 6d4868a41ad..d260e13e7e4 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/smooth_l1_loss_grad.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/smooth_l1_loss_grad.h
@@ -33,7 +33,7 @@ class SmoothL1LossGradCPUKernel : public InnerKernel {
   int Prepare() override;
   int ReSize() override;
   int Run() override;
-  int Execute(int task_id);
+  int DoExecute(int task_id);
 
  private:
   SmoothL1LossParameter *smooth_l1_param_;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_cross_entropy_with_logits.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_cross_entropy_with_logits.cc
index 1ea783ed83a..42b1fda6283 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_cross_entropy_with_logits.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_cross_entropy_with_logits.cc
@@ -57,7 +57,7 @@ void SoftmaxCrossEntropyWithLogitsCPUKernel::ForwardPostExecute(const float *lab
   }
 }
 
-int SoftmaxCrossEntropyWithLogitsCPUKernel::Execute(int task_id) {
+int SoftmaxCrossEntropyWithLogitsCPUKernel::DoExecute(int task_id) {
   auto ins = reinterpret_cast<float *>(in_tensors_.at(0)->data());
   CHECK_NULL_RETURN(ins);
   auto labels = reinterpret_cast<float *>(in_tensors_.at(1)->data());
@@ -82,7 +82,7 @@ int SoftmaxCrossEntropyWithLogitsCPUKernel::Execute(int task_id) {
 int SoftmaxCrossEntropyWithLogitsRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   CHECK_NULL_RETURN(cdata);
   auto softmax_kernel = reinterpret_cast<SoftmaxCrossEntropyWithLogitsCPUKernel *>(cdata);
-  auto error_code = softmax_kernel->Execute(task_id);
+  auto error_code = softmax_kernel->DoExecute(task_id);
   if (error_code != RET_OK) {
     MS_LOG(ERROR) << "SoftmaxCrossEntropy error task_id[" << task_id << "] error_code[" << error_code << "]";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_cross_entropy_with_logits.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_cross_entropy_with_logits.h
index c99e61bbd7a..3f98448e9ae 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_cross_entropy_with_logits.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_cross_entropy_with_logits.h
@@ -40,7 +40,7 @@ class SoftmaxCrossEntropyWithLogitsCPUKernel : public LossKernel {
   int Prepare() override;
   int ReSize() override;
   int Run() override;
-  int Execute(int task_id);
+  int DoExecute(int task_id);
 
  private:
   SoftmaxCrossEntropyParameter *param_;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_grad.cc
index de62bc84e54..b6027c0b0de 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_grad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_grad.cc
@@ -57,7 +57,7 @@ int SoftmaxGradCPUKernel::Prepare() {
 
 int SoftmaxGradCPUKernel::ReSize() { return RET_OK; }
 
-int SoftmaxGradCPUKernel::Execute(int task_id) {
+int SoftmaxGradCPUKernel::DoExecute(int task_id) {
   auto input_ptr = reinterpret_cast<float *>(in_tensors_.at(kInputIndex)->MutableData());
   auto yt_ptr = reinterpret_cast<float *>(in_tensors_.at(1)->MutableData());
   auto output_ptr = reinterpret_cast<float *>(out_tensors_.at(kOutputIndex)->MutableData());
@@ -70,7 +70,7 @@ int SoftmaxGradCPUKernel::Execute(int task_id) {
 
 int SoftmaxGradRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   auto softmax_kernel = reinterpret_cast<SoftmaxGradCPUKernel *>(cdata);
-  auto error_code = softmax_kernel->Execute(task_id);
+  auto error_code = softmax_kernel->DoExecute(task_id);
   if (error_code != RET_OK) {
     MS_LOG(ERROR) << "softmax_kernel SoftmaxGradRun task_id[" << task_id << "] error_code[" << error_code << "]";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_grad.h
index ffaba9c8a22..0408125bf96 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_grad.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_grad.h
@@ -33,7 +33,7 @@ class SoftmaxGradCPUKernel : public InnerKernel {
   int Prepare() override;
   int ReSize() override;
   int Run() override;
-  int Execute(int task_id);
+  int DoExecute(int task_id);
 
  private:
   SoftmaxParameter *param;
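[review note] Every *Run free function touched in this patch shares one trampoline shape: the scheduler hands back an opaque void *cdata plus a task_id, and the callback restores the concrete kernel type before forwarding to DoExecute. A self-contained sketch of that dispatch pattern (FakeKernel and FakeKernelRun are illustrative stand-ins, not the MindSpore Lite API):

```cpp
#include <iostream>

constexpr int RET_OK = 0;
constexpr int RET_ERROR = 1;

// Stand-in for a CPU kernel whose per-task worker is named DoExecute.
struct FakeKernel {
  int DoExecute(int task_id) { return task_id >= 0 ? RET_OK : RET_ERROR; }
};

// Same signature shape as the callbacks in the diff: the thread pool only
// knows void *, so the trampoline casts back to the concrete kernel type.
int FakeKernelRun(void *cdata, int task_id, float /*lhs_scale*/, float /*rhs_scale*/) {
  if (cdata == nullptr) return RET_ERROR;  // mirrors CHECK_NULL_RETURN(cdata)
  auto *kernel = reinterpret_cast<FakeKernel *>(cdata);
  int error_code = kernel->DoExecute(task_id);
  if (error_code != RET_OK) {
    std::cerr << "run error task_id[" << task_id << "] error_code[" << error_code << "]\n";
    return RET_ERROR;
  }
  return RET_OK;
}

int main() {
  FakeKernel k;
  return FakeKernelRun(&k, 0, 1.0f, 1.0f);  // 0 on success
}
```

The null check before dispatch is the only guard the callback has: reinterpret_cast cannot validate the pointee, so cdata must be checked up front.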
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.cc
index b596f6053de..fc40c518fd1 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.cc
@@ -79,7 +79,7 @@ int SparseSoftmaxCrossEntropyWithLogitsCPUKernel::GradPostExecute(const int *lab
   return RET_OK;
 }
 
-int SparseSoftmaxCrossEntropyWithLogitsCPUKernel::Execute(int task_id) {
+int SparseSoftmaxCrossEntropyWithLogitsCPUKernel::DoExecute(int task_id) {
   auto sce_param = reinterpret_cast<SoftmaxCrossEntropyParameter *>(op_parameter_);
   auto ins = reinterpret_cast<float *>(in_tensors_.at(0)->data());
   CHECK_NULL_RETURN(ins);
@@ -118,7 +118,7 @@ int SparseSoftmaxCrossEntropyWithLogitsCPUKernel::Execute(int task_id) {
 int SparseSoftmaxCrossEntropyWithLogitsRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   CHECK_NULL_RETURN(cdata);
   auto sparse_kernel = reinterpret_cast<SparseSoftmaxCrossEntropyWithLogitsCPUKernel *>(cdata);
-  auto error_code = sparse_kernel->Execute(task_id);
+  auto error_code = sparse_kernel->DoExecute(task_id);
   if (error_code != RET_OK) {
     MS_LOG(ERROR) << "SparseSoftmaxCrossEntropyWithLogitsRun error task_id[" << task_id << "] error_code["
                   << error_code << "]";
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.h
index 3233634686e..ccd6841cc12 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.h
@@ -47,7 +47,7 @@ class SparseSoftmaxCrossEntropyWithLogitsCPUKernel : public LossKernel {
   int Prepare() override;
   int ReSize() override;
   int Run() override;
-  int Execute(int task_id);
+  int DoExecute(int task_id);
 
  private:
   SoftmaxCrossEntropyParameter *param;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/strided_slice_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/strided_slice_grad.cc
index 0d226a099ff..87028a030ae 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/strided_slice_grad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/strided_slice_grad.cc
@@ -117,7 +117,7 @@ int StridedSliceGradCPUKernel::ReSize() {
 int StridedSliceGradImpl(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   CHECK_NULL_RETURN(cdata);
   auto slice = reinterpret_cast<StridedSliceGradCPUKernel *>(cdata);
-  auto error_code = slice->Execute(task_id);
+  auto error_code = slice->DoExecute(task_id);
   if (error_code != RET_OK) {
     MS_LOG(ERROR) << "StridedSliceGrad Run error task_id[" << task_id << "] error_code[" << error_code << "]";
     return RET_ERROR;
@@ -134,7 +134,7 @@ int StridedSliceGradCPUKernel::Run() {
   return RET_OK;
 }
 
-int StridedSliceGradCPUKernel::Execute(int task_id) {
+int StridedSliceGradCPUKernel::DoExecute(int task_id) {
   auto input = in_tensors_.at(0);
   auto output = out_tensors_.at(0);
 
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/strided_slice_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/strided_slice_grad.h
index f472967a756..c76eaedfd08 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/strided_slice_grad.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/strided_slice_grad.h
@@ -34,7 +34,7 @@ class StridedSliceGradCPUKernel : public InnerKernel {
   int Prepare() override;
   int ReSize() override;
   int Run() override;
-  int Execute(int task_id);
+  int DoExecute(int task_id);
 
  private:
   void FillEmptyDims();
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/unsorted_segment_sum.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/unsorted_segment_sum.cc
index 5413873ac43..2a44cc1d73e 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/unsorted_segment_sum.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/unsorted_segment_sum.cc
@@ -62,7 +62,7 @@ int UnsortedSegmentSumCPUKernel::ReSize() { return RET_OK; }
 int UnsortedSegmentSumRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
   CHECK_NULL_RETURN(cdata);
   auto kernel = reinterpret_cast<UnsortedSegmentSumCPUKernel *>(cdata);
-  auto error_code = kernel->Execute(task_id);
+  auto error_code = kernel->DoExecute(task_id);
   if (error_code != RET_OK) {
     MS_LOG(ERROR) << "UnsortedSegmentSum Run error task_id[" << task_id << "] error_code[" << error_code << "]";
     return RET_ERROR;
@@ -79,7 +79,7 @@ int UnsortedSegmentSumCPUKernel::Run() {
   return RET_OK;
 }
 
-int UnsortedSegmentSumCPUKernel::Execute(int task_id) {
+int UnsortedSegmentSumCPUKernel::DoExecute(int task_id) {
   auto input_tensor = in_tensors_.at(0);
   auto indices_tensor = in_tensors_.at(1);
   auto output_tensor = out_tensors_.at(0);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/unsorted_segment_sum.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/unsorted_segment_sum.h
index 45377c06ad5..c31d6ffe4e2 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/unsorted_segment_sum.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/unsorted_segment_sum.h
@@ -31,7 +31,7 @@ class UnsortedSegmentSumCPUKernel : public InnerKernel {
   int Prepare() override;
   int ReSize() override;
   int Run() override;
-  int Execute(int task_id);
+  int DoExecute(int task_id);
 
  private:
   size_t unit_num_ = 0;
diff --git a/mindspore/lite/src/train/train_session.cc b/mindspore/lite/src/train/train_session.cc
index 193d0315dee..be225510b14 100644
--- a/mindspore/lite/src/train/train_session.cc
+++ b/mindspore/lite/src/train/train_session.cc
@@ -51,7 +51,7 @@ TrainSession::TrainSession() {
   InitCallBack();
 }
 
-int TrainSession::Init(InnerContext *context, const TrainCfg *train_cfg) {
+int TrainSession::TrainInit(InnerContext *context, const TrainCfg *train_cfg) {
   if (train_cfg != nullptr) {
     if (train_cfg->mix_precision_cfg_.loss_scale_ <= 0) {
       MS_LOG(ERROR) << "illegal loss scale configuration";
@@ -1271,7 +1271,7 @@ session::LiteSession *session::TrainSession::CreateTrainSession(const std::strin
   }
 
   auto *inner_context = new (std::nothrow) mindspore::lite::InnerContext(context);
-  auto ret = session->Init(inner_context, cfg);
+  auto ret = session->TrainInit(inner_context, cfg);
   if (ret != mindspore::lite::RET_OK) {
     MS_LOG(ERROR) << "init session failed";
     return nullptr;
diff --git a/mindspore/lite/src/train/train_session.h b/mindspore/lite/src/train/train_session.h
index 1880e29b403..b3c2bf82152 100644
--- a/mindspore/lite/src/train/train_session.h
+++ b/mindspore/lite/src/train/train_session.h
@@ -54,7 +54,7 @@ class TrainSession : virtual public lite::LiteSession {
   int CompileGraph(lite::Model *model) override;
   virtual int CompileTrainGraph(std::shared_ptr<Model> model);
 
-  virtual int Init(InnerContext *context, const TrainCfg *train_cfg);
+  virtual int TrainInit(InnerContext *context, const TrainCfg *train_cfg);
   int Train() override;
   int Eval() override;
 
diff --git a/mindspore/lite/src/train/transfer_session.cc b/mindspore/lite/src/train/transfer_session.cc
index ae183ad484c..bebe91c4fd8 100644
--- a/mindspore/lite/src/train/transfer_session.cc
+++ b/mindspore/lite/src/train/transfer_session.cc
@@ -262,7 +262,7 @@ lite::LiteSession *CreateTransferSessionInt(const char *model_buf_backbone, size
   }
 
   mindspore::lite::InnerContext *inner_context = new (std::nothrow) mindspore::lite::InnerContext(context);
-  auto ret = session->Init(inner_context, cfg);
+  auto ret = session->TrainInit(inner_context, cfg);
   if (ret != lite::RET_OK) {
     MS_LOG(ERROR) << "init transfer session failed";
     delete session;
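[review note] The train- and transfer-session hunks keep the existing bootstrap contract: allocate with new (std::nothrow), run the initializer (now spelled TrainInit), and delete the session if initialization fails. A compact, self-contained sketch of that create/init/rollback pattern, using stand-in types rather than the library classes (SessionLike and its methods are hypothetical):

```cpp
#include <cstdio>
#include <new>

constexpr int RET_OK = 0;

// Stand-in for a session whose two-phase initialization can fail.
struct SessionLike {
  int TrainInit(void *context, const void *cfg) {
    return context != nullptr ? RET_OK : 1;  // fail on a null context
  }
};

SessionLike *CreateSession(void *context, const void *cfg) {
  auto *session = new (std::nothrow) SessionLike();
  if (session == nullptr) return nullptr;      // allocation failed, no exception thrown
  if (session->TrainInit(context, cfg) != RET_OK) {
    std::fprintf(stderr, "init session failed\n");
    delete session;                            // roll back the allocation on init failure
    return nullptr;
  }
  return session;
}

int main() {
  int ctx = 0;
  SessionLike *session = CreateSession(&ctx, nullptr);
  const bool ok = (session != nullptr);
  delete session;
  return ok ? 0 : 1;
}
```

The delete-on-failure step is what the transfer-session hunk above preserves; without it, a failed TrainInit would leak the nothrow allocation.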