rm -Wno-overloaded-virtual
parent 564f6089c6
commit afdcdf9906
@@ -13,7 +13,7 @@ else()
 set(CMAKE_C_FLAGS "-D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -Wno-attributes -Wno-deprecated-declarations \
 -Wno-missing-braces ${SECURE_C_FLAGS} ${CMAKE_C_FLAGS}")
 set(CMAKE_CXX_FLAGS "-D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -Wno-attributes -Wno-deprecated-declarations \
--Wno-missing-braces -Wno-overloaded-virtual -std=c++17 ${SECURE_CXX_FLAGS} ${CMAKE_CXX_FLAGS}")
+-Wno-missing-braces -std=c++17 ${SECURE_CXX_FLAGS} ${CMAKE_CXX_FLAGS}")
 
 set(CMAKE_C_FLAGS_DEBUG "-DDebug -g -fvisibility=default")
 set(CMAKE_CXX_FLAGS_DEBUG "-DDebug -g -fvisibility=default")
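The -Wno-overloaded-virtual suppression can be dropped because the remaining hunks rename the per-kernel Execute(...) helpers to DoExecute(...), so they no longer hide a base-class virtual named Execute. A minimal C++ sketch of the warning this flag was masking; the class and method names below are illustrative stand-ins, not the exact MindSpore Lite hierarchy:

// Reduced illustration of -Woverloaded-virtual (hypothetical names).
class BaseKernel {
 public:
  virtual ~BaseKernel() = default;
  virtual int Execute() { return 0; }  // base-class virtual
};

class WorkerKernel : public BaseKernel {
 public:
  // Same name, different signature: hides BaseKernel::Execute(), so
  // compilers with -Woverloaded-virtual enabled warn here, which breaks
  // a -Werror build unless the warning is suppressed.
  int Execute(int task_id) { return task_id; }
};

class WorkerKernelRenamed : public BaseKernel {
 public:
  // Renaming the helper (as this commit does with DoExecute) removes the
  // hiding, so the global suppression flag is no longer needed.
  int DoExecute(int task_id) { return task_id; }
};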
@@ -67,7 +67,7 @@ std::shared_ptr<lite::LiteSession> CreateTrainSession(std::shared_ptr<Graph::Gr
 }
 }
 
-auto ret = session->Init(context, &train_cfg);
+auto ret = session->TrainInit(context, &train_cfg);
 if (ret != mindspore::lite::RET_OK) {
 MS_LOG(ERROR) << "init session failed";
 return nullptr;
@@ -95,18 +95,6 @@ class InnerKernel : public Kernel {
 : schema::PrimitiveType_NONE;
 }
 
-void set_inputs(const std::vector<mindspore::tensor::MSTensor *> &in_tensors) {
-this->in_tensors_.resize(in_tensors.size());
-(void)std::transform(in_tensors.begin(), in_tensors.end(), in_tensors_.begin(),
-[](mindspore::tensor::MSTensor *tensor) { return static_cast<lite::Tensor *>(tensor); });
-}
-
-void set_outputs(const std::vector<mindspore::tensor::MSTensor *> &out_tensors) {
-this->out_tensors_.resize(out_tensors.size());
-(void)std::transform(out_tensors.begin(), out_tensors.end(), out_tensors_.begin(),
-[](mindspore::tensor::MSTensor *tensor) { return static_cast<lite::Tensor *>(tensor); });
-}
-
 const std::vector<mindspore::MSTensor> &inputs() override {
 if (inputs_.empty()) {
 std::transform(in_tensors_.begin(), in_tensors_.end(), std::back_inserter(inputs_), [](lite::Tensor *tensor) {
@@ -122,7 +122,7 @@ void ArithmeticFP16CPUKernel::TileConstTensor(const void *in_data, void *out_dat
 in_shape, in_strides, out_strides, multiple);
 }
 
-int ArithmeticFP16CPUKernel::Execute(const void *input0, const void *input1, void *output, int size, bool is_opt) {
+int ArithmeticFP16CPUKernel::DoExecute(const void *input0, const void *input1, void *output, int size, bool is_opt) {
 int ret = RET_OK;
 if (is_opt) {
 CHECK_NULL_RETURN(arithmetic_opt_func_);
@@ -47,7 +47,7 @@ class ArithmeticFP16CPUKernel : public ArithmeticCPUKernel {
 int ConstTensorBroadCast() override;
 void TileConstTensor(const void *in_data, void *out_data, size_t ndim, const int *in_shape, const int *in_strides,
 const int *out_strides, const int *multiple) override;
-int Execute(const void *input0, const void *input1, void *output, int size, bool is_opt) override;
+int DoExecute(const void *input0, const void *input1, void *output, int size, bool is_opt) override;
 void FreeFp16Buffer();
 ArithmeticFuncFp16 arithmetic_func_ = nullptr;
 ArithmeticOptFuncFp16 arithmetic_opt_func_ = nullptr;
@@ -94,7 +94,7 @@ int ConvolutionDepthwise3x3Fp16CPUKernel::ReSize() {
 return RET_OK;
 }
 
-int ConvolutionDepthwise3x3Fp16CPUKernel::Execute(int task_id) {
+int ConvolutionDepthwise3x3Fp16CPUKernel::DoExecute(int task_id) {
 int units = UP_DIV(conv_param_->output_w_, C2NUM); // F(2, 3) contains 2 conv units
 int c8 = UP_ROUND(conv_param_->input_channel_, C8NUM);
 auto buffer = buffer_ + C12NUM * c8 * units * task_id;
@@ -108,7 +108,7 @@ int ConvolutionDepthwise3x3Fp16CPUKernel::Execute(int task_id) {
 
 int ConvDw3x3Fp16Run(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
 auto conv_dw = reinterpret_cast<ConvolutionDepthwise3x3Fp16CPUKernel *>(cdata);
-auto ret = conv_dw->Execute(task_id);
+auto ret = conv_dw->DoExecute(task_id);
 if (ret != RET_OK) {
 MS_LOG(ERROR) << "ConvolutionDepthwise3x3Run error task_id[" << task_id << "] error_code[" << ret << "]";
 return RET_ERROR;
@@ -36,7 +36,7 @@ class ConvolutionDepthwise3x3Fp16CPUKernel : public ConvolutionBaseCPUKernel {
 int ReSize() override;
 int Run() override;
 
-int Execute(int task_id);
+int DoExecute(int task_id);
 
 private:
 void PackWeight() override;
@@ -90,7 +90,7 @@ int ConvolutionDepthwiseFp16CPUKernel::ReSize() {
 return RET_OK;
 }
 
-int ConvolutionDepthwiseFp16CPUKernel::Execute(int task_id) {
+int ConvolutionDepthwiseFp16CPUKernel::DoExecute(int task_id) {
 auto input_ptr = reinterpret_cast<float16_t *>(in_tensors_.at(0)->data());
 auto output_ptr = reinterpret_cast<float16_t *>(out_tensors_.at(0)->data());
 MS_ASSERT(input_ptr != nullptr);
@@ -106,7 +106,7 @@ int ConvolutionDepthwiseFp16CPUKernel::Execute(int task_id) {
 
 static int ConvDwFp16Run(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
 auto conv_dw_fp16 = reinterpret_cast<ConvolutionDepthwiseFp16CPUKernel *>(cdata);
-auto ret = conv_dw_fp16->Execute(task_id);
+auto ret = conv_dw_fp16->DoExecute(task_id);
 if (ret != RET_OK) {
 MS_LOG(ERROR) << "ConvolutionDepthwiseFp16Run error task_id[" << task_id << "] error_code[" << ret << "]";
 return RET_ERROR;
@@ -44,7 +44,7 @@ class ConvolutionDepthwiseFp16CPUKernel : public ConvolutionBaseCPUKernel {
 int ReSize() override;
 int Run() override;
 
-int Execute(int task_id);
+int DoExecute(int task_id);
 
 private:
 void PackWeight() override;
@@ -128,7 +128,7 @@ int ConvolutionDepthwiseSWFp16CPUKernel::ReSize() {
 return RET_OK;
 }
 
-int ConvolutionDepthwiseSWFp16CPUKernel::Execute(int task_id) {
+int ConvolutionDepthwiseSWFp16CPUKernel::DoExecute(int task_id) {
 ConvDwC8Fp16(packed_output_, packed_input_, reinterpret_cast<float16_t *>(packed_weight_),
 reinterpret_cast<float16_t *>(bias_data_), conv_param_, sliding_, task_id);
 return RET_OK;
@@ -136,7 +136,7 @@ int ConvolutionDepthwiseSWFp16CPUKernel::Execute(int task_id) {
 
 static int ConvDwSWFp16Run(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
 auto conv_dw_fp16 = reinterpret_cast<ConvolutionDepthwiseSWFp16CPUKernel *>(cdata);
-auto ret = conv_dw_fp16->Execute(task_id);
+auto ret = conv_dw_fp16->DoExecute(task_id);
 if (ret != RET_OK) {
 MS_LOG(ERROR) << "ConvolutionDepthwiseSWFp16Run error task_id[" << task_id << "] error_code[" << ret << "]";
 return RET_ERROR;
@@ -46,7 +46,7 @@ class ConvolutionDepthwiseSWFp16CPUKernel : public ConvolutionBaseCPUKernel {
 int Run() override;
 
 int InitPackedInputOutput();
-int Execute(int task_id);
+int DoExecute(int task_id);
 
 private:
 void PackWeight() override;
@@ -151,7 +151,7 @@ int DeconvolutionDepthwiseFp16CPUKernel::ReSize() {
 return RET_OK;
 }
 
-int DeconvolutionDepthwiseFp16CPUKernel::Execute(int task_id) {
+int DeconvolutionDepthwiseFp16CPUKernel::DoExecute(int task_id) {
 DeconvDwC8Fp16(packed_output_, packed_input_, reinterpret_cast<float16_t *>(packed_weight_),
 reinterpret_cast<float16_t *>(bias_data_), conv_param_, sliding_, task_id);
 return RET_OK;
@@ -159,7 +159,7 @@ int DeconvolutionDepthwiseFp16CPUKernel::Execute(int task_id) {
 
 static int DeconvDwFp16Run(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
 auto deconv_dw_fp16 = reinterpret_cast<DeconvolutionDepthwiseFp16CPUKernel *>(cdata);
-auto ret = deconv_dw_fp16->Execute(task_id);
+auto ret = deconv_dw_fp16->DoExecute(task_id);
 if (ret != RET_OK) {
 MS_LOG(ERROR) << "DeconvolutionDepthwiseFp16Run error task_id[" << task_id << "] error_code[" << ret << "]";
 return RET_ERROR;
@@ -48,7 +48,7 @@ class DeconvolutionDepthwiseFp16CPUKernel : public ConvolutionBaseCPUKernel {
 
 int InitPackedInputOutput();
 int InitSlideParam();
-int Execute(int task_id);
+int DoExecute(int task_id);
 
 private:
 int MallocWeightBiasData() override;
@@ -83,7 +83,7 @@ int StackFp16CPUKernel::Prepare() {
 return ReSize();
 }
 
-int StackFp16CPUKernel::Execute(int task_id) {
+int StackFp16CPUKernel::DoExecute(int task_id) {
 auto inputs = buffers_.data();
 void *output_data = reinterpret_cast<void *>(out_buffer_);
 auto step = UP_DIV(outer_size_, num_threads_);
@@ -99,7 +99,7 @@ int StackFp16CPUKernel::Execute(int task_id) {
 
 static int StackRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
 auto stack = reinterpret_cast<StackFp16CPUKernel *>(cdata);
-if (stack->Execute(task_id) != RET_OK) {
+if (stack->DoExecute(task_id) != RET_OK) {
 return RET_ERROR;
 }
 return RET_OK;
@@ -29,7 +29,7 @@ class StackFp16CPUKernel : public StackBaseCPUKernel {
 ~StackFp16CPUKernel() override = default;
 int Prepare() override;
 int Run() override;
-int Execute(int task_id);
+int DoExecute(int task_id);
 
 private:
 void InitMallocFlags();
@@ -72,7 +72,7 @@ int ArithmeticGradCPUKernelFp16::ArithmeticGradMinimum(float16_t *dy, int dy_siz
 
 int ArithmeticGradCPUKernelFp16::ReSize() { return RET_OK; }
 
-int ArithmeticGradCPUKernelFp16::Execute(int task_id) {
+int ArithmeticGradCPUKernelFp16::DoExecute(int task_id) {
 auto dy = reinterpret_cast<float16_t *>(in_tensors_[0]->data());
 auto dx1 = reinterpret_cast<float16_t *>(out_tensors_[0]->data());
 auto dx2 = reinterpret_cast<float16_t *>(out_tensors_[1]->data());
@@ -89,7 +89,7 @@ int ArithmeticGradCPUKernelFp16::Execute(int task_id) {
 int ArithmeticGradRunFp16(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
 auto Arithmetic_kernel = reinterpret_cast<ArithmeticGradCPUKernelFp16 *>(cdata);
 CHECK_NULL_RETURN(Arithmetic_kernel);
-auto error_code = Arithmetic_kernel->Execute(task_id);
+auto error_code = Arithmetic_kernel->DoExecute(task_id);
 if (error_code != RET_OK) {
 MS_LOG(ERROR) << "ArithmeticGradRunFp16 error task_id[" << task_id << "] error_code[" << error_code << "]";
 return RET_ERROR;
@@ -64,7 +64,7 @@ class ArithmeticGradCPUKernelFp16 : public InnerKernel {
 int InferShape();
 int ReSize() override;
 int Run() override;
-int Execute(int task_id);
+int DoExecute(int task_id);
 
 private:
 int ArithmeticGradMaximum(float16_t *dy, int dy_size, float16_t *dx1, int dx1_size, float16_t *dx2, int dx2_size);
@@ -56,7 +56,7 @@ int BiasGradCPUKernelFp16::Prepare() {
 return ReSize();
 }
 
-int BiasGradCPUKernelFp16::Execute(int task_id) {
+int BiasGradCPUKernelFp16::DoExecute(int task_id) {
 auto in = reinterpret_cast<float16_t *>(in_tensors_.at(0)->data());
 auto out = reinterpret_cast<float16_t *>(out_tensors_.at(0)->data());
 CHECK_NULL_RETURN(in);
@@ -82,7 +82,7 @@ int BiasGradCPUKernelFp16::Execute(int task_id) {
 int BiasGradFp16Run(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
 CHECK_NULL_RETURN(cdata);
 auto bias_kernel = reinterpret_cast<BiasGradCPUKernelFp16 *>(cdata);
-auto error_code = bias_kernel->Execute(task_id);
+auto error_code = bias_kernel->DoExecute(task_id);
 if (error_code != RET_OK) {
 MS_LOG(ERROR) << "bias error task_id[" << task_id << "] error_code[" << error_code << "]";
 return RET_ERROR;
@@ -34,7 +34,7 @@ class BiasGradCPUKernelFp16 : public InnerKernel {
 int Prepare() override;
 int ReSize() override;
 int Run() override;
-int Execute(int task_id);
+int DoExecute(int task_id);
 
 private:
 ArithmeticParameter *bias_param;
@@ -72,7 +72,7 @@ int BNGradCPUKernelFp16::Prepare() {
 return ReSize();
 }
 
-int BNGradCPUKernelFp16::Execute(int task_id) {
+int BNGradCPUKernelFp16::DoExecute(int task_id) {
 auto *input_yt = in_tensors_.at(kNumInputDim_0);
 auto *input_x = in_tensors_.at(kNumInputDim_1);
 auto *input_scale = in_tensors_.at(kNumInputDim_2);
@@ -153,7 +153,7 @@ int BNGradCPUKernelFp16::Execute(int task_id) {
 int BNGradFp16Run(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
 CHECK_NULL_RETURN(cdata);
 auto bn_kernel = reinterpret_cast<BNGradCPUKernelFp16 *>(cdata);
-auto error_code = bn_kernel->Execute(task_id);
+auto error_code = bn_kernel->DoExecute(task_id);
 if (error_code != RET_OK) {
 MS_LOG(ERROR) << "BNGradRun error task_id[" << task_id << "] error_code[" << error_code << "]";
 return RET_ERROR;
@@ -32,7 +32,7 @@ class BNGradCPUKernelFp16 : public InnerKernel {
 int Prepare() override;
 int ReSize() override;
 int Run() override;
-int Execute(int task_id);
+int DoExecute(int task_id);
 
 private:
 int thread_num_ = 1;
@@ -79,7 +79,7 @@ int ConvolutionGradFilterCPUKernelFp16::ReSize() {
 
 int ConvolutionGradFilterCPUKernelFp16::Prepare() { return ReSize(); }
 
-int ConvolutionGradFilterCPUKernelFp16::Execute(int task_id) {
+int ConvolutionGradFilterCPUKernelFp16::DoExecute(int task_id) {
 auto conv_param = reinterpret_cast<ConvParameter *>(op_parameter_);
 CHECK_NULL_RETURN(conv_param);
 auto *input_dy = in_tensors_.at(0);
@@ -185,7 +185,7 @@ int ConvolutionGradFilterCPUKernelFp16::Execute(int task_id) {
 int ConvolutionGradFilterFp16Run(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
 auto convfilter_kernel = reinterpret_cast<ConvolutionGradFilterCPUKernelFp16 *>(cdata);
 CHECK_NULL_RETURN(convfilter_kernel);
-auto error_code = convfilter_kernel->Execute(task_id);
+auto error_code = convfilter_kernel->DoExecute(task_id);
 if (error_code != RET_OK) {
 MS_LOG(ERROR) << "ConvolutionGradFilterRun error task_id[" << task_id << "] error_code[" << error_code << "]";
 return RET_ERROR;
@@ -32,7 +32,7 @@ class ConvolutionGradFilterCPUKernelFp16 : public InnerKernel {
 int Prepare() override;
 int ReSize() override;
 int Run() override;
-int Execute(int task_id);
+int DoExecute(int task_id);
 
 private:
 size_t ws_size_ = 0;
@@ -75,7 +75,7 @@ int ConvolutionGradInputCPUKernelFp16::ReSize() {
 
 int ConvolutionGradInputCPUKernelFp16::Prepare() { return ReSize(); }
 
-int ConvolutionGradInputCPUKernelFp16::Execute(int task_id) {
+int ConvolutionGradInputCPUKernelFp16::DoExecute(int task_id) {
 auto conv_param = reinterpret_cast<ConvParameter *>(op_parameter_);
 auto *input_dy = in_tensors_.at(0);
 auto *input_w = in_tensors_.at(1);
@@ -156,7 +156,7 @@ int ConvolutionGradInputCPUKernelFp16::Execute(int task_id) {
 int ConvolutionGradInputFp16Run(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
 MS_ASSERT(cdata != nullptr);
 auto convinput_kernel = reinterpret_cast<ConvolutionGradInputCPUKernelFp16 *>(cdata);
-auto error_code = convinput_kernel->Execute(task_id);
+auto error_code = convinput_kernel->DoExecute(task_id);
 if (error_code != RET_OK) {
 MS_LOG(ERROR) << "conv input error task_id[" << task_id << "] error_code[" << error_code << "]";
 return RET_ERROR;
@@ -31,7 +31,7 @@ class ConvolutionGradInputCPUKernelFp16 : public InnerKernel {
 int Prepare() override;
 int ReSize() override;
 int Run() override;
-int Execute(int task_id);
+int DoExecute(int task_id);
 
 private:
 size_t ws_size_ = 0;
@@ -61,7 +61,7 @@ int DropoutGradCPUKernelFp16::Prepare() {
 
 int DropoutGradCPUKernelFp16::ReSize() { return RET_OK; }
 
-int DropoutGradCPUKernelFp16::Execute(int task_id) {
+int DropoutGradCPUKernelFp16::DoExecute(int task_id) {
 auto yt_ptr = reinterpret_cast<float16_t *>(in_tensors_.at(kInputIndex)->data());
 auto mask_ptr = reinterpret_cast<float16_t *>(in_tensors_.at(1)->data());
 auto output_ptr = reinterpret_cast<float16_t *>(out_tensors_.at(kOutputIndex)->data());
@@ -81,7 +81,7 @@ int DropoutGradCPUKernelFp16::Execute(int task_id) {
 int RunDropoutFp16Grad(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
 auto dropout = reinterpret_cast<DropoutGradCPUKernelFp16 *>(cdata);
 CHECK_NULL_RETURN(dropout);
-auto error_code = dropout->Execute(task_id);
+auto error_code = dropout->DoExecute(task_id);
 if (error_code != RET_OK) {
 MS_LOG(ERROR) << "Dropout Grad Run error task_id[" << task_id << "] error_code[" << error_code << "]";
 return RET_ERROR;
@@ -31,7 +31,7 @@ class DropoutGradCPUKernelFp16 : public InnerKernel {
 int Prepare() override;
 int ReSize() override;
 int Run() override;
-int Execute(int task_id);
+int DoExecute(int task_id);
 
 private:
 float scale_;
@@ -83,7 +83,7 @@ int LayerNormGradCPUKernelFp16::Prepare() {
 return RET_OK;
 }
 
-int LayerNormGradCPUKernelFp16::Execute(int task_id) {
+int LayerNormGradCPUKernelFp16::DoExecute(int task_id) {
 auto input_x = in_tensors_.at(kNumInputDim_0);
 auto input_dy = in_tensors_.at(kNumInputDim_1);
 auto input_var = in_tensors_.at(kNumInputDim_2);
@@ -117,7 +117,7 @@ int LayerNormF16GradRun(void *cdata, int task_id, float lhs_scale, float rhs_sca
 CHECK_NULL_RETURN(cdata);
 auto ln_kernel = reinterpret_cast<LayerNormGradCPUKernelFp16 *>(cdata);
 CHECK_NULL_RETURN(ln_kernel);
-auto error_code = ln_kernel->Execute(task_id);
+auto error_code = ln_kernel->DoExecute(task_id);
 if (error_code != RET_OK) {
 MS_LOG(ERROR) << "LayerNormGradRun error task_id[" << task_id << "] error_code[" << error_code << "]";
 return RET_ERROR;
@@ -31,7 +31,7 @@ class LayerNormGradCPUKernelFp16 : public InnerKernel {
 int Prepare() override;
 int ReSize() override;
 int Run() override;
-int Execute(int task_id);
+int DoExecute(int task_id);
 
 private:
 int block_num_ = 1;
@@ -72,7 +72,7 @@ int PoolingGradCPUKernelFp16::ReSize() {
 
 int PoolingGradCPUKernelFp16::Prepare() { return ReSize(); }
 
-int PoolingGradCPUKernelFp16::Execute(int task_id) {
+int PoolingGradCPUKernelFp16::DoExecute(int task_id) {
 PoolingParameter *pool_param = reinterpret_cast<PoolingParameter *>(op_parameter_);
 auto input_ptr = reinterpret_cast<float16_t *>(in_tensors_.at(0)->data());
 CHECK_NULL_RETURN(input_ptr);
@@ -102,7 +102,7 @@ int PoolingGradCPUKernelFp16::Execute(int task_id) {
 int PoolingFp16GradImpl(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
 CHECK_NULL_RETURN(cdata);
 auto pooling = reinterpret_cast<PoolingGradCPUKernelFp16 *>(cdata);
-auto error_code = pooling->Execute(task_id);
+auto error_code = pooling->DoExecute(task_id);
 if (error_code != RET_OK) {
 MS_LOG(ERROR) << "Pooling Run error task_id[" << task_id << "] error_code[" << error_code << "]";
 return RET_ERROR;
@@ -36,7 +36,7 @@ class PoolingGradCPUKernelFp16 : public InnerKernel {
 int Prepare() override;
 int ReSize() override;
 int Run() override;
-int Execute(int task_id);
+int DoExecute(int task_id);
 
 private:
 int thread_num_ = 1;
@@ -62,7 +62,7 @@ int ResizeGradCPUKernelFp16::Prepare() {
 return ReSize();
 }
 
-int ResizeGradCPUKernelFp16::Execute(int task_id) {
+int ResizeGradCPUKernelFp16::DoExecute(int task_id) {
 auto in_addr = reinterpret_cast<float16_t *>(in_tensors_.at(0)->data());
 auto out_addr = reinterpret_cast<float16_t *>(out_tensors_.at(0)->data());
 CHECK_NULL_RETURN(in_addr);
@@ -91,7 +91,7 @@ int ResizeGradCPUKernelFp16::Execute(int task_id) {
 int ResizeFp16GradRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
 auto resize_grad_kernel = reinterpret_cast<ResizeGradCPUKernelFp16 *>(cdata);
 CHECK_NULL_RETURN(resize_grad_kernel);
-auto error_code = resize_grad_kernel->Execute(task_id);
+auto error_code = resize_grad_kernel->DoExecute(task_id);
 if (error_code != RET_OK) {
 MS_LOG(ERROR) << "resize grad error task_id[" << task_id << "] error_code[" << error_code << "]";
 return RET_ERROR;
@@ -31,7 +31,7 @@ class ResizeGradCPUKernelFp16 : public InnerKernel {
 int ReSize() override;
 int Run() override;
 int ExecuteInit(int task_id);
-int Execute(int task_id);
+int DoExecute(int task_id);
 };
 } // namespace mindspore::kernel
 
@@ -114,7 +114,7 @@ int StridedSliceGradCPUKernelFp16::ReSize() { return RET_OK; }
 int StridedSliceFp16GradImpl(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
 CHECK_NULL_RETURN(cdata);
 auto slice = reinterpret_cast<StridedSliceGradCPUKernelFp16 *>(cdata);
-auto error_code = slice->Execute(task_id);
+auto error_code = slice->DoExecute(task_id);
 if (error_code != RET_OK) {
 MS_LOG(ERROR) << "StridedSliceGrad Run error task_id[" << task_id << "] error_code[" << error_code << "]";
 return RET_ERROR;
@@ -131,7 +131,7 @@ int StridedSliceGradCPUKernelFp16::Run() {
 return RET_OK;
 }
 
-int StridedSliceGradCPUKernelFp16::Execute(int task_id) {
+int StridedSliceGradCPUKernelFp16::DoExecute(int task_id) {
 auto input = in_tensors_.at(0);
 auto output = out_tensors_.at(0);
 int *po = output_shape_.data();
@@ -34,7 +34,7 @@ class StridedSliceGradCPUKernelFp16 : public InnerKernel {
 int Prepare() override;
 int ReSize() override;
 int Run() override;
-int Execute(int task_id);
+int DoExecute(int task_id);
 
 private:
 void FillEmptyDims();
@@ -63,7 +63,7 @@ int UnsortedSegmentSumFp16Run(void *cdata, int task_id, float lhs_scale, float r
 CHECK_NULL_RETURN(cdata);
 auto kernel = reinterpret_cast<UnsortedSegmentSumCPUKernelFp16 *>(cdata);
 CHECK_NULL_RETURN(kernel);
-auto error_code = kernel->Execute(task_id);
+auto error_code = kernel->DoExecute(task_id);
 if (error_code != RET_OK) {
 MS_LOG(ERROR) << "UnsortedSegmentSum Run error task_id[" << task_id << "] error_code[" << error_code << "]";
 return RET_ERROR;
@@ -80,7 +80,7 @@ int UnsortedSegmentSumCPUKernelFp16::Run() {
 return RET_OK;
 }
 
-int UnsortedSegmentSumCPUKernelFp16::Execute(int task_id) {
+int UnsortedSegmentSumCPUKernelFp16::DoExecute(int task_id) {
 auto input_tensor = in_tensors_.at(0);
 auto indices_tensor = in_tensors_.at(1);
 auto output_tensor = out_tensors_.at(0);
@@ -31,7 +31,7 @@ class UnsortedSegmentSumCPUKernelFp16 : public InnerKernel {
 int Prepare() override;
 int ReSize() override;
 int Run() override;
-int Execute(int task_id);
+int DoExecute(int task_id);
 size_t unit_num_ = 0;
 size_t input_dim1_ = 0;
 size_t output_dim0_ = 0;
@@ -51,7 +51,7 @@ void ArithmeticCompareCPUKernel::InitRunFunction(int primitive_type) {
 }
 }
 
-int ArithmeticCompareCPUKernel::Execute(const void *input0, const void *input1, void *output, int size, bool is_opt) {
+int ArithmeticCompareCPUKernel::DoExecute(const void *input0, const void *input1, void *output, int size, bool is_opt) {
 int ret = RET_OK;
 if (in_tensors_[0]->data_type() == kNumberTypeFloat32) {
 if (is_opt) {
@@ -95,9 +95,9 @@ int ArithmeticCompareCPUKernel::CalcArithmeticByBatch(int task_id) {
 batch_b_ptr_ = static_cast<uint8_t *>(input1_ptr_) + b_offset_[i] * b_stride_size_ * data_type_len_;
 batch_c_ptr_ = static_cast<uint8_t *>(output_ptr_) + i * c_stride_size_ * sizeof(uint8_t);
 if (batch_scalar_) {
-ret = Execute(batch_a_ptr_, batch_b_ptr_, batch_c_ptr_, c_stride_size_, true);
+ret = DoExecute(batch_a_ptr_, batch_b_ptr_, batch_c_ptr_, c_stride_size_, true);
 } else {
-ret = Execute(batch_a_ptr_, batch_b_ptr_, batch_c_ptr_, c_stride_size_, false);
+ret = DoExecute(batch_a_ptr_, batch_b_ptr_, batch_c_ptr_, c_stride_size_, false);
 }
 if (ret != RET_OK) {
 MS_LOG(ERROR) << "failed to calculate.";
@@ -124,12 +124,12 @@ int ArithmeticCompareCPUKernel::DoArithmetic(int task_id) {
 int out_offset = stride * task_id * sizeof(uint8_t);
 if (scalar_) {
 if (param_->in_elements_num0_ == 1) {
-ret = Execute(batch_a_ptr_, batch_b_ptr_ + in_offset, batch_c_ptr_ + out_offset, count, true);
+ret = DoExecute(batch_a_ptr_, batch_b_ptr_ + in_offset, batch_c_ptr_ + out_offset, count, true);
 } else {
-ret = Execute(batch_a_ptr_ + in_offset, batch_b_ptr_, batch_c_ptr_ + out_offset, count, true);
+ret = DoExecute(batch_a_ptr_ + in_offset, batch_b_ptr_, batch_c_ptr_ + out_offset, count, true);
 }
 } else {
-ret = Execute(batch_a_ptr_ + in_offset, batch_b_ptr_ + in_offset, batch_c_ptr_ + out_offset, count, false);
+ret = DoExecute(batch_a_ptr_ + in_offset, batch_b_ptr_ + in_offset, batch_c_ptr_ + out_offset, count, false);
 }
 return ret;
 }
@@ -45,7 +45,7 @@ class ArithmeticCompareCPUKernel : public ArithmeticCPUKernel {
 
 protected:
 void InitRunFunction(int primitive_type) override;
-int Execute(const void *input0, const void *input1, void *output, int size, bool is_opt) override;
+int DoExecute(const void *input0, const void *input1, void *output, int size, bool is_opt) override;
 int CalcArithmeticByBatch(int task_id) override;
 
 private:
@@ -328,7 +328,7 @@ void ArithmeticCPUKernel::InitRunFunction(int primitive_type) {
 }
 }
 
-int ArithmeticCPUKernel::Execute(const void *input0, const void *input1, void *output, int size, bool is_opt) {
+int ArithmeticCPUKernel::DoExecute(const void *input0, const void *input1, void *output, int size, bool is_opt) {
 int ret = RET_OK;
 if (in_tensors_[0]->data_type() == kNumberTypeFloat32) {
 if (is_opt) {
@@ -374,9 +374,9 @@ int ArithmeticCPUKernel::CalcArithmeticByBatch(int task_id) {
 batch_b_ptr_ = static_cast<uint8_t *>(input1_ptr_) + b_offset_[i] * b_stride_size_ * data_type_len_;
 batch_c_ptr_ = static_cast<uint8_t *>(output_ptr_) + i * c_stride_size_ * data_type_len_;
 if (batch_scalar_) {
-ret = Execute(batch_a_ptr_, batch_b_ptr_, batch_c_ptr_, c_stride_size_, true);
+ret = DoExecute(batch_a_ptr_, batch_b_ptr_, batch_c_ptr_, c_stride_size_, true);
 } else {
-ret = Execute(batch_a_ptr_, batch_b_ptr_, batch_c_ptr_, c_stride_size_, false);
+ret = DoExecute(batch_a_ptr_, batch_b_ptr_, batch_c_ptr_, c_stride_size_, false);
 }
 if (ret != RET_OK) {
 MS_LOG(ERROR) << "failed to calculate.";
@@ -402,12 +402,12 @@ int ArithmeticCPUKernel::DoArithmetic(int task_id) {
 int offset = stride * task_id * data_type_len_;
 if (scalar_) {
 if (param_->in_elements_num0_ == 1) {
-ret = Execute(batch_a_ptr_, batch_b_ptr_ + offset, batch_c_ptr_ + offset, count, true);
+ret = DoExecute(batch_a_ptr_, batch_b_ptr_ + offset, batch_c_ptr_ + offset, count, true);
 } else {
-ret = Execute(batch_a_ptr_ + offset, batch_b_ptr_, batch_c_ptr_ + offset, count, true);
+ret = DoExecute(batch_a_ptr_ + offset, batch_b_ptr_, batch_c_ptr_ + offset, count, true);
 }
 } else {
-ret = Execute(batch_a_ptr_ + offset, batch_b_ptr_ + offset, batch_c_ptr_ + offset, count, false);
+ret = DoExecute(batch_a_ptr_ + offset, batch_b_ptr_ + offset, batch_c_ptr_ + offset, count, false);
 }
 return ret;
 }
@@ -82,7 +82,7 @@ class ArithmeticCPUKernel : public InnerKernel {
 virtual int ConstTensorBroadCast();
 virtual void TileConstTensor(const void *in_data, void *out_data, size_t ndim, const int *in_shape,
 const int *in_strides, const int *out_strides, const int *multiple);
-virtual int Execute(const void *input0, const void *input1, void *output, int size, bool is_opt);
+virtual int DoExecute(const void *input0, const void *input1, void *output, int size, bool is_opt);
 virtual bool IsBatchScalarCalc();
 virtual bool IsScalarClac();
 virtual int CalcArithmeticByBatch(int task_id);
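The hunk above renames the virtual in the ArithmeticCPUKernel base class, and the earlier ArithmeticCompareCPUKernel and ArithmeticFP16CPUKernel hunks rename the matching overrides in the same commit, so every override declaration keeps binding to the same virtual. A simplified sketch of that relationship, using stand-in class names and trimmed signatures rather than the real headers:

// Hypothetical stand-ins showing why base and derived renames must land together.
class ArithmeticBase {
 public:
  virtual ~ArithmeticBase() = default;
  virtual int DoExecute(const void *in0, const void *in1, void *out, int size, bool is_opt) {
    return 0;  // generic element-wise path
  }
};

class ArithmeticCompareLike : public ArithmeticBase {
 public:
  // `override` only compiles because the base-class virtual was renamed as well.
  int DoExecute(const void *in0, const void *in1, void *out, int size, bool is_opt) override {
    return 0;  // comparison-specific path
  }
};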
@@ -56,7 +56,7 @@ int ConvolutionDepthwise3x3CPUKernel::ReSize() {
 return RET_OK;
 }
 
-int ConvolutionDepthwise3x3CPUKernel::Execute(int task_id) {
+int ConvolutionDepthwise3x3CPUKernel::DoExecute(int task_id) {
 int units = UP_DIV(conv_param_->output_w_, C2NUM); // F(2, 3) contains 2 conv units
 int c4 = UP_ROUND(conv_param_->input_channel_, C4NUM);
 auto buffer = buffer_ + C12NUM * c4 * units * task_id;
@@ -74,7 +74,7 @@ int ConvolutionDepthwise3x3CPUKernel::Execute(int task_id) {
 
 int ConvDw3x3Run(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
 auto conv_dw = reinterpret_cast<ConvolutionDepthwise3x3CPUKernel *>(cdata);
-auto ret = conv_dw->Execute(task_id);
+auto ret = conv_dw->DoExecute(task_id);
 if (ret != RET_OK) {
 MS_LOG(ERROR) << "ConvolutionDepthwise3x3Run error task_id[" << task_id << "] error_code[" << ret << "]";
 return RET_ERROR;
@@ -36,7 +36,7 @@ class ConvolutionDepthwise3x3CPUKernel : public ConvolutionBaseCPUKernel {
 int ReSize() override;
 int Run() override;
 
-int Execute(int task_id);
+int DoExecute(int task_id);
 
 private:
 int MallocWeightBiasData() override;
@@ -62,7 +62,7 @@ int ConvolutionDepthwiseCPUKernel::ReSize() {
 return RET_OK;
 }
 
-int ConvolutionDepthwiseCPUKernel::Execute(int task_id) {
+int ConvolutionDepthwiseCPUKernel::DoExecute(int task_id) {
 auto ret = ConvDw(output_ptr_, input_ptr_, reinterpret_cast<float *>(packed_weight_),
 reinterpret_cast<float *>(bias_data_), conv_param_, task_id);
 return ret;
@@ -70,7 +70,7 @@ int ConvolutionDepthwiseCPUKernel::Execute(int task_id) {
 
 int ConvDwRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
 auto conv_dw = reinterpret_cast<ConvolutionDepthwiseCPUKernel *>(cdata);
-auto ret = conv_dw->Execute(task_id);
+auto ret = conv_dw->DoExecute(task_id);
 if (ret != RET_OK) {
 MS_LOG(ERROR) << "ConvolutionDepthwiseRun error task_id[" << task_id << "] error_code[" << ret << "]";
 return RET_ERROR;
@@ -36,7 +36,7 @@ class ConvolutionDepthwiseCPUKernel : public ConvolutionBaseCPUKernel {
 int ReSize() override;
 int Run() override;
 
-int Execute(int task_id);
+int DoExecute(int task_id);
 
 private:
 int MallocWeightBiasData() override;
@@ -101,7 +101,7 @@ int ConvolutionDepthwiseIndirectCPUKernel::ReSize() {
 return RET_OK;
 }
 
-int ConvolutionDepthwiseIndirectCPUKernel::Execute(int task_id) {
+int ConvolutionDepthwiseIndirectCPUKernel::DoExecute(int task_id) {
 ConvDwIndirection(output_ptr_, indirect_buffer_, reinterpret_cast<float *>(packed_weight_),
 reinterpret_cast<float *>(bias_data_), zero_ptr_, conv_param_, task_id);
 return RET_OK;
@@ -109,7 +109,7 @@ int ConvolutionDepthwiseIndirectCPUKernel::Execute(int task_id) {
 
 int ConvDwIndirectRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
 auto conv_dw = reinterpret_cast<ConvolutionDepthwiseIndirectCPUKernel *>(cdata);
-auto ret = conv_dw->Execute(task_id);
+auto ret = conv_dw->DoExecute(task_id);
 if (ret != RET_OK) {
 MS_LOG(ERROR) << "ConvolutionDepthwiseIndirectRun error task_id[" << task_id << "] error_code[" << ret << "]";
 return RET_ERROR;
@@ -35,7 +35,7 @@ class ConvolutionDepthwiseIndirectCPUKernel : public ConvolutionBaseCPUKernel {
 int ReSize() override;
 int Run() override;
 
-int Execute(int task_id);
+int DoExecute(int task_id);
 
 private:
 int MallocIndirectBuffer();
@@ -92,7 +92,7 @@ int ConvolutionDepthwiseSWCPUKernel::ReSize() {
 return RET_OK;
 }
 
-int ConvolutionDepthwiseSWCPUKernel::Execute(int task_id) {
+int ConvolutionDepthwiseSWCPUKernel::DoExecute(int task_id) {
 ConvDwSWFp32(packed_output_, packed_input_, reinterpret_cast<float *>(packed_weight_),
 reinterpret_cast<float *>(bias_data_), conv_param_, sliding_, task_id);
 return RET_OK;
@@ -100,7 +100,7 @@ int ConvolutionDepthwiseSWCPUKernel::Execute(int task_id) {
 
 int ConvDwSWRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
 auto conv_dw = reinterpret_cast<ConvolutionDepthwiseSWCPUKernel *>(cdata);
-auto ret = conv_dw->Execute(task_id);
+auto ret = conv_dw->DoExecute(task_id);
 if (ret != RET_OK) {
 MS_LOG(ERROR) << "ConvolutionDepthwiseSWRun error task_id[" << task_id << "] error_code[" << ret << "]";
 return RET_ERROR;
@@ -35,7 +35,7 @@ class ConvolutionDepthwiseSWCPUKernel : public ConvolutionBaseCPUKernel {
 int ReSize() override;
 int Run() override;
 
-int Execute(int task_id);
+int DoExecute(int task_id);
 
 private:
 int InitPackedInputOutput();
@@ -115,7 +115,7 @@ int DeconvolutionDepthwiseCPUKernel::ReSize() {
 return RET_OK;
 }
 
-int DeconvolutionDepthwiseCPUKernel::Execute(int task_id) {
+int DeconvolutionDepthwiseCPUKernel::DoExecute(int task_id) {
 DeconvDwSWFp32(packed_output_, packed_input_, reinterpret_cast<float *>(packed_weight_),
 reinterpret_cast<float *>(bias_data_), conv_param_, sliding_, task_id);
 return RET_OK;
@@ -123,7 +123,7 @@ int DeconvolutionDepthwiseCPUKernel::Execute(int task_id) {
 
 int DeconvDwRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
 auto deconv_dw = reinterpret_cast<DeconvolutionDepthwiseCPUKernel *>(cdata);
-auto ret = deconv_dw->Execute(task_id);
+auto ret = deconv_dw->DoExecute(task_id);
 if (ret != RET_OK) {
 MS_LOG(ERROR) << "DeconvolutionDepthwiseRun error task_id[" << task_id << "] error_code[" << ret << "]";
 return RET_ERROR;
@@ -35,7 +35,7 @@ class DeconvolutionDepthwiseCPUKernel : public ConvolutionBaseCPUKernel {
 int InitSlideParam();
 int ReSize() override;
 int Run() override;
-int Execute(int task_id);
+int DoExecute(int task_id);
 
 private:
 int InitPackedInputOutput();
@@ -71,7 +71,7 @@ static int DoAdam(float *m, float *v, const float *gradient, float *weight, floa
 return RET_OK;
 }
 
-int AdamCPUKernel::Execute(int task_id) {
+int AdamCPUKernel::DoExecute(int task_id) {
 CHECK_LESS_RETURN(in_tensors_.size(), DIMENSION_10D);
 auto weight = reinterpret_cast<float *>(in_tensors_.at(kWeightIdx)->MutableData());
 auto m = reinterpret_cast<float *>(in_tensors_.at(kMomentVector1stIdx)->MutableData());
@@ -107,7 +107,7 @@ int AdamRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
 } else if (adam_kernel->get_optimizer_mode() == WeightUpdateMode::ACCUMULATE_GRADS) {
 error_code = adam_kernel->ExecuteVirtualBatch(task_id);
 } else {
-error_code = adam_kernel->Execute(task_id);
+error_code = adam_kernel->DoExecute(task_id);
 }
 
 if (error_code != RET_OK) {
@@ -38,7 +38,7 @@ class AdamCPUKernel : public OptimizerKernel {
 int Prepare() override;
 int ReSize() override;
 int Run() override;
-int Execute(int task_id);
+int DoExecute(int task_id);
 int OptimizerStep() override;
 std::vector<int> GetOptimizerParamsIdxs() const override;
 
@@ -45,7 +45,7 @@ static int DoApplyMomentum(float *weight, float *accumulate, float learning_rate
 return RET_OK;
 }
 
-int ApplyMomentumCPUKernel::Execute(int task_id) {
+int ApplyMomentumCPUKernel::DoExecute(int task_id) {
 CHECK_LESS_RETURN(in_tensors_.size(), DIMENSION_5D);
 auto weight = reinterpret_cast<float *>(in_tensors_.at(FIRST_INPUT)->data());
 CHECK_NULL_RETURN(weight);
@@ -79,7 +79,7 @@ int ApplyMomentumRun(void *cdata, int task_id, float lhs_scale, float rhs_scale)
 } else if (applyMomentum_kernel->get_optimizer_mode() == WeightUpdateMode::ACCUMULATE_GRADS) {
 error_code = applyMomentum_kernel->ExecuteVirtualBatch(task_id);
 } else {
-error_code = applyMomentum_kernel->Execute(task_id);
+error_code = applyMomentum_kernel->DoExecute(task_id);
 }
 if (error_code != RET_OK) {
 MS_LOG(ERROR) << "apply Momentum run error task_id[" << task_id << "] error_code[" << error_code << "]";
@@ -39,7 +39,7 @@ class ApplyMomentumCPUKernel : public OptimizerKernel {
 }
 int Prepare() override;
 int ReSize() override;
-int Execute(int task_id);
+int DoExecute(int task_id);
 int Run() override;
 int OptimizerStep() override;
 std::vector<int> GetOptimizerParamsIdxs() const override;
@@ -247,7 +247,7 @@ int ArithmeticGradCPUKernel::ArithmeticGradMinimum(float *dy, int dy_size, float
 
 int ArithmeticGradCPUKernel::ReSize() { return RET_OK; }
 
-int ArithmeticGradCPUKernel::Execute(int task_id) {
+int ArithmeticGradCPUKernel::DoExecute(int task_id) {
 auto dy = reinterpret_cast<float *>(in_tensors_[kDyIdx]->MutableData());
 auto dx1 = reinterpret_cast<float *>(out_tensors_[kDx1Idx]->MutableData());
 auto dx2 = reinterpret_cast<float *>(out_tensors_[kDx2Idx]->MutableData());
@@ -265,7 +265,7 @@ int ArithmeticGradCPUKernel::Execute(int task_id) {
 int ArithmeticGradRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
 auto Arithmetic_kernel = reinterpret_cast<ArithmeticGradCPUKernel *>(cdata);
 CHECK_NULL_RETURN(Arithmetic_kernel);
-auto error_code = Arithmetic_kernel->Execute(task_id);
+auto error_code = Arithmetic_kernel->DoExecute(task_id);
 if (error_code != RET_OK) {
 MS_LOG(ERROR) << "ArithmeticGradRun error task_id[" << task_id << "] error_code[" << error_code << "]";
 return RET_ERROR;
@@ -79,7 +79,7 @@ class ArithmeticGradCPUKernel : public InnerKernel {
 int InferShape();
 int ReSize() override;
 int Run() override;
-int Execute(int task_id);
+int DoExecute(int task_id);
 
 private:
 int ArithmeticGradAdd(float *dy, int dy_size, float *dx1, int dx1_size, float *dx2, int dx2_size);
@@ -29,7 +29,7 @@ using mindspore::schema::PrimitiveType_Assign;
 namespace mindspore::kernel {
 int AssignCPUKernel::ReSize() { return RET_OK; }
 
-int AssignCPUKernel::Execute(int task_id) {
+int AssignCPUKernel::DoExecute(int task_id) {
 auto x = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
 CHECK_NULL_RETURN(x);
 auto y = reinterpret_cast<float *>(in_tensors_.at(1)->MutableData());
@@ -49,7 +49,7 @@ int AssignCPUKernel::Execute(int task_id) {
 int AssignRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
 CHECK_NULL_RETURN(cdata);
 auto Assign_kernel = reinterpret_cast<AssignCPUKernel *>(cdata);
-auto error_code = Assign_kernel->Execute(task_id);
+auto error_code = Assign_kernel->DoExecute(task_id);
 if (error_code != RET_OK) {
 MS_LOG(ERROR) << "assign run error task_id[" << task_id << "] error_code[" << error_code << "]";
 return RET_ERROR;
@@ -31,7 +31,7 @@ class AssignCPUKernel : public InnerKernel {
 int Prepare() override;
 int ReSize() override;
 int Run() override;
-int Execute(int task_id);
+int DoExecute(int task_id);
 
 protected:
 int thread_count_ = 1;
@@ -54,7 +54,7 @@ int BiasGradCPUKernel::Prepare() {
 return ReSize();
 }
 
-int BiasGradCPUKernel::Execute(int task_id) {
+int BiasGradCPUKernel::DoExecute(int task_id) {
 auto in = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
 auto out = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
 CHECK_NULL_RETURN(in);
@@ -79,7 +79,7 @@ int BiasGradCPUKernel::Execute(int task_id) {
 int BiasGradRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
 CHECK_NULL_RETURN(cdata);
 auto bias_kernel = reinterpret_cast<BiasGradCPUKernel *>(cdata);
-auto error_code = bias_kernel->Execute(task_id);
+auto error_code = bias_kernel->DoExecute(task_id);
 if (error_code != RET_OK) {
 MS_LOG(ERROR) << "bias error task_id[" << task_id << "] error_code[" << error_code << "]";
 return RET_ERROR;
@@ -34,7 +34,7 @@ class BiasGradCPUKernel : public InnerKernel {
 int Prepare() override;
 int ReSize() override;
 int Run() override;
-int Execute(int task_id);
+int DoExecute(int task_id);
 
 private:
 ArithmeticParameter *bias_param;
@@ -57,7 +57,7 @@ int BNGradCPUKernel::Prepare() {
 return ReSize();
 }
 
-int BNGradCPUKernel::Execute(int task_id) {
+int BNGradCPUKernel::DoExecute(int task_id) {
 auto *input_yt = in_tensors_.at(0);
 auto *input_x = in_tensors_.at(1);
 auto *input_scale = in_tensors_.at(2);
@@ -150,7 +150,7 @@ int BNGradCPUKernel::Execute(int task_id) {
 int BNGradRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
 CHECK_NULL_RETURN(cdata);
 auto bn_kernel = reinterpret_cast<BNGradCPUKernel *>(cdata);
-auto error_code = bn_kernel->Execute(task_id);
+auto error_code = bn_kernel->DoExecute(task_id);
 if (error_code != RET_OK) {
 MS_LOG(ERROR) << "BNGradRun error task_id[" << task_id << "] error_code[" << error_code << "]";
 return RET_ERROR;
@@ -31,7 +31,7 @@ class BNGradCPUKernel : public InnerKernel {
 int Prepare() override;
 int ReSize() override;
 int Run() override;
-int Execute(int task_id);
+int DoExecute(int task_id);
 
 private:
 int thread_num_ = 1;
@@ -72,7 +72,7 @@ int ConvolutionTrainCPUKernel::ReSize() {
 
 int ConvolutionTrainCPUKernel::Prepare() { return ReSize(); }
 
-int ConvolutionTrainCPUKernel::Execute(int task_id) {
+int ConvolutionTrainCPUKernel::DoExecute(int task_id) {
 auto conv_param_ = reinterpret_cast<ConvParameter *>(op_parameter_);
 auto *input_x = in_tensors_.at(kInputIndex);
 auto *input_w = in_tensors_.at(kWeightIndex);
@@ -154,7 +154,7 @@ int ConvolutionTrainCPUKernel::Execute(int task_id) {
 int ConvolutionTrainRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
 MS_ASSERT(cdata != nullptr);
 auto conv_kernel = reinterpret_cast<ConvolutionTrainCPUKernel *>(cdata);
-auto error_code = conv_kernel->Execute(task_id);
+auto error_code = conv_kernel->DoExecute(task_id);
 if (error_code != RET_OK) {
 MS_LOG(ERROR) << "ConvolutionTrainRun error task_id[" << task_id << "] error_code[" << error_code << "]";
 return RET_ERROR;
@@ -31,7 +31,7 @@ class ConvolutionTrainCPUKernel : public InnerKernel {
 int Prepare() override;
 int ReSize() override;
 int Run() override;
-int Execute(int task_id);
+int DoExecute(int task_id);
 
 private:
 int ws_size_ = 0;
@@ -81,7 +81,7 @@ int ConvolutionGradFilterCPUKernel::ReSize() {
 
 int ConvolutionGradFilterCPUKernel::Prepare() { return ReSize(); }
 
-int ConvolutionGradFilterCPUKernel::Execute(int task_id) {
+int ConvolutionGradFilterCPUKernel::DoExecute(int task_id) {
 auto conv_param = reinterpret_cast<ConvParameter *>(op_parameter_);
 CHECK_NULL_RETURN(conv_param);
 auto *input_dy = in_tensors_.at(kDyIdx);
@@ -187,7 +187,7 @@ int ConvolutionGradFilterCPUKernel::Execute(int task_id) {
 int ConvolutionGradFilterRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
 auto convfilter_kernel = reinterpret_cast<ConvolutionGradFilterCPUKernel *>(cdata);
 CHECK_NULL_RETURN(convfilter_kernel);
-auto error_code = convfilter_kernel->Execute(task_id);
+auto error_code = convfilter_kernel->DoExecute(task_id);
 if (error_code != RET_OK) {
 MS_LOG(ERROR) << "ConvolutionGradFilterRun error task_id[" << task_id << "] error_code[" << error_code << "]";
 return RET_ERROR;
@@ -32,7 +32,7 @@ class ConvolutionGradFilterCPUKernel : public InnerKernel {
 int Prepare() override;
 int ReSize() override;
 int Run() override;
-int Execute(int task_id);
+int DoExecute(int task_id);
 
 private:
 size_t ws_size_ = 0;
@@ -76,7 +76,7 @@ int ConvolutionGradInputCPUKernel::ReSize() {
 
 int ConvolutionGradInputCPUKernel::Prepare() { return ReSize(); }
 
-int ConvolutionGradInputCPUKernel::Execute(int task_id) {
+int ConvolutionGradInputCPUKernel::DoExecute(int task_id) {
 auto conv_param = reinterpret_cast<ConvParameter *>(op_parameter_);
 auto *input_dy = in_tensors_.at(0);
 auto *input_w = in_tensors_.at(1);
@@ -160,7 +160,7 @@ int ConvolutionGradInputCPUKernel::Execute(int task_id) {
 int ConvolutionGradInputRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
 MS_ASSERT(cdata != nullptr);
 auto convinput_kernel = reinterpret_cast<ConvolutionGradInputCPUKernel *>(cdata);
-auto error_code = convinput_kernel->Execute(task_id);
+auto error_code = convinput_kernel->DoExecute(task_id);
 if (error_code != RET_OK) {
 MS_LOG(ERROR) << "conv input error task_id[" << task_id << "] error_code[" << error_code << "]";
 return RET_ERROR;
@@ -31,7 +31,7 @@ class ConvolutionGradInputCPUKernel : public InnerKernel {
 int Prepare() override;
 int ReSize() override;
 int Run() override;
-int Execute(int task_id);
+int DoExecute(int task_id);
 
 private:
 size_t ws_size_ = 0;
@@ -70,7 +70,7 @@ int DeConvolutionGradFilterCPUKernel::Prepare() {
 
 int DeConvolutionGradFilterCPUKernel::ReSize() { return RET_OK; }
 
-int DeConvolutionGradFilterCPUKernel::Execute(int task_id) {
+int DeConvolutionGradFilterCPUKernel::DoExecute(int task_id) {
 auto conv_param = reinterpret_cast<ConvParameter *>(op_parameter_);
 auto *input_dy = in_tensors_.at(0);
 auto *input_x = in_tensors_.at(1);
@@ -124,7 +124,7 @@ int DeConvolutionGradFilterCPUKernel::Execute(int task_id) {
 int DeConvolutionGradFilterRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
 CHECK_NULL_RETURN(cdata);
 auto convfilter_kernel = reinterpret_cast<DeConvolutionGradFilterCPUKernel *>(cdata);
-auto error_code = convfilter_kernel->Execute(task_id);
+auto error_code = convfilter_kernel->DoExecute(task_id);
 if (error_code != RET_OK) {
 MS_LOG(ERROR) << "DeConvolutionGradFilterRun error task_id[" << task_id << "] error_code[" << error_code << "]";
 return RET_ERROR;
@@ -31,7 +31,7 @@ class DeConvolutionGradFilterCPUKernel : public InnerKernel {
 int Prepare() override;
 int ReSize() override;
 int Run() override;
-int Execute(int task_id);
+int DoExecute(int task_id);
 
 private:
 size_t ws_size = 0;
@@ -55,7 +55,7 @@ int DropoutCPUKernel::Prepare() {
 
 int DropoutCPUKernel::ReSize() { return RET_OK; }
 
-int DropoutCPUKernel::Execute(int task_id) {
+int DropoutCPUKernel::DoExecute(int task_id) {
 auto input_ptr = reinterpret_cast<float *>(in_tensors_.at(kInputIndex)->MutableData());
 auto output_ptr = reinterpret_cast<float *>(out_tensors_.at(kOutputIndex)->MutableData());
 auto mask = reinterpret_cast<float *>(out_tensors_.at(1)->MutableData());
@@ -90,7 +90,7 @@ int DropoutCPUKernel::Execute(int task_id) {
 
 int RunDropout(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
 auto dropout = reinterpret_cast<DropoutCPUKernel *>(cdata);
-auto error_code = dropout->Execute(task_id);
+auto error_code = dropout->DoExecute(task_id);
 if (error_code != RET_OK) {
 MS_LOG(ERROR) << "Dropout Run error task_id[" << task_id << "] error_code[" << error_code << "]";
 return RET_ERROR;
@@ -31,7 +31,7 @@ class DropoutCPUKernel : public InnerKernel {
 int Prepare() override;
 int ReSize() override;
 int Run() override;
-int Execute(int task_id);
+int DoExecute(int task_id);
 
 private:
 float scale_ = 1.0;
@@ -65,7 +65,7 @@ int DropoutGradCPUKernel::Prepare() {
 
 int DropoutGradCPUKernel::ReSize() { return RET_OK; }
 
-int DropoutGradCPUKernel::Execute(int task_id) {
+int DropoutGradCPUKernel::DoExecute(int task_id) {
 auto yt_ptr = reinterpret_cast<float *>(in_tensors_.at(kInputIdx)->MutableData());
 auto mask_ptr = reinterpret_cast<float *>(in_tensors_.at(kMaskIdx)->MutableData());
 auto output_ptr = reinterpret_cast<float *>(out_tensors_.at(kOutputIdx)->MutableData());
@@ -85,7 +85,7 @@ int DropoutGradCPUKernel::Execute(int task_id) {
 int RunDropoutGrad(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
 auto dropout = reinterpret_cast<DropoutGradCPUKernel *>(cdata);
 CHECK_NULL_RETURN(dropout);
-auto error_code = dropout->Execute(task_id);
+auto error_code = dropout->DoExecute(task_id);
 if (error_code != RET_OK) {
 MS_LOG(ERROR) << "Dropout Grad Run error task_id[" << task_id << "] error_code[" << error_code << "]";
 return RET_ERROR;
@@ -31,7 +31,7 @@ class DropoutGradCPUKernel : public InnerKernel {
 int Prepare() override;
 int ReSize() override;
 int Run() override;
-int Execute(int task_id);
+int DoExecute(int task_id);
 
 private:
 float scale_ = 1.0f;
@@ -74,7 +74,7 @@ int LayerNormGradCPUKernel::Prepare() {
 return RET_OK;
 }
 
-int LayerNormGradCPUKernel::Execute(int task_id) {
+int LayerNormGradCPUKernel::DoExecute(int task_id) {
 auto input_x = in_tensors_.at(0);
 auto input_dy = in_tensors_.at(1);
 auto input_var = in_tensors_.at(2);
@@ -111,7 +111,7 @@ int LayerNormGradCPUKernel::Execute(int task_id) {
 int LayerNormGradRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
 CHECK_NULL_RETURN(cdata);
 auto ln_kernel = reinterpret_cast<LayerNormGradCPUKernel *>(cdata);
-auto error_code = ln_kernel->Execute(task_id);
+auto error_code = ln_kernel->DoExecute(task_id);
 if (error_code != RET_OK) {
 MS_LOG(ERROR) << "LayerNormGradRun error task_id[" << task_id << "] error_code[" << error_code << "]";
 return RET_ERROR;
@@ -31,7 +31,7 @@ class LayerNormGradCPUKernel : public InnerKernel {
 int Prepare() override;
 int ReSize() override;
 int Run() override;
-int Execute(int task_id);
+int DoExecute(int task_id);
 
 private:
 int block_num_ = 1;
@@ -73,7 +73,7 @@ int PoolingGradCPUKernel::ReSize() {
 
 int PoolingGradCPUKernel::Prepare() { return ReSize(); }
 
-int PoolingGradCPUKernel::Execute(int task_id) {
+int PoolingGradCPUKernel::DoExecute(int task_id) {
 PoolingParameter *pool_param = reinterpret_cast<PoolingParameter *>(op_parameter_);
 auto input_ptr = reinterpret_cast<float *>(in_tensors_.at(0)->data());
 CHECK_NULL_RETURN(input_ptr);
@@ -103,7 +103,7 @@ int PoolingGradCPUKernel::Execute(int task_id) {
 int PoolingGradImpl(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
 CHECK_NULL_RETURN(cdata);
 auto pooling = reinterpret_cast<PoolingGradCPUKernel *>(cdata);
-auto error_code = pooling->Execute(task_id);
+auto error_code = pooling->DoExecute(task_id);
 if (error_code != RET_OK) {
 MS_LOG(ERROR) << "Pooling Run error task_id[" << task_id << "] error_code[" << error_code << "]";
 return RET_ERROR;
@@ -36,7 +36,7 @@ class PoolingGradCPUKernel : public InnerKernel {
 int Prepare() override;
 int ReSize() override;
 int Run() override;
-int Execute(int task_id);
+int DoExecute(int task_id);
 
 private:
 int thread_num_ = 1;
@@ -44,7 +44,7 @@ int PowerGradCPUKernel::Prepare() {
 
 int PowerGradCPUKernel::ReSize() { return RET_OK; }
 
-int PowerGradCPUKernel::Execute(int task_id) {
+int PowerGradCPUKernel::DoExecute(int task_id) {
 auto dy_addr = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
 auto x_addr = reinterpret_cast<float *>(in_tensors_.at(1)->MutableData());
 auto dx_addr = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
@@ -73,7 +73,7 @@ int PowerGradCPUKernel::Execute(int task_id) {
 int PowerGradRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
 auto power_kernel = reinterpret_cast<PowerGradCPUKernel *>(cdata);
 CHECK_NULL_RETURN(power_kernel);
-auto error_code = power_kernel->Execute(task_id);
+auto error_code = power_kernel->DoExecute(task_id);
 if (error_code != RET_OK) {
 MS_LOG(ERROR) << "power grad error task_id[" << task_id << "] error_code[" << error_code << "]";
 return RET_ERROR;
@@ -38,7 +38,7 @@ class PowerGradCPUKernel : public InnerKernel {
 int Prepare() override;
 int ReSize() override;
 int Run() override;
-int Execute(int task_id);
+int DoExecute(int task_id);
 
 private:
 int thread_count_;
@@ -64,7 +64,7 @@ int ResizeGradCPUKernel::Prepare() {
 return ReSize();
 }
 
-int ResizeGradCPUKernel::Execute(int task_id) {
+int ResizeGradCPUKernel::DoExecute(int task_id) {
 auto in_addr = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
 auto out_addr = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
 auto param = reinterpret_cast<ResizeGradParameter *>(op_parameter_);
@@ -89,7 +89,7 @@ int ResizeGradCPUKernel::Execute(int task_id) {
 int ResizeGradRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
 auto resize_grad_kernel = reinterpret_cast<ResizeGradCPUKernel *>(cdata);
 CHECK_NULL_RETURN(resize_grad_kernel);
-auto error_code = resize_grad_kernel->Execute(task_id);
+auto error_code = resize_grad_kernel->DoExecute(task_id);
 if (error_code != RET_OK) {
 MS_LOG(ERROR) << "resize grad error task_id[" << task_id << "] error_code[" << error_code << "]";
 return RET_ERROR;
@@ -31,7 +31,7 @@ class ResizeGradCPUKernel : public InnerKernel {
 int ReSize() override;
 int Run() override;
 int ExecuteInit(int task_id);
-int Execute(int task_id);
+int DoExecute(int task_id);
 };
 } // namespace mindspore::kernel
 
@@ -68,7 +68,7 @@ int DoSgdInit(float *weight, float *accumulate, float *gradient, float *stat, fl
  return RET_OK;
}

int SgdCPUKernel::Execute(int task_id) {
int SgdCPUKernel::DoExecute(int task_id) {
  auto weight = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
  CHECK_NULL_RETURN(weight);
  auto accumulate = reinterpret_cast<float *>(in_tensors_.at(3)->MutableData());

@@ -127,7 +127,7 @@ int SgdRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
  } else if (sgd_kernel->get_optimizer_mode() == WeightUpdateMode::ACCUMULATE_GRADS) {
    error_code = sgd_kernel->ExecuteVirtualBatch(task_id);
  } else {
    error_code = sgd_kernel->Execute(task_id);
    error_code = sgd_kernel->DoExecute(task_id);
  }
  if (error_code != RET_OK) {
    MS_LOG(ERROR) << "SGD run error task_id[" << task_id << "] error_code[" << error_code << "]";

@@ -39,7 +39,7 @@ class SgdCPUKernel : public OptimizerKernel {
  int ReSize() override;
  int Run() override;
  int ExecuteInit(int task_id);
  int Execute(int task_id);
  int DoExecute(int task_id);
  int OptimizerStep() override;
  std::vector<int> GetOptimizerParamsIdxs() const override;
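The SGD hunk is the one place where the callback does more than forward: SgdRun inspects the optimizer mode and routes accumulate-gradients work to ExecuteVirtualBatch, everything else to the renamed DoExecute. A simplified, compilable sketch of that branching follows; FakeSgdKernel, FakeSgdRun, and the two-way enum are stand-ins rather than the real OptimizerKernel hierarchy, which has more branches than shown here.

#include <cstdio>

constexpr int RET_OK = 0;
constexpr int RET_ERROR = 1;

// Stand-in for the WeightUpdateMode values referenced in the diff.
enum class WeightUpdateMode { NORMAL, ACCUMULATE_GRADS };

class FakeSgdKernel {
 public:
  explicit FakeSgdKernel(WeightUpdateMode mode) : mode_(mode) {}
  WeightUpdateMode get_optimizer_mode() const { return mode_; }

  int ExecuteVirtualBatch(int task_id) {
    std::printf("accumulate gradients, slice %d\n", task_id);
    return RET_OK;
  }
  int DoExecute(int task_id) {
    std::printf("apply SGD update, slice %d\n", task_id);
    return RET_OK;
  }

 private:
  WeightUpdateMode mode_;
};

// Mirrors the routing in SgdRun: pick the worker based on the mode.
int FakeSgdRun(void *cdata, int task_id, float /*lhs_scale*/, float /*rhs_scale*/) {
  auto *kernel = static_cast<FakeSgdKernel *>(cdata);
  if (kernel == nullptr) {
    return RET_ERROR;
  }
  int error_code;
  if (kernel->get_optimizer_mode() == WeightUpdateMode::ACCUMULATE_GRADS) {
    error_code = kernel->ExecuteVirtualBatch(task_id);
  } else {
    error_code = kernel->DoExecute(task_id);
  }
  return error_code;
}

int main() {
  FakeSgdKernel kernel(WeightUpdateMode::ACCUMULATE_GRADS);
  return FakeSgdRun(&kernel, 0, 0.0f, 1.0f) == RET_OK ? 0 : 1;
}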
@@ -34,7 +34,7 @@ int SigmoidCrossEntropyWithLogitsCPUKernel::ReSize() {
  return RET_OK;
}

int SigmoidCrossEntropyWithLogitsCPUKernel::Execute(int task_id) {
int SigmoidCrossEntropyWithLogitsCPUKernel::DoExecute(int task_id) {
  auto logits = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
  CHECK_NULL_RETURN(logits);
  auto labels = reinterpret_cast<float *>(in_tensors_.at(1)->MutableData());

@@ -61,7 +61,7 @@ int SigmoidCrossEntropyWithLogitsCPUKernel::Execute(int task_id) {
int SigmoidCrossEntropyWithLogitsRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
  CHECK_NULL_RETURN(cdata);
  auto sig_crs_ent_kernel = reinterpret_cast<SigmoidCrossEntropyWithLogitsCPUKernel *>(cdata);
  auto error_code = sig_crs_ent_kernel->Execute(task_id);
  auto error_code = sig_crs_ent_kernel->DoExecute(task_id);
  if (error_code != RET_OK) {
    MS_LOG(ERROR) << "SigmoidCrossEntropyWithLogits error task_id[" << task_id << "] error_code[" << error_code << "]";
    return RET_ERROR;

@@ -31,7 +31,7 @@ class SigmoidCrossEntropyWithLogitsCPUKernel : public InnerKernel {
  int Prepare() override;
  int ReSize() override;
  int Run() override;
  int Execute(int task_id);
  int DoExecute(int task_id);
};
}  // namespace mindspore::kernel
@@ -35,7 +35,7 @@ int SigmoidCrossEntropyWithLogitsGradCPUKernel::ReSize() {
  return RET_OK;
}

int SigmoidCrossEntropyWithLogitsGradCPUKernel::Execute(int task_id) {
int SigmoidCrossEntropyWithLogitsGradCPUKernel::DoExecute(int task_id) {
  auto logits = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
  CHECK_NULL_RETURN(logits);
  auto labels = reinterpret_cast<float *>(in_tensors_.at(1)->MutableData());

@@ -63,7 +63,7 @@ int SigmoidCrossEntropyWithLogitsGradRun(void *cdata, int task_id, float lhs_sca
  CHECK_NULL_RETURN(cdata);
  auto sig_crs_ent_kernel = reinterpret_cast<SigmoidCrossEntropyWithLogitsGradCPUKernel *>(cdata);
  CHECK_NULL_RETURN(sig_crs_ent_kernel);
  auto error_code = sig_crs_ent_kernel->Execute(task_id);
  auto error_code = sig_crs_ent_kernel->DoExecute(task_id);
  if (error_code != RET_OK) {
    MS_LOG(ERROR) << "SigmoidCrossEntropyWithLogitsGrad error task_id[" << task_id << "] error_code[" << error_code
                  << "]";

@@ -31,7 +31,7 @@ class SigmoidCrossEntropyWithLogitsGradCPUKernel : public InnerKernel {
  int Prepare() override;
  int ReSize() override;
  int Run() override;
  int Execute(int task_id);
  int DoExecute(int task_id);
};
}  // namespace mindspore::kernel
@@ -30,7 +30,7 @@ constexpr static int kOutputIdx = 0;

int SmoothL1LossCPUKernel::ReSize() { return RET_OK; }

int SmoothL1LossCPUKernel::Execute(size_t task_id) {
int SmoothL1LossCPUKernel::DoExecute(size_t task_id) {
  SmoothL1LossParameter *smooth_l1_loss_param = reinterpret_cast<SmoothL1LossParameter *>(op_parameter_);
  CHECK_NULL_RETURN(smooth_l1_loss_param);
  auto predict = reinterpret_cast<float *>(in_tensors_.at(kPredictIdx)->MutableData());

@@ -69,7 +69,7 @@ int SmoothL1LossCPUKernel::Execute(size_t task_id) {
int SmoothL1LossRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
  auto smooth_l1_loss_kernel = reinterpret_cast<SmoothL1LossCPUKernel *>(cdata);
  CHECK_NULL_RETURN(smooth_l1_loss_kernel);
  auto error_code = smooth_l1_loss_kernel->Execute(task_id);
  auto error_code = smooth_l1_loss_kernel->DoExecute(task_id);
  if (error_code != RET_OK) {
    MS_LOG(ERROR) << "SmoothL1Loss error task_id[" << task_id << "] error_code[" << error_code << "]";
    return RET_ERROR;

@@ -33,7 +33,7 @@ class SmoothL1LossCPUKernel : public InnerKernel {
  int Prepare() override;
  int ReSize() override;
  int Run() override;
  int Execute(size_t task_id);
  int DoExecute(size_t task_id);

 private:
  SmoothL1LossParameter *smooth_l1_param_;
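One small wrinkle in the SmoothL1Loss kernel: its worker takes a size_t task_id while the SmoothL1LossRun callback receives an int, so the value is implicitly converted at the call site. Independent of that, each DoExecute(task_id) worker in these kernels typically processes only its own contiguous slice of the tensors, with the slice derived from task_id and the kernel's thread count. The sketch below shows that slicing pattern in isolation; the helper names and the element-wise body are illustrative, not the project's own macros or math.

#include <cstddef>
#include <cstdio>
#include <vector>

// Illustrative ceil-division; MindSpore Lite has its own helpers for this.
static size_t CeilDiv(size_t a, size_t b) { return (a + b - 1) / b; }

// Sketch of a per-task worker: each task_id handles a contiguous chunk.
int DoExecuteSlice(const std::vector<float> &predict, const std::vector<float> &target,
                   std::vector<float> *out, size_t task_id, size_t thread_num) {
  const size_t total = predict.size();
  const size_t stride = CeilDiv(total, thread_num);
  const size_t start = task_id * stride;
  if (start >= total) {
    return 0;  // nothing left for this task
  }
  const size_t end = (start + stride < total) ? start + stride : total;
  for (size_t i = start; i < end; ++i) {
    (*out)[i] = predict[i] - target[i];  // placeholder element-wise work
  }
  return 0;
}

int main() {
  std::vector<float> predict(10, 2.0f), target(10, 0.5f), out(10, 0.0f);
  const size_t thread_num = 3;
  for (size_t task_id = 0; task_id < thread_num; ++task_id) {
    DoExecuteSlice(predict, target, &out, task_id, thread_num);
  }
  std::printf("out[0] = %.1f\n", out[0]);
  return 0;
}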
@@ -41,7 +41,7 @@ int SmoothL1LossGradCPUKernel::ReSize() {
  return RET_OK;
}

int SmoothL1LossGradCPUKernel::Execute(int task_id) {
int SmoothL1LossGradCPUKernel::DoExecute(int task_id) {
  SmoothL1LossParameter *smooth_l1_loss_param = reinterpret_cast<SmoothL1LossParameter *>(op_parameter_);
  auto predict = reinterpret_cast<float *>(in_tensors_.at(kPredictIdx)->MutableData());
  CHECK_NULL_RETURN(predict);

@@ -78,7 +78,7 @@ int SmoothL1LossGradCPUKernel::Execute(int task_id) {
int SmoothL1LossGradRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
  auto smooth_l1_loss_kernel = reinterpret_cast<SmoothL1LossGradCPUKernel *>(cdata);
  CHECK_NULL_RETURN(smooth_l1_loss_kernel);
  auto error_code = smooth_l1_loss_kernel->Execute(task_id);
  auto error_code = smooth_l1_loss_kernel->DoExecute(task_id);
  if (error_code != RET_OK) {
    MS_LOG(ERROR) << "SmoothL1LossGrad error task_id[" << task_id << "] error_code[" << error_code << "]";
    return RET_ERROR;

@@ -33,7 +33,7 @@ class SmoothL1LossGradCPUKernel : public InnerKernel {
  int Prepare() override;
  int ReSize() override;
  int Run() override;
  int Execute(int task_id);
  int DoExecute(int task_id);

 private:
  SmoothL1LossParameter *smooth_l1_param_;
@@ -57,7 +57,7 @@ void SoftmaxCrossEntropyWithLogitsCPUKernel::ForwardPostExecute(const float *lab
  }
}

int SoftmaxCrossEntropyWithLogitsCPUKernel::Execute(int task_id) {
int SoftmaxCrossEntropyWithLogitsCPUKernel::DoExecute(int task_id) {
  auto ins = reinterpret_cast<float *>(in_tensors_.at(0)->data());
  CHECK_NULL_RETURN(ins);
  auto labels = reinterpret_cast<float *>(in_tensors_.at(1)->data());

@@ -82,7 +82,7 @@ int SoftmaxCrossEntropyWithLogitsCPUKernel::Execute(int task_id) {
int SoftmaxCrossEntropyWithLogitsRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
  CHECK_NULL_RETURN(cdata);
  auto softmax_kernel = reinterpret_cast<SoftmaxCrossEntropyWithLogitsCPUKernel *>(cdata);
  auto error_code = softmax_kernel->Execute(task_id);
  auto error_code = softmax_kernel->DoExecute(task_id);
  if (error_code != RET_OK) {
    MS_LOG(ERROR) << "SoftmaxCrossEntropy error task_id[" << task_id << "] error_code[" << error_code << "]";
    return RET_ERROR;

@@ -40,7 +40,7 @@ class SoftmaxCrossEntropyWithLogitsCPUKernel : public LossKernel {
  int Prepare() override;
  int ReSize() override;
  int Run() override;
  int Execute(int task_id);
  int DoExecute(int task_id);

 private:
  SoftmaxCrossEntropyParameter *param_;
@@ -57,7 +57,7 @@ int SoftmaxGradCPUKernel::Prepare() {

int SoftmaxGradCPUKernel::ReSize() { return RET_OK; }

int SoftmaxGradCPUKernel::Execute(int task_id) {
int SoftmaxGradCPUKernel::DoExecute(int task_id) {
  auto input_ptr = reinterpret_cast<float *>(in_tensors_.at(kInputIndex)->MutableData());
  auto yt_ptr = reinterpret_cast<float *>(in_tensors_.at(1)->MutableData());
  auto output_ptr = reinterpret_cast<float *>(out_tensors_.at(kOutputIndex)->MutableData());

@@ -70,7 +70,7 @@ int SoftmaxGradCPUKernel::Execute(int task_id) {

int SoftmaxGradRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
  auto softmax_kernel = reinterpret_cast<SoftmaxGradCPUKernel *>(cdata);
  auto error_code = softmax_kernel->Execute(task_id);
  auto error_code = softmax_kernel->DoExecute(task_id);
  if (error_code != RET_OK) {
    MS_LOG(ERROR) << "softmax_kernel SoftmaxGradRun task_id[" << task_id << "] error_code[" << error_code << "]";
    return RET_ERROR;

@@ -33,7 +33,7 @@ class SoftmaxGradCPUKernel : public InnerKernel {
  int Prepare() override;
  int ReSize() override;
  int Run() override;
  int Execute(int task_id);
  int DoExecute(int task_id);

 private:
  SoftmaxParameter *param;
@@ -79,7 +79,7 @@ int SparseSoftmaxCrossEntropyWithLogitsCPUKernel::GradPostExecute(const int *lab
  return RET_OK;
}

int SparseSoftmaxCrossEntropyWithLogitsCPUKernel::Execute(int task_id) {
int SparseSoftmaxCrossEntropyWithLogitsCPUKernel::DoExecute(int task_id) {
  auto sce_param = reinterpret_cast<SoftmaxCrossEntropyParameter *>(op_parameter_);
  auto ins = reinterpret_cast<float *>(in_tensors_.at(0)->data());
  CHECK_NULL_RETURN(ins);

@@ -118,7 +118,7 @@ int SparseSoftmaxCrossEntropyWithLogitsCPUKernel::Execute(int task_id) {
int SparseSoftmaxCrossEntropyWithLogitsRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
  CHECK_NULL_RETURN(cdata);
  auto sparse_kernel = reinterpret_cast<SparseSoftmaxCrossEntropyWithLogitsCPUKernel *>(cdata);
  auto error_code = sparse_kernel->Execute(task_id);
  auto error_code = sparse_kernel->DoExecute(task_id);
  if (error_code != RET_OK) {
    MS_LOG(ERROR) << "SparseSoftmaxCrossEntropyWithLogitsRun error task_id[" << task_id << "] error_code[" << error_code
                  << "]";

@@ -47,7 +47,7 @@ class SparseSoftmaxCrossEntropyWithLogitsCPUKernel : public LossKernel {
  int Prepare() override;
  int ReSize() override;
  int Run() override;
  int Execute(int task_id);
  int DoExecute(int task_id);

 private:
  SoftmaxCrossEntropyParameter *param;
@@ -117,7 +117,7 @@ int StridedSliceGradCPUKernel::ReSize() {
int StridedSliceGradImpl(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
  CHECK_NULL_RETURN(cdata);
  auto slice = reinterpret_cast<StridedSliceGradCPUKernel *>(cdata);
  auto error_code = slice->Execute(task_id);
  auto error_code = slice->DoExecute(task_id);
  if (error_code != RET_OK) {
    MS_LOG(ERROR) << "StridedSliceGrad Run error task_id[" << task_id << "] error_code[" << error_code << "]";
    return RET_ERROR;

@@ -134,7 +134,7 @@ int StridedSliceGradCPUKernel::Run() {
  return RET_OK;
}

int StridedSliceGradCPUKernel::Execute(int task_id) {
int StridedSliceGradCPUKernel::DoExecute(int task_id) {
  auto input = in_tensors_.at(0);
  auto output = out_tensors_.at(0);