!7526 [MSLITE][Develop] optimize conv dw arm cpu int8 op: add 3x3
Merge pull request !7526 from yangruoqi713/conv_dw
commit 7e8dad791a
@@ -78,9 +78,9 @@ void ConvDwInt8Post(int8_t *dst, int32_t *buffer, int output_w, int channel, int
 void ConvDwInt8(int8_t *output_data, int32_t *row_buffer, const int8_t *input_data, const int16_t *weight_data,
                 const int32_t *bias_data, const ConvParameter *conv_param, int task_id) {
-  int h_step = UP_DIV(conv_param->output_h_, conv_param->thread_num_);
-  int h_start = h_step * task_id;
-  int h_end = MSMIN(h_start + h_step, conv_param->output_h_);
+  int step_h = UP_DIV(conv_param->output_h_, conv_param->thread_num_);
+  int start_h = step_h * task_id;
+  int end_h = MSMIN(start_h + step_h, conv_param->output_h_);
 
   bool filter_per_channel = conv_param->conv_quant_arg_.per_channel_ & FILTER_PER_CHANNEL;
   int *out_multiplier = conv_param->conv_quant_arg_.quant_multiplier_;
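The renaming above (h_step to step_h and so on) is cosmetic; the arithmetic is the usual rows-per-thread split. A minimal standalone sketch of that split, assuming UP_DIV(x, y) is ceiling division ((x) + (y) - 1) / (y) as in nnacl and MSMIN is the plain minimum:

#include <algorithm>
#include <cstdio>

int main() {
  int output_h = 17, thread_num = 4;
  int step_h = (output_h + thread_num - 1) / thread_num;  // UP_DIV(17, 4) == 5
  for (int task_id = 0; task_id < thread_num; task_id++) {
    int start_h = step_h * task_id;
    int end_h = std::min(start_h + step_h, output_h);     // MSMIN clamps the last task
    printf("task %d handles output rows [%d, %d)\n", task_id, start_h, end_h);
  }
  return 0;  // prints [0,5) [5,10) [10,15) [15,17): each row is covered exactly once
}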
@@ -95,7 +95,7 @@ void ConvDwInt8(int8_t *output_data, int32_t *row_buffer, const int8_t *input_da
   for (int b = 0; b < conv_param->output_batch_; b++) {
     const int8_t *src = input_data + b * conv_param->input_h_ * conv_param->input_w_ * conv_param->input_channel_;
     int8_t *dst = output_data + b * conv_param->output_h_ * conv_param->output_w_ * conv_param->output_channel_;
-    for (int oh = h_start; oh < h_end; oh++) {
+    for (int oh = start_h; oh < end_h; oh++) {
       int8_t *dst_data = dst + oh * conv_param->output_w_ * conv_param->output_channel_;
 
       int ih_origin = oh * conv_param->stride_h_ - conv_param->pad_u_;
@@ -138,6 +138,253 @@ void ConvDwInt8(int8_t *output_data, int32_t *row_buffer, const int8_t *input_da
 }
 /*conv depthwise int8 end*/
 
+/*conv depthwise 3x3 int8 begin*/
+bool CheckIfUse3X3(const ConvParameter *conv_param, int channel) {
+  bool use_3x3 = conv_param->kernel_h_ == 3 && conv_param->kernel_w_ == 3 && conv_param->stride_h_ == 1 &&
+                 conv_param->stride_w_ == 1 && conv_param->dilation_h_ == 1 && conv_param->dilation_w_ == 1 &&
+                 (channel % 8 == 0);
+  return use_3x3;
+}
+
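The fast path gates on an exact 3x3, stride-1, dilation-1 depthwise shape whose channel count is a multiple of 8, matching the 8-lane accumulator tile used below. A self-contained restatement of the predicate against a hypothetical stub of ConvParameter (the real struct lives in nnacl and has many more fields):

#include <cstdio>

struct ConvParamStub {  // hypothetical stand-in for nnacl's ConvParameter
  int kernel_h_, kernel_w_, stride_h_, stride_w_, dilation_h_, dilation_w_;
};

static bool CheckIfUse3X3Stub(const ConvParamStub *p, int channel) {
  return p->kernel_h_ == 3 && p->kernel_w_ == 3 && p->stride_h_ == 1 && p->stride_w_ == 1 &&
         p->dilation_h_ == 1 && p->dilation_w_ == 1 && (channel % 8 == 0);
}

int main() {
  ConvParamStub p = {3, 3, 1, 1, 1, 1};
  printf("%d %d\n", CheckIfUse3X3Stub(&p, 64), CheckIfUse3X3Stub(&p, 60));  // prints: 1 0
  return 0;
}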
+void InitInputBuffer(int8_t *buffer, const int8_t *input, const ConvParameter *conv_param, int block_input_h,
+                     int block_input_w) {
+  for (int h = 0; h < block_input_h; h++) {
+    const int8_t *src = input;
+    for (int w = 0; w < block_input_w; w++) {
+      memcpy(buffer, src, 64);
+      src += conv_param->input_channel_;
+      buffer += 64;
+    }
+    input += conv_param->input_w_ * conv_param->input_channel_;
+  }
+}
+
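InitInputBuffer repacks one block_input_h x block_input_w patch into a dense scratch buffer, 64 int8 channels (the memcpy width) per pixel, so the window walk in ConvDw3x3Int8Window can use the fixed strides col_size = 64 and row_size = 64 * block_input_w instead of the full image strides. A hedged, self-contained sketch of the same walk with hypothetical geometry:

#include <cstdint>
#include <cstring>

int main() {
  const int in_w = 40, in_c = 128;  // hypothetical input width / channel count
  const int bh = 3, bw = 32;        // block_input_h and block_input_w for stride 1
  static int8_t input[bh * in_w * in_c];
  static int8_t buffer[bh * bw * 64];
  int8_t *dst = buffer;
  const int8_t *row = input;
  for (int h = 0; h < bh; h++) {    // same loop structure as InitInputBuffer above
    const int8_t *src = row;
    for (int w = 0; w < bw; w++) {
      memcpy(dst, src, 64);         // one 64-channel tile of int8 activations
      src += in_c;                  // next pixel in the original layout
      dst += 64;                    // next pixel in the packed layout
    }
    row += in_w * in_c;             // next input row
  }
  return 0;
}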
+// stride 1
+void ConvDw3x3Int8Window(int8_t *output, const int8_t *buffer, const int16_t *weight, const int32_t *bias, int col_size,
+                         int row_size, int channel, int output_h, int output_w, int8_t in_zp, int32_t out_zp,
+                         int out_multiplier, int left_shift, int right_shift, int32_t acc_min, int32_t acc_max) {
+  for (int w = 0; w < output_w; w++) {
+    int tmp_buffer[C8NUM];
+    for (int i = 0; i < C8NUM; i++) {
+      tmp_buffer[i] = 0;
+    }
+    int8_t *output_tmp = output;
+    const int8_t *src_kh = buffer;
+    const int16_t *weight_kh = weight;
+    for (int kh = 0; kh < 3; kh++) {
+      const int8_t *src_kw = src_kh;
+      const int16_t *weight_kw = weight_kh;
+      for (int kw = 0; kw < 3; kw++) {
+        for (int c = 0; c < 8; c++) {
+          tmp_buffer[c] += (src_kw[c] - in_zp) * weight_kw[c];
+        }
+        src_kw += col_size;
+        weight_kw += channel;
+      }
+      src_kh += row_size;
+      weight_kh += 3 * channel;
+    }
+    for (int c = 0; c < C8NUM; c++) {
+      tmp_buffer[c] += bias[c];
+      tmp_buffer[c] = RoundingDivideByPOT(
+        SaturatingRoundingDoublingHighMul(tmp_buffer[c] * (1 << (unsigned int)left_shift), out_multiplier),
+        -right_shift);
+      tmp_buffer[c] += out_zp;
+      tmp_buffer[c] = MSMAX(tmp_buffer[c], acc_min);
+      tmp_buffer[c] = MSMIN(tmp_buffer[c], acc_max);
+      *output_tmp++ = (tmp_buffer[c]);
+    }
+    output += channel;
+    buffer += col_size;
+  }
+}
+
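The tail of the loop above is the standard gemmlowp-style fixed-point requantization: scale the accumulator by an int32 multiplier via a saturating doubling high multiply, apply a rounding arithmetic right shift, add the output zero point, and clamp. The nnacl implementations of the two helpers are not part of this diff; the sketch below follows the gemmlowp reference semantics and is an assumption, not a copy of MSLITE's code (note the kernel negates right_shift at the call site, so the stored value follows the quantizer's sign convention):

#include <cassert>
#include <cstdint>
#include <limits>

// Rounding, doubling high multiply: round((a * b) / 2^31), saturating the one overflow case.
int32_t SaturatingRoundingDoublingHighMul(int32_t a, int32_t b) {
  bool overflow = (a == b) && (a == std::numeric_limits<int32_t>::min());
  int64_t ab = (int64_t)a * (int64_t)b;
  int32_t nudge = ab >= 0 ? (1 << 30) : (1 - (1 << 30));
  int32_t high = (int32_t)((ab + nudge) / (1ll << 31));
  return overflow ? std::numeric_limits<int32_t>::max() : high;
}

// Arithmetic right shift by exponent, rounding to nearest (ties away from zero).
int32_t RoundingDivideByPOT(int32_t x, int exponent) {
  int32_t mask = (int32_t)((1ll << exponent) - 1);
  int32_t remainder = x & mask;
  int32_t threshold = (mask >> 1) + (x < 0 ? 1 : 0);
  return (x >> exponent) + (remainder > threshold ? 1 : 0);
}

int main() {
  // Requantize an accumulator by 0.625 / 2^12 (multiplier 1342177280 == 0.625 * 2^31, shift 12).
  int32_t acc = 20000;
  int32_t scaled = RoundingDivideByPOT(SaturatingRoundingDoublingHighMul(acc, 1342177280), 12);
  assert(scaled == 3);  // 20000 * 0.625 / 4096 = 3.05... -> rounds to 3
  return 0;
}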
+void ConvDw3x3Int8Block(int8_t *output, const int8_t *buffer, const int16_t *weight, const int32_t *bias, int start_c,
+                        int end_c, int col_size, int row_size, int channel, int output_h, int output_w, int8_t in_zp,
+                        int32_t out_zp, int out_multiplier, int left_shift, int right_shift, int32_t acc_min,
+                        int32_t acc_max) {
+  for (; start_c <= end_c - 8; start_c += 8) {
+    ConvDw3x3Int8Window(output, buffer, weight, bias, col_size, row_size, channel, output_h, output_w, in_zp, out_zp,
+                        out_multiplier, left_shift, right_shift, acc_min, acc_max);
+    output += 8;
+    buffer += 8;
+    weight += 8;
+    bias += 8;
+  }
+}
+
+void ConvDw3x3Int8Row(int8_t *output, int8_t *buffer, const int8_t *input, const int16_t *weight, const int32_t *bias,
+                      const ConvParameter *conv_param, int start_w, int end_w, int block_output_h, int block_output_w,
+                      int block_input_h, int block_input_w) {
+  int out_multiplier = conv_param->conv_quant_arg_.quant_multiplier_[0];
+  int left_shift = conv_param->conv_quant_arg_.left_shift_[0];
+  int right_shift = conv_param->conv_quant_arg_.right_shift_[0];
+  int in_zp = conv_param->conv_quant_arg_.input_quant_args_[0].zp_;
+  int out_zp = conv_param->conv_quant_arg_.output_quant_args_[0].zp_;
+  int acc_min = conv_param->conv_quant_arg_.out_act_min_[0];
+  int acc_max = conv_param->conv_quant_arg_.out_act_max_[0];
+
+  int ih_offset = 64 * block_input_w;
+  int w = start_w;
+  for (; w <= end_w - block_output_w; w += block_output_w) {
+    int8_t *output_ptr = output;
+    const int8_t *input_ptr = input;
+    const int16_t *weight_ptr = weight;
+    const int32_t *bias_ptr = bias;
+    int c = 0;
+    for (; c <= conv_param->output_channel_ - 64; c += 64) {
+      InitInputBuffer(buffer, input_ptr, conv_param, block_input_h, block_input_w);
+      ConvDw3x3Int8Block(output_ptr, buffer, weight_ptr, bias_ptr, 0, 64, 64, ih_offset, conv_param->input_channel_,
+                         block_output_h, block_output_w, in_zp, out_zp, out_multiplier, left_shift, right_shift,
+                         acc_min, acc_max);
+      output_ptr += 64;
+      input_ptr += 64;
+      weight_ptr += 64;
+      bias_ptr += 64;
+    }
+    // remaining channels
+    ConvDw3x3Int8Block(output_ptr, input_ptr, weight_ptr, bias_ptr, c, conv_param->input_channel_,
+                       conv_param->input_channel_, conv_param->input_w_ * conv_param->input_channel_,
+                       conv_param->input_channel_, block_output_h, block_output_w, in_zp, out_zp, out_multiplier,
+                       left_shift, right_shift, acc_min, acc_max);
+    output += block_output_w * conv_param->input_channel_;
+    input += conv_param->stride_w_ * block_output_w * conv_param->input_channel_;
+  }
+  // remaining width
+  int left_width = end_w - w;
+  if (left_width > 0) {
+    ConvDw3x3Int8Block(output, input, weight, bias, 0, conv_param->input_channel_, conv_param->input_channel_,
+                       conv_param->input_w_ * conv_param->input_channel_, conv_param->input_channel_, block_output_h,
+                       left_width, in_zp, out_zp, out_multiplier, left_shift, right_shift, acc_min, acc_max);
+  }
+}
+
+void ConvDw3x3Int8(int8_t *output_data, int8_t *buffer, const int8_t *input_data, const int16_t *weight_data,
+                   const int32_t *bias_data, const ConvParameter *conv_param, int task_id) {
+  int step_oh = UP_DIV(conv_param->output_h_, conv_param->thread_num_);
+  int start_oh = step_oh * task_id;
+  int end_oh = MSMIN(start_oh + step_oh, conv_param->output_h_);
+  int start_ow = MSMAX(0, conv_param->pad_l_);
+  int end_ow = conv_param->output_w_ - conv_param->pad_l_;
+  start_oh = MSMAX(start_oh, conv_param->pad_u_);
+  end_oh = MSMIN(conv_param->output_h_ - conv_param->pad_u_, end_oh);
+
+  int block_output_h = 1;
+  int block_output_w = conv_param->stride_w_ == 1 ? 30 : 14;
+  int block_input_h = 3;
+  int block_input_w = conv_param->stride_w_ * (block_output_w - 1) + 3;
+
+  for (int b = 0; b < conv_param->output_batch_; b++) {
+    int start_ih = start_oh * conv_param->stride_h_ - conv_param->pad_u_;
+    int start_iw = start_ow * conv_param->stride_w_ - conv_param->pad_l_;
+    const int8_t *src = input_data + b * conv_param->input_h_ * conv_param->input_w_ * conv_param->input_channel_ +
+                        start_ih * conv_param->input_w_ * conv_param->input_channel_ +
+                        start_iw * conv_param->input_channel_;
+    int8_t *dst = output_data + b * conv_param->output_h_ * conv_param->output_w_ * conv_param->output_channel_ +
+                  start_oh * conv_param->output_w_ * conv_param->output_channel_ +
+                  start_ow * conv_param->output_channel_;
+
+    for (int oh = start_oh; oh < end_oh; oh++) {
+      ConvDw3x3Int8Row(dst, buffer, src, weight_data, bias_data, conv_param, start_ow, end_ow, block_output_h,
+                       block_output_w, block_input_h, block_input_w);
+      src += conv_param->stride_h_ * conv_param->input_w_ * conv_param->input_channel_;
+      dst += conv_param->output_w_ * conv_param->output_channel_;
+    }
+  }
+}
+
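A quick consistency check between the tile geometry chosen above and the per-task scratch buffer allocated later in ConvolutionDepthwise3x3Int8CPUKernel::InitBuffer() (64 * 10 * 10 bytes per thread). This is editorial sanity arithmetic, not code from the patch:

#include <cassert>

int main() {
  for (int stride_w = 1; stride_w <= 2; stride_w++) {
    int block_output_w = (stride_w == 1) ? 30 : 14;           // as chosen above
    int block_input_h = 3;                                    // 3 input rows per output row
    int block_input_w = stride_w * (block_output_w - 1) + 3;  // 32 or 29 input columns
    int need = 64 * block_input_h * block_input_w;            // packed tile size in bytes
    assert(need <= 64 * 10 * 10);                             // 6144 and 5568, both <= 6400
  }
  return 0;
}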
+void ConvDw3x3BorderPixelInt8(int8_t *dst, const int8_t *src, const int16_t *weight, const int32_t *bias, int height,
+                              int width, int in_kh_step, int in_kw_step, int channel, int8_t in_zp, int32_t out_zp,
+                              int out_multiplier, int left_shift, int right_shift, int32_t acc_min, int32_t acc_max) {
+  for (int c = 0; c < channel; c += 8) {
+    int tmp_buffer[8];
+    for (int i = 0; i < 8; i++) {
+      tmp_buffer[i] = 0;
+    }
+    const int8_t *src_kh = src;
+    const int16_t *weight_kh = weight;
+    for (int kh = 0; kh < height; kh++) {
+      const int8_t *src_kw = src_kh;
+      const int16_t *weight_kw = weight_kh;
+      for (int kw = 0; kw < width; kw++) {
+        for (int i = 0; i < 8; i++) {
+          tmp_buffer[i] += (src_kw[c + i] - in_zp) * weight_kw[c + i];
+        }
+        src_kw += in_kw_step;
+        weight_kw += channel;
+      }  // kernel_w loop
+      src_kh += in_kh_step;
+      weight_kh += 3 * channel;
+    }  // kernel_h loop
+
+    for (int i = 0; i < 8; i++) {
+      tmp_buffer[i] += bias[c + i];
+      tmp_buffer[i] = RoundingDivideByPOT(
+        SaturatingRoundingDoublingHighMul(tmp_buffer[i] * (1 << (unsigned int)left_shift), out_multiplier),
+        -right_shift);
+      tmp_buffer[i] += out_zp;
+      tmp_buffer[i] = MSMAX(tmp_buffer[i], acc_min);
+      tmp_buffer[i] = MSMIN(tmp_buffer[i], acc_max);
+      dst[c + i] = (tmp_buffer[i]);
+    }
+  }
+}
+
+void ConvDw3x3BorderInt8(int8_t *dst, const int8_t *src, const int16_t *weight, const int32_t *bias, int top,
+                         int bottom, int left, int right, const ConvParameter *conv_param,
+                         const SlidingWindowParam *sliding, int8_t in_zp, int32_t out_zp, int out_multiplier,
+                         int left_shift, int right_shift, int32_t acc_min, int32_t acc_max) {
+  int8_t *dst_h = dst + top * sliding->out_h_step_;
+  for (int oh = top; oh < bottom; oh++) {
+    int ih = oh * conv_param->stride_h_ - conv_param->pad_u_;
+    int start_kh = MSMAX(0, UP_DIV(-ih, conv_param->dilation_h_));
+    int end_kh = MSMIN(conv_param->kernel_h_, UP_DIV(conv_param->input_h_ - ih, conv_param->dilation_h_));
+    const int8_t *src_h = src + ih * sliding->in_h_step_;
+
+    int8_t *dst_kernel = dst_h + left * sliding->block_channel_;
+    for (int ow = left; ow < right; ow++) {
+      int iw = ow * conv_param->stride_w_ - conv_param->pad_l_;
+      int start_kw = MSMAX(0, UP_DIV(-iw, conv_param->dilation_w_));
+      int end_kw = MSMIN(conv_param->kernel_w_, UP_DIV(conv_param->input_w_ - iw, conv_param->dilation_w_));
+      const int8_t *src_w = src_h + iw * sliding->block_channel_;
+
+      const int8_t *src_kernel = src_w + start_kh * sliding->in_kh_step_ + start_kw * sliding->in_kw_step_;
+      const int16_t *weight_kernel =
+        weight + (start_kh * conv_param->kernel_w_ + start_kw) * conv_param->input_channel_;
+
+      ConvDw3x3BorderPixelInt8(dst_kernel, src_kernel, weight_kernel, bias, end_kh - start_kh, end_kw - start_kw,
+                               sliding->in_kh_step_, sliding->in_kw_step_, conv_param->input_channel_, in_zp, out_zp,
+                               out_multiplier, left_shift, right_shift, acc_min, acc_max);
+
+      dst_kernel += sliding->block_channel_;
+    }  // width loop
+    dst_h += sliding->out_h_step_;
+  }  // height loop
+}
+
+void ConvDw3x3PadInt8(int8_t *output_data, const int8_t *input_data, const int16_t *weight_data,
+                      const int32_t *bias_data, const ConvParameter *conv_param, const SlidingWindowParam *sliding) {
+  int out_multiplier = conv_param->conv_quant_arg_.quant_multiplier_[0];
+  int left_shift = conv_param->conv_quant_arg_.left_shift_[0];
+  int right_shift = conv_param->conv_quant_arg_.right_shift_[0];
+  int in_zp = conv_param->conv_quant_arg_.input_quant_args_[0].zp_;
+  int out_zp = conv_param->conv_quant_arg_.output_quant_args_[0].zp_;
+  int acc_min = conv_param->conv_quant_arg_.out_act_min_[0];
+  int acc_max = conv_param->conv_quant_arg_.out_act_max_[0];
+  ConvDw3x3BorderInt8(output_data, input_data, weight_data, bias_data, 0, sliding->top_, 0, conv_param->output_w_,
+                      conv_param, sliding, in_zp, out_zp, out_multiplier, left_shift, right_shift, acc_min, acc_max);
+  ConvDw3x3BorderInt8(output_data, input_data, weight_data, bias_data, sliding->bottom_, conv_param->output_h_, 0,
+                      conv_param->output_w_, conv_param, sliding, in_zp, out_zp, out_multiplier, left_shift,
+                      right_shift, acc_min, acc_max);
+  ConvDw3x3BorderInt8(output_data, input_data, weight_data, bias_data, sliding->top_, sliding->bottom_, 0,
+                      sliding->left_, conv_param, sliding, in_zp, out_zp, out_multiplier, left_shift, right_shift,
+                      acc_min, acc_max);
+  ConvDw3x3BorderInt8(output_data, input_data, weight_data, bias_data, sliding->top_, sliding->bottom_, sliding->right_,
+                      conv_param->output_w_, conv_param, sliding, in_zp, out_zp, out_multiplier, left_shift,
+                      right_shift, acc_min, acc_max);
+}
+/*conv depthwise 3x3 int8 end*/
+
 /*conv depthwise sliding window perchannel int8 begin*/
 void DepthwiseBorderPixelInt8(int8_t *dst, const int8_t *src, const int16_t *weight, const int32_t *bias, int height,
                               int width, int in_kh_step, int in_kw_step, int kernel_w, int8_t *input_zp,
@@ -24,9 +24,17 @@
 extern "C" {
 #endif
 
+bool CheckIfUse3X3(const ConvParameter *conv_param, int channel);
+
 void ConvDwInt8(int8_t *output_data, int32_t *output_row, const int8_t *input_data, const int16_t *weight_data,
                 const int32_t *bias_data, const ConvParameter *conv_param, int task_id);
 
+void ConvDw3x3PadInt8(int8_t *output_data, const int8_t *input_data, const int16_t *weight_data,
+                      const int32_t *bias_data, const ConvParameter *conv_param, const SlidingWindowParam *sliding);
+
+void ConvDw3x3Int8(int8_t *output_data, int8_t *buffer, const int8_t *input_data, const int16_t *weight_data,
+                   const int32_t *bias_data, const ConvParameter *conv_param, int task_id);
+
 void ConvDwSWInt8(int8_t *output_data, const int8_t *input_data, const int16_t *weight_data, const int32_t *bias_data,
                   int8_t *input_zp, int32_t *output_zp, const ConvParameter *conv_param,
                   const SlidingWindowParam *sliding, int task_id);
@@ -733,6 +733,25 @@ void PackNHWC8ToNHWCInt8(const void *src, void *dst, int batch, int plane, int c
   }
 }
 
+void PackNCHWToNC8HW8Int8(const void *src, void *dst, int batch, int plane, int channel) {
+  int c8 = UP_DIV(channel, C8NUM);
+  for (int b = 0; b < batch; b++) {
+    int src_offset = b * plane * channel;
+    int dst_offset = b * plane * c8 * C8NUM;
+    for (int c = 0; c < channel; c++) {
+      int c8_block_num = c / C8NUM;
+      int c8_block_rem = c % C8NUM;
+      int src_c_offset = src_offset + c * plane;
+      int dst_c_offset = dst_offset + c8_block_num * plane * C8NUM;
+      for (int k = 0; k < plane; k++) {
+        int src_kernel_offset = src_c_offset + k;
+        int dst_kernel_offset = dst_c_offset + C8NUM * k + c8_block_rem;
+        ((int8_t *)dst + dst_kernel_offset)[0] = ((int8_t *)src + src_kernel_offset)[0];
+      }
+    }
+  }
+}
+
 void PackNC4HW4ToNHWCInt8(const void *src, void *dst, int batch, int plane, int channel) {
   int c4 = UP_DIV(channel, C4NUM);
   for (int b = 0; b < batch; b++) {
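PackNCHWToNC8HW8Int8 groups channels into blocks of C8NUM and interleaves them per pixel: element (b, c, k) of the NCHW source lands at (b, c / 8, k, c % 8) in the destination. A small self-contained check of the offset arithmetic, assuming C8NUM == 8 as used by the 8-lane kernels above:

#include <cassert>

int main() {
  const int plane = 5, C8 = 8;  // batch offsets omitted for brevity
  int c = 9, k = 3;             // sample channel and pixel index
  int c8_block_num = c / C8, c8_block_rem = c % C8;
  int dst_offset = c8_block_num * plane * C8 + C8 * k + c8_block_rem;  // as in the loop above
  assert(dst_offset == 1 * 5 * 8 + 8 * 3 + 1);  // block 1, pixel 3, lane 1 -> 65
  return 0;
}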
@@ -83,6 +83,8 @@ void PackNHWCToNHWC8Int8(const void *src, void *dst, int batch, int plane, int c
 
 void PackNHWC8ToNHWCInt8(const void *src, void *dst, int batch, int plane, int channel);
 
+void PackNCHWToNC8HW8Int8(const void *src, void *dst, int batch, int plane, int channel);
+
 void PackNC4HW4ToNHWCInt8(const void *src, void *dst, int batch, int plane, int channel);
 
 void PackNHWCToC8HWN8Int8(const void *src, void *dst, int batch, int plane, int channel);
@@ -110,16 +110,11 @@ static int ConvDwFp16Run(void *cdata, int task_id) {
 }
 
 int ConvolutionDepthwiseFp16CPUKernel::Run() {
-  if (conv_param_->input_channel_ != conv_param_->output_channel_) {
-    MS_LOG(ERROR) << "Only support input channel equals output channel.";
-    return RET_ERROR;
-  }
   auto ret = Prepare();
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "Prepare failed.";
     return RET_ERROR;
   }
 
   ret = ConvolutionBaseFP16CPUKernel::GetExecuteTensor();
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "Get Execute tensor failed.";
@@ -140,11 +140,6 @@ static int ConvDwSWFp16Run(void *cdata, int task_id) {
 }
 
 int ConvolutionDepthwiseSWFp16CPUKernel::Run() {
-  if (conv_param_->input_channel_ != conv_param_->output_channel_) {
-    MS_LOG(ERROR) << "Only support input channel equals output channel.";
-    return RET_ERROR;
-  }
-
   auto ret = Prepare();
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "Prepare failed.";
@@ -101,10 +101,6 @@ int ConvDwRun(void *cdata, int task_id) {
 }
 
 int ConvolutionDepthwiseCPUKernel::Run() {
-  if (conv_param_->input_channel_ != conv_param_->output_channel_) {
-    MS_LOG(ERROR) << "Only support input channel equals output channel.";
-    return RET_ERROR;
-  }
   auto ret = Prepare();
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "Prepare failed.";
@@ -134,17 +134,11 @@ int ConvDwSWRun(void *cdata, int task_id) {
 }
 
 int ConvolutionDepthwiseSWCPUKernel::Run() {
-  if (conv_param_->input_channel_ != conv_param_->output_channel_) {
-    MS_LOG(ERROR) << "Only support input channel equals output channel.";
-    return RET_ERROR;
-  }
-
   auto ret = Prepare();
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "Prepare failed.";
     return ret;
   }
 
   ret = InitBuffer();
   if (ret != 0) {
     MS_LOG(ERROR) << "Convolution depthwise fp32 InitBuffer failed.";
@@ -0,0 +1,183 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "src/runtime/kernel/arm/int8/convolution_depthwise_3x3_int8.h"
+#include "schema/model_generated.h"
+#include "src/kernel_registry.h"
+#include "include/errorcode.h"
+#include "nnacl/int8/conv_depthwise_int8.h"
+#include "src/runtime/runtime_api.h"
+
+using mindspore::kernel::KERNEL_ARCH::kCPU;
+using mindspore::lite::KernelRegistrar;
+using mindspore::lite::RET_ERROR;
+using mindspore::lite::RET_OK;
+using mindspore::schema::PrimitiveType_DepthwiseConv2D;
+
+namespace mindspore::kernel {
+ConvolutionDepthwise3x3Int8CPUKernel::~ConvolutionDepthwise3x3Int8CPUKernel() {
+  if (sliding_ != nullptr) {
+    delete sliding_;
+    sliding_ = nullptr;
+  }
+  if (packed_weight_ != nullptr) {
+    free(packed_weight_);
+    packed_weight_ = nullptr;
+  }
+  FreeQuantParam();
+}
+
+int ConvolutionDepthwise3x3Int8CPUKernel::InitWeightBias() {
+  // init weight, int8 -> int16
+  auto weight_tensor = in_tensors_[kWeightIndex];
+  auto origin_weight = reinterpret_cast<int8_t *>(weight_tensor->MutableData());
+  int channel = weight_tensor->Batch();
+  if (channel % 8 != 0) {
+    MS_LOG(ERROR) << "ConvolutionDepthwise3x3Int8CPUKernel doesn't support channel " << channel;
+    return RET_ERROR;
+  }
+  int pack_weight_size = channel * weight_tensor->Height() * weight_tensor->Width();
+  auto tmp_weight = reinterpret_cast<int8_t *>(malloc(pack_weight_size * sizeof(int8_t)));
+  if (tmp_weight == nullptr) {
+    MS_LOG(ERROR) << "Malloc buffer failed.";
+    return RET_ERROR;
+  }
+  PackNCHWToNHWCInt8(origin_weight, tmp_weight, 1, weight_tensor->Height() * weight_tensor->Width(),
+                     weight_tensor->Batch());
+
+  packed_weight_ = reinterpret_cast<int16_t *>(malloc(pack_weight_size * sizeof(int16_t)));
+  if (packed_weight_ == nullptr) {
+    MS_LOG(ERROR) << "Malloc buffer failed.";
+    return RET_ERROR;
+  }
+  bool filter_per_channel = conv_param_->conv_quant_arg_.per_channel_ & FILTER_PER_CHANNEL;
+  if (filter_per_channel) {
+    for (int i = 0; i < weight_tensor->Height() * weight_tensor->Width(); i++) {
+      for (int c = 0; c < channel; c++) {
+        int weight_zp = conv_param_->conv_quant_arg_.filter_quant_args_[c].zp_;
+        packed_weight_[i * channel + c] = (int16_t)(tmp_weight[i * channel + c] - weight_zp);
+      }
+    }
+  } else {
+    int weight_zp = conv_param_->conv_quant_arg_.filter_quant_args_[0].zp_;
+    for (int i = 0; i < weight_tensor->ElementsNum(); i++) {
+      packed_weight_[i] = (int16_t)(tmp_weight[i] - weight_zp);
+    }
+  }
+  free(tmp_weight);
+
+  bias_data_ = reinterpret_cast<int32_t *>(malloc(channel * sizeof(int32_t)));
+  if (bias_data_ == nullptr) {
+    MS_LOG(ERROR) << "Malloc buffer failed.";
+    return RET_ERROR;
+  }
+  memset(bias_data_, 0, channel * sizeof(int32_t));
+  if (in_tensors_.size() == kInputSize2) {
+    auto bias_tensor = in_tensors_.at(kBiasIndex);
+    auto ori_bias = reinterpret_cast<int32_t *>(bias_tensor->MutableData());
+    memcpy(bias_data_, ori_bias, bias_tensor->ElementsNum() * sizeof(int32_t));
+  }
+  return RET_OK;
+}
+
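InitWeightBias widens the int8 weights to int16 with the filter zero point pre-subtracted, so the inner loops in conv_depthwise_int8.c only subtract the input zero point per tap. The identity being exploited, checked on one sample value (illustration only; the zero points are made up):

#include <cassert>
#include <cstdint>

int main() {
  int8_t x = -57, w = 23;      // quantized activation and weight
  int8_t x_zp = 5, w_zp = -3;  // their zero points
  int16_t w16 = (int16_t)(w - w_zp);         // precomputed once, as in InitWeightBias
  int32_t direct = (x - x_zp) * (w - w_zp);  // textbook quantized product
  int32_t fused = (x - x_zp) * w16;          // what the 3x3 kernels accumulate
  assert(direct == fused);
  return 0;
}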
+int ConvolutionDepthwise3x3Int8CPUKernel::Init() {
+  sliding_ = new (std::nothrow) SlidingWindowParam;
+  if (sliding_ == nullptr) {
+    MS_LOG(ERROR) << "new SlidingWindowParam failed.";
+    return RET_ERROR;
+  }
+  if (!InferShapeDone()) {
+    return RET_OK;
+  }
+  return ReSize();
+}
+
+int ConvolutionDepthwise3x3Int8CPUKernel::ReSize() {
+  ConvolutionBaseCPUKernel::Init();
+  InitSlidingParamConvDw(sliding_, conv_param_, conv_param_->input_channel_);
+  auto ret = ConvolutionBaseCPUKernel::SetQuantParam();
+  if (ret != RET_OK) {
+    MS_LOG(ERROR) << "Set quant param failed.";
+    return ret;
+  }
+  conv_param_->thread_num_ = MSMIN(thread_count_, conv_param_->output_h_);
+  ret = InitWeightBias();
+  if (ret != RET_OK) {
+    MS_LOG(ERROR) << "Depthwise int8 InitWeightBias error!";
+    return ret;
+  }
+  return RET_OK;
+}
+
+int ConvolutionDepthwise3x3Int8CPUKernel::Execute(int task_id) {
+  auto buffer = buffer_ + 64 * 10 * 10 * task_id;
+  ConvDw3x3Int8(output_ptr_, buffer, input_ptr_, packed_weight_, reinterpret_cast<int32_t *>(bias_data_), conv_param_,
+                task_id);
+  return RET_OK;
+}
+
+int ConvDw3x3Int8Run(void *cdata, int task_id) {
+  auto conv_dw_int8 = reinterpret_cast<ConvolutionDepthwise3x3Int8CPUKernel *>(cdata);
+  auto ret = conv_dw_int8->Execute(task_id);
+  if (ret != RET_OK) {
+    MS_LOG(ERROR) << "ConvolutionDepthwise3x3Int8Run error task_id[" << task_id << "] error_code[" << ret << "]";
+    return RET_ERROR;
+  }
+  return RET_OK;
+}
+
+int ConvolutionDepthwise3x3Int8CPUKernel::InitBuffer() {
+  int buffer_size = 64 * 10 * 10 * conv_param_->thread_num_;
+  buffer_ = reinterpret_cast<int8_t *>(context_->allocator->Malloc(buffer_size * sizeof(int8_t)));
+  if (buffer_ == nullptr) {
+    MS_LOG(ERROR) << "Malloc buffer failed.";
+    return RET_ERROR;
+  }
+  return RET_OK;
+}
+
+int ConvolutionDepthwise3x3Int8CPUKernel::Run() {
+  auto ret = Prepare();
+  if (ret != RET_OK) {
+    MS_LOG(ERROR) << "Prepare failed.";
+    return RET_ERROR;
+  }
+  ret = InitBuffer();
+  if (ret != RET_OK) {
+    MS_LOG(ERROR) << "Depthwise int8 InitBuffer error!";
+    return ret;
+  }
+
+  auto input_tensor = in_tensors_.at(kInputIndex);
+  input_ptr_ = reinterpret_cast<int8_t *>(input_tensor->MutableData());
+
+  auto output_tensor = out_tensors_.at(kOutputIndex);
+  output_ptr_ = reinterpret_cast<int8_t *>(output_tensor->MutableData());
+
+  if (conv_param_->pad_l_ == 1 && conv_param_->pad_u_ == 1) {
+    ConvDw3x3PadInt8(output_ptr_, input_ptr_, packed_weight_, reinterpret_cast<int32_t *>(bias_data_), conv_param_,
+                     sliding_);
+  }
+  ret = ParallelLaunch(this->context_->thread_pool_, ConvDw3x3Int8Run, this, conv_param_->thread_num_);
+  if (ret != RET_OK) {
+    context_->allocator->Free(buffer_);
+    MS_LOG(ERROR) << "ConvDw3x3Int8Run error: error_code[" << ret << "]";
+    return RET_ERROR;
+  }
+  context_->allocator->Free(buffer_);
+  return RET_OK;
+}
+}  // namespace mindspore::kernel
@@ -0,0 +1,51 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_INT8_CONVOLUTION_DEPTHWISE_3X3_INT8_H_
+#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_INT8_CONVOLUTION_DEPTHWISE_3X3_INT8_H_
+
+#include <vector>
+#include "src/lite_kernel.h"
+#include "src/runtime/kernel/arm/base/convolution_base.h"
+#include "nnacl/fp32/conv_depthwise.h"
+
+namespace mindspore::kernel {
+class ConvolutionDepthwise3x3Int8CPUKernel : public ConvolutionBaseCPUKernel {
+ public:
+  ConvolutionDepthwise3x3Int8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                                       const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx,
+                                       const mindspore::lite::PrimitiveC *primitive)
+      : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
+  ~ConvolutionDepthwise3x3Int8CPUKernel() override;
+
+  int Init() override;
+  int ReSize() override;
+  int Run() override;
+
+  int InitWeightBias();
+  int Execute(int task_id);
+
+ private:
+  int InitBuffer();
+  SlidingWindowParam *sliding_ = nullptr;
+  int16_t *packed_weight_ = nullptr;
+  int8_t *input_ptr_ = nullptr;
+  int8_t *output_ptr_ = nullptr;
+  int8_t *buffer_ = nullptr;
+};
+}  // namespace mindspore::kernel
+
+#endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_INT8_CONVOLUTION_DEPTHWISE_3X3_INT8_H_
@@ -15,6 +15,7 @@
  */
 
 #include "src/runtime/kernel/arm/int8/convolution_depthwise_int8.h"
+#include "src/runtime/kernel/arm/int8/convolution_depthwise_3x3_int8.h"
 #include "src/runtime/kernel/arm/int8/convolution_depthwise_slidewindow_int8.h"
 #include "schema/model_generated.h"
 #include "src/kernel_registry.h"
@@ -130,7 +131,7 @@ int ConvDwInt8Run(void *cdata, int task_id) {
 
 int ConvolutionDepthwiseInt8CPUKernel::InitBuffer() {
   int output_row_size = conv_param_->thread_num_ * conv_param_->output_w_ * conv_param_->output_channel_;
-  row_buffer_ = reinterpret_cast<int32_t *>(context_->allocator->Malloc(output_row_size * sizeof(float)));
+  row_buffer_ = reinterpret_cast<int32_t *>(context_->allocator->Malloc(output_row_size * sizeof(int)));
   if (row_buffer_ == nullptr) {
     MS_LOG(ERROR) << "Malloc buffer failed.";
     return RET_ERROR;
@@ -139,16 +140,11 @@ int ConvolutionDepthwiseInt8CPUKernel::InitBuffer() {
 }
 
 int ConvolutionDepthwiseInt8CPUKernel::Run() {
-  if (conv_param_->input_channel_ != conv_param_->output_channel_) {
-    MS_LOG(ERROR) << "Only support input channel equals output channel.";
-    return RET_ERROR;
-  }
   auto ret = Prepare();
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "Prepare failed.";
     return RET_ERROR;
   }
 
   ret = InitBuffer();
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "Depthwise int8 ReSize error!";
@@ -177,7 +173,6 @@ kernel::LiteKernel *CpuConvDwInt8KernelCreator(const std::vector<lite::Tensor *>
                                                const mindspore::lite::PrimitiveC *primitive) {
   MS_ASSERT(opParameter != nullptr);
   MS_ASSERT(desc.type == schema::PrimitiveType_DepthwiseConv2D);
-
   kernel::LiteKernel *kernel;
   auto act_quant_size =
     MSMAX(inputs[kInputIndex]->GetQuantParams().size(), outputs[kOutputIndex]->GetQuantParams().size());
@@ -29,9 +29,9 @@ using mindspore::schema::PrimitiveType_DepthwiseConv2D;
 
 namespace mindspore::kernel {
 ConvolutionDepthwiseSWInt8CPUKernel::~ConvolutionDepthwiseSWInt8CPUKernel() {
-  if (sliding != nullptr) {
-    delete sliding;
-    sliding = nullptr;
+  if (sliding_ != nullptr) {
+    delete sliding_;
+    sliding_ = nullptr;
   }
   if (packed_weight_ != nullptr) {
     free(packed_weight_);
@@ -270,8 +270,8 @@ int ConvolutionDepthwiseSWInt8CPUKernel::ReinitQuantParam() {
 }
 
 int ConvolutionDepthwiseSWInt8CPUKernel::Init() {
-  sliding = new (std::nothrow) SlidingWindowParam;
-  if (sliding == nullptr) {
+  sliding_ = new (std::nothrow) SlidingWindowParam;
+  if (sliding_ == nullptr) {
     MS_LOG(ERROR) << "new SlidingWindowParam failed.";
     return RET_ERROR;
   }
@@ -283,7 +283,7 @@ int ConvolutionDepthwiseSWInt8CPUKernel::Init() {
 
 int ConvolutionDepthwiseSWInt8CPUKernel::ReSize() {
   ConvolutionBaseCPUKernel::Init();
-  InitSlidingParamConvDw(sliding, conv_param_, C8NUM);
+  InitSlidingParamConvDw(sliding_, conv_param_, C8NUM);
 
   auto ret = ConvolutionBaseCPUKernel::SetQuantParam();
   if (ret != RET_OK) {
@@ -306,7 +306,7 @@ int ConvolutionDepthwiseSWInt8CPUKernel::ReSize() {
 
 int ConvolutionDepthwiseSWInt8CPUKernel::Execute(int task_id) {
   ConvDwSWInt8(packed_output_, packed_input_, packed_weight_, reinterpret_cast<int32_t *>(bias_data_), input_zp_,
-               output_zp_, conv_param_, sliding, task_id);
+               output_zp_, conv_param_, sliding_, task_id);
   return RET_OK;
 }
 
@@ -321,10 +321,6 @@ int ConvDwSWInt8Run(void *cdata, int task_id) {
 }
 
 int ConvolutionDepthwiseSWInt8CPUKernel::Run() {
-  if (conv_param_->input_channel_ != conv_param_->output_channel_) {
-    MS_LOG(ERROR) << "Only support input channel equals output channel.";
-    return RET_ERROR;
-  }
   auto ret = Prepare();
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "Prepare failed.";
@@ -44,7 +44,7 @@ class ConvolutionDepthwiseSWInt8CPUKernel : public ConvolutionBaseCPUKernel {
   int ReinitFreeBefore();
   void FreeTmpQuant();
 
-  SlidingWindowParam *sliding = nullptr;
+  SlidingWindowParam *sliding_ = nullptr;
   int16_t *packed_weight_ = nullptr;
   int8_t *packed_input_ = nullptr;
   int8_t *packed_output_ = nullptr;