!6743 pad int8 support mirror pad.

Merge pull request !6743 from zhaodezan/master
This commit is contained in:
mindspore-ci-bot 2020-09-23 09:54:42 +08:00 committed by Gitee
commit 5ebdaaca26
4 changed files with 193 additions and 5 deletions

View File

@ -32,3 +32,40 @@ int PadConstant4D(const int8_t *in_data, int8_t *out_data, const int32_t *in_dim
}
return NNACL_OK;
}
// Map one output coordinate to its mirrored input coordinate along a single
// axis. `offset` selects the mirror flavor: 1 for REFLECT (edge element not
// repeated), 0 for SYMMETRIC (edge element repeated).
int TransOut2InputDimIndexInt8(int out_dim_index, int left_pad, int in_dim, int offset) {
  if (out_dim_index < left_pad) {
    // Left padding region: reflect back into the input, clamped at `offset`.
    const int mirrored = left_pad + offset - 1 - out_dim_index;
    return mirrored > offset ? mirrored : offset;
  }
  const int shifted = out_dim_index - left_pad;
  if (shifted < in_dim) {
    // Interior region: identity mapping.
    return shifted;
  }
  // Right padding region: reflect from the right edge, clamped at 0.
  const int mirrored = in_dim - 1 - offset - (shifted - in_dim);
  return mirrored > 0 ? mirrored : 0;
}
// Convert a flat output offset into the flat input offset it mirrors, by
// decomposing the offset into per-axis coordinates with the output strides,
// mirroring each coordinate, and recomposing with the input strides.
int GetInputFlattenIndexInt8(int out_flatten_index, const int *input_shape, const PadParameter *pad_param) {
  int in_flatten_index = 0;
  int remainder = out_flatten_index;
  for (int dim = 0; dim < DEFAULT_PAD_NDIMS; ++dim) {
    const int out_stride = pad_param->out_strides[dim];
    const int out_dim_index = remainder / out_stride;
    remainder -= out_dim_index * out_stride;
    const int in_dim_index = TransOut2InputDimIndexInt8(out_dim_index, pad_param->paddings_[dim * 2],
                                                        input_shape[dim], pad_param->mirror_offset_);
    in_flatten_index += in_dim_index * pad_param->in_strides[dim];
  }
  return in_flatten_index;
}
// Fill output elements [begin, end) by copying, for each flat output index,
// the input element it mirrors. The [begin, end) split lets callers shard the
// work across threads.
void MirrorPadInt8(const int8_t *input_data, int8_t *output_data, const int *input_shape, const PadParameter *pad_param,
                   int begin, int end) {
  for (int idx = begin; idx < end; ++idx) {
    const int src = GetInputFlattenIndexInt8(idx, input_shape, pad_param);
    output_data[idx] = input_data[src];
  }
}

View File

@ -26,6 +26,8 @@ extern "C" {
#endif
int PadConstant4D(const int8_t *in_data, int8_t *out_data, const int32_t *in_dims, const int32_t *out_dims,
const int32_t *paddings, const int tid, const int thread_num);
void MirrorPadInt8(const int8_t *input_data, int8_t *output_data, const int *input_shape, const PadParameter *pad_param,
int begin, int end);
#ifdef __cplusplus
}
#endif

View File

@ -15,12 +15,23 @@
*/
#include "src/runtime/kernel/arm/int8/pad_int8.h"
#include <string>
#include "include/errorcode.h"
#include "nnacl/errorcode.h"
#include "nnacl/int8/pad.h"
#include "src/runtime/runtime_api.h"
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_MEMORY_FAILED;
using mindspore::lite::RET_OK;
using mindspore::lite::RET_NULL_PTR;
namespace mindspore::kernel {
namespace {
constexpr size_t kMirrorPadInputSize = 2;
}
void PadInt8CPUKernel::FreeQuantParam() {
if (pad_param_->pad_quant_arg_.in_quant_args_ != nullptr) {
free(pad_param_->pad_quant_arg_.in_quant_args_);
@ -122,6 +133,123 @@ int PadInt8Impl(void *cdata, int task_id) {
return RET_OK;
}
// Prepare mirror-pad state: read padding amounts from the second input
// tensor, validate them against the input shape, and precompute the
// input/output strides used by the flat-index mapping.
// Returns RET_OK, or the error code of the failing step.
int PadInt8CPUKernel::HandleMirrorPad() {
  auto ret = CopyPaddingFromInput();
  if (ret != RET_OK) {
    return ret;
  }
  ret = CheckPaddings(pad_param_->paddings_, DEFAULT_PAD_NDIMS, in_dims_, pad_param_->pad_mode_);
  if (ret != RET_OK) {
    return ret;
  }
  CalculateStrides();
  // REFLECT skips the edge element when mirroring (offset 1); SYMMETRIC
  // repeats it (offset 0).
  pad_param_->mirror_offset_ = pad_param_->pad_mode_ == static_cast<int>(schema::PaddingMode_REFLECT) ? 1 : 0;
  return RET_OK;
}
// Compute the padded output dims and the row-major strides of both the input
// and output tensors, caching them in pad_param_ for the flat-index mapping.
void PadInt8CPUKernel::CalculateStrides() {
  // Output dim = input dim plus left and right padding on that axis.
  for (int axis = 0; axis < DEFAULT_PAD_NDIMS; ++axis) {
    out_dims_[axis] = in_dims_[axis] + pad_param_->paddings_[axis * 2] + pad_param_->paddings_[axis * 2 + 1];
  }
  // Row-major strides: innermost axis has stride 1, outer axes accumulate.
  pad_param_->in_strides[DEFAULT_PAD_NDIMS - 1] = 1;
  pad_param_->out_strides[DEFAULT_PAD_NDIMS - 1] = 1;
  for (int axis = DEFAULT_PAD_NDIMS - 2; axis >= 0; --axis) {
    pad_param_->in_strides[axis] = in_dims_[axis + 1] * pad_param_->in_strides[axis + 1];
    pad_param_->out_strides[axis] = out_dims_[axis + 1] * pad_param_->out_strides[axis + 1];
  }
}
int PadInt8CPUKernel::ExtendPaddings(int *paddings, int length, const int *ori_paddings, int ori_length) {
if (paddings == nullptr || ori_paddings == nullptr) {
return RET_NULL_PTR;
}
for (auto i = 0; i < length - ori_length; ++i) {
paddings[i] = 0;
}
for (auto i = length - ori_length; i < length; ++i) {
paddings[i] = ori_paddings[i - (length - ori_length)];
}
return RET_OK;
}
int PadInt8CPUKernel::RunMirrorPadImpl(int task_id) {
auto input = in_tensors_.at(0);
auto output = out_tensors_.at(0);
auto input_data = reinterpret_cast<int8_t *>(input->MutableData());
auto output_data = reinterpret_cast<int8_t *>(output->MutableData());
int unit = UP_DIV(output->ElementsNum(), context_->thread_num_);
int begin = unit * task_id;
int end = MSMIN(begin + unit, output->ElementsNum());
MirrorPadInt8(input_data, output_data, in_dims_, pad_param_, begin, end);
return RET_OK;
}
// Thread-pool trampoline: forwards a parallel task to the kernel instance.
int MirrorPadImplInt8(void *cdata, int task_id) {
  auto pad_kernel = reinterpret_cast<PadInt8CPUKernel *>(cdata);
  auto error_code = pad_kernel->RunMirrorPadImpl(task_id);
  // RunMirrorPadImpl returns lite error codes, so compare against RET_OK
  // rather than NNACL_OK (a different error-code domain that only happens to
  // share the value 0).
  if (error_code != RET_OK) {
    MS_LOG(ERROR) << "Pad Run error task_id[" << task_id << "] error_code[" << error_code << "]";
    return RET_ERROR;
  }
  return RET_OK;
}
int PadInt8CPUKernel::CheckPaddings(int *paddings, int length, int *input_shape, int mode) {
if (paddings == nullptr || input_shape == nullptr) {
return RET_NULL_PTR;
}
std::string prefix;
int offset;
if (mode == static_cast<int>(schema::PaddingMode_SYMMETRIC)) {
prefix = "For Pad SYMMETRIC ";
offset = 0;
} else {
prefix = "For Pad REFLECT ";
offset = 1;
}
for (auto i = 0; i < length; ++i) {
int max_valid = input_shape[i] - offset;
if (paddings[i * 2] > max_valid) {
MS_LOG(ERROR) << prefix << "paddings " << paddings[i * 2] << "should be less than " << max_valid + 1;
return RET_ERROR;
}
if (paddings[i * 2 + 1] > max_valid) {
MS_LOG(ERROR) << prefix << "paddings " << paddings[i * 2 + 1] << "should be less than " << max_valid + 1;
return RET_ERROR;
}
}
return RET_OK;
}
int PadInt8CPUKernel::CopyPaddingFromInput() {
if (in_tensors_.size() != kMirrorPadInputSize) {
MS_LOG(ERROR) << "Pad Reflect or Symmetric mode need 2 inputs, got " << in_tensors_.size();
return RET_ERROR;
}
auto padding_tensor = in_tensors_.at(1);
auto paddings = reinterpret_cast<int *>(padding_tensor->MutableData());
if (paddings == nullptr) {
MS_LOG(ERROR) << "Pad second input data nullptr";
return RET_ERROR;
}
auto input_shape = in_tensors_.at(0)->shape();
int rank = static_cast<int>(input_shape.size());
if (padding_tensor->ElementsNum() != rank * 2) {
MS_LOG(ERROR) << "Pad second input elements num" << padding_tensor->ElementsNum() << ", should be " << rank * 2;
return RET_ERROR;
}
auto ret = ExtendPaddings(pad_param_->paddings_, MAX_PAD_SIZE, paddings, padding_tensor->ElementsNum());
if (ret != RET_OK) {
return ret;
}
pad_param_->padding_length = MAX_PAD_SIZE;
return RET_OK;
}
int PadInt8CPUKernel::Run() {
auto ret = Prepare();
if (ret != RET_OK) {
@ -131,12 +259,26 @@ int PadInt8CPUKernel::Run() {
in_data_ = reinterpret_cast<int8_t *>(in_tensors_[0]->MutableData());
out_data_ = reinterpret_cast<int8_t *>(out_tensors_[0]->MutableData());
memset(out_data_, pad_param_->pad_quant_arg_.constant_value_[0], out_tensors_[0]->ElementsNum() * sizeof(int8_t));
int error_code = ParallelLaunch(this->context_->thread_pool_, PadInt8Impl, this, context_->thread_num_);
if (error_code != RET_OK) {
MS_LOG(ERROR) << "Resize run error, error_code[" << error_code << "]";
return RET_ERROR;
int error_code;
if (pad_param_->pad_mode_ == static_cast<int>(schema::PaddingMode_CONSTANT)) {
memset(out_data_, pad_param_->pad_quant_arg_.constant_value_[0], out_tensors_[0]->ElementsNum() * sizeof(int8_t));
error_code = ParallelLaunch(this->context_->thread_pool_, PadInt8Impl, this, context_->thread_num_);
if (error_code != RET_OK) {
MS_LOG(ERROR) << "Resize run error, error_code[" << error_code << "]";
return RET_ERROR;
}
} else {
// mirror pad case
HandleMirrorPad();
error_code = ParallelLaunch(this->context_->thread_pool_, MirrorPadImplInt8, this, context_->thread_num_);
if (error_code != RET_OK) {
MS_LOG(ERROR) << "Pad Reflect or Symmetric mode run error, error_code[" << error_code << "]";
return RET_ERROR;
}
}
return RET_OK;
}
} // namespace mindspore::kernel

View File

@ -39,6 +39,7 @@ class PadInt8CPUKernel : public LiteKernel {
int ReSize() override;
int Run() override;
int RunImpl(int task_id);
int RunMirrorPadImpl(int task_id);
private:
int SetQuantParam();
@ -46,6 +47,12 @@ class PadInt8CPUKernel : public LiteKernel {
void FreeQuantParam();
private:
int HandleMirrorPad();
int CheckPaddings(int *paddings, int length, int *input_shape, int mode);
int CopyPaddingFromInput();
void CalculateStrides();
int ExtendPaddings(int *paddings, int length, const int *ori_paddings, int ori_length);
PadParameter *pad_param_ = nullptr;
int8_t *in_data_ = nullptr;
int8_t *out_data_ = nullptr;