!11749 Add dilation support: allow dilation values greater than one in SAME padding mode for Conv2d.

From: @huangbo77
Reviewed-by: @wuxuejian,@kisnwang
Signed-off-by: @wuxuejian
This commit is contained in:
mindspore-ci-bot 2021-01-29 09:20:17 +08:00 committed by Gitee
commit e2c3c190eb
8 changed files with 18 additions and 17 deletions

View File

@@ -61,12 +61,13 @@ void Conv2dCPUKernel::InitKernel(const CNodePtr &kernel_node) {
}
std::vector<int> stride{stride_ori[2], stride_ori[3]};
std::vector<int> dilation{dilation_ori[2], dilation_ori[3]};
dnnl::memory::dims strides{stride_ori[2], stride_ori[3]};
dnnl::memory::dims dilates{dilation_ori[2] - 1, dilation_ori[3] - 1};
std::vector<int> int_padding_l;
std::vector<int> int_padding_r;
const std::string pad_mode = AnfAlgo::GetNodeAttr<std::string>(kernel_node, PAD_MODE);
GetPadding(kernel_node, pad_mode, src_shape, kernel_size, stride, &int_padding_l, &int_padding_r);
GetPadding(kernel_node, pad_mode, src_shape, kernel_size, stride, &int_padding_l, &int_padding_r, dilation);
if (int_padding_l.size() != 2 || int_padding_r.size() != 2) {
MS_LOG(EXCEPTION) << "get padding failed";
}

View File

@@ -58,12 +58,13 @@ void Conv2dGradFilterCPUKernel::InitKernel(const CNodePtr &kernel_node) {
MS_LOG(EXCEPTION) << "Conv2dGradFilterCPUKernel dilation only support 1 in N axis and C axis!";
}
std::vector<int> stride{stride_ori[0], stride_ori[1]};
std::vector<int> dilation{dilation_ori[2], dilation_ori[3]};
dnnl::memory::dims strides{stride_ori[0], stride_ori[1]};
dnnl::memory::dims dilates{dilation_ori[2] - 1, dilation_ori[3] - 1};
const std::string pad_mode = AnfAlgo::GetNodeAttr<std::string>(kernel_node, PAD_MODE);
std::vector<int> int_padding_l;
std::vector<int> int_padding_r;
GetPadding(kernel_node, pad_mode, src_shape, kernel_size, stride, &int_padding_l, &int_padding_r);
GetPadding(kernel_node, pad_mode, src_shape, kernel_size, stride, &int_padding_l, &int_padding_r, dilation);
if (int_padding_l.size() != 2 || int_padding_r.size() != 2) {
MS_LOG(EXCEPTION) << "get padding failed";
}

View File

@@ -60,13 +60,13 @@ void Conv2dGradInputCPUKernel::InitKernel(const CNodePtr &kernel_node) {
}
std::vector<int> stride{stride_ori[0], stride_ori[1]};
std::vector<int> dilation{dilation_ori[2], dilation_ori[3]};
dnnl::memory::dims strides{stride_ori[0], stride_ori[1]};
dnnl::memory::dims dilates{dilation_ori[2] - 1, dilation_ori[3] - 1};
std::vector<int> int_padding_l;
std::vector<int> int_padding_r;
const std::string pad_mode = AnfAlgo::GetNodeAttr<std::string>(kernel_node, PAD_MODE);
GetPadding(kernel_node, pad_mode, src_shape, kernel_size, stride, &int_padding_l, &int_padding_r);
GetPadding(kernel_node, pad_mode, src_shape, kernel_size, stride, &int_padding_l, &int_padding_r, dilation);
if (int_padding_l.size() != 2 || int_padding_r.size() != 2) {
MS_LOG(EXCEPTION) << "conv2d grad get padding failed";
}

View File

@@ -24,8 +24,8 @@ namespace mindspore {
namespace kernel {
void MKLCPUKernel::GetPadding(const CNodePtr &kernel_node, const std::string &pad_mode,
const std::vector<size_t> &src_shape, const std::vector<size_t> &kernel_size,
const std::vector<int> &stride, std::vector<int> *padding_l,
std::vector<int> *padding_r) {
const std::vector<int> &stride, std::vector<int> *padding_l, std::vector<int> *padding_r,
const std::vector<int> &dilation) {
MS_EXCEPTION_IF_NULL(kernel_node);
if (src_shape.size() < 2) {
MS_LOG(EXCEPTION) << "set pad only support src dim >= 2!";
@@ -38,13 +38,9 @@ void MKLCPUKernel::GetPadding(const CNodePtr &kernel_node, const std::string &pa
if (pad_mode == PAD_MODE_LOWER_SAME || pad_mode == PAD_MODE_UPPER_SAME) {
for (size_t i = 0; i < weight_height.size(); ++i) {
auto wh = weight_height[i];
int re = wh % stride[i];
int pad_along;
if (re == 0) {
pad_along = std::max(SizeToInt(kernel_size[i]) - stride[i], 0);
} else {
pad_along = std::max(SizeToInt(kernel_size[i]) - re, 0);
}
int out = (wh + stride[i] - 1) / stride[i];
int effective_k = (SizeToInt(kernel_size[i]) - 1) * dilation[i] + 1;
int pad_along = std::max(0, (out - 1) * stride[i] + effective_k - wh);
int pad = pad_along / 2;
padding_l->emplace_back(pad);
padding_r->emplace_back(pad_along - pad);

View File

@@ -36,7 +36,7 @@ class MKLCPUKernel : public CPUKernel {
std::vector<size_t> *dst_shape);
void GetPadding(const CNodePtr &kernel_node, const std::string &pad_mode, const std::vector<size_t> &src_shape,
const std::vector<size_t> &kernel_size, const std::vector<int> &stride, std::vector<int> *padding_l,
std::vector<int> *padding_r);
std::vector<int> *padding_r, const std::vector<int> &dilation);
void AddArgument(int arg_key, const dnnl::memory::desc &mem_desc, bool alloc = false);
void SetArgumentHandle(int arg_key, void *ptr);
dnnl::memory::format_tag GetDefaultFormatTag(const dnnl::memory::dims &dims) const;

View File

@@ -47,7 +47,8 @@ void AvgPoolingGradCPUKernel::InitKernel(const CNodePtr &kernel_node) {
std::vector<int> int_padding_l;
std::vector<int> int_padding_r;
std::vector<size_t> kernel_size({IntToSize(origin_kernel_sizes[2]), IntToSize(origin_kernel_sizes[3])});
GetPadding(kernel_node, pad_mode, src_shape, kernel_size, stride, &int_padding_l, &int_padding_r);
std::vector<int> dummy_dilation{1, 1};
GetPadding(kernel_node, pad_mode, src_shape, kernel_size, stride, &int_padding_l, &int_padding_r, dummy_dilation);
if (int_padding_l.size() != 2 || int_padding_r.size() != 2) {
MS_LOG(EXCEPTION) << "Pooling avg get padding failed";
}

View File

@ -46,7 +46,8 @@ void PoolingCPUKernel::InitKernel(const CNodePtr &kernel_node) {
std::vector<int> int_padding_l;
std::vector<int> int_padding_r;
std::vector<size_t> kernel_size({IntToSize(origin_kernel_sizes[2]), IntToSize(origin_kernel_sizes[3])});
GetPadding(kernel_node, pad_mode, src_shape, kernel_size, stride, &int_padding_l, &int_padding_r);
std::vector<int> dummy_dilation{1, 1};
GetPadding(kernel_node, pad_mode, src_shape, kernel_size, stride, &int_padding_l, &int_padding_r, dummy_dilation);
if (int_padding_l.size() != 2 || int_padding_r.size() != 2) {
MS_LOG(EXCEPTION) << "pooling get padding failed";
}

View File

@@ -43,7 +43,8 @@ void MaxPoolingGradCPUKernel::InitKernel(const CNodePtr &kernel_node) {
kernel_size_ = {IntToSize(kernel_sizes[2]), IntToSize(kernel_sizes[3])};
stride_.push_back(strides[2]);
stride_.push_back(strides[3]);
GetPadding(kernel_node, pad_mode, src_shape_, kernel_size_, stride_, &padding_l_, &padding_r);
std::vector<int> dummy_dilation{1, 1};
GetPadding(kernel_node, pad_mode, src_shape_, kernel_size_, stride_, &padding_l_, &padding_r, dummy_dilation);
}
void MaxPoolingGradCPUKernel::RowPoolingGrad(const float *input, float *output, float diff,