support pad mode for conv2d transpose

yangzhenzhang 2022-02-08 11:31:29 +08:00
parent 982033b6a1
commit 4061c11e18
2 changed files with 77 additions and 24 deletions


@@ -892,9 +892,10 @@ ReplaceGraphPtr Conv2DInfo::replace_graph(const CNodePtr &cnode) {
   int64_t all_send_lens = std::accumulate(send_lens_.begin(), send_lens_.end(), 0);
   int64_t all_recv_lens = std::accumulate(recv_lens_.begin(), recv_lens_.end(), 0);
   if (all_send_lens + all_recv_lens == 0) {
+    int64_t pad_mode = 0; // 0 is "pad" mode
     auto prim = GetValueNode<PrimitivePtr>(cnode->input(0));
     prim->set_attr(OUT_CHANNEL, MakeValue(new_out_channel_));
-    prim->set_attr(PAD_MODE, MakeValue(PAD));
+    prim->set_attr(PAD_MODE, MakeValue(pad_mode)); // need to use int64_t to define pad_mode
     prim->set_attr(PAD, MakeValue(new_pad_list_));
     MS_LOG(INFO) << name_ << ": the send lens and recv lens is 0, no need exchange data";
     return nullptr;
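
With this hunk, PAD_MODE is written as an int64 enum value rather than the string attribute PAD, and the early exit handles the case where the relaxed overlap flags (next hunk) produce zero-length send/recv lists: the attributes are still rewritten for "pad" mode, but no exchange nodes are inserted. A minimal sketch of the string-to-enum mapping this relies on; the values 0 ("pad") and 1 ("same") follow from the pad_mode_ checks in CheckHWStrategy further down, while "valid" = 2 is an assumption not confirmed by this diff:

# Hypothetical sketch of the pad-mode encoding assumed by this commit.
# "pad" = 0 and "same" = 1 match the pad_mode_ checks in CheckHWStrategy;
# "valid" = 2 is an assumption.
PAD_MODE_ENUM = {"pad": 0, "same": 1, "valid": 2}

def pad_mode_to_enum(pad_mode: str) -> int:
    """Map a pad-mode string to the integer value stored on the primitive."""
    try:
        return PAD_MODE_ENUM[pad_mode.lower()]
    except KeyError as err:
        raise ValueError(f"unsupported pad mode: {pad_mode}") from err

assert pad_mode_to_enum("pad") == 0  # the value MakeValue(pad_mode) stores above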
@@ -986,12 +987,12 @@ Status Conv2DBackpropInputInfo::CheckStrategy(const StrategyPtr &strategy) {
     }
   }
-  // kernel size larger than stride and the h/w dimension is split, need to exchange overlap
-  if ((kernel_size_use_dilation_[0] > stride_[2]) && (input_strategy[2] > 1)) {
+  // if the h/w dimension is split, need to exchange overlap
+  if (input_strategy[2] > 1) {
     h_dim_need_exchange_overlap_ = true;
   }
-  if ((kernel_size_use_dilation_[1] > stride_[3]) && (input_strategy[3] > 1)) {
+  if (input_strategy[3] > 1) {
     w_dim_need_exchange_overlap_ = true;
   }
   return SUCCESS;
@@ -1002,21 +1003,11 @@ Status Conv2DBackpropInputInfo::CheckHWStrategy(int64_t h_strategy, int64_t w_strategy) {
     return FAILED;
   }
-  if (pad_mode_ != 1) { // only support same mode
+  if (pad_mode_ != 0 && pad_mode_ != 1) { // only support pad mode and same mode
     MS_LOG(ERROR) << name_ << ": Do not support the pad mode " << pad_mode_ << " when split H or W dimension";
     return FAILED;
   }
-  if (h_strategy > 1 && inputs_shape_[0][2] * stride_[2] != outputs_shape_[0][2]) {
-    MS_LOG(ERROR) << name_ << ": Do not support to split h dimension when in_shape * stride != out_shape";
-    return FAILED;
-  }
-  if (w_strategy > 1 && inputs_shape_[0][3] * stride_[3] != outputs_shape_[0][3]) {
-    MS_LOG(ERROR) << name_ << ": Do not support to split w dimension when in_shape * stride != out_shape";
-    return FAILED;
-  }
   return SUCCESS;
 }
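
The two deleted checks enforced out_shape == in_shape * stride, which is exactly the "same"-mode relationship for a transposed convolution; under "pad" mode the valid output size is instead governed by the forward-conv relation in = floor((out + pad_sum - k) / s) + 1 (Conv2DTranspose is conv2d's backprop-input, so input and output trade places). A small illustrative check, not MindSpore's actual validation code; the parameter values are taken from the new tests below:

def shapes_consistent(out_size, in_size, kernel, stride, pad_sum):
    """A transposed conv producing `out_size` from `in_size` is consistent
    iff a forward conv over `out_size` reproduces `in_size`."""
    return (out_size + pad_sum - kernel) // stride + 1 == in_size

# "same" mode, the only case the old check allowed: out == in * stride
assert shapes_consistent(out_size=16, in_size=8, kernel=2, stride=2, pad_sum=0)
# "pad" mode cases from the new tests: k=5 pad=(1,2); k=3 pad=(0,1); k=1 pad=(0,0)
assert shapes_consistent(16, 8, kernel=5, stride=2, pad_sum=3)
assert shapes_consistent(16, 8, kernel=3, stride=2, pad_sum=1)
assert shapes_consistent(16, 8, kernel=1, stride=2, pad_sum=0)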
@@ -1265,8 +1256,9 @@ void Conv2DBackpropInputInfo::InferNewPadListByDimension(const std::string &dimension) {
   // if (o/n + k - o + ws - s - x) is divisible by s, real_left_pad = s - 1.
   // otherwise, real_left_pad = (o/n + k - o + ws - s - x) % s - 1
   // 3) the middle rank:
-  // if (r*o/n - k + x + 1) is divisible by s, real_left_pad = 0.
-  // otherwise, real_left_pad = s - (r*o/n - k + x + 1) % s
+  // if (r*o/n - k + x + 1) < 0, real_left_pad = -(r*o/n - k + x + 1);
+  // otherwise, if (r*o/n - k + x + 1) is divisible by s, real_left_pad = 0.
+  // otherwise, real_left_pad = s - (r*o/n - k + x + 1) % s
   int64_t current_rank_required_size = 0;
   int64_t real_top_or_left_pad = 0;
   int64_t h_or_w_output_shape = -1;
@@ -1324,7 +1316,9 @@ void Conv2DBackpropInputInfo::InferNewPadListByDimension(const std::string &dimension) {
     int64_t tmp =
       h_or_w_rank_bias * h_or_w_output_shape / h_or_w_dim_shard_num - h_or_w_kernel_size + top_or_left_pad + 1;
-    if (tmp % h_or_w_stride == 0) {
+    if (tmp < 0) {
+      real_top_or_left_pad = -tmp;
+    } else if (tmp % h_or_w_stride == 0) {
       real_top_or_left_pad = 0;
     } else {
       real_top_or_left_pad = h_or_w_stride - tmp % h_or_w_stride;
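
In the middle-rank case the quantity tmp = r*o/n - k + x + 1 (r: rank bias along the dimension, o: full output length, n: shard number, k: kernel size, x: original top/left pad) can now go negative, because "pad" mode permits pads and kernels for which the first contributing position of a slice falls before its boundary; the new branch turns that deficit directly into the real pad. A sketch of the updated rule, with names paraphrased from the comments rather than the actual C++ identifiers:

def middle_rank_real_pad(rank_bias, out_len, shard_num, kernel, stride, left_pad):
    """real_top_or_left_pad for a middle rank, following the updated comment."""
    tmp = rank_bias * out_len // shard_num - kernel + left_pad + 1
    if tmp < 0:
        return -tmp  # new case introduced by this commit
    if tmp % stride == 0:
        return 0  # slice boundary already aligned to the stride grid
    return stride - tmp % stride  # shift up to the next stride-aligned point

# e.g. o=16, n=8, k=5, x=1, s=2 (the k=5 test below) on rank bias 1:
# tmp = 2 - 5 + 1 + 1 = -1, so the real pad is 1
assert middle_rank_real_pad(1, 16, 8, kernel=5, stride=2, left_pad=1) == 1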


@@ -41,11 +41,12 @@ class Net(Cell):
 class Net2(Cell):
-    def __init__(self, conv2d_weight, out_channel, kernel_size, pad_mode, stride, group=1, dilation=1,
+    def __init__(self, conv2d_weight, out_channel, kernel_size, pad_mode, stride, pad=0, group=1, dilation=1,
                  strategy1=None, strategy2=None):
         super().__init__()
         self.conv2d_transpose = P.Conv2DTranspose(out_channel=out_channel, kernel_size=kernel_size, pad_mode=pad_mode,
-                                                  stride=stride, group=group, dilation=dilation).shard(strategy1)
+                                                  stride=stride, pad=pad, group=group,
+                                                  dilation=dilation).shard(strategy1)
         self.neg = P.Neg().shard(strategy2)
         self.weight = Parameter(conv2d_weight, "w1")
@@ -61,6 +62,9 @@ _w2 = Tensor(np.ones([8, 16, 4, 4]), dtype=ms.float32)
 _w3 = Tensor(np.ones([8, 16, 10, 10]), dtype=ms.float32)
 _w4 = Tensor(np.ones([8, 16, 3, 3]), dtype=ms.float32)
 _w5 = Tensor(np.ones([8, 8, 4, 4]), dtype=ms.float32)
+_w6 = Tensor(np.ones([8, 16, 5, 5]), dtype=ms.float32)
+_w7 = Tensor(np.ones([8, 16, 1, 1]), dtype=ms.float32)
+_w8 = Tensor(np.ones([8, 16, 4, 4]), dtype=ms.float32)
 _b = Tensor(np.ones([32, 16, 8, 8]), dtype=ms.float32)
@@ -158,7 +162,7 @@ def test_conv2d_transpose_model_parallel3():
 def test_conv2d_transpose_model_parallel4():
     """
     Feature: test model parallel strategy
-    Description: shard batch dimension, channel dimension and w dimension
+    Description: shard h dimension and w dimension
     Expectation: compile success
     """
     context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
@@ -187,15 +191,14 @@ def test_conv2d_transpose_split_h_or_w_in_pad_mode():
     """
     Feature: test pad mode
     Description: shard batch dimension, channel dimension and w dimension in pad mode
-    Expectation: compile failed
+    Expectation: compile success
     """
     context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
     strategy1 = ((2, 2, 1, 4), (2, 1, 1, 1))
     strategy2 = ((2, 2, 1, 4),)
     net = Net2(_w1, out_channel=8, kernel_size=(2, 2), pad_mode="pad", stride=2,
                strategy1=strategy1, strategy2=strategy2)
-    with pytest.raises(RuntimeError):
-        compile_net(net)
+    compile_net(net)
 
 
 def test_conv2d_transpose_split_h_in_same_mode():
@@ -225,3 +228,59 @@ def test_conv2d_transpose_overlap_size_too_large():
                strategy1=strategy1, strategy2=strategy2)
     with pytest.raises(RuntimeError):
         compile_net(net)
+
+
+def test_conv2d_transpose_pad_mode_no_need_exchange():
+    """
+    Feature: pad mode, no overlap exchange needed, w = 8, o = 16, s = 2, k = 1, n = 8, pad = (0, 0, 0, 0)
+    Description: shard h and w dimension
+    Expectation: compile success
+    """
+    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=64, global_rank=13)
+    strategy1 = ((1, 1, 8, 8), (1, 1, 1, 1))
+    strategy2 = ((8, 1, 1, 1),)
+    net = Net2(_w7, out_channel=8, kernel_size=1, pad_mode="pad", pad=(0, 0, 0, 0), stride=2, strategy1=strategy1,
+               strategy2=strategy2)
+    compile_net(net)
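
Why no exchange is needed here: with k = 1, s = 2 and zero pads, each of the 8 w-shards owns one input column and two output columns, and the single output position an input column produces lies inside its own slice, so all send/recv lens are zero and the early exit added in replace_graph applies. A quick arithmetic check (illustrative only):

s, k, out_per_rank = 2, 1, 16 // 8
for r in range(8):
    i = r  # the one input column rank r owns (w = 8 split 8 ways)
    written = set(range(i * s, i * s + k))  # output columns it produces
    own = set(range(r * out_per_rank, (r + 1) * out_per_rank))
    assert written <= own  # nothing crosses a slice boundary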
+
+
+def test_conv2d_transpose_pad_mode_two_direction_send_all_slice_pad_different():
+    """
+    Feature: pad mode, and two direction send, w = 8, o = 16, s = 2, k = 5, n = 8, pad = (1, 2, 1, 2)
+    Description: shard h and w dimension
+    Expectation: compile success
+    """
+    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=64, global_rank=13)
+    strategy1 = ((1, 1, 8, 8), (1, 1, 1, 1))
+    strategy2 = ((8, 1, 1, 1),)
+    net = Net2(_w6, out_channel=8, kernel_size=5, pad_mode="pad", pad=(1, 2, 1, 2), stride=2, strategy1=strategy1,
+               strategy2=strategy2)
+    compile_net(net)
+
+
+def test_conv2d_transpose_pad_mode_two_direction_send_all_slice():
+    """
+    Feature: pad mode, and two direction send, w = 8, o = 16, s = 2, k = 4, n = 8, pad = (1, 1, 1, 1)
+    Description: shard h and w dimension
+    Expectation: compile success
+    """
+    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=64, global_rank=13)
+    strategy1 = ((1, 1, 8, 8), (1, 1, 1, 1))
+    strategy2 = ((8, 1, 1, 1),)
+    net = Net2(_w8, out_channel=8, kernel_size=4, pad_mode="pad", pad=(1, 1, 1, 1), stride=2, strategy1=strategy1,
+               strategy2=strategy2)
+    compile_net(net)
+
+
+def test_conv2d_transpose_pad_mode_single_direction_send():
+    """
+    Feature: pad mode, and single direction send, w = 8, o = 16, s = 2, k = 3, n = 8, pad = (0, 1, 0, 1)
+    Description: shard h and w dimension
+    Expectation: compile success
+    """
+    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=64, global_rank=13)
+    strategy1 = ((1, 1, 8, 8), (1, 1, 1, 1))
+    strategy2 = ((8, 1, 1, 1),)
+    net = Net2(_w4, out_channel=8, kernel_size=3, pad_mode="pad", pad=(0, 1, 0, 1), stride=2, strategy1=strategy1,
+               strategy2=strategy2)
+    compile_net(net)
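
All four new tests pin global_rank=13 under a (1, 1, 8, 8) strategy on 64 devices. Assuming ranks are laid out row-major over the (h, w) shard grid (an assumption about MindSpore's device arrangement, not stated in this diff), rank 13 sits at h index 1 and w index 5, a middle rank in both dimensions, which is what exercises the two-direction send paths:

def hw_rank_bias(global_rank, h_shards, w_shards):
    """Row-major (h, w) coordinates of a rank in the shard grid (assumed layout)."""
    return (global_rank // w_shards) % h_shards, global_rank % w_shards

assert hw_rank_bias(13, 8, 8) == (1, 5)  # neither first (0) nor last (7) in h or w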