!16532 Revert the front end of AvgPool3D op

From: @zuochuanyong
Reviewed-by: @liangchenghui,@wuxuejian
Signed-off-by: @liangchenghui
mindspore-ci-bot 2021-05-19 11:11:17 +08:00 committed by Gitee
commit f45a918741
4 changed files with 1 addition and 552 deletions

View File

@@ -72,7 +72,7 @@ from .nn_ops import (LSTM, SGD, Adam, FusedSparseAdam, FusedSparseLazyAdam, Adam
GetNext, L2Normalize, LayerNorm, L2Loss, CTCLoss, CTCGreedyDecoder,
LogSoftmax, MaxPool3D,
MaxPool, DataFormatDimMap,
- AvgPool, AvgPool3D, Conv2DBackpropInput, ComputeAccidentalHits,
+ AvgPool, Conv2DBackpropInput, ComputeAccidentalHits,
MaxPoolWithArgmax, OneHot, Pad, MirrorPad, Mish, PReLU, ReLU, ReLU6, ReLUV2, HSwish, HSigmoid,
ResizeBilinear, Sigmoid, SeLU,
SigmoidCrossEntropyWithLogits, NLLLoss, BCEWithLogitsLoss,
@@ -317,7 +317,6 @@ __all__ = [
'UpdateState',
'identity',
'AvgPool',
- 'AvgPool3D',
# Back Primitive
'Equal',
'EqualCount',
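Note: together, these two hunks remove `AvgPool3D` from the `from .nn_ops import (...)` list and from `__all__`, so the primitive is no longer exported from `mindspore.ops.operations`. A minimal sketch of the expected effect on user code after the revert (assuming nothing else re-exports the name):

import mindspore.ops.operations as P

# Before the revert this constructed the primitive; after it, the attribute lookup
# is expected to fail with an AttributeError because AvgPool3D is no longer exported.
avg_pool3d = P.AvgPool3D(kernel_size=2, strides=1, pad_mode="valid")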

View File

@@ -1928,104 +1928,6 @@ class AvgPool(_Pool):
def __init__(self, kernel_size=1, strides=1, pad_mode="valid", data_format="NCHW"):
super(AvgPool, self).__init__(kernel_size, strides, pad_mode, data_format)
class AvgPool3D(_Pool):
r"""
Average pooling operation.
Applies a 3D average pooling over an input Tensor which can be regarded as a composition of 3D input planes.
Typically the input is of shape :math:`(N_{in}, C_{in}, D_{in}, H_{in}, W_{in})`, and AvgPool3D outputs
the regional average over the :math:`(D_{in}, H_{in}, W_{in})` dimensions. Given kernel size
:math:`ks = (d_{ker}, h_{ker}, w_{ker})` and stride :math:`s = (s_0, s_1, s_2)`, the operation is as follows.
.. math::
\text{output}(N_i, C_j, d, h, w) = \frac{1}{d_{ker} * h_{ker} * w_{ker}} \sum_{l=0}^{d_{ker}-1}
\sum_{m=0}^{h_{ker}-1} \sum_{n=0}^{w_{ker}-1} \text{input}(N_i, C_j, s_0 \times d + l,
s_1 \times h + m, s_2 \times w + n)
Args:
kernel_size (Union[int, tuple[int]]): The size of the kernel used to take the average value.
Can be a single int that applies to the kernel depth, height and width, or a tuple of
three ints that specify the depth, height and width respectively. Default: 1.
strides (Union[int, tuple[int]]): The distance the kernel moves. Can be a single int that
applies to the movement along depth, height and width, or a tuple of three ints that
specify the strides along depth, height and width respectively. Default: 1.
pad_mode (str): The padding mode, either "same" or "valid", not case sensitive.
Default: "valid".
- same: Adopts the way of completion. The output depth, height and width are
ceil(input / strides) along each dimension, so they match the input when strides is 1.
The total amount of padding along each direction is distributed as evenly as possible to
the front and back, top and bottom, and left and right; any extra padding goes to the
back, bottom and right side.
- valid: Adopts the way of discarding. The largest possible depth, height and width of the
output are returned without padding. Extra pixels are discarded.
data_format (str): The format of input and output data. It should be 'NDHWC' or 'NCDHW'.
Default: 'NCDHW'.
Inputs:
- **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`.
Outputs:
Tensor, with shape :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`.
Raises:
TypeError: If `kernel_size` or `strides` is neither int nor tuple.
ValueError: If `pad_mode` is neither 'valid' nor 'same' (case insensitive).
ValueError: If `data_format` is neither 'NCDHW' nor 'NDHWC'.
ValueError: If `kernel_size` or `strides` is less than 1.
ValueError: If length of shape of `input` is not equal to 5.
Supported Platforms:
``CPU``
Examples:
>>> input = Tensor(np.arange(1 * 2 * 2 * 2 * 3).reshape((1, 2, 2, 2, 3)), mindspore.float32)
>>> avg_pool3d = P.AvgPool3D(kernel_size=2, strides=1, pad_mode="valid")
>>> output = avg_pool3d(input)
>>> print(output)
[[[[[ 5. 6.]]]
[[[17. 18.]]]]]
"""
@prim_attr_register
def __init__(self, kernel_size=1, strides=1, pad_mode="VALID", data_format="NCDHW"):
self.init_prim_io_names(inputs=['x'], outputs=['output'])
validator.check_value_type('kernel_size', kernel_size, [int, tuple], self.name)
validator.check_value_type('strides', strides, [int, tuple], self.name)
validator.check_value_type('pad_mode', pad_mode, [str], self.name)
self.pad_mode = validator.check_string(pad_mode.upper(), ['VALID', 'SAME'], 'pad_mode', self.name)
self.add_prim_attr("pad_mode", self.pad_mode)
self.data_format = validator.check_string(data_format, ['NCDHW'], 'data_format', self.name)
self.kernel_size = _check_3d_int_or_tuple("kernel_size", kernel_size, self.name,
allow_five=False, ret_five=True)
self.add_prim_attr("kernel_size", self.kernel_size)
self.strides = _check_3d_int_or_tuple("strides", strides, self.name, allow_five=False, ret_five=True)
self.add_prim_attr("strides", self.strides)
def infer_shape(self, x_shape):
validator.check_equal_int(len(x_shape), 5, "x rank", self.name)
batch, channel, input_d, input_h, input_w = x_shape
self.add_prim_attr("x_shape", x_shape)
_, _, kernel_d, kernel_h, kernel_w = self.kernel_size
_, _, stride_d, stride_h, stride_w = self.strides
if self.pad_mode == "VALID":
out_d = math.ceil((input_d - (kernel_d - 1)) / stride_d)
out_h = math.ceil((input_h - (kernel_h - 1)) / stride_h)
out_w = math.ceil((input_w - (kernel_w - 1)) / stride_w)
elif self.pad_mode == "SAME":
out_d = math.ceil(input_d / stride_d)
out_h = math.ceil(input_h / stride_h)
out_w = math.ceil(input_w / stride_w)
out_shape = [batch, channel, out_d, out_h, out_w]
_check_shape('output', out_shape, self.name)
return out_shape
def infer_dtype(self, x_dtype):
validator.check_tensor_dtype_valid("x", x_dtype, [mstype.float16, mstype.float32], self.name)
return x_dtype
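Note: the output-shape arithmetic of the removed `infer_shape` can be reproduced with a few lines of plain Python. The sketch below is illustrative only (the helper name and the 3-element kernel/strides tuples are not part of the reverted code, which stores them as 5-element NCDHW tuples); it uses the input shape from the tests below, (2, 3, 2, 3, 4) with kernel (2, 2, 3) and stride 1:

import math

def avg_pool3d_out_shape(x_shape, kernel, strides, pad_mode):
    # Mirrors the removed infer_shape: NCDHW layout, kernel/strides given per (D, H, W).
    batch, channel, in_d, in_h, in_w = x_shape
    k_d, k_h, k_w = kernel
    s_d, s_h, s_w = strides
    if pad_mode == "VALID":
        out_d = math.ceil((in_d - (k_d - 1)) / s_d)
        out_h = math.ceil((in_h - (k_h - 1)) / s_h)
        out_w = math.ceil((in_w - (k_w - 1)) / s_w)
    else:  # "SAME": the output spatial size depends only on the strides
        out_d = math.ceil(in_d / s_d)
        out_h = math.ceil(in_h / s_h)
        out_w = math.ceil(in_w / s_w)
    return [batch, channel, out_d, out_h, out_w]

print(avg_pool3d_out_shape((2, 3, 2, 3, 4), (2, 2, 3), (1, 1, 1), "VALID"))  # [2, 3, 1, 2, 2]
print(avg_pool3d_out_shape((2, 3, 2, 3, 4), (2, 2, 3), (1, 1, 1), "SAME"))   # [2, 3, 2, 3, 4]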
class Conv2DBackpropInput(PrimitiveWithInfer):
"""

View File

@@ -12,13 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from functools import reduce
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
import mindspore.ops.operations as P
from mindspore import Tensor
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
@@ -89,183 +87,6 @@ def test_avgpool_k3s2ps():
assert np.allclose(out.asnumpy(), expect_result)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_avg_pool3d_1():
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
x_shape = (2, 3, 2, 3, 4)
kernel_size = (2, 2, 3)
strides = 1
pad_mode = 'VALID'
x_val = np.arange(reduce(lambda x, y: x * y, x_shape))
x_ms = Tensor(x_val).reshape(x_shape).astype(np.float32)
output_ms = P.AvgPool3D(kernel_size=kernel_size, strides=strides, pad_mode=pad_mode)(x_ms)
expert_result = (np.array([[[[[9, 10],
[13, 14]]],
[[[33, 34],
[37, 38]]],
[[[57, 58],
[61, 62]]]],
[[[[81, 82],
[85, 86]]],
[[[105, 106],
[109, 110]]],
[[[129, 130],
[133, 134]]]]]))
assert (output_ms.asnumpy() == expert_result).all()
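Note: the expected array above can be cross-checked against a naive NumPy sliding-window average. The reference below is a sketch for VALID padding only, with illustrative names that are not part of the test file:

import numpy as np

def avg_pool3d_valid_ref(x, kernel, strides):
    # Naive NCDHW 3D average pooling with VALID padding, for checking expected values.
    n, c, d, h, w = x.shape
    k_d, k_h, k_w = kernel
    s_d, s_h, s_w = strides
    out_d = (d - k_d) // s_d + 1
    out_h = (h - k_h) // s_h + 1
    out_w = (w - k_w) // s_w + 1
    out = np.zeros((n, c, out_d, out_h, out_w), dtype=x.dtype)
    for i in range(out_d):
        for j in range(out_h):
            for k in range(out_w):
                window = x[:, :, i * s_d:i * s_d + k_d,
                           j * s_h:j * s_h + k_h,
                           k * s_w:k * s_w + k_w]
                out[:, :, i, j, k] = window.mean(axis=(2, 3, 4))
    return out

x = np.arange(2 * 3 * 2 * 3 * 4, dtype=np.float32).reshape(2, 3, 2, 3, 4)
# Reproduces the expectation of test_avg_pool3d_1, e.g. 9.0 at index [0, 0, 0, 0, 0]
# because (0 + 1 + 2 + 4 + 5 + 6 + 12 + 13 + 14 + 16 + 17 + 18) / 12 = 9.
print(avg_pool3d_valid_ref(x, (2, 2, 3), (1, 1, 1)))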
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_avg_pool3d_2():
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
x_shape = (2, 3, 2, 3, 4)
kernel_size = 2
strides = 1
pad_mode = 'VALID'
x_val = np.arange(reduce(lambda x, y: x * y, x_shape))
x_ms = Tensor(x_val).reshape(x_shape).astype(np.float32)
output_ms = P.AvgPool3D(kernel_size=kernel_size, strides=strides, pad_mode=pad_mode)(x_ms)
expert_result = (np.array([[[[[8.5, 9.5, 10.5],
[12.5, 13.5, 14.5]]],
[[[32.5, 33.5, 34.5],
[36.5, 37.5, 38.5]]],
[[[56.5, 57.5, 58.5],
[60.5, 61.5, 62.5]]]],
[[[[80.5, 81.5, 82.5],
[84.5, 85.5, 86.5]]],
[[[104.5, 105.5, 106.5],
[108.5, 109.5, 110.5]]],
[[[128.5, 129.5, 130.5],
[132.5, 133.5, 134.5]]]]]))
assert (output_ms.asnumpy() == expert_result).all()
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_avg_pool3d_3():
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
x_shape = (2, 3, 2, 3, 4)
kernel_size = 2
strides = 3
pad_mode = 'VALID'
x_val = np.arange(reduce(lambda x, y: x * y, x_shape))
x_ms = Tensor(x_val).reshape(x_shape).astype(np.float32)
output_ms = P.AvgPool3D(kernel_size=kernel_size, strides=strides, pad_mode=pad_mode)(x_ms)
expert_result = (np.array([[[[[8.5]]],
[[[32.5]]],
[[[56.5]]]],
[[[[80.5]]],
[[[104.5]]],
[[[128.5]]]]]))
assert (output_ms.asnumpy() == expert_result).all()
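Note: with kernel 2 and stride 3 on a (2, 3, 4) spatial volume, VALID pooling leaves a single window per channel: out_d = ceil((2 - 1) / 3) = 1, out_h = ceil((3 - 1) / 3) = 1, out_w = ceil((4 - 1) / 3) = 1. For channel 0 of the first batch that window covers the values 0, 1, 4, 5, 12, 13, 16, 17, whose mean is 68 / 8 = 8.5, the first expected entry above.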
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_avg_pool3d_4():
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
x_shape = (2, 3, 2, 3, 4)
kernel_size = (2, 2, 3)
strides = 1
pad_mode = 'SAME'
x_val = np.arange(reduce(lambda x, y: x * y, x_shape))
x_ms = Tensor(x_val).reshape(x_shape).astype(np.float32)
output_ms = P.AvgPool3D(kernel_size=kernel_size, strides=strides, pad_mode=pad_mode)(x_ms)
expert_result = (np.array([[[[[8.5, 9, 10, 10.5],
[12.5, 13, 14, 14.5],
[14.5, 15, 16, 16.5]],
[[14.5, 15, 16, 16.5],
[18.5, 19, 20, 20.5],
[20.5, 21, 22, 22.5]]],
[[[32.5, 33, 34, 34.5],
[36.5, 37, 38, 38.5],
[38.5, 39, 40, 40.5]],
[[38.5, 39, 40, 40.5],
[42.5, 43, 44, 44.5],
[44.5, 45, 46, 46.5]]],
[[[56.5, 57, 58, 58.5],
[60.5, 61, 62, 62.5],
[62.5, 63, 64, 64.5]],
[[62.5, 63, 64, 64.5],
[66.5, 67, 68, 68.5],
[68.5, 69, 70, 70.5]]]],
[[[[80.5, 81, 82, 82.5],
[84.5, 85, 86, 86.5],
[86.5, 87, 88, 88.5]],
[[86.5, 87, 88, 88.5],
[90.5, 91, 92, 92.5],
[92.5, 93, 94, 94.5]]],
[[[104.5, 105, 106, 106.5],
[108.5, 109, 110, 110.5],
[110.5, 111, 112, 112.5]],
[[110.5, 111, 112, 112.5],
[114.5, 115, 116, 116.5],
[116.5, 117, 118, 118.5]]],
[[[128.5, 129, 130, 130.5],
[132.5, 133, 134, 134.5],
[134.5, 135, 136, 136.5]],
[[134.5, 135, 136, 136.5],
[138.5, 139, 140, 140.5],
[140.5, 141, 142, 142.5]]]]]))
assert (output_ms.asnumpy() == expert_result).all()
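Note: these SAME-mode expectations are consistent with the average being taken only over in-bounds elements, i.e. padded positions contribute neither to the sum nor to the divisor. For the first output entry, the (2, 2, 3) kernel at the corner covers only a 2 x 2 x 2 valid region with values 0, 1, 4, 5, 12, 13, 16, 17, giving 68 / 8 = 8.5 rather than 68 / 12.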
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_avg_pool3d_5():
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
x_shape = (2, 3, 2, 3, 4)
kernel_size = (2, 2, 3)
strides = 1
pad_mode = 'SAME'
x_val = np.arange(reduce(lambda x, y: x * y, x_shape))
x_ms = Tensor(x_val).reshape(x_shape).astype(np.float32)
output_ms = P.AvgPool3D(kernel_size=kernel_size, strides=strides, pad_mode=pad_mode)(x_ms)
expert_result = (np.array([[[[[8.5, 9, 10, 10.5],
[12.5, 13, 14, 14.5],
[14.5, 15, 16, 16.5]],
[[14.5, 15, 16, 16.5],
[18.5, 19, 20, 20.5],
[20.5, 21, 22, 22.5]]],
[[[32.5, 33, 34, 34.5],
[36.5, 37, 38, 38.5],
[38.5, 39, 40, 40.5]],
[[38.5, 39, 40, 40.5],
[42.5, 43, 44, 44.5],
[44.5, 45, 46, 46.5]]],
[[[56.5, 57, 58, 58.5],
[60.5, 61, 62, 62.5],
[62.5, 63, 64, 64.5]],
[[62.5, 63, 64, 64.5],
[66.5, 67, 68, 68.5],
[68.5, 69, 70, 70.5]]]],
[[[[80.5, 81, 82, 82.5],
[84.5, 85, 86, 86.5],
[86.5, 87, 88, 88.5]],
[[86.5, 87, 88, 88.5],
[90.5, 91, 92, 92.5],
[92.5, 93, 94, 94.5]]],
[[[104.5, 105, 106, 106.5],
[108.5, 109, 110, 110.5],
[110.5, 111, 112, 112.5]],
[[110.5, 111, 112, 112.5],
[114.5, 115, 116, 116.5],
[116.5, 117, 118, 118.5]]],
[[[128.5, 129, 130, 130.5],
[132.5, 133, 134, 134.5],
[134.5, 135, 136, 136.5]],
[[134.5, 135, 136, 136.5],
[138.5, 139, 140, 140.5],
[140.5, 141, 142, 142.5]]]]]))
assert (output_ms.asnumpy() == expert_result).all()
if __name__ == '__main__':
test_avgpool_k2s1pv()
test_avgpool_k2s2pv()

View File

@@ -1,273 +0,0 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from functools import reduce
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
import mindspore.ops.operations as P
from mindspore import Tensor
context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_avgpool_k2s1pv():
x = np.arange(1 * 1 * 6 * 6).reshape((1, 1, 6, 6)).astype(np.float32)
net = nn.AvgPool2d(kernel_size=2, stride=1, pad_mode='valid')
out = net(Tensor(x))
print(out)
expect_result = np.array(
[[[[3.5, 4.5, 5.5, 6.5, 7.5],
[9.5, 10.5, 11.5, 12.5, 13.5],
[15.5, 16.5, 17.5, 18.5, 19.5],
[21.5, 22.5, 23.5, 24.5, 25.5],
[27.5, 28.5, 29.5, 30.5, 31.5]]]]
)
assert np.allclose(out.asnumpy(), expect_result)
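Note: each expected entry is the mean of a 2 x 2 window of the 6 x 6 ramp input; for example the top-left output is (0 + 1 + 6 + 7) / 4 = 3.5, and moving the window one column to the right raises every element by 1, hence the mean increases by 1 across each row.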
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_avgpool_k2s2pv():
x = np.arange(1 * 1 * 6 * 6).reshape((1, 1, 6, 6)).astype(np.float32)
net = nn.AvgPool2d(kernel_size=2, stride=2, pad_mode='valid')
out = net(Tensor(x))
print(out)
expect_result = np.array(
[[[[3.5, 5.5, 7.5],
[15.5, 17.5, 19.5],
[27.5, 29.5, 31.5]]]]
)
assert np.allclose(out.asnumpy(), expect_result)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_avgpool_k3s2pv():
x = np.arange(1 * 1 * 6 * 6).reshape((1, 1, 6, 6)).astype(np.float32)
net = nn.AvgPool2d(kernel_size=3, stride=2, pad_mode='valid')
out = net(Tensor(x))
print(out)
expect_result = np.array(
[[[[7., 9.],
[19., 21.]]]]
)
assert np.allclose(out.asnumpy(), expect_result)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_avgpool_k3s2ps():
x = np.arange(1 * 1 * 6 * 6).reshape((1, 1, 6, 6)).astype(np.float32)
net = nn.AvgPool2d(kernel_size=3, stride=2, pad_mode='same')
out = net(Tensor(x))
print(out)
expect_result = np.array(
[[[[7., 9., 10.5],
[19., 21., 22.5],
[28., 30., 31.5]]]]
)
assert np.allclose(out.asnumpy(), expect_result)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_avg_pool3d_1():
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
x_shape = (2, 3, 2, 3, 4)
kernel_size = (2, 2, 3)
strides = 1
pad_mode = 'VALID'
x_val = np.arange(reduce(lambda x, y: x * y, x_shape))
x_ms = Tensor(x_val).reshape(x_shape).astype(np.float32)
output_ms = P.AvgPool3D(kernel_size=kernel_size, strides=strides, pad_mode=pad_mode)(x_ms)
expert_result = (np.array([[[[[9, 10],
[13, 14]]],
[[[33, 34],
[37, 38]]],
[[[57, 58],
[61, 62]]]],
[[[[81, 82],
[85, 86]]],
[[[105, 106],
[109, 110]]],
[[[129, 130],
[133, 134]]]]]))
assert (output_ms.asnumpy() == expert_result).all()
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_avg_pool3d_2():
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
x_shape = (2, 3, 2, 3, 4)
kernel_size = 2
strides = 1
pad_mode = 'VALID'
x_val = np.arange(reduce(lambda x, y: x * y, x_shape))
x_ms = Tensor(x_val).reshape(x_shape).astype(np.float32)
output_ms = P.AvgPool3D(kernel_size=kernel_size, strides=strides, pad_mode=pad_mode)(x_ms)
expert_result = (np.array([[[[[8.5, 9.5, 10.5],
[12.5, 13.5, 14.5]]],
[[[32.5, 33.5, 34.5],
[36.5, 37.5, 38.5]]],
[[[56.5, 57.5, 58.5],
[60.5, 61.5, 62.5]]]],
[[[[80.5, 81.5, 82.5],
[84.5, 85.5, 86.5]]],
[[[104.5, 105.5, 106.5],
[108.5, 109.5, 110.5]]],
[[[128.5, 129.5, 130.5],
[132.5, 133.5, 134.5]]]]]))
assert (output_ms.asnumpy() == expert_result).all()
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_avg_pool3d_3():
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
x_shape = (2, 3, 2, 3, 4)
kernel_size = 2
strides = 3
pad_mode = 'VALID'
x_val = np.arange(reduce(lambda x, y: x * y, x_shape))
x_ms = Tensor(x_val).reshape(x_shape).astype(np.float32)
output_ms = P.AvgPool3D(kernel_size=kernel_size, strides=strides, pad_mode=pad_mode)(x_ms)
expert_result = (np.array([[[[[8.5]]],
[[[32.5]]],
[[[56.5]]]],
[[[[80.5]]],
[[[104.5]]],
[[[128.5]]]]]))
assert (output_ms.asnumpy() == expert_result).all()
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_avg_pool3d_4():
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
x_shape = (2, 3, 2, 3, 4)
kernel_size = (2, 2, 3)
strides = 1
pad_mode = 'SAME'
x_val = np.arange(reduce(lambda x, y: x * y, x_shape))
x_ms = Tensor(x_val).reshape(x_shape).astype(np.float32)
output_ms = P.AvgPool3D(kernel_size=kernel_size, strides=strides, pad_mode=pad_mode)(x_ms)
expert_result = (np.array([[[[[8.5, 9, 10, 10.5],
[12.5, 13, 14, 14.5],
[14.5, 15, 16, 16.5]],
[[14.5, 15, 16, 16.5],
[18.5, 19, 20, 20.5],
[20.5, 21, 22, 22.5]]],
[[[32.5, 33, 34, 34.5],
[36.5, 37, 38, 38.5],
[38.5, 39, 40, 40.5]],
[[38.5, 39, 40, 40.5],
[42.5, 43, 44, 44.5],
[44.5, 45, 46, 46.5]]],
[[[56.5, 57, 58, 58.5],
[60.5, 61, 62, 62.5],
[62.5, 63, 64, 64.5]],
[[62.5, 63, 64, 64.5],
[66.5, 67, 68, 68.5],
[68.5, 69, 70, 70.5]]]],
[[[[80.5, 81, 82, 82.5],
[84.5, 85, 86, 86.5],
[86.5, 87, 88, 88.5]],
[[86.5, 87, 88, 88.5],
[90.5, 91, 92, 92.5],
[92.5, 93, 94, 94.5]]],
[[[104.5, 105, 106, 106.5],
[108.5, 109, 110, 110.5],
[110.5, 111, 112, 112.5]],
[[110.5, 111, 112, 112.5],
[114.5, 115, 116, 116.5],
[116.5, 117, 118, 118.5]]],
[[[128.5, 129, 130, 130.5],
[132.5, 133, 134, 134.5],
[134.5, 135, 136, 136.5]],
[[134.5, 135, 136, 136.5],
[138.5, 139, 140, 140.5],
[140.5, 141, 142, 142.5]]]]]))
assert (output_ms.asnumpy() == expert_result).all()
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_avg_pool3d_5():
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
x_shape = (2, 3, 2, 3, 4)
kernel_size = (2, 2, 3)
strides = 1
pad_mode = 'SAME'
x_val = np.arange(reduce(lambda x, y: x * y, x_shape))
x_ms = Tensor(x_val).reshape(x_shape).astype(np.float32)
output_ms = P.AvgPool3D(kernel_size=kernel_size, strides=strides, pad_mode=pad_mode)(x_ms)
expert_result = (np.array([[[[[8.5, 9, 10, 10.5],
[12.5, 13, 14, 14.5],
[14.5, 15, 16, 16.5]],
[[14.5, 15, 16, 16.5],
[18.5, 19, 20, 20.5],
[20.5, 21, 22, 22.5]]],
[[[32.5, 33, 34, 34.5],
[36.5, 37, 38, 38.5],
[38.5, 39, 40, 40.5]],
[[38.5, 39, 40, 40.5],
[42.5, 43, 44, 44.5],
[44.5, 45, 46, 46.5]]],
[[[56.5, 57, 58, 58.5],
[60.5, 61, 62, 62.5],
[62.5, 63, 64, 64.5]],
[[62.5, 63, 64, 64.5],
[66.5, 67, 68, 68.5],
[68.5, 69, 70, 70.5]]]],
[[[[80.5, 81, 82, 82.5],
[84.5, 85, 86, 86.5],
[86.5, 87, 88, 88.5]],
[[86.5, 87, 88, 88.5],
[90.5, 91, 92, 92.5],
[92.5, 93, 94, 94.5]]],
[[[104.5, 105, 106, 106.5],
[108.5, 109, 110, 110.5],
[110.5, 111, 112, 112.5]],
[[110.5, 111, 112, 112.5],
[114.5, 115, 116, 116.5],
[116.5, 117, 118, 118.5]]],
[[[128.5, 129, 130, 130.5],
[132.5, 133, 134, 134.5],
[134.5, 135, 136, 136.5]],
[[134.5, 135, 136, 136.5],
[138.5, 139, 140, 140.5],
[140.5, 141, 142, 142.5]]]]]))
assert (output_ms.asnumpy() == expert_result).all()
if __name__ == '__main__':
test_avgpool_k2s1pv()
test_avgpool_k2s2pv()
test_avgpool_k3s2pv()
test_avgpool_k3s2ps()