[feat] [assistant] [ops] [I51VS2] Add new operator nn.AdaptiveMaxPool1d

This commit is contained in:
fujianzhao 2022-05-18 15:50:44 +08:00
parent fa3f3c0be7
commit 7fcb75beaa
5 changed files with 132 additions and 1 deletions

View File

@ -171,6 +171,7 @@ Dropout层
:template: classtemplate.rst
mindspore.nn.AdaptiveAvgPool1d
mindspore.nn.AdaptiveMaxPool1d
mindspore.nn.AvgPool1d
mindspore.nn.AvgPool2d
mindspore.nn.MaxPool1d

View File

@ -0,0 +1,33 @@
mindspore.nn.AdaptiveMaxPool1d
==============================
.. py:class:: mindspore.nn.AdaptiveMaxPool1d(output_size)
对输入的多维数据进行一维平面上的自适应最大池化运算。
通常输入的shape为 :math:`(N_{in}, C_{in}, L_{in})` ，AdaptiveMaxPool1d在 :math:`(L_{in})` 维度上输出区域最大值。
输出的shape为 :math:`(N_{in}, C_{in}, L_{out})` ，其中 :math:`L_{out}` 由 `output_size` 定义。
.. note::
:math:`L_{in}` 必须能被 `output_size` 整除。
**参数:**
- **output_size** (int) - 目标输出大小 :math:`L_{out}` 。
**输入:**
- **x** (Tensor) - shape为 :math:`(N, C_{in}, L_{in})` 的Tensor，数据类型为float16、float32或float64。
**输出:**
Tensor，其shape为 :math:`(N, C_{in}, L_{out})` ，数据类型与 `x` 相同。
**异常:**
- **TypeError** - `x` 的数据类型不是float16、float32或float64。
- **TypeError** - `output_size` 不是int。
- **ValueError** - `output_size` 小于1。
- **ValueError** - `x` 的最后一个维度小于 `output_size` 。
- **ValueError** - `x` 的最后一个维度不能被 `output_size` 整除。
- **ValueError** - `x` 的shape长度不等于3。

View File

@ -171,6 +171,7 @@ Pooling Layer
:template: classtemplate.rst
mindspore.nn.AdaptiveAvgPool1d
mindspore.nn.AdaptiveMaxPool1d
mindspore.nn.AvgPool1d
mindspore.nn.AvgPool2d
mindspore.nn.MaxPool1d

View File

@ -21,7 +21,7 @@ import mindspore.context as context
from mindspore.common import dtype as mstype
from ..cell import Cell
__all__ = ['AvgPool2d', 'MaxPool2d', 'AvgPool1d', 'MaxPool1d', 'AdaptiveAvgPool1d']
__all__ = ['AvgPool2d', 'MaxPool2d', 'AvgPool1d', 'MaxPool1d', 'AdaptiveAvgPool1d', 'AdaptiveMaxPool1d']
class _PoolNd(Cell):
@ -497,3 +497,77 @@ class AdaptiveAvgPool1d(Cell):
x = self.squeeze(x)
return x
class AdaptiveMaxPool1d(Cell):
    r"""
    1D adaptive maximum pooling for temporal data.

    Applies a 1D adaptive maximum pooling over an input Tensor which can be regarded as
    a composition of 1D input planes.

    Typically, the input is of shape :math:`(N_{in}, C_{in}, L_{in})`,
    AdaptiveMaxPool1d outputs regional maximum in the :math:`(L_{in})`-dimension. The output is of
    shape :math:`(N_{in}, C_{in}, L_{out})`, where :math:`L_{out}` is defined by `output_size`.

    Note:
        :math:`L_{in}` must be divisible by `output_size`.

    Args:
        output_size (int): the target output size :math:`L_{out}`.

    Inputs:
        - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, L_{in})`, with float16, float32 or float64 data type.

    Outputs:
        Tensor of shape :math:`(N, C_{in}, L_{out})`, has the same type as `x`.

    Raises:
        TypeError: If `x` is neither float16 nor float32 nor float64.
        TypeError: If `output_size` is not an int.
        ValueError: If `output_size` is less than 1.
        ValueError: If the last dimension of `x` is smaller than `output_size`.
        ValueError: If the last dimension of `x` is not divisible by `output_size`.
        ValueError: If length of shape of `x` is not equal to 3.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> pool = nn.AdaptiveMaxPool1d(output_size=3)
        >>> x = Tensor(np.random.randint(0, 10, [1, 3, 6]), mindspore.float32)
        >>> output = pool(x)
        >>> result = output.shape
        >>> print(result)
        (1, 3, 3)
    """

    def __init__(self, output_size):
        """Initialize AdaptiveMaxPool1d."""
        super(AdaptiveMaxPool1d, self).__init__()
        # Check the type before the range so a non-int `output_size` raises
        # TypeError first, matching the documented Raises order.
        validator.check_value_type('output_size', output_size, [int], self.cls_name)
        validator.check_int(output_size, 1, Rel.GE, "output_size", self.cls_name)
        self.expand = P.ExpandDims()
        self.squeeze = P.Squeeze(2)
        self.output_size = output_size
        self.shape = F.shape
        self.dtype = P.DType()

    def construct(self, x):
        _adaptive_shape_check(self.shape(x), self.output_size, self.cls_name)
        _adaptive_dtype_check(self.dtype(x), self.cls_name)

        _, _, width = self.shape(x)
        # Non-overlapping windows: the stride tiles the input evenly and the
        # kernel absorbs the remainder so the last window ends exactly at `width`.
        stride = width // self.output_size
        kernel_size = width - (self.output_size - 1) * stride

        # MaxPool is a 2D op, so pool over a dummy height axis of size 1:
        # expand (N, C, L) -> (N, C, 1, L), pool, then squeeze the axis back out.
        max_pool = P.MaxPool(kernel_size=(1, kernel_size), strides=(1, stride))
        x = self.expand(x, 2)
        x = max_pool(x)
        x = self.squeeze(x)
        return x

View File

@ -98,3 +98,25 @@ def test_adaptive_avg_pool_1d():
net = AdaptiveAvgPool1dNet(2)
input_ = Tensor(np.random.randint(0, 255, [1, 3, 6]).astype(np.float32))
_cell_graph_executor.compile(net, input_)
class AdaptiveMaxPool1dNet(nn.Cell):
    """Wrapper cell that forwards its input through nn.AdaptiveMaxPool1d."""

    def __init__(self, output_size):
        super(AdaptiveMaxPool1dNet, self).__init__()
        # Hold the layer under test as a sub-cell.
        self.pool = nn.AdaptiveMaxPool1d(output_size)

    def construct(self, x):
        return self.pool(x)
def test_adaptive_max_pool_1d():
    """
    Feature: Test AdaptiveMaxPool1d.
    Description: Test AdaptiveMaxPool1d functional.
    Expectation: Success.
    """
    x = Tensor(np.random.randint(0, 255, [1, 3, 6]).astype(np.float32))
    _cell_graph_executor.compile(AdaptiveMaxPool1dNet(2), x)