!35888 remove Pad tensor interface

Merge pull request !35888 from looop5/remove_pad_tensor
i-robot 2022-06-14 06:38:14 +00:00 committed by Gitee
commit a300990322
6 changed files with 2 additions and 159 deletions


@@ -935,28 +935,6 @@ mindspore.Tensor
- **ValueError** - The shape of the current Tensor is not at least 2-D.
- **ValueError** - The shapes of the current Tensor, `lower` and `upper` cannot be broadcast.
.. py:method:: pad(paddings)
Pads the current Tensor according to the parameter `paddings`.
For more details, refer to :func:`mindspore.ops.pad`.
**Parameters:**
- **paddings** (tuple) - The padding size, with shape (N, 2), where N is the rank of the current Tensor; its elements are of int type. For the `D` th dimension of `x`, paddings[D, 0] indicates how much to extend (if the value > 0) or clip (if the value < 0) in front of the current Tensor in the `D` th dimension, and paddings[D, 1] indicates how much to extend (if the value > 0) or clip (if the value < 0) behind the current Tensor in the `D` th dimension.
**Returns:**
The padded Tensor.
**Raises:**
- **TypeError** - `paddings` is not a tuple.
- **TypeError** - `input_x` is not a Tensor.
- **ValueError** - The shape of `paddings` is not :math:`(N, 2)`.
- **ValueError** - The size of `paddings` is not equal to 2 * len(current Tensor).
- **ValueError** - The calculated output shape contains zero or a negative number.
.. py:method:: padding(pad_dim_size=8)
Extends the last dimension of the current Tensor from 1 to pad_dim_size by filling with 0.
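For reference, the behavior documented above remains available through the functional interface the docstring points to. A minimal sketch, assuming `mindspore.ops.pad` keeps accepting the same (N, 2) paddings tuple (the retained tests in this commit call it exactly this way):

import numpy as np
import mindspore
from mindspore import Tensor, ops

x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
paddings = ((1, 2), (2, 1))   # 1 row before / 2 rows after, 2 columns before / 1 column after
out = ops.pad(x, paddings)    # functional replacement for the removed x.pad(paddings)
print(out.shape)              # (5, 6), i.e. (2 + 1 + 2, 3 + 2 + 1)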


@@ -230,7 +230,6 @@ BuiltInTypeMap &GetMethodMap() {
{"inv", std::string("inv")}, // inv()
{"invert", std::string("invert")}, // invert()
{"matrix_band_part", std::string("matrix_band_part")}, // matrix_band_part()
{"pad", std::string("pad")}, // P.Pad
{"padding", std::string("padding")}, // padding()
{"searchsorted", std::string("searchsorted")}, // P.Select()
{"take", std::string("take")}, // P.GatherNd()


@@ -863,14 +863,6 @@ def matrix_band_part(x, lower, upper):
return F.matrix_band_part(x, lower, upper)
def pad(input_x, paddings):
r"""
Pads the input tensor according to the paddings.
Refer to :func:`mindspore.ops.pad` for more detail.
"""
return F.pad(input_x, paddings)
def padding(x, pad_dim_size=8):
"""
Extends the last dimension of the input tensor from 1 to pad_dim_size, by filling with 0.


@@ -1351,86 +1351,6 @@ class Tensor(Tensor_):
self._init_check()
return tensor_operator_registry.get('matrix_band_part')(self, lower, upper)
def pad(self, paddings):
r"""
Pads the current tensor according to the paddings.
Refer to :func:`mindspore.ops.pad` for more detail.
Args:
paddings (tuple): The shape of parameter `paddings` is (N, 2). N is the rank of the current tensor. All
elements of `paddings` are int type. For the `D` th dimension of the input, paddings[D, 0] indicates how
many sizes to be extended (if this value > 0) or clipped (if this value < 0) ahead of the current tensor
in the `D` th dimension, and paddings[D, 1] indicates how many sizes to be extended (if this value > 0)
or clipped (if this value < 0) behind the current tensor in the `D` th dimension.
Returns:
Tensor, the tensor after padding.
Raises:
TypeError: If `paddings` is not a tuple.
ValueError: If shape of `paddings` is not :math:`(N, 2)`.
ValueError: If paddings.size is not equal to 2 * len(current tensor).
ValueError: If the calculated output shape contains zero or negative dimension.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
>>> paddings = ((1, 2), (2, 1))
>>> output = input_x.pad(paddings)
>>> print(output)
[[ 0. 0. 0. 0. 0. 0. ]
[ 0. 0. -0.1 0.3 3.6 0. ]
[ 0. 0. 0.4 0.5 -3.2 0. ]
[ 0. 0. 0. 0. 0. 0. ]
[ 0. 0. 0. 0. 0. 0. ]]
"""
self._init_check()
if not isinstance(paddings, tuple):
raise TypeError(f"For 'Tensor.pad', the type of 'paddings' must be tuple, but got {type(paddings)}.")
for _, pd in enumerate(paddings):
if not isinstance(pd, (list, tuple)) or len(pd) != 2 or not isinstance(pd[0], int) or \
not isinstance(pd[1], int):
raise TypeError(f"For 'Tensor.pad', each element in 'paddings' must be a list or tuple of 2 int, "
f"but got {pd}.")
x_shape = self.shape
if len(x_shape) != len(paddings):
raise ValueError(f"For 'Tensor.pad', the size of paddings must be 2 * {len(x_shape)}, but "
f"got {2 * len(paddings)}")
pad_all_non_negative = True
pad_all_non_positive = True
slice_begin = []
slice_size = []
non_negative_padding = []
for i, pd in enumerate(paddings):
sz = x_shape[i] + pd[0]
if sz <= 0:
raise ValueError(f"For 'Tensor.pad', input_x_shape[{i}] + paddings[{i}, 0] is {sz}, which is <= 0 and "
f"causes the output shape invalid.")
sz = sz + pd[1]
if sz <= 0:
raise ValueError(f"For 'Tensor.pad', input_x_shape[{i}] + paddings[{i}, 0] + paddings[{i}, 1] is {sz}, "
f"which is <= 0 and causes the output shape invalid.")
slice_size.append(sz)
if pd[0] < 0:
slice_begin.append(abs(pd[0]))
else:
slice_begin.append(0)
if pd[0] < 0 or pd[1] < 0:
pad_all_non_negative = False
if pd[0] > 0 or pd[1] > 0:
pad_all_non_positive = False
non_negative_padding.append((max(0, pd[0]), max(0, pd[1])))
if pad_all_non_negative:
return tensor_operator_registry.get("pad")(paddings)(self)
if pad_all_non_positive:
return tensor_operator_registry.get("slice")(self, slice_begin, slice_size)
out = tensor_operator_registry.get("pad")(tuple(non_negative_padding))(self)
return tensor_operator_registry.get("slice")(out, slice_begin, slice_size)
def padding(self, pad_dim_size=8):
r"""
Extends the last dimension of this Tensor from 1 to pad_dim_size, by filling with 0.
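The core of the removed pad method above is the decomposition of mixed positive/negative paddings into a non-negative Pad followed by a Slice. A minimal NumPy sketch of that same decomposition (the helper name is ours, not MindSpore's), checked against the expected values in the deleted test below:

import numpy as np

def pad_then_slice(x, paddings):
    # Non-negative parts of the paddings go to np.pad; negative parts are clipped off afterwards.
    non_negative = [(max(0, a), max(0, b)) for a, b in paddings]
    out = np.pad(x, non_negative, mode="constant", constant_values=0)
    index = []
    for (a, b), size in zip(paddings, x.shape):
        begin = abs(a) if a < 0 else 0                    # clip from the front when a < 0
        index.append(slice(begin, begin + size + a + b))  # final size along this dimension
    return out[tuple(index)]

x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
print(pad_then_slice(x, ((1, -1), (-1, 1))))
# [[0. 0. 0.]
#  [2. 3. 0.]
#  [5. 6. 0.]]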


@@ -66,7 +66,6 @@ tensor_range = P.Range()
if not security.enable_security():
print_ = P.Print()
squeeze = P.Squeeze()
slice_op = P.Slice()
tensor_scatter_update = P.TensorScatterUpdate()
tensor_scatter_max = P.TensorScatterMax()
scatter_nd_update = P.ScatterNdUpdate()
@@ -970,8 +969,6 @@ tensor_operator_registry.register('svd', linalg_ops.Svd)
tensor_operator_registry.register('diag', P.Diag)
tensor_operator_registry.register('unique_consecutive', UniqueConsecutive)
tensor_operator_registry.register('pdist', NN.Pdist)
tensor_operator_registry.register('pad', P.Pad)
tensor_operator_registry.register('slice', slice_op)
tensor_operator_registry.register('inplace_update', P.InplaceUpdate)
tensor_operator_registry.register('inplace_add', P.InplaceAdd)
tensor_operator_registry.register('inplace_sub', P.InplaceSub)


@ -31,15 +31,6 @@ class FuncNet(nn.Cell):
return ops.pad(x, self.paddings)
class TensorNet(nn.Cell):
def __init__(self, paddings):
super(TensorNet, self).__init__()
self.paddings = paddings
def construct(self, x):
return x.pad(self.paddings)
class GradNet(nn.Cell):
def __init__(self, network):
super(GradNet, self).__init__()
@@ -50,11 +41,8 @@ class GradNet(nn.Cell):
return self.grad(self.network)(x)
def run_case(x, paddings, expect, mode="functional"):
if mode == "functional":
net = FuncNet(paddings)
else:
net = TensorNet(paddings)
def run_case(x, paddings, expect):
net = FuncNet(paddings)
out_ms = net(Tensor(x))
assert np.allclose(expect, out_ms.asnumpy())
@@ -114,37 +102,6 @@ def test_pad_function_grad_cpu():
assert np.allclose(expect, out_ms.asnumpy())
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_pad_tensor_cpu():
"""
Feature: test ops.Pad tensor interface.
Description: paddings with different values.
Expectation: the result match with numpy result.
"""
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
# case1: padding values are non-negative
paddings1 = ((1, 1), (1, 1))
expect1 = np.pad(x, paddings1, "constant", constant_values=0).astype(x.dtype)
# case2: padding values are non-positive
paddings2 = ((-1, -1), (0, -1))
expect2 = np.array([[4, 5]], dtype=np.float32)
# case3: paddings with both positive and negative values
paddings3 = ((1, -1), (-1, 1))
expect3 = np.array([[0, 0, 0], [2, 3, 0], [5, 6, 0]], dtype=np.float32)
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
run_case(x, paddings1, expect1, "tensor")
run_case(x, paddings2, expect2, "tensor")
run_case(x, paddings3, expect3, "tensor")
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
run_case(x, paddings1, expect1, "tensor")
run_case(x, paddings2, expect2, "tensor")
run_case(x, paddings3, expect3, "tensor")
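The three cases deleted here only exercised the removed tensor interface. Assuming the functional `ops.pad` accepts the same negative (clipping) paddings its docstring describes, the same coverage could be routed through the remaining run_case, for example:

x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
run_case(x, ((1, 1), (1, 1)), np.pad(x, ((1, 1), (1, 1))))              # non-negative paddings
run_case(x, ((-1, -1), (0, -1)), np.array([[4, 5]], dtype=np.float32))  # non-positive paddings
run_case(x, ((1, -1), (-1, 1)),
         np.array([[0, 0, 0], [2, 3, 0], [5, 6, 0]], dtype=np.float32)) # mixed paddings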
def vmap_case():
class Net(nn.Cell):
def __init__(self, paddings):