diff --git a/docs/api/api_python/mindspore.ops.functional.rst b/docs/api/api_python/mindspore.ops.functional.rst
index 3f48e4ebc87..bf6ed086144 100644
--- a/docs/api/api_python/mindspore.ops.functional.rst
+++ b/docs/api/api_python/mindspore.ops.functional.rst
@@ -306,15 +306,18 @@ Array操作
     :nosignatures:
     :template: classtemplate.rst
 
-    mindspore.ops.broadcast_to
     mindspore.ops.adaptive_max_pool2d
+    mindspore.ops.batch_to_space_nd
+    mindspore.ops.broadcast_to
     mindspore.ops.col2im
     mindspore.ops.diag
     mindspore.ops.expand_dims
+    mindspore.ops.flatten
     mindspore.ops.gather
     mindspore.ops.gather_d
     mindspore.ops.gather_elements
     mindspore.ops.gather_nd
+    mindspore.ops.gumbel_softmax
     mindspore.ops.masked_fill
     mindspore.ops.masked_select
     mindspore.ops.matrix_band_part
@@ -326,12 +329,10 @@ Array操作
     mindspore.ops.range
     mindspore.ops.rank
     mindspore.ops.reshape
-    mindspore.ops.flatten
     mindspore.ops.scatter_nd
     mindspore.ops.select
     mindspore.ops.shape
     mindspore.ops.size
-    mindspore.ops.gumbel_softmax
     mindspore.ops.space_to_batch_nd
     mindspore.ops.tensor_scatter_add
     mindspore.ops.tensor_scatter_min
diff --git a/docs/api/api_python/ops/mindspore.ops.BatchToSpaceND.rst b/docs/api/api_python/ops/mindspore.ops.BatchToSpaceND.rst
new file mode 100644
index 00000000000..02d2eaf1974
--- /dev/null
+++ b/docs/api/api_python/ops/mindspore.ops.BatchToSpaceND.rst
@@ -0,0 +1,8 @@
+mindspore.ops.BatchToSpaceND
+============================
+
+.. py:class:: mindspore.ops.BatchToSpaceND(block_shape, crops)
+
+    Divides batch dimension with blocks and interleaves these blocks back into spatial dimensions.
+
+    Refer to :func:`mindspore.ops.batch_to_space_nd` for more detail.
\ No newline at end of file
diff --git a/docs/api/api_python/ops/mindspore.ops.func_batch_to_space_nd.rst b/docs/api/api_python/ops/mindspore.ops.func_batch_to_space_nd.rst
new file mode 100644
index 00000000000..7d78ca1369b
--- /dev/null
+++ b/docs/api/api_python/ops/mindspore.ops.func_batch_to_space_nd.rst
@@ -0,0 +1,39 @@
+mindspore.ops.batch_to_space_nd
+================================
+
+.. py:function:: mindspore.ops.batch_to_space_nd(input_x, block_shape, crops)
+
+    Divides batch dimension with blocks and interleaves these blocks back into spatial dimensions.
+
+    This function divides the batch dimension `N` into blocks of `block_shape`, i.e. the `N` dimension of the output tensor is the corresponding number of blocks after division.
+    The `H` and `W` dimensions of the output tensor are the product of the original `H`, `W` dimensions and `block_shape`, minus the amount to crop from each dimension given by `crops`.
+    Thus, if the input shape is :math:`(n, c, h, w)`, the output shape is :math:`(n', c', h', w')`,
+    where
+
+    :math:`n' = n//(block\_shape[0]*block\_shape[1])`
+
+    :math:`c' = c`
+
+    :math:`h' = h*block\_shape[0]-crops[0][0]-crops[0][1]`
+
+    :math:`w' = w*block\_shape[1]-crops[1][0]-crops[1][1]`
+
+    **Parameters:**
+
+    - **input_x** (Tensor) - The input tensor. It must be greater than or equal to 4-D.
+    - **block_shape** (list[int], tuple[int], int) - The block shape describing how the batch dimension is divided.
+    - **crops** (tuple, list) - The crop size for each spatial dimension.
+
+    **Returns:**
+
+    Tensor, the result after division and rearrangement.
+
+    **Raises:**
+
+    - **TypeError** - If `block_shape` is not a list, tuple or int.
+    - **TypeError** - If `crops` is neither a list nor a tuple.
+    - **ValueError** - If `block_shape` is not one-dimensional when `block_shape` is a list or tuple.
+    - **ValueError** - If the length of `block_shape` or `crops` is not 2.
+    - **ValueError** - If an element of `block_shape` is not an integer larger than 1.
+    - **ValueError** - If the shape of `crops` is not (M, 2), where M is the length of `block_shape`.
+    - **ValueError** - If an element of `crops` is not a non-negative integer.
diff --git a/docs/api/api_python_en/mindspore.ops.functional.rst b/docs/api/api_python_en/mindspore.ops.functional.rst
index 2fa71fde804..82782508a39 100644
--- a/docs/api/api_python_en/mindspore.ops.functional.rst
+++ b/docs/api/api_python_en/mindspore.ops.functional.rst
@@ -305,6 +305,7 @@ Array Operation
     :template: classtemplate.rst
 
     mindspore.ops.adaptive_max_pool2d
+    mindspore.ops.batch_to_space_nd
     mindspore.ops.broadcast_to
     mindspore.ops.col2im
     mindspore.ops.diag
diff --git a/mindspore/ccsrc/pipeline/jit/resource.cc b/mindspore/ccsrc/pipeline/jit/resource.cc
index 99329a10b67..b7356eb0972 100644
--- a/mindspore/ccsrc/pipeline/jit/resource.cc
+++ b/mindspore/ccsrc/pipeline/jit/resource.cc
@@ -221,7 +221,6 @@ BuiltInTypeMap &GetMethodMap() {
       {"argmin", std::string("argmin")},                          // P.Argmax()
       {"resize", std::string("resize")},                          // P.Reshape()
       {"select", std::string("select")},                          // P.Select()
-      {"batch_to_space_nd", std::string("batch_to_space_nd")},    // P.BatchToSpaceND
       {"choose", std::string("choose")},                          // P.Select()
       {"diagonal", std::string("diagonal")},                      // P.Eye()
       {"isclose", std::string("isclose")},                        // P.IsClose()
diff --git a/mindspore/core/ops/batch_to_space_nd.cc b/mindspore/core/ops/batch_to_space_nd.cc
index bfee16f557f..3a02981bce2 100644
--- a/mindspore/core/ops/batch_to_space_nd.cc
+++ b/mindspore/core/ops/batch_to_space_nd.cc
@@ -35,6 +35,8 @@ abstract::ShapePtr BatchToSpaceNDInferShape(const PrimitivePtr &primitive,
   auto shape_map = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape());
   auto input_min_shape = shape_map[kMinShape];
   auto input_max_shape = shape_map[kMaxShape];
+  constexpr size_t len = 4;
+  CheckAndConvertUtils::CheckInteger("input_x rank", SizeToLong(x_shape.size()), kGreaterEqual, len, prim_name);
   auto out_shape = x_shape;
 
   int64_t block_shape_prod = 1;
diff --git a/mindspore/python/mindspore/_extends/parse/standard_method.py b/mindspore/python/mindspore/_extends/parse/standard_method.py
index 28d6034d30d..1400b58b5f7 100644
--- a/mindspore/python/mindspore/_extends/parse/standard_method.py
+++ b/mindspore/python/mindspore/_extends/parse/standard_method.py
@@ -2177,15 +2177,6 @@ def filter_(fun, iter_):
     return result
 
 
-def batch_to_space_nd(x, block_shape, crops):
-    r"""
-    Divides batch dimension with blocks and interleaves these blocks back into spatial dimensions.
-
-    Refer to :func:`mindspore.ops.batch_to_space_nd` for more detail.
-    """
-    return P.BatchToSpaceND(block_shape, crops)(x)
-
-
 ##################
 # Sparse methods #
 ##################
diff --git a/mindspore/python/mindspore/common/tensor.py b/mindspore/python/mindspore/common/tensor.py
index 9753f7136a3..0f248d8ac8a 100644
--- a/mindspore/python/mindspore/common/tensor.py
+++ b/mindspore/python/mindspore/common/tensor.py
@@ -3066,28 +3066,6 @@ class Tensor(Tensor_):
             j = tensor_operator_registry.get('select')(mask, mid, j)
         return j
 
-    def batch_to_space_nd(self, block_shape, crops):
-        """
-        Divides batch dimension with blocks and interleaves these blocks back into spatial dimensions.
-
-        Refer to :func:`mindspore.ops.batch_to_space_nd` for more detail.
-
-        Supported Platforms:
-            ``Ascend`` ``CPU``
-
-        Examples:
-            >>> import numpy as np
-            >>> from mindspore import Tensor
-            >>> block_shape = [2, 2]
-            >>> crops = [[0, 0], [0, 0]]
-            >>> input_x = Tensor(np.array([[[[1]]], [[[2]]], [[[3]]], [[[4]]]]), mindspore.float32)
-            >>> output = input_x.batch_to_space_nd(block_shape, crops)
-            >>> print(output)
-            [[[[1. 2.]
-               [3. 4.]]]]
-        """
-        return tensor_operator_registry.get('batch_to_space_nd')(block_shape, crops)(self)
-
     def one_hot(self, depth, on_value, off_value, axis=-1):
         r"""
         Computes a one-hot tensor.
diff --git a/mindspore/python/mindspore/ops/function/array_func.py b/mindspore/python/mindspore/ops/function/array_func.py
index 9d88387001b..b35b35cca72 100644
--- a/mindspore/python/mindspore/ops/function/array_func.py
+++ b/mindspore/python/mindspore/ops/function/array_func.py
@@ -2238,8 +2238,8 @@ def batch_to_space_nd(input_x, block_shape, crops):
     original H, W dimension and block_shape with given amount to crop from dimension, respectively.
 
     Args:
-        input_x (Tensor) - The input tensor. It must be a 4-D tensor, dimension 0 must be divisible by
-            product of `block_shape`. The data type is float16 or float32.
+        input_x (Tensor): The input tensor. It must be greater than or equal to 4-D (equal to 4-D on Ascend), and the
+            batch dimension must be divisible by the product of `block_shape`. The data type is float16 or float32.
         block_shape (Union[list(int), tuple(int), int]): The block shape of dividing block with all value greater
             than 1. If `block_shape` is a tuple or list, the length of `block_shape` is M corresponding to the
            number of spatial dimensions. If `block_shape` is an int, the block size of M dimensions are the same,
@@ -2266,7 +2266,11 @@ def batch_to_space_nd(input_x, block_shape, crops):
     Raises:
         TypeError: If `block_shape` is not one of list, tuple, int.
         TypeError: If `crops` is neither list nor tuple.
+        ValueError: If `block_shape` is not one dimensional when `block_shape` is a list or tuple.
         ValueError: If length of `block_shape` or `crops` is not equal to 2.
+        ValueError: If the element of `block_shape` is not an integer larger than 1.
+        ValueError: If shape of `crops` is not (M, 2), where M is the length of `block_shape`.
+        ValueError: If the element of `crops` is not an integer larger than or equal to 0.
 
     Supported Platforms:
         ``Ascend`` ``CPU``
 
diff --git a/mindspore/python/mindspore/ops/functional.py b/mindspore/python/mindspore/ops/functional.py
index 5a9dda0c39e..35a7f90a539 100644
--- a/mindspore/python/mindspore/ops/functional.py
+++ b/mindspore/python/mindspore/ops/functional.py
@@ -950,7 +950,6 @@ tensor_operator_registry.register('sum', P.ReduceSum)
 tensor_operator_registry.register('split', P.Split)
 tensor_operator_registry.register('select', P.Select)
 tensor_operator_registry.register('zeros_like', P.ZerosLike)
-tensor_operator_registry.register('batch_to_space_nd', P.BatchToSpaceND)
 tensor_operator_registry.register('one_hot', P.OneHot)
 tensor_operator_registry.register('masked_fill', masked_fill)
 tensor_operator_registry.register('masked_select', masked_select)
diff --git a/mindspore/python/mindspore/ops/operations/array_ops.py b/mindspore/python/mindspore/ops/operations/array_ops.py
index 6d369d6bd95..f782dd7ef81 100755
--- a/mindspore/python/mindspore/ops/operations/array_ops.py
+++ b/mindspore/python/mindspore/ops/operations/array_ops.py
@@ -5833,6 +5833,9 @@ class BatchToSpaceND(Primitive):
     Divides batch dimension with blocks and interleaves these blocks back into spatial dimensions.
 
     Refer to :func:`mindspore.ops.batch_to_space_nd` for more detail.
+
+    Supported Platforms:
+        ``Ascend`` ``CPU``
     """
 
     @prim_attr_register
diff --git a/tests/st/ops/cpu/test_batchtospacend_op.py b/tests/st/ops/cpu/test_batchtospacend_op.py
index b1b67ecac00..5b7a694b274 100644
--- a/tests/st/ops/cpu/test_batchtospacend_op.py
+++ b/tests/st/ops/cpu/test_batchtospacend_op.py
@@ -94,37 +94,6 @@ def test_batch_to_space_nd_function():
     np.testing.assert_array_equal(output.asnumpy(), expect)
 
 
-class BatchToSpaceNDTensorNet(nn.Cell):
-    def __init__(self, block_shape=2):
-        super(BatchToSpaceNDTensorNet, self).__init__()
-        self.block_shape = block_shape
-
-    def construct(self, x):
-        return x.batch_to_space_nd(self.block_shape, [[0, 0], [0, 0]])
-
-
-@pytest.mark.level0
-@pytest.mark.platform_x86_cpu
-@pytest.mark.env_onecard
-def test_batch_to_space_nd_tensor():
-    """
-    Feature: test BatchToSpaceND tensor interface.
-    Description: test tensor interface.
-    Expectation: the result match with numpy result
-    """
-    net = BatchToSpaceNDTensorNet(2)
-    input_x = Tensor(np.arange(4).reshape((4, 1, 1, 1)).astype(np.float32), mindspore.float32)
-    expect = np.array([[[[0, 1],
-                         [2, 3]]]]).astype(np.float32)
-
-    context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
-    output = net(input_x)
-    assert (output.asnumpy() == expect).all()
-    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
-    output = net(input_x)
-    assert (output.asnumpy() == expect).all()
-
-
 class BatchToSpaceNDDynamicShapeNetMS(nn.Cell):
     def __init__(self, block_shape, crops, axis=1):
         super().__init__()
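
For a quick usage reference, a minimal sketch of the functional interface documented above; it assumes a MindSpore installation with one of the listed backends (CPU or Ascend), and the data and expected output follow the example already used in the docstrings and tests here. The shapes match the documented formulas: with input (4, 1, 1, 1), block_shape = [2, 2] and zero crops, the output shape is (4 // (2 * 2), 1, 1 * 2, 1 * 2) = (1, 1, 2, 2).

    >>> import numpy as np
    >>> import mindspore
    >>> from mindspore import Tensor, ops
    >>> # Divide the batch of 4 into 2 x 2 blocks and interleave them back into H and W.
    >>> block_shape = [2, 2]
    >>> crops = [[0, 0], [0, 0]]
    >>> input_x = Tensor(np.array([[[[1]]], [[[2]]], [[[3]]], [[[4]]]]), mindspore.float32)
    >>> output = ops.batch_to_space_nd(input_x, block_shape, crops)
    >>> print(output)
    [[[[1. 2.]
       [3. 4.]]]]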