diff --git a/docs/api/api_python/mindspore.ops.rst b/docs/api/api_python/mindspore.ops.rst
index 4c3eef1f3c6..31735d5d64a 100644
--- a/docs/api/api_python/mindspore.ops.rst
+++ b/docs/api/api_python/mindspore.ops.rst
@@ -297,8 +297,10 @@ Reduction函数
     :nosignatures:
     :template: classtemplate.rst
 
+    mindspore.ops.all
     mindspore.ops.amax
     mindspore.ops.amin
+    mindspore.ops.any
     mindspore.ops.argmax
     mindspore.ops.argmin
     mindspore.ops.cummax
diff --git a/docs/api/api_python/ops/mindspore.ops.func_all.rst b/docs/api/api_python/ops/mindspore.ops.func_all.rst
new file mode 100644
index 00000000000..48915f2b0b2
--- /dev/null
+++ b/docs/api/api_python/ops/mindspore.ops.func_all.rst
@@ -0,0 +1,23 @@
+mindspore.ops.all
+=================
+
+.. py:function:: mindspore.ops.all(x, axis=(), keep_dims=False)
+
+    默认情况下,通过对维度中所有元素进行“逻辑与”来减少 `x` 的维度。也可以沿轴减少 `x` 的维度。通过控制 `keep_dims` 来确定输出和输入的维度是否相同。
+
+    参数:
+        - **x** (Tensor[bool]) - 输入Tensor,其数据类型为bool型。shape是 :math:`(N, *)` ,其中 :math:`*` 表示任意数量的附加维度。秩应小于8。
+        - **axis** (Union[int, tuple(int), list(int)], 可选) - 要减少的维度。只允许常量值。假设 `x` 的秩为r,取值范围[-r,r)。默认值:(),缩小所有维度。
+        - **keep_dims** (bool, 可选) - 如果为True,则保留缩小的维度,大小为1。否则移除维度。默认值:False。
+
+    返回:
+        Tensor,数据类型是bool。
+
+        - 如果 `axis` 为(),且 `keep_dims` 为False,则输出一个零维Tensor,表示输入Tensor中所有元素进行“逻辑与”。
+        - 如果 `axis` 为int,取值为2,并且 `keep_dims` 为False,则输出的shape为 :math:`(x_1, x_3, ..., x_R)` 。
+        - 如果 `axis` 为tuple(int)或list(int),取值为(2, 3),并且 `keep_dims` 为False,则输出Tensor的shape为 :math:`(x_1, x_4, ..., x_R)` 。
+
+    异常:
+        - **TypeError** - `keep_dims` 不是bool类型。
+        - **TypeError** - `x` 不是Tensor。
+        - **TypeError** - `axis` 不是以下数据类型之一:int、tuple或list。
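For quick reference, a minimal usage sketch of the new functional interface documented above (illustrative values, not part of the patch):

    import mindspore as ms
    from mindspore import Tensor, ops

    x = Tensor([[True, False], [True, True]], ms.bool_)
    ops.all(x)                          # reduce over all dimensions -> False
    ops.all(x, axis=1)                  # row-wise "logical AND" -> [False, True]
    ops.all(x, axis=1, keep_dims=True)  # keeps the reduced axis, output shape (2, 1)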
diff --git a/docs/api/api_python/ops/mindspore.ops.func_any.rst b/docs/api/api_python/ops/mindspore.ops.func_any.rst
new file mode 100644
index 00000000000..258a75c6dff
--- /dev/null
+++ b/docs/api/api_python/ops/mindspore.ops.func_any.rst
@@ -0,0 +1,23 @@
+mindspore.ops.any
+=================
+
+.. py:function:: mindspore.ops.any(x, axis=(), keep_dims=False)
+
+    默认情况下,通过对维度中所有元素进行“逻辑或”来减少 `x` 的维度。也可以沿轴减少 `x` 的维度。通过控制 `keep_dims` 来确定输出和输入的维度是否相同。
+
+    参数:
+        - **x** (Tensor[bool]) - 输入Tensor,其数据类型为bool型。shape是 :math:`(N, *)` ,其中 :math:`*` 表示任意数量的附加维度。秩应小于8。
+        - **axis** (Union[int, tuple(int), list(int)], 可选) - 要减少的维度。只允许常量值。假设 `x` 的秩为r,取值范围[-r,r)。默认值:(),缩小所有维度。
+        - **keep_dims** (bool, 可选) - 如果为True,则保留缩小的维度,大小为1。否则移除维度。默认值:False。
+
+    返回:
+        Tensor,数据类型是bool。
+
+        - 如果 `axis` 为(),且 `keep_dims` 为False,则输出一个零维Tensor,表示输入Tensor中所有元素进行“逻辑或”。
+        - 如果 `axis` 为int,取值为2,并且 `keep_dims` 为False,则输出的shape为 :math:`(x_1, x_3, ..., x_R)` 。
+        - 如果 `axis` 为tuple(int)或list(int),取值为(2, 3),并且 `keep_dims` 为False,则输出Tensor的shape为 :math:`(x_1, x_4, ..., x_R)` 。
+
+    异常:
+        - **TypeError** - `keep_dims` 不是bool类型。
+        - **TypeError** - `x` 不是Tensor。
+        - **TypeError** - `axis` 不是以下数据类型之一:int、tuple或list。
diff --git a/docs/api/api_python_en/mindspore.ops.rst b/docs/api/api_python_en/mindspore.ops.rst
index d4349db5194..33f27ed35ca 100644
--- a/docs/api/api_python_en/mindspore.ops.rst
+++ b/docs/api/api_python_en/mindspore.ops.rst
@@ -297,8 +297,10 @@ Reduction Functions
     :nosignatures:
     :template: classtemplate.rst
 
+    mindspore.ops.all
     mindspore.ops.amax
     mindspore.ops.amin
+    mindspore.ops.any
     mindspore.ops.argmax
     mindspore.ops.argmin
     mindspore.ops.cummax
diff --git a/mindspore/python/mindspore/ops/function/math_func.py b/mindspore/python/mindspore/ops/function/math_func.py
index 9a35400c66d..8273a2073dd 100644
--- a/mindspore/python/mindspore/ops/function/math_func.py
+++ b/mindspore/python/mindspore/ops/function/math_func.py
@@ -7674,8 +7674,7 @@ def _check_matmul_shapes(shape1, shape2, prim_name=None):
     r_shape2 = shape2[:-2]
     max_len = max(len(r_shape1), len(r_shape2))
     for i in range(max_len):
-        items = [it[i - max_len + len(it)] if i - max_len +
-                 len(it) >= 0 else 1 for it in (r_shape1, r_shape2)]
+        items = [it[i - max_len + len(it)] if i - max_len + len(it) >= 0 else 1 for it in (r_shape1, r_shape2)]
         max_size = max(items)
         shape_out.append(max_size)
     return tuple(shape_out)
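The re-wrapped comprehension above implements the usual batch-dimension broadcast rule: the two batch shapes are right-aligned, missing leading dimensions are treated as 1, and the element-wise maximum is taken. A standalone sketch of the same logic (shapes chosen for illustration, helper name hypothetical):

    def broadcast_batch_dims(r_shape1, r_shape2):
        # Right-align both batch shapes and broadcast them dimension by dimension.
        shape_out = []
        max_len = max(len(r_shape1), len(r_shape2))
        for i in range(max_len):
            items = [it[i - max_len + len(it)] if i - max_len + len(it) >= 0 else 1
                     for it in (r_shape1, r_shape2)]
            shape_out.append(max(items))
        return tuple(shape_out)

    broadcast_batch_dims((2, 1), (5,))  # -> (2, 5)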
@@ -8470,23 +8469,24 @@ def kron(x, y):
 
 def all(x, axis=(), keep_dims=False):
     r"""
-    Reduces a dimension of a tensor by the "logicalAND" of all elements in the dimension, by default. And also can
+    Reduces a dimension of `x` by the "logical AND" of all elements in the dimension, by default. And also can
     reduce a dimension of `x` along the axis. Determine whether the dimensions of the output and input are the same
     by controlling `keep_dims`.
 
     Args:
-        x (Tensor[bool]): The input tensor. The dtype of the tensor to be reduced is bool.
+        x (Tensor[bool]): The input Tensor. The dtype of the Tensor is bool.
           :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
-        axis (Union[int, tuple(int), list(int)]): The dimensions to reduce. Default: (), reduce all dimensions.
-            Only constant value is allowed. Must be in the range [-rank(x), rank(x)).
-        keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
-            If false, don't keep these dimensions. Default : False.
+        axis (Union[int, tuple(int), list(int)], optional): The dimensions to reduce.
+            Only constant value is allowed. Suppose the rank of `x` is r,
+            axis must be in the range [-rank(x), rank(x)). Default: (), all dimensions are reduced.
+        keep_dims (bool, optional): If true, keep these reduced dimensions and the length is 1.
+            If false, don't keep these dimensions. Default: False.
 
     Returns:
         Tensor, the dtype is bool.
 
         - If axis is (), and keep_dims is False,
-          the output is a 0-D tensor representing the "logical and" of all elements in the input tensor.
+          the output is a 0-D Tensor representing the "logical and" of all elements in the input Tensor.
         - If axis is int, set as 2, and keep_dims is False,
           the shape of output is :math:`(x_1, x_3, ..., x_R)`.
         - If axis is tuple(int), set as (2, 3), and keep_dims is False,
@@ -8523,23 +8523,24 @@ def all(x, axis=(), keep_dims=False):
 
 def any(x, axis=(), keep_dims=False):
     r"""
-    Reduces a dimension of a tensor by the "logical OR" of all elements in the dimension, by default. And also can
+    Reduces a dimension of `x` by the "logical OR" of all elements in the dimension, by default. And also can
     reduce a dimension of `x` along the axis. Determine whether the dimensions of the output and input are the same
     by controlling `keep_dims`.
 
     Args:
-        x (Tensor[bool]): The input tensor. The dtype of the tensor to be reduced is bool.
+        x (Tensor[bool]): The input Tensor. The dtype of the Tensor is bool.
           :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.
-        axis (Union[int, tuple(int), list(int)]): The dimensions to reduce. Default: (), reduce all dimensions.
-            Only constant value is allowed. Must be in the range [-rank(x), rank(x)).
-        keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
-            If false, don't keep these dimensions. Default : False.
+        axis (Union[int, tuple(int), list(int)], optional): The dimensions to reduce.
+            Only constant value is allowed. Suppose the rank of `x` is r,
+            axis must be in the range [-rank(x), rank(x)). Default: (), all dimensions are reduced.
+        keep_dims (bool, optional): If true, keep these reduced dimensions and the length is 1.
+            If false, don't keep these dimensions. Default: False.
 
     Returns:
         Tensor, the dtype is bool.
 
         - If axis is (), and keep_dims is False,
-          the output is a 0-D tensor representing the "logical or" of all elements in the input tensor.
+          the output is a 0-D Tensor representing the "logical or" of all elements in the input Tensor.
         - If axis is int, set as 2, and keep_dims is False,
           the shape of output is :math:`(x_1, x_3, ..., x_R)`.
         - If axis is tuple(int), set as (2, 3), and keep_dims is False,
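As with ops.all, a minimal usage sketch of ops.any (illustrative values, not part of the patch):

    import mindspore as ms
    from mindspore import Tensor, ops

    x = Tensor([[True, False], [False, False]], ms.bool_)
    ops.any(x)                          # reduce over all dimensions -> True
    ops.any(x, axis=1)                  # row-wise "logical OR" -> [True, False]
    ops.any(x, axis=1, keep_dims=True)  # keeps the reduced axis, output shape (2, 1)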
diff --git a/mindspore/python/mindspore/ops/function/nn_func.py b/mindspore/python/mindspore/ops/function/nn_func.py
index 95b7f67d368..ada8cc9d13f 100644
--- a/mindspore/python/mindspore/ops/function/nn_func.py
+++ b/mindspore/python/mindspore/ops/function/nn_func.py
@@ -4897,12 +4897,12 @@ def pixel_shuffle(x, upscale_factor):
                          "by `upscale_factor` squared.")
     c = c // upscale_factor ** 2
     input_perm = (pre + (c, upscale_factor, upscale_factor, h, w))
-    reshape = ops.Reshape()
+    reshape = _get_cache_prim(P.Reshape)()
+    transpose = _get_cache_prim(P.Transpose)()
     x = reshape(x, input_perm)
     input_perm = [i for i in range(length - 2)]
     input_perm = input_perm + [length, length - 2, length + 1, length - 1]
     input_perm = tuple(input_perm)
-    transpose = ops.Transpose()
     x = transpose(x, input_perm)
     x = reshape(x, (pre + (c, upscale_factor * h, upscale_factor * w)))
     return x
@@ -4953,12 +4953,12 @@ def pixel_unshuffle(x, downscale_factor):
     h = h // downscale_factor
     w = w // downscale_factor
     input_perm = (pre + (c, h, downscale_factor, w, downscale_factor))
-    reshape = ops.Reshape()
+    reshape = _get_cache_prim(P.Reshape)()
+    transpose = _get_cache_prim(P.Transpose)()
     x = reshape(x, input_perm)
     input_perm = [i for i in range(length - 2)]
     input_perm = input_perm + [length - 1, length + 1, length - 2, length]
     input_perm = tuple(input_perm)
-    transpose = ops.Transpose()
     x = transpose(x, input_perm)
     x = reshape(x, (pre + (c * downscale_factor * downscale_factor, h, w)))
     return x
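The pixel_shuffle/pixel_unshuffle change above replaces per-call construction of ops.Reshape() and ops.Transpose() with cached primitive instances, so repeated calls reuse one primitive object instead of building a new one each time. A minimal sketch of the pattern, assuming the _get_cache_prim helper from mindspore.ops._primitive_cache (the wrapper function below is hypothetical):

    from mindspore.ops import operations as P
    from mindspore.ops._primitive_cache import _get_cache_prim

    def cached_transpose(x, perm):
        # Fetch (or create once and cache) a Transpose primitive, then apply it.
        transpose = _get_cache_prim(P.Transpose)()
        return transpose(x, perm)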
diff --git a/tests/st/ops/test_func_all.py b/tests/st/ops/test_func_all.py
new file mode 100644
index 00000000000..ca7eda24886
--- /dev/null
+++ b/tests/st/ops/test_func_all.py
@@ -0,0 +1,51 @@
+# Copyright 2023 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+import pytest
+import mindspore as ms
+import mindspore.nn as nn
+from mindspore import Tensor, ops
+
+
+class Net(nn.Cell):
+    def construct(self, x):
+        return ops.all(x, axis=1, keep_dims=True)
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_cpu
+@pytest.mark.platform_arm_cpu
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.platform_arm_ascend_training
+@pytest.mark.platform_x86_ascend_training
+@pytest.mark.env_onecard
+@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
+def test_ops_all(mode):
+    """
+    Feature: ops.all
+    Description: Verify the result of all
+    Expectation: success
+    """
+    ms.set_context(mode=mode)
+    x = Tensor([[True, True],
+                [False, True],
+                [True, False],
+                [False, False]], ms.bool_)
+    net = Net()
+    output = net(x)
+    expect_output = Tensor([[True],
+                            [False],
+                            [False],
+                            [False]], ms.bool_)
+    assert all(output == expect_output)
diff --git a/tests/st/ops/test_func_any.py b/tests/st/ops/test_func_any.py
new file mode 100644
index 00000000000..bb33198425a
--- /dev/null
+++ b/tests/st/ops/test_func_any.py
@@ -0,0 +1,51 @@
+# Copyright 2023 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+import pytest
+import mindspore as ms
+import mindspore.nn as nn
+from mindspore import Tensor, ops
+
+
+class Net(nn.Cell):
+    def construct(self, x):
+        return ops.any(x, axis=1, keep_dims=True)
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_cpu
+@pytest.mark.platform_arm_cpu
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.platform_arm_ascend_training
+@pytest.mark.platform_x86_ascend_training
+@pytest.mark.env_onecard
+@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
+def test_ops_any(mode):
+    """
+    Feature: ops.any
+    Description: Verify the result of any
+    Expectation: success
+    """
+    ms.set_context(mode=mode)
+    x = Tensor([[True, True],
+                [False, True],
+                [True, False],
+                [False, False]], ms.bool_)
+    net = Net()
+    output = net(x)
+    expect_output = Tensor([[True],
+                            [True],
+                            [True],
+                            [False]], ms.bool_)
+    assert all(output == expect_output)