!43659 [ST][MS][OPS] isinf & logical_xor Functional APIs; isinf, isnan, le, less, logical_and, logical_not, logical_or & logical_xor Tensor APIs and STs.

Merge pull request !43659 from alashkari/new-apis-oct-11
This commit is contained in:
i-robot 2022-10-29 09:24:20 +00:00 committed by Gitee
commit f6866548c6
No known key found for this signature in database
GPG Key ID: 173E9B9CA92EEF8F
20 changed files with 952 additions and 12 deletions

View File

@ -185,6 +185,7 @@ mindspore.ops.function
mindspore.ops.logical_and
mindspore.ops.logical_not
mindspore.ops.logical_or
mindspore.ops.logical_xor
mindspore.ops.logit
mindspore.ops.log_matrix_determinant
mindspore.ops.matrix_determinant
@ -246,6 +247,7 @@ Reduction函数
mindspore.ops.intopk
mindspore.ops.isclose
mindspore.ops.isfinite
mindspore.ops.isinf
mindspore.ops.isnan
mindspore.ops.le
mindspore.ops.less

View File

@ -96,6 +96,10 @@ mindspore.Tensor
mindspore.Tensor.lerp
mindspore.Tensor.log
mindspore.Tensor.log1p
mindspore.Tensor.logical_and
mindspore.Tensor.logical_not
mindspore.Tensor.logical_or
mindspore.Tensor.logical_xor
mindspore.Tensor.logit
mindspore.Tensor.negative
mindspore.Tensor.pow
@ -151,6 +155,10 @@ Reduction方法
mindspore.Tensor.has_init
mindspore.Tensor.isclose
mindspore.Tensor.isfinite
mindspore.Tensor.isinf
mindspore.Tensor.isnan
mindspore.Tensor.le
mindspore.Tensor.less
mindspore.Tensor.top_k
线性代数方法

View File

@ -101,6 +101,10 @@ Element-wise Methods
mindspore.Tensor.lerp
mindspore.Tensor.log
mindspore.Tensor.log1p
mindspore.Tensor.logical_and
mindspore.Tensor.logical_not
mindspore.Tensor.logical_or
mindspore.Tensor.logical_xor
mindspore.Tensor.logit
mindspore.Tensor.negative
mindspore.Tensor.pow
@ -156,6 +160,10 @@ Comparison Methods
mindspore.Tensor.has_init
mindspore.Tensor.isclose
mindspore.Tensor.isfinite
mindspore.Tensor.isinf
mindspore.Tensor.isnan
mindspore.Tensor.le
mindspore.Tensor.less
mindspore.Tensor.top_k
Linear Algebraic Methods

View File

@ -186,6 +186,7 @@ Element-by-Element Operations
mindspore.ops.logical_and
mindspore.ops.logical_not
mindspore.ops.logical_or
mindspore.ops.logical_xor
mindspore.ops.logit
mindspore.ops.log_matrix_determinant
mindspore.ops.matrix_determinant
@ -246,6 +247,7 @@ Comparison Functions
mindspore.ops.intopk
mindspore.ops.isclose
mindspore.ops.isfinite
mindspore.ops.isinf
mindspore.ops.isnan
mindspore.ops.le
mindspore.ops.less

View File

@ -349,6 +349,14 @@ BuiltInTypeMap &GetMethodMap() {
{"greater_equal", std::string("greater_equal")}, // greater_equal()
{"igamma", std::string("igamma")}, // igamma()
{"igammac", std::string("igammac")}, // igammac()
{"isinf", std::string("isinf")}, // isinf()
{"isnan", std::string("isnan")}, // isnan()
{"le", std::string("le")}, // le()
{"less", std::string("less")}, // less()
{"logical_and", std::string("logical_and")}, // logical_and()
{"logical_not", std::string("logical_not")}, // logical_not()
{"logical_or", std::string("logical_or")}, // logical_or()
{"logical_xor", std::string("logical_xor")}, // logical_xor()
}},
{kObjectTypeRowTensorType,
{

View File

@ -3523,3 +3523,59 @@ def igammac(input, other):
Computes upper regularized incomplete Gamma function.
"""
return F.igammac(input, other)
def isinf(input):
    r"""
    Return a bool tensor that is True where `input` is +inf or -inf.
    """
    result = F.isinf(input)
    return result
def isnan(input):
    r"""
    Return a bool tensor that is True where `input` is NaN.
    """
    result = F.isnan(input)
    return result
def le(input, other):
    r"""
    Element-wise :math:`input <= other`, returned as a bool tensor.
    """
    result = F.le(input, other)
    return result
def less(input, other):
    r"""
    Element-wise :math:`input < other`, returned as a bool tensor.
    """
    result = F.less(input, other)
    return result
def logical_and(input, other):
    r"""
    Element-wise logical AND of two tensors.
    """
    result = F.logical_and(input, other)
    return result
def logical_not(input):
    r"""
    Element-wise logical NOT of the input tensor.
    """
    result = F.logical_not(input)
    return result
def logical_or(input, other):
    r"""
    Element-wise logical OR of two tensors.
    """
    result = F.logical_or(input, other)
    return result
def logical_xor(input, other):
    r"""
    Element-wise logical XOR of two tensors.
    """
    result = F.logical_xor(input, other)
    return result

View File

@ -3581,7 +3581,7 @@ class Tensor(Tensor_):
return self
origin_dtype = self.dtype
x = self
logical_not_op = tensor_operator_registry.get('logical_not')()
logical_not_op = tensor_operator_registry.get('logical_not')
if origin_dtype == mstype.bool_:
return logical_not_op(logical_not_op(x))
if origin_dtype != mstype.float64:
@ -7233,6 +7233,305 @@ class Tensor(Tensor_):
return tensor_operator_registry.get('igammac')(self, other)
def isinf(self):
    r"""
    Determine which elements are inf or -inf for each position.

    .. math::

        out_i = \begin{cases}
          & \text{ if } x_{i} = \text{Inf},\ \ True \\
          & \text{ if } x_{i} \ne \text{Inf},\ \ False
        \end{cases}

    where :math:`Inf` means positive or negative infinity.

    Returns:
        Tensor, has the same shape of input, and the dtype is bool.

    Raises:
        TypeError: If input is not a Tensor.

    Supported Platforms:
        ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
        >>> output = x.isinf()
        >>> print(output)
        [False False True]
    """
    self._init_check()
    return tensor_operator_registry.get('isinf')(self)
def isnan(self):
    r"""
    Determine, element-wise, which entries of this tensor are NaN.

    .. math::

        out_i = \begin{cases}
          & \ True,\ \text{ if } x_{i} = \text{Nan} \\
          & \ False,\ \text{ if } x_{i} \ne \text{Nan}
        \end{cases}

    where :math:`Nan` means not a number.

    Returns:
        Tensor of bools with the same shape as this tensor.

    Raises:
        TypeError: If input is not a Tensor.

    Supported Platforms:
        ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
        >>> output = x.isnan()
        >>> print(output)
        [True False False]
    """
    self._init_check()
    isnan_fn = tensor_operator_registry.get('isnan')
    return isnan_fn(self)
def le(self, other):
    r"""
    Computes the boolean value of :math:`self <= other` element-wise.

    .. math::

        out_{i} =\begin{cases}
            & \text{True, if } input_{i}<=other_{i} \\
            & \text{False, if } input_{i}>other_{i}
            \end{cases}

    .. note::
        - Inputs of `self` and `other` comply with the implicit type conversion rules to make the data
          types consistent.
        - The inputs must be two tensors or one tensor and one scalar.
        - When the inputs are two tensors,
          dtypes of them cannot be both bool , and the shapes of them can be broadcast.
        - When the inputs are one tensor and one scalar, the scalar could only be a constant.

    Args:
        other (Union[Tensor, number.Number, bool]): The second input, should be a number.Number or
            bool value, or a Tensor whose data type is number or bool\_.

    Returns:
        Tensor, the shape is the same as the one after broadcasting, and the data type is bool.

    Raises:
        TypeError: If `other` is not one of the following: Tensor, number.Number, bool.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
        >>> y = Tensor(np.array([1, 1, 4]), mindspore.int32)
        >>> output = x.le(y)
        >>> print(output)
        [True False True]
    """
    self._init_check()
    return tensor_operator_registry.get('le')(self, other)
def less(self, other):
    r"""
    Computes the boolean value of :math:`self < other` element-wise.

    .. math::

        out_{i} =\begin{cases}
            & \text{True, if } input_{i}<other_{i} \\
            & \text{False, if } input_{i}>=other_{i}
            \end{cases}

    .. note::
        - Inputs of `self` and `other` comply with the implicit type conversion rules to make the data
          types consistent.
        - The inputs must be two tensors or one tensor and one scalar.
        - When the inputs are two tensors,
          dtypes of them cannot be both bool , and the shapes of them can be broadcast.
        - When the inputs are one tensor and one scalar, the scalar could only be a constant.

    Args:
        other (Union[Tensor, number.Number, bool]): The second input, should be a number.Number or
            bool value, or a Tensor whose data type is number or bool\_.

    Returns:
        Tensor, the shape is the same as the one after broadcasting, and the data type is bool.

    Raises:
        TypeError: If `other` is not one of the following: Tensor, number.Number, bool.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
        >>> y = Tensor(np.array([1, 1, 4]), mindspore.int32)
        >>> output = x.less(y)
        >>> print(output)
        [False False True]
    """
    self._init_check()
    return tensor_operator_registry.get('less')(self, other)
def logical_and(self, other):
    r"""
    Computes the "logical AND" of two tensors element-wise.

    Inputs of `self` and `other` comply with the implicit type conversion rules to make the data types
    consistent. The inputs must be two tensors or one tensor and one bool.
    When the inputs are two tensors, the shapes of them could be broadcast,
    and the data types of them must be bool.
    When the inputs are one tensor and one bool, the bool object could only be a constant,
    and the data type of the tensor must be bool.

    .. math::

        out_{i} = input_{i} \wedge other_{i}

    Note:
        LogicalAnd supports broadcasting.

    Args:
        other (Union[Tensor, bool]): The second input is a bool when the first input is a tensor or
            a tensor whose data type is bool.

    Returns:
        Tensor, the shape is the same as the one after broadcasting, and the data type is bool.

    Raises:
        TypeError: If `other` is neither a Tensor nor a bool.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.array([True, False, True]), mindspore.bool_)
        >>> y = Tensor(np.array([True, True, False]), mindspore.bool_)
        >>> output = x.logical_and(y)
        >>> print(output)
        [True False False]
    """
    self._init_check()
    return tensor_operator_registry.get('logical_and')(self, other)
def logical_not(self):
    r"""
    Computes the "logical NOT" of a tensor element-wise.

    .. math::

        out_{i} = \neg x_{i}

    Returns:
        Tensor, the shape is the same as the input, and the dtype is bool.

    Raises:
        TypeError: If input is not a Tensor.
        TypeError: If dtype of input is not a bool.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.array([True, False, True]), mindspore.bool_)
        >>> output = x.logical_not()
        >>> print(output)
        [False True False]
    """
    self._init_check()
    return tensor_operator_registry.get('logical_not')(self)
def logical_or(self, other):
    r"""
    Computes the "logical OR" of two tensors element-wise.

    Inputs of `self` and `other` comply with the implicit type conversion rules to make the data types
    consistent. The inputs must be two tensors or one tensor and one bool.
    When the inputs are two tensors, the shapes of them could be broadcast,
    and the data types of them must be bool.
    When the inputs are one tensor and one bool, the bool object could only be a constant,
    and the data type of the tensor must be bool.

    .. math::

        out_{i} = input_{i} \vee other_{i}

    Note:
        Logical Or supports broadcasting.

    Args:
        other (Union[Tensor, bool]): The second input is a bool when the first input is a tensor or
            a tensor whose data type is bool.

    Returns:
        Tensor, the shape is the same as the one after broadcasting, and the data type is bool.

    Raises:
        TypeError: If `other` is neither a Tensor nor a bool.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.array([True, False, True]), mindspore.bool_)
        >>> y = Tensor(np.array([True, True, False]), mindspore.bool_)
        >>> output = x.logical_or(y)
        >>> print(output)
        [True True True]
    """
    self._init_check()
    return tensor_operator_registry.get('logical_or')(self, other)
def logical_xor(self, other):
    r"""
    Computes the "logical XOR" of two tensors element-wise.

    .. math::

        out_{i} = x_{i} \oplus y_{i}

    Args:
        other (Tensor): The tensor to compute XOR with this tensor.
            Datatype must be bool.

    Returns:
        Tensor, the shape is the same as the one after broadcasting, and the data type is bool.

    Raises:
        TypeError: If `other` is a Tensor whose data type is not bool.
        ValueError: If the shape of two inputs cannot be broadcast.

    Supported Platforms:
        ``CPU``

    Examples:
        >>> x = Tensor(np.array([True, False, True]), mindspore.bool_)
        >>> y = Tensor(np.array([True, True, False]), mindspore.bool_)
        >>> output = x.logical_xor(y)
        >>> print(output)
        [False True True]
    """
    self._init_check()
    return tensor_operator_registry.get('logical_xor')(self, other)
class RowTensorInner(RowTensor_):
"""
Implementation for RowTensor, for MindSpore developers only.

View File

@ -289,6 +289,8 @@ from .math_func import (
greater_equal,
igamma,
igammac,
isinf,
logical_xor,
)
from .nn_func import (
adaptive_avg_pool1d,

View File

@ -6731,6 +6731,76 @@ def igammac(input, other):
return igammac_op(input, other)
def isinf(input):
    r"""
    Determines which elements are inf or -inf for each position.

    .. math::

        out_i = \begin{cases}
          & \text{ if } x_{i} = \text{Inf},\ \ True \\
          & \text{ if } x_{i} \ne \text{Inf},\ \ False
        \end{cases}

    where :math:`Inf` means positive or negative infinity.

    Args:
        input (Tensor): The input tensor.
            :math:`(N,*)` where :math:`*` means, any number of additional dimensions.

    Returns:
        Tensor, has the same shape of input, and the dtype is bool.

    Raises:
        TypeError: If `input` is not a Tensor.

    Supported Platforms:
        ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
        >>> output = ops.isinf(x)
        >>> print(output)
        [False False True]
    """
    isinf_op = _get_cache_prim(P.IsInf)()
    return isinf_op(input)
def logical_xor(input, other):
    r"""
    Computes the "logical XOR" of two tensors element-wise.

    .. math::

        out_{i} = x_{i} \oplus y_{i}

    Args:
        input (Tensor): The first input is a tensor whose data type is bool.
        other (Tensor): The second input is the tensor to compute XOR with the first input.
            Datatype must be bool.

    Returns:
        Tensor, the shape is the same as the one after broadcasting, and the data type is bool.

    Raises:
        TypeError: If the data type of `input` or `other` is not bool.
        ValueError: If the shape of two inputs cannot be broadcast.

    Supported Platforms:
        ``CPU``

    Examples:
        >>> x = Tensor(np.array([True, False, True]), mindspore.bool_)
        >>> y = Tensor(np.array([True, True, False]), mindspore.bool_)
        >>> output = ops.logical_xor(x, y)
        >>> print(output)
        [False True True]
    """
    logical_xor_op = _get_cache_prim(P.LogicalXor)()
    return logical_xor_op(input, other)
__all__ = [
'addn',
'absolute',
@ -6891,5 +6961,7 @@ __all__ = [
'greater_equal',
'igamma',
'igammac',
'isinf',
'logical_xor',
]
__all__.sort()

View File

@ -383,7 +383,6 @@ tensor_operator_registry.register('log_matrix_determinant', log_matrix_determina
tensor_operator_registry.register('ceil', P.Ceil)
tensor_operator_registry.register('fill', P.Fill)
tensor_operator_registry.register('tile', P.Tile)
tensor_operator_registry.register('logical_not', P.LogicalNot)
tensor_operator_registry.register('logit', logit)
tensor_operator_registry.register('sum', P.ReduceSum)
tensor_operator_registry.register('split', P.Split)
@ -515,6 +514,13 @@ tensor_operator_registry.register('cumprod', cumprod)
tensor_operator_registry.register('div', div)
tensor_operator_registry.register('equal', equal)
tensor_operator_registry.register('expm1', expm1)
tensor_operator_registry.register('isinf', isinf)
tensor_operator_registry.register('isnan', isnan)
tensor_operator_registry.register('le', le)
tensor_operator_registry.register('less', less)
tensor_operator_registry.register('logical_and', logical_and)
tensor_operator_registry.register('logical_not', logical_not)
tensor_operator_registry.register('logical_or', logical_or)
tensor_operator_registry.register('logical_xor', logical_xor)
__all__ = [name for name in dir() if name[0] != "_"]
__all__.remove('Primitive')

View File

@ -100,3 +100,41 @@ def test_greater_tensor_api_modes(mode):
output = x.greater(y)
expected = np.array([False, True, False])
np.testing.assert_array_equal(output.asnumpy(), expected)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_le_tensor_api_modes(mode):
    """
    Feature: Tensor.le API.
    Description: Check Tensor.le under both Graph and PyNative modes.
    Expectation: Output equals the element-wise lhs <= rhs result.
    """
    context.set_context(mode=mode, device_target="Ascend")
    lhs = Tensor([1, 2, 3], mstype.int32)
    rhs = Tensor([1, 1, 4], mstype.int32)
    got = lhs.le(rhs)
    np.testing.assert_array_equal(got.asnumpy(), np.array([True, False, True]))
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_less_tensor_api_modes(mode):
    """
    Feature: Tensor.less API.
    Description: Check Tensor.less under both Graph and PyNative modes.
    Expectation: Output equals the element-wise lhs < rhs result.
    """
    context.set_context(mode=mode, device_target="Ascend")
    lhs = Tensor([1, 2, 3], mstype.int32)
    rhs = Tensor([1, 1, 4], mstype.int32)
    got = lhs.less(rhs)
    np.testing.assert_array_equal(got.asnumpy(), np.array([False, False, True]))

View File

@ -0,0 +1,77 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
import numpy as np
from mindspore import Tensor
import mindspore.context as context
from mindspore.common import dtype as mstype
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_logical_and_tensor_api_modes(mode):
    """
    Feature: Tensor.logical_and API.
    Description: Check Tensor.logical_and under both Graph and PyNative modes.
    Expectation: Output equals the element-wise conjunction of the inputs.
    """
    context.set_context(mode=mode, device_target="Ascend")
    lhs = Tensor([True, False, True], mstype.bool_)
    rhs = Tensor([True, True, False], mstype.bool_)
    got = lhs.logical_and(rhs)
    np.testing.assert_array_equal(got.asnumpy(), np.array([True, False, False]))
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_logical_not_tensor_api_modes(mode):
    """
    Feature: Tensor.logical_not API.
    Description: Check Tensor.logical_not under both Graph and PyNative modes.
    Expectation: Output equals the element-wise negation of the input.
    """
    context.set_context(mode=mode, device_target="Ascend")
    operand = Tensor([True, False, True], mstype.bool_)
    got = operand.logical_not()
    np.testing.assert_array_equal(got.asnumpy(), np.array([False, True, False]))
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_logical_or_tensor_api_modes(mode):
    """
    Feature: Tensor.logical_or API.
    Description: Check Tensor.logical_or under both Graph and PyNative modes.
    Expectation: Output equals the element-wise disjunction of the inputs.
    """
    context.set_context(mode=mode, device_target="Ascend")
    lhs = Tensor([True, False, True], mstype.bool_)
    rhs = Tensor([True, True, False], mstype.bool_)
    got = lhs.logical_or(rhs)
    np.testing.assert_array_equal(got.asnumpy(), np.array([True, True, True]))

View File

@ -158,3 +158,39 @@ def test_greater_tensor_api_modes(mode):
output = x.greater(y)
expected = np.array([False, True, False])
np.testing.assert_array_equal(output.asnumpy(), expected)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_le_tensor_api_modes(mode):
    """
    Feature: Tensor.le API on CPU.
    Description: Check Tensor.le under both Graph and PyNative modes.
    Expectation: Output equals the element-wise lhs <= rhs result.
    """
    context.set_context(mode=mode, device_target="CPU")
    lhs = Tensor([1, 2, 3], mstype.int32)
    rhs = Tensor([1, 1, 4], mstype.int32)
    got = lhs.le(rhs)
    np.testing.assert_array_equal(got.asnumpy(), np.array([True, False, True]))
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_less_tensor_api_modes(mode):
    """
    Feature: Tensor.less API on CPU.
    Description: Check Tensor.less under both Graph and PyNative modes.
    Expectation: Output equals the element-wise lhs < rhs result.
    """
    context.set_context(mode=mode, device_target="CPU")
    lhs = Tensor([1, 2, 3], mstype.int32)
    rhs = Tensor([1, 1, 4], mstype.int32)
    got = lhs.less(rhs)
    np.testing.assert_array_equal(got.asnumpy(), np.array([False, False, True]))

View File

@ -1,4 +1,4 @@
# Copyright 2021 Huawei Technologies Co., Ltd
# Copyright 2021-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -16,11 +16,12 @@
import numpy as np
import pytest
import mindspore as ms
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.common import dtype as mstype
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
@ -34,9 +35,9 @@ class NetIsInf(nn.Cell):
return self.isinf(x)
x1 = Tensor(np.array([3, np.log(0), 1, np.log(0)]), ms.float32)
x2 = Tensor(np.array([np.log(0), 1, np.log(0), 3]), ms.float32)
x3 = Tensor(np.array([[np.log(0), 2], [np.log(0), np.log(0)]]), ms.float32)
x1 = Tensor(np.array([3, np.log(0), 1, np.log(0)]), mstype.float32)
x2 = Tensor(np.array([np.log(0), 1, np.log(0), 3]), mstype.float32)
x3 = Tensor(np.array([[np.log(0), 2], [np.log(0), np.log(0)]]), mstype.float32)
@pytest.mark.level0
@ -68,9 +69,43 @@ def test_is_nan_cpu_dynamic_shape():
"""
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
net = NetIsInf()
x_dyn = Tensor(shape=[1, 32, 9, None], dtype=ms.float32)
x_dyn = Tensor(shape=[1, 32, 9, None], dtype=mstype.float32)
net.set_inputs(x_dyn)
x = np.random.randn(1, 32, 9, 9)
output = net(Tensor(x, ms.float32))
output = net(Tensor(x, mstype.float32))
except_shape = (1, 32, 9, 9)
assert output.asnumpy().shape == except_shape
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_isinf_functional_api_modes(mode):
    """
    Feature: ops.isinf functional API on CPU.
    Description: Check F.isinf under both Graph and PyNative modes.
    Expectation: Only the -inf entry is flagged True.
    """
    context.set_context(mode=mode, device_target="CPU")
    data = Tensor(np.array([np.log(-1), 1, np.log(0)]), mstype.float32)
    got = F.isinf(data)
    np.testing.assert_array_equal(got.asnumpy(), np.array([False, False, True]))
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_isinf_tensor_api_modes(mode):
    """
    Feature: Tensor.isinf API on CPU.
    Description: Check Tensor.isinf under both Graph and PyNative modes.
    Expectation: Only the -inf entry is flagged True.
    """
    context.set_context(mode=mode, device_target="CPU")
    data = Tensor(np.array([np.log(-1), 1, np.log(0)]), mstype.float32)
    got = data.isinf()
    np.testing.assert_array_equal(got.asnumpy(), np.array([False, False, True]))

View File

@ -74,3 +74,20 @@ def test_is_nan_cpu_dynamic_shape():
output = net(Tensor(x, ms.float32))
except_shape = (1, 32, 9, 9)
assert output.asnumpy().shape == except_shape
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_isnan_tensor_api_modes(mode):
    """
    Feature: Tensor.isnan API on CPU.
    Description: Check Tensor.isnan under both Graph and PyNative modes.
    Expectation: Only the NaN entry is flagged True.
    """
    context.set_context(mode=mode, device_target="CPU")
    data = Tensor(np.array([np.log(-1), 1, np.log(0)]), mstype.float32)
    got = data.isnan()
    np.testing.assert_array_equal(got.asnumpy(), np.array([True, False, False]))

View File

@ -1,4 +1,4 @@
# Copyright 2020-2021 Huawei Technologies Co., Ltd
# Copyright 2020-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -20,6 +20,8 @@ import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.common import dtype as mstype
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
@ -74,6 +76,95 @@ def test_logicalnot():
assert np.allclose(outputs.asnumpy(), (False, True, True))
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_logical_xor_functional_api_modes(mode):
    """
    Feature: ops.logical_xor functional API on CPU.
    Description: Check F.logical_xor under both Graph and PyNative modes.
    Expectation: Output equals the element-wise XOR of the inputs.
    """
    context.set_context(mode=mode, device_target="CPU")
    lhs = Tensor([True, False, True], mstype.bool_)
    rhs = Tensor([True, True, False], mstype.bool_)
    got = F.logical_xor(lhs, rhs)
    np.testing.assert_array_equal(got.asnumpy(), np.array([False, True, True]))
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_logical_and_tensor_api_modes(mode):
    """
    Feature: Tensor.logical_and API on CPU.
    Description: Check Tensor.logical_and under both Graph and PyNative modes.
    Expectation: Output equals the element-wise conjunction of the inputs.
    """
    context.set_context(mode=mode, device_target="CPU")
    lhs = Tensor([True, False, True], mstype.bool_)
    rhs = Tensor([True, True, False], mstype.bool_)
    got = lhs.logical_and(rhs)
    np.testing.assert_array_equal(got.asnumpy(), np.array([True, False, False]))
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_logical_not_tensor_api_modes(mode):
    """
    Feature: Tensor.logical_not API on CPU.
    Description: Check Tensor.logical_not under both Graph and PyNative modes.
    Expectation: Output equals the element-wise negation of the input.
    """
    context.set_context(mode=mode, device_target="CPU")
    operand = Tensor([True, False, True], mstype.bool_)
    got = operand.logical_not()
    np.testing.assert_array_equal(got.asnumpy(), np.array([False, True, False]))
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_logical_or_tensor_api_modes(mode):
    """
    Feature: Tensor.logical_or API on CPU.
    Description: Check Tensor.logical_or under both Graph and PyNative modes.
    Expectation: Output equals the element-wise disjunction of the inputs.
    """
    context.set_context(mode=mode, device_target="CPU")
    lhs = Tensor([True, False, True], mstype.bool_)
    rhs = Tensor([True, True, False], mstype.bool_)
    got = lhs.logical_or(rhs)
    np.testing.assert_array_equal(got.asnumpy(), np.array([True, True, True]))
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_logical_xor_tensor_api_modes(mode):
    """
    Feature: Tensor.logical_xor API on CPU.
    Description: Check Tensor.logical_xor under both Graph and PyNative modes.
    Expectation: Output equals the element-wise XOR of the inputs.
    """
    context.set_context(mode=mode, device_target="CPU")
    lhs = Tensor([True, False, True], mstype.bool_)
    rhs = Tensor([True, True, False], mstype.bool_)
    got = lhs.logical_xor(rhs)
    np.testing.assert_array_equal(got.asnumpy(), np.array([False, True, True]))
if __name__ == '__main__':
test_logicaland()
test_logicalor()

View File

@ -96,3 +96,39 @@ def test_greater_tensor_api_modes(mode):
output = x.greater(y)
expected = np.array([False, True, False])
np.testing.assert_array_equal(output.asnumpy(), expected)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_le_tensor_api_modes(mode):
    """
    Feature: Tensor.le API on GPU.
    Description: Check Tensor.le under both Graph and PyNative modes.
    Expectation: Output equals the element-wise lhs <= rhs result.
    """
    context.set_context(mode=mode, device_target="GPU")
    lhs = Tensor([1, 2, 3], mstype.int32)
    rhs = Tensor([1, 1, 4], mstype.int32)
    got = lhs.le(rhs)
    np.testing.assert_array_equal(got.asnumpy(), np.array([True, False, True]))
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_less_tensor_api_modes(mode):
    """
    Feature: Tensor.less API on GPU.
    Description: Check Tensor.less under both Graph and PyNative modes.
    Expectation: Output equals the element-wise lhs < rhs result.
    """
    context.set_context(mode=mode, device_target="GPU")
    lhs = Tensor([1, 2, 3], mstype.int32)
    rhs = Tensor([1, 1, 4], mstype.int32)
    got = lhs.less(rhs)
    np.testing.assert_array_equal(got.asnumpy(), np.array([False, False, True]))

View File

@ -0,0 +1,56 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
import numpy as np
from mindspore import Tensor
import mindspore.context as context
from mindspore.ops import functional as F
from mindspore.common import dtype as mstype
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_isinf_functional_api_modes(mode):
    """
    Feature: ops.isinf functional API on GPU.
    Description: Check F.isinf under both Graph and PyNative modes.
    Expectation: Only the -inf entry is flagged True.
    """
    context.set_context(mode=mode, device_target="GPU")
    data = Tensor(np.array([np.log(-1), 1, np.log(0)]), mstype.float32)
    got = F.isinf(data)
    np.testing.assert_array_equal(got.asnumpy(), np.array([False, False, True]))
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_isinf_tensor_api_modes(mode):
    """
    Feature: Tensor.isinf API on GPU.
    Description: Check Tensor.isinf under both Graph and PyNative modes.
    Expectation: Only the -inf entry is flagged True.
    """
    context.set_context(mode=mode, device_target="GPU")
    data = Tensor(np.array([np.log(-1), 1, np.log(0)]), mstype.float32)
    got = data.isinf()
    np.testing.assert_array_equal(got.asnumpy(), np.array([False, False, True]))

View File

@ -0,0 +1,38 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
import numpy as np
from mindspore import Tensor
import mindspore.context as context
from mindspore.common import dtype as mstype
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_isnan_tensor_api_modes(mode):
    """
    Feature: Test isnan tensor api.
    Description: Test isnan tensor api for Graph and PyNative modes.
    Expectation: The result match to the expect value.
    """
    context.set_context(mode=mode, device_target="GPU")
    # Build [nan, 1, -inf] explicitly. The previous np.log(-1)/np.log(0)
    # construction emits NumPy RuntimeWarnings (invalid value / divide by
    # zero), which fails under warnings-as-errors pytest configurations.
    x = Tensor(np.array([np.nan, 1, -np.inf]), mstype.float32)
    output = x.isnan()
    # Only the nan element is NaN; -inf is infinite but not NaN.
    expected = np.array([True, False, False])
    np.testing.assert_array_equal(output.asnumpy(), expected)

View File

@ -1,4 +1,4 @@
# Copyright 2020 Huawei Technologies Co., Ltd
# Copyright 2020-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -96,3 +96,56 @@ def test_logicalnot():
logicalnot = NetNot()
output = logicalnot(Tensor(x))
assert np.all(output.asnumpy() == np.logical_not(x))
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_logical_and_tensor_api_modes(mode):
    """
    Feature: Test logical_and tensor api.
    Description: Verify Tensor.logical_and against the element-wise AND of
        two boolean tensors, in both Graph and PyNative modes.
    Expectation: The result match to the expect value.
    """
    context.set_context(mode=mode, device_target="GPU")
    lhs = Tensor([True, False, True], mstype.bool_)
    rhs = Tensor([True, True, False], mstype.bool_)
    result = lhs.logical_and(rhs)
    # Element-wise AND: only the first pair is True in both operands.
    np.testing.assert_array_equal(result.asnumpy(),
                                  np.array([True, False, False]))
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_logical_not_tensor_api_modes(mode):
    """
    Feature: Test logical_not tensor api.
    Description: Verify Tensor.logical_not inverts every element of a
        boolean tensor, in both Graph and PyNative modes.
    Expectation: The result match to the expect value.
    """
    context.set_context(mode=mode, device_target="GPU")
    operand = Tensor([True, False, True], mstype.bool_)
    result = operand.logical_not()
    # Element-wise NOT of [True, False, True].
    np.testing.assert_array_equal(result.asnumpy(),
                                  np.array([False, True, False]))
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_logical_or_tensor_api_modes(mode):
    """
    Feature: Test logical_or tensor api.
    Description: Verify Tensor.logical_or against the element-wise OR of
        two boolean tensors, in both Graph and PyNative modes.
    Expectation: The result match to the expect value.
    """
    context.set_context(mode=mode, device_target="GPU")
    lhs = Tensor([True, False, True], mstype.bool_)
    rhs = Tensor([True, True, False], mstype.bool_)
    result = lhs.logical_or(rhs)
    # Element-wise OR: every position has at least one True operand.
    np.testing.assert_array_equal(result.asnumpy(),
                                  np.array([True, True, True]))