forked from mindspore-Ecosystem/mindspore
!49788 modify add function
Merge pull request !49788 from changzherui/mod_add
Commit: a2d27849bd
@@ -1,6 +1,6 @@
 mindspore.Tensor.add
 ====================

-.. py:method:: mindspore.Tensor.add(y)
+.. py:method:: mindspore.Tensor.add(other)

     For details, please refer to :func:`mindspore.ops.add`.

@@ -1,26 +1,26 @@
 mindspore.ops.add
 =================

-.. py:function:: mindspore.ops.add(x, y)
+.. py:function:: mindspore.ops.add(input, other)

-    Adds the two input Tensors element-wise.
+    Adds the value of `other` to `input` element-wise.

     .. math::

-        out_{i} = x_{i} + y_{i}
+        out_{i} = input_{i} + other_{i}

     .. note::
-        - Inputs `x` and `y` follow the `implicit type conversion rules <https://www.mindspore.cn/docs/zh-CN/master/note/operator_list_implicit.html>`_ to keep the data types consistent.
+        - Inputs `input` and `other` follow the `implicit type conversion rules <https://www.mindspore.cn/docs/zh-CN/master/note/operator_list_implicit.html>`_ to keep the data types consistent.
         - The inputs must be two Tensors, or one Tensor and one Scalar.
         - When the inputs are two Tensors, their data types cannot both be bool, and their shapes must be broadcastable.
         - When the inputs are one Tensor and one Scalar, the Scalar can only be a constant.

     Parameters:
-        - **x** (Union[Tensor, number.Number, bool]) - The first input, a number.Number, a bool, or a Tensor whose data type is `number <https://www.mindspore.cn/docs/zh-CN/master/api_python/mindspore/mindspore.dtype.html#mindspore.dtype>`_ or `bool_ <https://www.mindspore.cn/docs/zh-CN/master/api_python/mindspore/mindspore.dtype.html#mindspore.dtype>`_.
-        - **y** (Union[Tensor, number.Number, bool]) - The second input. When the first input is a Tensor, the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool_. When the first input is a Scalar, the second input must be a Tensor whose data type is number or bool_.
+        - **input** (Union[Tensor, number.Number, bool]) - The first input, a number.Number, a bool, or a Tensor whose data type is `number <https://www.mindspore.cn/docs/zh-CN/master/api_python/mindspore/mindspore.dtype.html#mindspore.dtype>`_ or `bool_ <https://www.mindspore.cn/docs/zh-CN/master/api_python/mindspore/mindspore.dtype.html#mindspore.dtype>`_.
+        - **other** (Union[Tensor, number.Number, bool]) - The second input. When the first input is a Tensor, the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool_. When the first input is a Scalar, the second input must be a Tensor whose data type is number or bool_.

     Returns:
-        Tensor, whose shape is the same as the broadcast shape of `x` and `y`, and whose data type is the one with higher precision between the two inputs.
+        Tensor, whose shape is the same as the broadcast shape of `input` and `other`, and whose data type is the one with higher precision between the two inputs.

     Raises:
-        - **TypeError** - `x` and `y` are not a Tensor, number.Number, or bool.
+        - **TypeError** - `input` and `other` are not a Tensor, number.Number, or bool.

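A minimal usage sketch of the renamed functional interface, assuming a standard MindSpore install; the tensor values are illustrative and show the broadcasting and implicit type conversion described above:

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops

    # Two-Tensor case: shapes are broadcast and dtypes follow the implicit conversion rules.
    x = Tensor(np.array([[1], [2], [3]]), ms.float32)   # shape (3, 1)
    y = Tensor(np.array([4, 5, 6]), ms.int32)           # shape (3,)
    out = ops.add(x, y)
    print(out.shape)   # (3, 3) after broadcasting
    print(out.dtype)   # Float32, the higher-precision dtype of the two inputs

    # Tensor-and-Scalar case: the scalar must be a constant.
    print(ops.add(Tensor(np.array([1, 2, 3]), ms.float32), 10))   # [11. 12. 13.]
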
@@ -3736,11 +3736,11 @@ def acosh(x):
     return F.acosh(x)


-def add(x, y):
+def add(input, other):
     r"""
     Computes the element-wise addition of input tensors.
     """
-    return F.add(x, y)
+    return F.add(input, other)


 def addr(x, vec1, vec2, beta=1, alpha=1):

@@ -810,13 +810,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         self._init_check()
         return tensor_operator_registry.get('addcmul')()(self, x1, x2, value)

-    def add(self, y):
+    def add(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.add`.
         """
-
         self._init_check()
-        return tensor_operator_registry.get('add')()(self, y)
+        return tensor_operator_registry.get('add')()(self, other)

     def subtract(self, other, *, alpha=1):
         r"""

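A quick sketch of the renamed Tensor method, assuming a standard MindSpore install. Since the method dispatches to the registered 'add' operator, it matches mindspore.ops.add:

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops

    t = Tensor(np.array([1.0, 2.0, 3.0]), ms.float32)
    other = Tensor(np.array([4.0, 5.0, 6.0]), ms.float32)

    # The method form and the functional form produce the same result.
    print(t.add(other))        # [5. 7. 9.]
    print(ops.add(t, other))   # [5. 7. 9.]
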
@@ -252,36 +252,37 @@ def absolute(input):
     return abs(input)


-def add(x, y):
+def add(input, other):
     r"""
-    Adds two input tensors element-wise.
+    Adds other value to input Tensor.

     .. math::

-        out_{i} = x_{i} + y_{i}
+        out_{i} = input_{i} + other_{i}

     .. note::
-        - Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
+        - Inputs of `input` and `other` comply with the implicit type conversion rules to make
+          the data types consistent.
         - The inputs must be two tensors or one tensor and one scalar.
         - When the inputs are two tensors,
           dtypes of them cannot be bool at the same time, and the shapes of them can be broadcast.
         - When the inputs are one tensor and one scalar, the scalar could only be a constant.

     Args:
-        x (Union[Tensor, number.Number, bool]): The first input is a number.Number or
+        input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
             a bool or a tensor whose data type is
             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ or
             `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_.
-        y (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
+        other (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
             the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool\_.
             When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.

     Returns:
-        Tensor, the shape is the same as the one of the input `x` , `y` after broadcasting,
+        Tensor, the shape is the same as the one of the input `input` , `other` after broadcasting,
         and the data type is the one with higher precision or higher digits among the two inputs.

     Raises:
-        TypeError: If `x` and `y` is not one of the following: Tensor, number.Number, bool.
+        TypeError: If `input` and `other` is not one of the following: Tensor, number.Number, bool.

     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``

@@ -304,7 +305,7 @@ def add(x, y):
         >>> print(output.dtype)
         Float32
         """
-    return tensor_add(x, y)
+    return _get_cache_prim(P.Add)()(input, other)


 def addcdiv(input_data, x1, x2, value):

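For orientation, the new return statement builds a cached ops.Add primitive and applies it. The caching helper _get_cache_prim is internal, so the sketch below, which assumes a standard MindSpore install, shows the equivalence using only the public primitive and functional interfaces:

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops

    input_x = Tensor(np.array([1.0, 2.0, 3.0]), ms.float32)
    other = Tensor(np.array([4.0, 5.0, 6.0]), ms.float32)

    add_prim = ops.Add()               # the Add primitive behind ops.add
    print(add_prim(input_x, other))    # [5. 7. 9.]
    print(ops.add(input_x, other))     # same result via the functional wrapper
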
@@ -0,0 +1,100 @@
# Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.common.dtype as mstype
import mindspore.nn as nn
from mindspore import Tensor
from mindspore import context
from mindspore import ops


class Net(nn.Cell):
    def construct(self, x, other):
        return x.add(other)


class NetAdd(nn.Cell):
    def construct(self, x, other):
        return ops.add(x, other)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_add(mode):
    """
    Feature: Tensor.add()
    Description: Verify the result of Tensor.add
    Expectation: success
    """
    context.set_context(mode=mode)
    net = Net()
    x = Tensor([1, 2, 3], dtype=mstype.float32)
    y = Tensor([4, 5, 6], dtype=mstype.float32)
    output = net(x, y)
    expected = np.array([5, 7, 9], dtype=np.float32)
    assert np.allclose(output.asnumpy(), expected)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_add_alpha(mode):
    """
    Feature: Tensor.add()
    Description: Verify the result of Tensor.add
    Expectation: success
    """
    context.set_context(mode=mode)
    net = Net()
    x = Tensor([1, 2, 3], dtype=mstype.float32)
    y = Tensor([2, 2, 2], dtype=mstype.float32)
    output = net(x, y)
    expected = np.array([3, 4, 5], dtype=np.float32)
    assert np.allclose(output.asnumpy(), expected)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_add_ops(mode):
    """
    Feature: ops.add()
    Description: Verify the result of ops.add
    Expectation: success
    """
    context.set_context(mode=mode)
    net = NetAdd()
    x = Tensor([3, 4, 5], dtype=mstype.float32)
    y = Tensor([1, 2, 3], dtype=mstype.float32)
    output = net(x, y)
    expected = np.array([4, 6, 8], dtype=np.float32)
    assert np.allclose(output.asnumpy(), expected)
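For readers who want to exercise the same logic outside the pytest harness, a minimal standalone sketch using the Cell pattern from the tests, assuming a standard MindSpore install (the AddCell name is illustrative):

    import numpy as np
    import mindspore.common.dtype as mstype
    import mindspore.nn as nn
    from mindspore import Tensor, context


    class AddCell(nn.Cell):
        """Same pattern as the Net cell in the test file: forwards to Tensor.add."""
        def construct(self, x, other):
            return x.add(other)


    # Run in both graph and pynative modes, as the parametrized tests do.
    for mode in (context.GRAPH_MODE, context.PYNATIVE_MODE):
        context.set_context(mode=mode)
        out = AddCell()(Tensor([1, 2, 3], dtype=mstype.float32),
                        Tensor([4, 5, 6], dtype=mstype.float32))
        assert np.allclose(out.asnumpy(), np.array([5, 7, 9], dtype=np.float32))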