!47562 add tensor diff frac

Merge pull request !47562 from Henry Shi/tensor_frac_diff
i-robot 2023-01-12 08:18:00 +00:00 committed by Gitee
commit 3e8fba3878
GPG Key ID: 173E9B9CA92EEF8F
16 changed files with 271 additions and 0 deletions


@@ -225,6 +225,7 @@ mindspore.ops
mindspore.ops.cosh
mindspore.ops.cosine_similarity
mindspore.ops.cov
mindspore.ops.diff
mindspore.ops.deg2rad
mindspore.ops.digamma
mindspore.ops.div
@@ -238,6 +239,7 @@ mindspore.ops
mindspore.ops.floor_mod
mindspore.ops.float_power
mindspore.ops.fmod
mindspore.ops.frac
mindspore.ops.heaviside
mindspore.ops.hypot
mindspore.ops.i0


@@ -0,0 +1,6 @@
mindspore.Tensor.diff
=====================
.. py:method:: mindspore.Tensor.diff(n=1, axis=-1, prepend=None, append=None)

    For details, please refer to :func:`mindspore.ops.diff`.
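
A minimal sketch of the method form, mirroring the functional test added in this merge request::

    import mindspore as ms

    x = ms.Tensor([1, 3, 2], dtype=ms.float32)
    print(x.diff())  # expected values: [2., -1.]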


@@ -0,0 +1,6 @@
mindspore.Tensor.frac
=====================
.. py:method:: mindspore.Tensor.frac()

    For details, please refer to :func:`mindspore.ops.frac`.
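
A minimal sketch of the method form, mirroring the functional test added in this merge request::

    import mindspore as ms

    x = ms.Tensor([1, 2.5, -3.2], dtype=ms.float32)
    print(x.frac())  # expected values: [0., 0.5, -0.2]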


@@ -93,6 +93,7 @@ mindspore.Tensor
mindspore.Tensor.diag
mindspore.Tensor.diagflat
mindspore.Tensor.diagonal
mindspore.Tensor.diff
mindspore.Tensor.digamma
mindspore.Tensor.div
mindspore.Tensor.divide
@@ -119,6 +120,7 @@ mindspore.Tensor
mindspore.Tensor.flush_from_cache
mindspore.Tensor.fmod
mindspore.Tensor.fold
mindspore.Tensor.frac
mindspore.Tensor.from_numpy
mindspore.Tensor.gather
mindspore.Tensor.gather_elements


@@ -0,0 +1,22 @@
mindspore.ops.diff
==================
.. py:function:: mindspore.ops.diff(x, n=1, axis=-1, prepend=None, append=None)
Computes the n-th forward difference of the input Tensor along the given axis.

Args:
    - **x** (Tensor) - The input Tensor. The data type of the elements of `x` cannot be uint16, uint32 or uint64.
    - **n** - The order of the difference, applied recursively. Currently only 1 is supported. Default: 1.
    - **axis** - The axis along which the difference is computed; by default the last axis is used. Default: -1.
    - **prepend** - Values to prepend to `x` along `axis` before computing the difference. Their number of dimensions must match that of `x`, and their shape must match the shape of `x` in every dimension except `axis`. Default: None.
    - **append** - Values to append to `x` along `axis` before computing the difference. Their number of dimensions must match that of `x`, and their shape must match the shape of `x` in every dimension except `axis`. Default: None.

Returns:
    Tensor, the n-th forward differences of the input computed along `axis`.

Raises:
    - **TypeError** - If `x` is not a Tensor.
    - **TypeError** - If the data type of the elements of `x` is uint16, uint32 or uint64.
    - **TypeError** - If the dimension of `x` is less than 1.
    - **RuntimeError** - If `n` is not 1.
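
A minimal usage sketch, based on the test case added in this merge request (the first-order forward difference is ``out[i] = x[i + 1] - x[i]``)::

    import mindspore as ms
    import mindspore.ops as ops

    x = ms.Tensor([1, 3, 2], dtype=ms.float32)
    # first-order difference along the last axis: [3 - 1, 2 - 3]
    print(ops.diff(x))  # expected values: [2., -1.]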


@@ -0,0 +1,15 @@
mindspore.ops.frac
==================
.. py:function:: mindspore.ops.frac(x)
Computes the fractional part of each element of `x`.

Args:
    - **x** (Tensor) - The input Tensor.

Returns:
    Tensor, the fractional part of each element of `x`.

Raises:
    - **TypeError** - If `x` is not a Tensor.
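
A minimal usage sketch, based on the test case added in this merge request (the fractional part keeps the sign of the input)::

    import mindspore as ms
    import mindspore.ops as ops

    x = ms.Tensor([1, 2.5, -3.2], dtype=ms.float32)
    print(ops.frac(x))  # expected values: [0., 0.5, -0.2]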


@@ -99,6 +99,7 @@
mindspore.Tensor.diag
mindspore.Tensor.diagflat
mindspore.Tensor.diagonal
mindspore.Tensor.diff
mindspore.Tensor.digamma
mindspore.Tensor.div
mindspore.Tensor.divide
@@ -125,6 +126,7 @@
mindspore.Tensor.flush_from_cache
mindspore.Tensor.fold
mindspore.Tensor.fmod
mindspore.Tensor.frac
mindspore.Tensor.from_numpy
mindspore.Tensor.gather
mindspore.Tensor.gather_elements


@@ -227,6 +227,7 @@ Element-by-Element Operations
mindspore.ops.cov
mindspore.ops.deg2rad
mindspore.ops.digamma
mindspore.ops.diff
mindspore.ops.div
mindspore.ops.divide
mindspore.ops.erf
@@ -238,6 +239,7 @@ Element-by-Element Operations
mindspore.ops.floor_mod
mindspore.ops.float_power
mindspore.ops.fmod
mindspore.ops.frac
mindspore.ops.heaviside
mindspore.ops.hypot
mindspore.ops.i0


@@ -460,6 +460,8 @@ BuiltInTypeMap &GetMethodMap() {
{"trunc", std::string("trunc")}, // trunc()
{"where", std::string("where")}, // where()
{"imag", std::string("imag")}, // imag()
{"diff", std::string("diff")}, // diff()
{"frac", std::string("frac")}, // frac()
}},
{kObjectTypeRowTensorType,
{


@@ -4260,3 +4260,17 @@ def imag(input):
    Returns a new tensor containing imaginary value of the input.
    """
    return F.imag(input)


def diff(x):
    r"""
    For details, please refer to :func:`mindspore.ops.diff`.
    """
    return F.diff(x)


def frac(x):
    r"""
    For details, please refer to :func:`mindspore.ops.frac`.
    """
    return F.frac(x)


@@ -2561,6 +2561,20 @@ class Tensor(Tensor_):
        self._init_check()
        return tensor_operator_registry.get('matrix_determinant')(self)

    def diff(self):
        r"""
        For details, please refer to :func:`mindspore.ops.diff`.
        """
        self._init_check()
        return tensor_operator_registry.get('diff')(self)

    def frac(self):
        r"""
        For details, please refer to :func:`mindspore.ops.frac`.
        """
        self._init_check()
        return tensor_operator_registry.get('frac')(self)

    def digamma(self):
        r"""
        For details, please refer to :func:`mindspore.ops.digamma`.


@@ -364,9 +364,11 @@ tensor_operator_registry.register('cholesky', P.Cholesky)
tensor_operator_registry.register('cholesky_inverse', P.CholeskyInverse)
tensor_operator_registry.register('expand', expand)
tensor_operator_registry.register('cumprod', cumprod)
tensor_operator_registry.register('diff', diff)
tensor_operator_registry.register('div', div)
tensor_operator_registry.register('equal', equal)
tensor_operator_registry.register('expm1', expm1)
tensor_operator_registry.register('frac', frac)
tensor_operator_registry.register('isinf', isinf)
tensor_operator_registry.register('isnan', isnan)
tensor_operator_registry.register('is_complex', is_complex)


@@ -0,0 +1,46 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest

import mindspore as ms
import mindspore.nn as nn
import mindspore.ops as ops


class Net(nn.Cell):
    def construct(self, x):
        return ops.diff(x)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def test_diff_normal(mode):
    """
    Feature: diff
    Description: Verify the result of diff
    Expectation: success
    """
    ms.set_context(mode=mode)
    net = Net()
    x = ms.Tensor([1, 3, 2], dtype=ms.float32)
    output = net(x)
    expect_output = np.array([2, -1])
    assert np.allclose(output.asnumpy(), expect_output)


@@ -0,0 +1,46 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest

import mindspore as ms
import mindspore.nn as nn
import mindspore.ops as ops


class Net(nn.Cell):
    def construct(self, x):
        return ops.frac(x)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def test_frac_normal(mode):
    """
    Feature: frac
    Description: Verify the result of frac
    Expectation: success
    """
    ms.set_context(mode=mode)
    net = Net()
    x = ms.Tensor([1, 2.5, -3.2], dtype=ms.float32)
    output = net(x)
    expect_output = np.array([0.0000, 0.5000, -0.2000])
    assert np.allclose(output.asnumpy(), expect_output)


@@ -0,0 +1,45 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest

import mindspore as ms
import mindspore.nn as nn


class Net(nn.Cell):
    def construct(self, x):
        return x.diff()


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def test_diff_normal(mode):
    """
    Feature: diff
    Description: Verify the result of diff
    Expectation: success
    """
    ms.set_context(mode=mode)
    net = Net()
    x = ms.Tensor([1, 3, 2], dtype=ms.float32)
    output = net(x)
    expect_output = np.array([2, -1])
    assert np.allclose(output.asnumpy(), expect_output)


@@ -0,0 +1,45 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest

import mindspore as ms
import mindspore.nn as nn


class Net(nn.Cell):
    def construct(self, x):
        return x.frac()


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def test_frac_normal(mode):
    """
    Feature: frac
    Description: Verify the result of frac
    Expectation: success
    """
    ms.set_context(mode=mode)
    net = Net()
    x = ms.Tensor([1, 2.5, -3.2], dtype=ms.float32)
    output = net(x)
    expect_output = np.array([0.0000, 0.5000, -0.2000])
    assert np.allclose(output.asnumpy(), expect_output, 1e-3, 1e-3)