!45672 tensor divide op

Merge pull request !45672 from 于振华/op_divided_develop_1116
i-robot 2022-11-18 01:48:37 +00:00 committed by Gitee
commit 4ec3a1f930
12 changed files with 216 additions and 0 deletions
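In short, this MR adds ops.divide and Tensor.divide as aliases for the existing div op. A quick illustrative sketch of the resulting API, with inputs borrowed from the new tests (the output comments are what div's documented semantics imply, not captured logs):

import numpy as np
from mindspore import Tensor, ops
import mindspore.common.dtype as mstype

x = Tensor(np.array([1.0, 5.0, 9.5]), mstype.float32)
y = Tensor(np.array([4.0, 2.0, 3.0]), mstype.float32)

print(ops.divide(x, y))                         # true division: [0.25 2.5 3.1666667]
print(ops.divide(x, y, rounding_mode="floor"))  # [0. 2. 3.]
print(x.divide(y, rounding_mode="trunc"))       # [0. 2. 3.]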


@@ -187,6 +187,7 @@ mindspore.ops.function
mindspore.ops.cosh
mindspore.ops.deg2rad
mindspore.ops.div
mindspore.ops.divide
mindspore.ops.erf
mindspore.ops.erfc
mindspore.ops.exp


@@ -0,0 +1,6 @@
mindspore.Tensor.divide
=======================

.. py:method:: mindspore.Tensor.divide(other, *, rounding_mode=None)

    For details, please refer to :func:`mindspore.ops.div`.
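A minimal sketch of the signature documented above (other is the divisor; rounding_mode is keyword-only; the expected value is implied by div's semantics):

import numpy as np
from mindspore import Tensor

x = Tensor(np.array([6.0, 7.0], np.float32))
y = Tensor(np.array([4.0, 2.0], np.float32))
print(x.divide(y))  # [1.5 3.5]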


@@ -82,6 +82,7 @@ mindspore.Tensor
mindspore.Tensor.diag
mindspore.Tensor.diagonal
mindspore.Tensor.div
mindspore.Tensor.divide
mindspore.Tensor.dtype
mindspore.Tensor.equal
mindspore.Tensor.erf


@@ -0,0 +1,7 @@
mindspore.ops.divide
====================

.. py:function:: mindspore.ops.divide(x, other, *, rounding_mode=None)

    Alias for ops.div().

    For details, please refer to :func:`mindspore.ops.div`.
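Since divide defers to ops.div, the rounding_mode keyword behaves as documented there; an illustrative sketch of the three modes (the floor/trunc split only shows up on negative quotients):

import numpy as np
from mindspore import Tensor, ops

x = Tensor(np.array([7.0, -7.0], np.float32))
y = Tensor(np.array([2.0, 2.0], np.float32))
print(ops.divide(x, y))                         # [ 3.5 -3.5]
print(ops.divide(x, y, rounding_mode="floor"))  # [ 3. -4.]  rounds toward -inf
print(ops.divide(x, y, rounding_mode="trunc"))  # [ 3. -3.]  rounds toward zero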


@@ -88,6 +88,7 @@
mindspore.Tensor.diag
mindspore.Tensor.diagonal
mindspore.Tensor.div
mindspore.Tensor.divide
mindspore.Tensor.dtype
mindspore.Tensor.equal
mindspore.Tensor.erf


@@ -188,6 +188,7 @@ Element-by-Element Operations
mindspore.ops.cosh
mindspore.ops.deg2rad
mindspore.ops.div
mindspore.ops.divide
mindspore.ops.erf
mindspore.ops.erfc
mindspore.ops.exp


@@ -374,6 +374,7 @@ BuiltInTypeMap &GetMethodMap() {
    {"expand", std::string("expand")},    // expand()
    {"cumprod", std::string("cumprod")},  // cumprod()
    {"div", std::string("div")},          // div()
    {"divide", std::string("div")},       // divide()
    {"equal", std::string("equal")},      // equal()
    {"expm1", std::string("expm1")},      // expm1()
    {"dim", prim::kPrimRank},             // P.Rank()


@@ -3885,6 +3885,13 @@ class Tensor(Tensor_):
        self._init_check()
        return tensor_operator_registry.get('div')(self, other, rounding_mode)

    def divide(self, other, *, rounding_mode=None):
        r"""
        For details, please refer to :func:`mindspore.ops.div`.
        """
        self._init_check()
        return tensor_operator_registry.get('div')(self, other, rounding_mode)

    def equal(self, other):
        r"""
        For details, please refer to :func:`mindspore.ops.equal`.
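Because divide and div fetch the same 'div' entry from the operator registry, the two Tensor methods should be interchangeable; a hypothetical equivalence check:

import numpy as np
from mindspore import Tensor

x = Tensor(np.array([9.5, -5.0], np.float32))
y = Tensor(np.array([3.0, 2.0], np.float32))
assert np.allclose(x.divide(y).asnumpy(), x.div(y).asnumpy())
assert np.allclose(x.divide(y, rounding_mode="floor").asnumpy(),
                   x.div(y, rounding_mode="floor").asnumpy())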


@@ -164,6 +164,7 @@ from .math_func import (
    multiply,
    tensor_div,
    div,
    divide,
    true_divide,
    tensor_floordiv,
    floor_div,


@@ -824,6 +824,14 @@ def div(input, other, rounding_mode=None):
    return output


def divide(x, other, *, rounding_mode=None):
    """
    Alias for ops.div().

    For details, please refer to :func:`mindspore.ops.div`.
    """
    return div(x, other, rounding_mode)


def floor_div(x, y):
    """
    Divides the first input tensor by the second input tensor element-wise and rounds down to the closest integer.

@@ -7330,6 +7338,7 @@ __all__ = [
    'multiply',
    'tensor_div',
    'div',
    'divide',
    'true_divide',
    'tensor_floordiv',
    'floor_div',
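With divide added to __all__, the alias is importable straight from mindspore.ops. Note that the bare * in its signature makes rounding_mode keyword-only, unlike div's positional parameter. An illustrative sketch:

import numpy as np
from mindspore import Tensor
from mindspore.ops import divide

x = Tensor(np.array([7.0, 9.5], np.float32))
y = Tensor(np.array([2.0, 3.0], np.float32))
out = divide(x, y, rounding_mode="floor")  # rounding_mode must be passed by keyword
print(out)  # [3. 3.]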


@@ -0,0 +1,91 @@
import numpy as np
import pytest

import mindspore.common.dtype as mstype
import mindspore.nn as nn
from mindspore import Tensor
from mindspore import context
from mindspore import ops


class NetNone(nn.Cell):
    def construct(self, x, other):
        return ops.divide(x, other)


class NetFloor(nn.Cell):
    def construct(self, x, other):
        return ops.divide(x, other, rounding_mode="floor")


class NetTrunc(nn.Cell):
    def construct(self, x, other):
        return ops.divide(x, other, rounding_mode="trunc")


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_divide_none(mode):
    """
    Feature: ops.divide()
    Description: Verify the result of ops.divide with rounding_mode=None
    Expectation: success
    """
    context.set_context(mode=mode)
    net = NetNone()
    x = Tensor(np.array([1.0, 5.0, 7.5]), mstype.float32)
    y = Tensor(np.array([4.0, 2.0, 3.0]), mstype.float32)
    output = net(x, y)
    expected = np.array([0.25, 2.5, 2.5], dtype=np.float32)
    assert np.allclose(output.asnumpy(), expected)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_divide_floor(mode):
    """
    Feature: ops.divide()
    Description: Verify the result of ops.divide with rounding_mode="floor"
    Expectation: success
    """
    context.set_context(mode=mode)
    net = NetFloor()
    x = Tensor(np.array([1.0, 5.0, 9.5]), mstype.float32)
    y = Tensor(np.array([4.0, 2.0, 3.0]), mstype.float32)
    output = net(x, y)
    expected = np.array([0.0, 2.0, 3.0], dtype=np.float32)
    assert np.allclose(output.asnumpy(), expected)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_divide_trunc(mode):
    """
    Feature: ops.divide()
    Description: Verify the result of ops.divide with rounding_mode="trunc"
    Expectation: success
    """
    context.set_context(mode=mode)
    net = NetTrunc()
    x = Tensor(np.array([1.0, 5.0, 9.5]), mstype.float32)
    y = Tensor(np.array([4.0, 2.0, 3.0]), mstype.float32)
    output = net(x, y)
    expected = np.array([0.0, 2.0, 3.0], dtype=np.float32)
    assert np.allclose(output.asnumpy(), expected)


@@ -0,0 +1,90 @@
import numpy as np
import pytest

import mindspore.common.dtype as mstype
import mindspore.nn as nn
from mindspore import Tensor
from mindspore import context


class NetNone(nn.Cell):
    def construct(self, x, other):
        return x.divide(other)


class NetFloor(nn.Cell):
    def construct(self, x, other):
        return x.divide(other, rounding_mode="floor")


class NetTrunc(nn.Cell):
    def construct(self, x, other):
        return x.divide(other, rounding_mode="trunc")


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_divide_none(mode):
    """
    Feature: tensor.divide()
    Description: Verify the result of tensor.divide with rounding_mode=None
    Expectation: success
    """
    context.set_context(mode=mode)
    net = NetNone()
    x = Tensor(np.array([1.0, 5.0, 7.5]), mstype.float32)
    y = Tensor(np.array([4.0, 2.0, 3.0]), mstype.float32)
    output = net(x, y)
    expected = np.array([0.25, 2.5, 2.5], dtype=np.float32)
    assert np.allclose(output.asnumpy(), expected)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_divide_floor(mode):
    """
    Feature: tensor.divide()
    Description: Verify the result of tensor.divide with rounding_mode="floor"
    Expectation: success
    """
    context.set_context(mode=mode)
    net = NetFloor()
    x = Tensor(np.array([1.0, 5.0, 9.5]), mstype.float32)
    y = Tensor(np.array([4.0, 2.0, 3.0]), mstype.float32)
    output = net(x, y)
    expected = np.array([0.0, 2.0, 3.0], dtype=np.float32)
    assert np.allclose(output.asnumpy(), expected)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_divide_trunc(mode):
    """
    Feature: tensor.divide()
    Description: Verify the result of tensor.divide with rounding_mode="trunc"
    Expectation: success
    """
    context.set_context(mode=mode)
    net = NetTrunc()
    x = Tensor(np.array([1.0, 5.0, 9.5]), mstype.float32)
    y = Tensor(np.array([4.0, 2.0, 3.0]), mstype.float32)
    output = net(x, y)
    expected = np.array([0.0, 2.0, 3.0], dtype=np.float32)
    assert np.allclose(output.asnumpy(), expected)