!37734 [MSLITE][CPU] reduce func api fix

Merge pull request !37734 from Greatpan/reduce_doc_fix
This commit is contained in:
i-robot 2022-07-12 09:40:29 +00:00 committed by Gitee
commit ce749de80c
No known key found for this signature in database
GPG Key ID: 173E9B9CA92EEF8F
8 changed files with 52 additions and 52 deletions

View File

@ -212,13 +212,13 @@ Reduction算子
* - functional
- Description
* - mindspore.ops.reduce_max
* - mindspore.ops.amax
- Refer to :class:`mindspore.ops.ReduceMax`.
* - mindspore.ops.reduce_mean
* - mindspore.ops.mean
- Refer to :class:`mindspore.ops.ReduceMean`.
* - mindspore.ops.reduce_min
* - mindspore.ops.amin
- Refer to :class:`mindspore.ops.ReduceMin`.
* - mindspore.ops.reduce_prod
* - mindspore.ops.prod
- Refer to :class:`mindspore.ops.ReduceProd`.
* - mindspore.ops.reduce_sum
- Refer to :class:`mindspore.ops.ReduceSum`.

View File

@ -211,13 +211,13 @@ Reduction Operators
* - functional
- Description
* - mindspore.ops.reduce_max
* - mindspore.ops.amax
- Refer to :class:`mindspore.ops.ReduceMax`.
* - mindspore.ops.reduce_mean
* - mindspore.ops.mean
- Refer to :class:`mindspore.ops.ReduceMean`.
* - mindspore.ops.reduce_min
* - mindspore.ops.amin
- Refer to :class:`mindspore.ops.ReduceMin`.
* - mindspore.ops.reduce_prod
* - mindspore.ops.prod
- Refer to :class:`mindspore.ops.ReduceProd`.
* - mindspore.ops.reduce_sum
- Refer to :class:`mindspore.ops.ReduceSum`.

View File

@ -159,7 +159,7 @@ BuiltInTypeMap &GetMethodMap() {
{"xdivy", std::string("xdivy")}, // P.Xdivy
{"abs", std::string("abs_")}, // C.abs_
{"mean", std::string("mean")}, // C.mean
{"reduce_prod", std::string("reduce_prod")}, // C.reduce_prod
{"prod", std::string("prod")}, // C.reduce_prod
{"__truediv__", std::string("truediv")}, // C.truediv
{"__floordiv__", std::string("floordiv")}, // C.floordiv
{"__mod__", std::string("mod")}, // C.mod

View File

@ -108,7 +108,7 @@ def prod(x, axis=(), keep_dims=False):
>>> print(output)
6.0
"""
return F.reduce_prod(x, axis, keep_dims)
return F.prod(x, axis, keep_dims)
def addcdiv(input_data, x1, x2, value):

View File

@ -1508,7 +1508,7 @@ class Tensor(Tensor_):
6.0
"""
self._init_check()
return tensor_operator_registry.get('reduce_prod')(self, axis, keep_dims)
return tensor_operator_registry.get('prod')(self, axis, keep_dims)
def select(self, condition, y):
r"""

View File

@ -232,10 +232,10 @@ from .math_func import (
baddbmm,
cummin,
cummax,
reduce_min,
reduce_max,
reduce_mean,
reduce_prod,
amin,
amax,
mean,
prod,
all,
any,
sparse_segment_mean,

View File

@ -3820,7 +3820,7 @@ def logsumexp(x, axis, keep_dims=False):
return x_logsumexp + x_max
def reduce_min(x, axis, keep_dims=False):
def amin(x, axis, keep_dims=False):
r"""
Reduces a dimension of a tensor by the minimum value in the dimension, by default. And also can
reduce a dimension of `x` along the axis. Determine whether the dimensions of the output and input are the same by
@ -3844,7 +3844,7 @@ def reduce_min(x, axis, keep_dims=False):
Examples:
>>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> output = ops.reduce_min(x, 1, keep_dims=True)
>>> output = ops.amin(x, 1, keep_dims=True)
>>> result = output.shape
>>> print(result)
(3, 1, 5, 6)
@ -3852,25 +3852,25 @@ def reduce_min(x, axis, keep_dims=False):
>>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32)
>>> output = ops.reduce_min(x)
>>> output = ops.amin(x)
>>> print(output)
[[[1.]]]
>>> print(output.shape)
(1, 1, 1)
>>> # case 2: Reduces a dimension along axis 0.
>>> output = opops.reduce_min(x, 0)
>>> output = ops.amin(x, 0)
>>> print(output)
[[[1. 1. 1. 1. 1. 1.]
[2. 2. 2. 2. 2. 2.]
[3. 3. 3. 3. 3. 3.]]]
>>> # case 3: Reduces a dimension along axis 1.
>>> output = ops.reduce_min(x, 1)
>>> output = ops.amin(x, 1)
>>> print(output)
[[[1. 1. 1. 1. 1. 1.]]
[[4. 4. 4. 4. 4. 4.]]
[[7. 7. 7. 7. 7. 7.]]]
>>> # case 4: Reduces a dimension along axis 2.
>>> output = ops.reduce_min(x, 2)
>>> output = ops.amin(x, 2)
>>> print(output)
[[[1.]
[2.]
@ -3882,10 +3882,10 @@ def reduce_min(x, axis, keep_dims=False):
[8.]
[9.]]]
"""
return P.ReduceMin(keep_dims)(x, axis)
return _get_cache_prim(P.ReduceMin)(keep_dims)(x, axis)
def reduce_max(x, axis, keep_dims=False):
def amax(x, axis, keep_dims=False):
r"""
Reduces a dimension of a tensor by the maximum value in this dimension, by default. And also can
reduce a dimension of `x` along the axis. Determine whether the dimensions of the output and input are the same by
@ -3909,7 +3909,7 @@ def reduce_max(x, axis, keep_dims=False):
Examples:
>>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> output = ops.reduce_max(x, 1, keep_dims=True)
>>> output = ops.amax(x, 1, keep_dims=True)
>>> result = output.shape
>>> print(result)
(3, 1, 5, 6)
@ -3917,25 +3917,25 @@ def reduce_max(x, axis, keep_dims=False):
>>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32)
>>> output = ops.reduce_max(x)
>>> output = ops.amax(x)
>>> print(output)
[[[9.]]]
>>> print(output.shape)
(1, 1, 1)
>>> # case 2: Reduces a dimension along axis 0.
>>> output = ops.reduce_max(x, 0)
>>> output = ops.amax(x, 0)
>>> print(output)
[[[7. 7. 7. 7. 7. 7.]
[8. 8. 8. 8. 8. 8.]
[9. 9. 9. 9. 9. 9.]]]
>>> # case 3: Reduces a dimension along axis 1.
>>> output = ops.reduce_max(x, 1)
>>> output = ops.amax(x, 1)
>>> print(output)
[[[3. 3. 3. 3. 3. 3.]]
[[6. 6. 6. 6. 6. 6.]]
[[9. 9. 9. 9. 9. 9.]]]
>>> # case 4: Reduces a dimension along axis 2.
>>> output = ops.reduce_max(x, 2)
>>> output = ops.amax(x, 2)
>>> print(output)
[[[1.]
[2.]
@ -3947,10 +3947,10 @@ def reduce_max(x, axis, keep_dims=False):
[8.]
[9.]]]
"""
return P.ReduceMax(keep_dims)(x, axis)
return _get_cache_prim(P.ReduceMax)(keep_dims)(x, axis)
def reduce_mean(x, axis, keep_dims=False):
def mean(x, axis, keep_dims=False):
r"""
Reduces a dimension of a tensor by averaging all elements in the dimension, by default. And also can reduce
a dimension of `x` along the axis. Determine whether the dimensions of the output and input are the same by
@ -3974,7 +3974,7 @@ def reduce_mean(x, axis, keep_dims=False):
Examples:
>>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> output = ops.reduce_mean(x, 1, keep_dims=True)
>>> output = ops.mean(x, 1, keep_dims=True)
>>> result = output.shape
>>> print(result)
(3, 1, 5, 6)
@ -3983,25 +3983,25 @@ def reduce_mean(x, axis, keep_dims=False):
... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
... [[6, 6, 6, 6, 6, 6], [8, 8, 8, 8, 8, 8], [10, 10, 10, 10, 10, 10]]]),
... mindspore.float32)
>>> output = ops.reduce_mean(x)
>>> output = ops.mean(x)
>>> print(output)
[[[5.]]]
>>> print(output.shape)
(1, 1, 1)
>>> # case 2: Reduces a dimension along the axis 0
>>> output = ops.reduce_mean(x, 0)
>>> output = ops.mean(x, 0)
>>> print(output)
[[[4. 4. 4. 4. 4. 4.]
[5. 5. 5. 5. 5. 5.]
[6. 6. 6. 6. 6. 6.]]]
>>> # case 3: Reduces a dimension along the axis 1
>>> output = ops.reduce_mean(x, 1)
>>> output = ops.mean(x, 1)
>>> print(output)
[[[2. 2. 2. 2. 2. 2.]]
[[5. 5. 5. 5. 5. 5.]]
[[8. 8. 8. 8. 8. 8.]]]
>>> # case 4: Reduces a dimension along the axis 2
>>> output = ops.reduce_mean(x, 2)
>>> output = ops.mean(x, 2)
>>> print(output)
[[[ 2.]
[ 2.]
@ -4014,10 +4014,10 @@ def reduce_mean(x, axis, keep_dims=False):
[10.]]]
"""
return P.ReduceMean(keep_dims)(x, axis)
return _get_cache_prim(P.ReduceMean)(keep_dims)(x, axis)
def reduce_prod(x, axis, keep_dims=False):
def prod(x, axis, keep_dims=False):
r"""
Reduces a dimension of a tensor by multiplying all elements in the dimension, by default. And also can
reduce a dimension of `x` along the axis. Determine whether the dimensions of the output and input are the same by
@ -4041,7 +4041,7 @@ def reduce_prod(x, axis, keep_dims=False):
Examples:
>>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> output = ops.reduce_prod(x, 1, keep_dims=True)
>>> output = ops.prod(x, 1, keep_dims=True)
>>> result = output.shape
>>> print(result)
(3, 1, 5, 6)
@ -4049,25 +4049,25 @@ def reduce_prod(x, axis, keep_dims=False):
>>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32)
>>> output = ops.reduce_prod(x)
>>> output = ops.prod(x)
>>> print(output)
[[[2.2833798e+33]]]
>>> print(output.shape)
(1, 1, 1)
>>> # case 2: Reduces a dimension along axis 0.
>>> output = ops.reduce_prod(x, 0)
>>> output = ops.prod(x, 0)
>>> print(output)
[[[ 28. 28. 28. 28. 28. 28.]
[ 80. 80. 80. 80. 80. 80.]
[162. 162. 162. 162. 162. 162.]]]
>>> # case 3: Reduces a dimension along axis 1.
>>> output = ops.reduce_prod(x, 1)
>>> output = ops.prod(x, 1)
>>> print(output)
[[[ 6. 6. 6. 6. 6. 6.]]
[[120. 120. 120. 120. 120. 120.]]
[[504. 504. 504. 504. 504. 504.]]]
>>> # case 4: Reduces a dimension along axis 2.
>>> output = ops.reduce_prod(x, 2)
>>> output = ops.prod(x, 2)
>>> print(output)
[[[1.00000e+00]
[6.40000e+01]
@ -4079,7 +4079,7 @@ def reduce_prod(x, axis, keep_dims=False):
[2.62144e+05]
[5.31441e+05]]]
"""
return P.ReduceProd(keep_dims)(x, axis)
return _get_cache_prim(P.ReduceProd)(keep_dims)(x, axis)
def norm(input_x, axis, p=2, keep_dims=False, epsilon=1e-12):
@ -4996,10 +4996,10 @@ __all__ = [
'baddbmm',
'cummin',
'cummax',
'reduce_min',
'reduce_max',
'reduce_mean',
'reduce_prod',
'amin',
'amax',
'mean',
'prod',
'all',
'any',
'sparse_segment_mean',

View File

@ -803,7 +803,7 @@ tensor_operator_registry.register('tan', P.Tan)
tensor_operator_registry.register('cosh', P.Cosh)
tensor_operator_registry.register('pow', P.Pow)
tensor_operator_registry.register('mean', P.ReduceMean)
tensor_operator_registry.register('reduce_prod', reduce_prod)
tensor_operator_registry.register('prod', prod)
tensor_operator_registry.register('round', P.Round)
tensor_operator_registry.register('reshape', P.Reshape)
tensor_operator_registry.register('xlogy', P.Xlogy)