Added accumulate_n functional API and test case.

alashkari 2022-08-23 12:58:02 +00:00
parent 2b33ed4ab1
commit 633921ef1f
4 changed files with 87 additions and 1 deletion

View File

@@ -128,6 +128,7 @@ Element-by-Element Operations
    :template: classtemplate.rst

    mindspore.ops.abs
    mindspore.ops.accumulate_n
    mindspore.ops.acos
    mindspore.ops.acosh
    mindspore.ops.add

View File

@@ -120,6 +120,7 @@ from .parameter_func import (
    index_add,
)
from .math_func import (
    accumulate_n,
    addn,
    absolute,
    abs,
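With this re-export in place, the new function should be reachable from the public `mindspore.ops` namespace as well as via `mindspore.ops.functional`, which is how the new test imports it. A minimal sanity sketch (the assertion targets are assumptions based on this diff, not part of the commit):

import mindspore.ops as ops
from mindspore.ops import functional as F

# Both entry points should now expose the name added by this import.
assert callable(ops.accumulate_n)
assert callable(F.accumulate_n)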

View File

@@ -5677,6 +5677,42 @@ def remainder(x, y):
    out = x - tensor_floordiv(x, y) * y
    return out


def accumulate_n(*x):
    r"""
    Computes accumulation of all input tensors element-wise.

    AccumulateNV2 is similar to AddN, but there is a significant difference
    between them: AccumulateNV2 does not wait for all of its inputs to be
    ready before summing. That is to say, AccumulateNV2 is able to save memory
    when inputs become ready at different times, since the minimum temporary
    storage is proportional to the output size rather than the input size.

    Inputs:
        - **x** (Union(tuple[Tensor], list[Tensor])) - The input tuple or list
          is made up of multiple tensors, with numeric dtype, to be added
          together. Each element of the tuple or list must have the same shape.

    Outputs:
        Tensor, with the same shape and dtype as each entry of `x`.

    Raises:
        TypeError: If `x` is neither a tuple nor a list.
        ValueError: If the input elements do not all have the same shape.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> x = Tensor(np.array([1, 2, 3]), mindspore.float32)
        >>> y = Tensor(np.array([4, 5, 6]), mindspore.float32)
        >>> output = ops.accumulate_n(x, y, x, y)
        >>> print(output)
        [10. 14. 18.]
    """
    accumulate_ = _get_cache_prim(P.AccumulateNV2)()
    return accumulate_(x)

__all__ = [
    'addn',
@@ -5816,6 +5852,7 @@ __all__ = [
    'frac',
    'kron',
    'rot90',
    'remainder'
    'remainder',
    'accumulate_n'
]
__all__.sort()
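For context, a minimal sketch of calling the new API from user code, following the docstring example above; the `ops` import path and Ascend availability are assumptions taken from the docstring rather than shown in this diff:

import numpy as np
import mindspore
from mindspore import Tensor, ops

# Two small float32 tensors, mirroring the docstring example.
x = Tensor(np.array([1, 2, 3]), mindspore.float32)
y = Tensor(np.array([4, 5, 6]), mindspore.float32)

# The summands are passed as positional arguments; element-wise this
# computes [1+4+1+4, 2+5+2+5, 3+6+3+6] = [10, 14, 18].
output = ops.accumulate_n(x, y, x, y)
print(output)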

View File

@@ -0,0 +1,47 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest

import mindspore.context as context
from mindspore import Tensor
from mindspore.ops import functional as F

# all cases tested against dchip


def accumulate_n_forward_functional(nptype):
    input_x = Tensor(np.array([1, 2, 3]).astype(nptype))
    input_y = Tensor(np.array([4, 5, 6]).astype(nptype))
    output = F.accumulate_n(input_x, input_y, input_x, input_y)
    expected = np.array([10., 14., 18.])
    np.testing.assert_array_almost_equal(output.asnumpy(), expected)


@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_accumulate_n_forward_float32_functional():
    """
    Feature: test accumulate_n forward.
    Description: test float32 inputs.
    Expectation: the result matches the numpy result.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    accumulate_n_forward_functional(np.float32)

    context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
    accumulate_n_forward_functional(np.float32)
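As a quick cross-check of the semantics described in the docstring (not part of this commit), accumulate_n over a set of tensors should agree with addn applied to a tuple of the same tensors; a minimal sketch, assuming an Ascend backend is configured as in the test above:

import numpy as np
import mindspore
from mindspore import Tensor, ops

x = Tensor(np.array([1, 2, 3]), mindspore.float32)
y = Tensor(np.array([4, 5, 6]), mindspore.float32)

# addn takes a single tuple/list argument, while accumulate_n takes the
# summands as varargs; both should produce the same element-wise sum.
a = ops.addn((x, y, x, y))
b = ops.accumulate_n(x, y, x, y)
np.testing.assert_array_almost_equal(a.asnumpy(), b.asnumpy())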