tensor_neg_master

yide12 2022-10-25 20:35:51 +08:00
parent d5dd406703
commit b48f5c41d4
8 changed files with 86 additions and 0 deletions


@@ -0,0 +1,9 @@
mindspore.Tensor.negative
==========================

.. py:method:: mindspore.Tensor.negative()

    Computes the negative of the current Tensor element-wise.

    Returns:
        Tensor, where each element is the negative of the corresponding element of the current Tensor.
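For quick reference, here is a usage sketch that mirrors the docstring example added to the Tensor class later in this commit (it assumes the standard numpy and mindspore imports):

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor

    x = Tensor(np.array([1, 2, -1, 2, 0, -3.5]), ms.float32)
    print(x.negative())  # element-wise negation: [-1. -2. 1. -2. 0. 3.5]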


@@ -92,6 +92,7 @@ mindspore.Tensor
mindspore.Tensor.log
mindspore.Tensor.log1p
mindspore.Tensor.logit
mindspore.Tensor.negative
mindspore.Tensor.pow
mindspore.Tensor.round
mindspore.Tensor.sigmoid


@@ -98,6 +98,7 @@ Element-wise Methods
mindspore.Tensor.log1p
mindspore.Tensor.logit
mindspore.Tensor.negative
mindspore.Tensor.pow
mindspore.Tensor.round
mindspore.Tensor.sigmoid
mindspore.Tensor.sqrt


@@ -231,6 +231,7 @@ BuiltInTypeMap &GetMethodMap() {
{"lerp", std::string("lerp")}, // lerp()
{"log1p", std::string("log1p")}, // P.Log1p()
{"logit", std::string("logit")}, // Logit()
{"negative", std::string("negative")}, // neg()
{"log_matrix_determinant", std::string("log_matrix_determinant")}, // log_matrix_determinant()
{"matrix_determinant", std::string("matrix_determinant")}, // log_matrix_determinant()
{"max", std::string("max")}, // P.reduce_max()


@@ -2555,6 +2555,13 @@ def unsorted_segment_prod(x, segment_ids, num_segments):
    return F.unsorted_segment_prod(x, segment_ids, num_segments)


def negative(x):
    r"""
    Return a new tensor with the negative of the elements of input.
    """
    return F.neg(x)


def nonzero(x):
    """
    Return a Tensor of the positions of all non-zero values.

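The negative helper above is a thin wrapper that forwards to F.neg. As a minimal sketch, calling the functional interface directly gives the same values as Tensor.negative(); this assumes mindspore.ops.neg is the public name that F.neg resolves to:

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops

    x = Tensor(np.array([-5.0, 1.5, 3.0, 100.0]), ms.float32)
    print(ops.neg(x))  # element-wise negation of x; same values the new Tensor.negative() returns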

@@ -1870,6 +1870,25 @@ class Tensor(Tensor_):
        self._init_check()
        return tensor_operator_registry.get('lerp')(self, end, weight)

    def negative(self):
        r"""
        Return a new tensor with the negative of the elements of input.

        Returns:
            Tensor, with the negative of the elements of the self Tensor.

        Supported Platforms:
            ``Ascend`` ``GPU`` ``CPU``

        Examples:
            >>> x = Tensor(np.array([1, 2, -1, 2, 0, -3.5]), mindspore.float32)
            >>> output = x.negative()
            >>> print(output)
            [-1. -2. 1. -2. 0. 3.5]
        """
        self._init_check()
        return tensor_operator_registry.get("negative")(self)

    def norm(self, axis, p=2, keep_dims=False, epsilon=1e-12):
        """
        Returns the matrix norm or vector norm of a given tensor.


@@ -345,6 +345,7 @@ tensor_operator_registry.register('acosh', acosh)
tensor_operator_registry.register('cosh', P.Cosh)
tensor_operator_registry.register('asin', asin)
tensor_operator_registry.register('pow', P.Pow)
tensor_operator_registry.register('negative', neg)
tensor_operator_registry.register('amin', amin)
tensor_operator_registry.register('amax', amax)
tensor_operator_registry.register('mean', P.ReduceMean)
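The registration above is what Tensor.negative looks up at call time via tensor_operator_registry.get("negative")(self). A toy sketch of that name-to-callable dispatch pattern, for illustration only (not the actual MindSpore registry implementation):

    class ToyRegistry:
        """Minimal name-to-callable registry illustrating the dispatch pattern."""

        def __init__(self):
            self._ops = {}

        def register(self, name, func):
            self._ops[name] = func

        def get(self, name):
            return self._ops[name]


    toy = ToyRegistry()
    toy.register('negative', lambda x: -x)  # stand-in for the real neg functional
    print(toy.get('negative')(5))           # -5

In the real code the name 'negative' is simply bound to the functional neg, so the Tensor method and the functional API stay in sync.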


@@ -0,0 +1,47 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest

import mindspore as ms
import mindspore.nn as nn
from mindspore import Tensor


class NegNet(nn.Cell):
    def construct(self, x):
        return x.negative()


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def test_tensor_negative(mode):
    """
    Feature: tensor.negative
    Description: Verify the result of negative
    Expectation: success
    """
    ms.set_context(mode=mode)
    x = Tensor(np.array([-5.0, 1.5, 3.0, 100.0]), ms.float32)
    net = NegNet()
    output = net(x)
    expect_output = [5., -1.5, -3., -100.]
    assert np.allclose(output.asnumpy(), expect_output)