forked from mindspore-Ecosystem/mindspore
!46602 for Tensor api: H
Merge pull request !46602 from 于振华/tensor_api_H_1208
This commit is contained in:
commit
78d2192e08
|
@ -0,0 +1,7 @@
|
|||
mindspore.Tensor.H
|
||||
==================
|
||||
|
||||
.. py:method:: mindspore.Tensor.H
|
||||
:property:
|
||||
|
||||
返回共轭和转置的矩阵(2-D张量)的视图。如果x是复数矩阵,x.H等价于x.swapaxes(0, 1).conj(),如果是实数矩阵则等价于x.swapaxes(0, 1)。
|
|
@ -123,6 +123,7 @@ mindspore.Tensor
|
|||
mindspore.Tensor.greater
|
||||
mindspore.Tensor.greater_equal
|
||||
mindspore.Tensor.gt
|
||||
mindspore.Tensor.H
|
||||
mindspore.Tensor.half
|
||||
mindspore.Tensor.hardshrink
|
||||
mindspore.Tensor.has_init
|
||||
|
|
|
@ -129,6 +129,7 @@
|
|||
mindspore.Tensor.greater
|
||||
mindspore.Tensor.greater_equal
|
||||
mindspore.Tensor.gt
|
||||
mindspore.Tensor.H
|
||||
mindspore.Tensor.half
|
||||
mindspore.Tensor.hardshrink
|
||||
mindspore.Tensor.has_init
|
||||
|
|
|
@ -503,6 +503,7 @@ BuiltInTypeMap &GetAttrMap() {
|
|||
{"dtype", prim::kPrimDType}, // C.dtype_
|
||||
{"size", std::string("size_")}, // C.size_
|
||||
{"ndim", std::string("ndim_")}, // C.ndim_
|
||||
{"H", std::string("H")}, // C.H
|
||||
{"T", std::string("T_")}, // C.T_
|
||||
{"itemsize", std::string("itemsize_")}, // C.itemsize_
|
||||
{"nbytes", std::string("nbytes_")}, // C.nbytes_
|
||||
|
|
|
@ -219,6 +219,14 @@ def bincount(x, weights=None, minlength=0):
|
|||
return F.bincount(x, weights, minlength)
|
||||
|
||||
|
||||
def H(x):
    """Return a view of a matrix (2-D tensor) conjugated and transposed.

    For complex matrices this is x.swapaxes(0, 1).conj(); for real
    matrices it is simply x.swapaxes(0, 1).
    """
    transposed = x.swapaxes(0, 1)
    is_complex = x.dtype in (mstype.complex64, mstype.complex128)
    return transposed.conj() if is_complex else transposed


||||
def size_(x):
|
||||
"""
|
||||
Return the number of elements in tensor `x`.
|
||||
|
|
|
@ -449,6 +449,17 @@ class Tensor(Tensor_):
|
|||
"""Return the number of tensor dimensions."""
|
||||
return len(self._shape)
|
||||
|
||||
@property
|
||||
def H(self):
|
||||
"""
|
||||
Returns a view of a matrix (2-D tensor) conjugated and transposed.
|
||||
x.H is equivalent to x.swapaxes(0, 1).conj() for complex matrices and x.swapaxes(0, 1) for real matrices.
|
||||
"""
|
||||
output = self.swapaxes(0, 1)
|
||||
if self.dtype in (mstype.complex64, mstype.complex128):
|
||||
return output.conj()
|
||||
return output
|
||||
|
||||
@property
|
||||
def has_init(self):
|
||||
"""Whether tensor is initialized."""
|
||||
|
|
|
@ -0,0 +1,97 @@
|
|||
# Copyright 2022 Huawei Technologies Co., Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ============================================================================
|
||||
import numpy as np
|
||||
import pytest
|
||||
|
||||
import mindspore as ms
|
||||
import mindspore.nn as nn
|
||||
from mindspore import Tensor
|
||||
|
||||
|
||||
class Net(nn.Cell):
    """Minimal cell exposing the Tensor.H property so it can be run in both graph and pynative modes."""

    def construct(self, x):
        # Forward the conjugate-transpose view of the 2-D input tensor.
        return x.H

||||
|
||||
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
@pytest.mark.skip(reason="No support")
def test_tensor_H_complex(mode):
    """
    Feature: tensor.H
    Description: Verify that H on a complex 1x2 matrix returns the 2x1 conjugate transpose.
    Expectation: success
    """
    ms.set_context(mode=mode)
    x = Tensor([[-1.5 + 7.8j, 3 + 5.75j]])
    net = Net()
    output = net(x)
    # Conjugate transpose: shape (1, 2) -> (2, 1), imaginary parts negated.
    expect_output = [[-1.5000-7.8000j],
                     [3.0000-5.7500j]]
    assert np.allclose(output.asnumpy(), expect_output)

||||
|
||||
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def test_tensor_positive_01(mode):
    """
    Feature: tensor.H
    Description: Verify that H on a real 2x3 matrix returns the plain 3x2 transpose (no conjugation).
    Expectation: success
    """
    ms.set_context(mode=mode)
    x = Tensor(np.array([[12., -51, 4], [6, 167, -68]]), ms.float32)
    net = Net()
    output = net(x)
    # Real input: H reduces to swapaxes(0, 1).
    expect_output = [[12.0, 6.0],
                     [-51.0, 167.0],
                     [4.0, -68.0]]
    assert np.allclose(output.asnumpy(), expect_output)

||||
|
||||
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def test_tensor_positive_02(mode):
    """
    Feature: tensor.H
    Description: Verify that H on a real 4x3 matrix returns the plain 3x4 transpose (no conjugation).
    Expectation: success
    """
    ms.set_context(mode=mode)
    x = Tensor(np.array([[12., -51, 4], [6, 167, -68], [-4, 24, -41], [-4, 24, -41]]), ms.float32)
    net = Net()
    output = net(x)
    # Real input: H reduces to swapaxes(0, 1).
    expect_output = [[12.0, 6., -4.0, -4.0],
                     [-51.0, 167.0, 24.0, 24.0],
                     [4.0, -68.0, -41.0, -41.0]]
    assert np.allclose(output.asnumpy(), expect_output)
Loading…
Reference in New Issue