[feat][assistant][I4XJGA] add new Ascend operator Deg2Rad

This commit is contained in:
qkeys 2022-05-10 15:22:52 +08:00
parent ed67d9d4e8
commit f475205a48
3 changed files with 70 additions and 2 deletions

View File

@ -36,7 +36,7 @@ from .math_func import (addn, absolute, abs, tensor_add, add, neg_tensor, neg, t
invert, minimum, floor, logical_not, logical_or, logical_and, sin, cos, tan,
asin, acos, atan, sinh, cosh, tanh, asinh, acosh, atanh, atan2, bitwise_and, bitwise_or,
bitwise_xor, erf, erfc, cdist, bessel_i0, bessel_i0e, bessel_j0, bessel_j1, bessel_k0,
bessel_k0e, bessel_y0, bessel_y1, bessel_i1, bessel_i1e, bessel_k1, bessel_k1e, exp2)
bessel_k0e, bessel_y0, bessel_y1, bessel_i1, bessel_i1e, bessel_k1, bessel_k1e, exp2, deg2rad)
from .nn_func import (fast_gelu, hardshrink)
from .linalg_func import svd

View File

@ -15,14 +15,17 @@
"""Defines math operators with functional form."""
import math
import numpy as np
from mindspore.common import dtype as mstype
from mindspore.ops.primitive import constexpr
from mindspore.ops import operations as P
from ..operations.math_ops import (BesselJ0, BesselJ1, BesselK0, BesselK0e, BesselY0, BesselY1, BesselK1,
BesselK1e)
from ...common import dtype as mstype
from ...common.tensor import Tensor
from ..._c_expression import Tensor as Tensor_
from ..._checkparam import Validator as validator
@constexpr
@ -2485,6 +2488,50 @@ def bessel_k1e(x):
"""
return bessel_k1e_(x)
@constexpr
def _check_input_dtype(param_name, input_dtype, allow_dtypes, cls_name):
    # Compile-time (constexpr) dtype guard: delegates to Validator, which
    # raises TypeError when `input_dtype` is not one of `allow_dtypes`.
    # `param_name` and `cls_name` are used only to format the error message.
    validator.check_type_name(param_name, input_dtype, allow_dtypes, cls_name)
def deg2rad(x):
    """
    Converts angles in degrees to angles in radians element-wise.

    Args:
        x (Tensor): The input tensor of angles in degrees, with float16,
            float32 or float64 data type.

    Returns:
        Tensor, has the same shape and dtype as `x`.

    Raises:
        TypeError: If `x` is not a Tensor.
        TypeError: If dtype of `x` isn't float16, float32 or float64.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.array([[90.0, -90.0], [180.0, -180.0], [270.0, -270.0]]).astype(np.float32))
        >>> output = deg2rad(x)
        >>> print(output)
        [[ 1.5707964 -1.5707964]
         [ 3.1415927 -3.1415927]
         [ 4.712389  -4.712389 ]]
    """
    if not isinstance(x, (Tensor, Tensor_)):
        raise TypeError("The input x must be tensor")
    dtype_op = P.DType()
    x_dtype = dtype_op(x)
    _check_input_dtype("x", x_dtype, [mstype.float16, mstype.float32, mstype.float64], "")
    if x_dtype == mstype.float16:
        # Multiplying a float16 tensor by a Python float would upcast the
        # result; keep the scale factor as a float16 scalar tensor instead.
        out = x * (Tensor(math.pi / 180.0).astype(mstype.float16))
    else:
        out = x * math.pi / 180.0
    return out
#####################################
# Reduction Operation Functions.
#####################################
@ -2618,6 +2665,7 @@ __all__ = [
'bessel_i1e',
'bessel_k1',
'bessel_k1e',
'exp2'
'exp2',
'deg2rad'
]
__all__.sort()

View File

@ -417,6 +417,15 @@ class Exp2Func(nn.Cell):
return y
class Deg2radNet(nn.Cell):
    """Thin Cell wrapper around the functional ops.deg2rad, so the op can
    be driven by the test-case pipeline like any other network block."""

    def __init__(self):
        super(Deg2radNet, self).__init__()
        # Bind the functional op once at construction time.
        self._deg2rad_fn = ops.deg2rad

    def construct(self, x):
        return self._deg2rad_fn(x)
test_case_math_ops = [
('MatMulGrad', {
'block': GradWrap(NetWithLoss(MatMulNet())),
@ -486,6 +495,11 @@ test_case_math_ops = [
'block': Exp2Func(),
'desc_inputs': [Tensor(np.array([1.0, 2.0, 3.0], np.float16))],
}),
('Deg2rad', {
'block': Deg2radNet(),
'desc_inputs': [Tensor(np.array([[90.0, -90.0], [180.0, -180.0], [270.0, -270.0]], np.float32))],
'desc_bprop': [Tensor(np.array([[90.0, -90.0], [180.0, -180.0], [270.0, -270.0]], np.float32))],
}),
]
test_case_lists = [test_case_math_ops]
@ -544,6 +558,12 @@ raise_set = [
'desc_inputs': [Tensor(np.array([1.1, 2.2, 8.1, 2.1], np.float32)),
Tensor(np.array([0.2, 1.2, 2.1, 3.4], np.float32))],
'skip': ['backward']}),
('Deg2rad_1_Error', {
'block': (lambda x: Deg2radNet(), {'exception': TypeError}),
'desc_inputs': [0]}),
('Deg2rad_2_Error', {
'block': (lambda x: Deg2radNet(), {'exception': TypeError}),
'desc_inputs': [Tensor(np.array([[90, -90], [180, -180], [270, -270]], np.int32))]}),
]