From 0a2525a522ae00d8975a4e6e425bec7abb1194b7 Mon Sep 17 00:00:00 2001
From: wangrao
Date: Tue, 9 Feb 2021 13:02:59 +0800
Subject: [PATCH] add sin, cos, tan, atan for cpu

---
 .../cpu/arithmetic_self_cpu_kernel.cc         | 70 +++++++++++++------
 .../cpu/arithmetic_self_cpu_kernel.h          | 16 +++++
 .../backend/kernel_compiler/cpu/cpu_kernel.h  |  5 ++
 .../cpu/eltwise_grad_cpu_kernel.cc            | 25 +++++++
 .../cpu/eltwise_grad_cpu_kernel.h             |  9 +++
 mindspore/core/base/core_ops.h                |  1 +
 mindspore/ops/operations/math_ops.py          | 12 ++--
 tests/st/ops/cpu/test_atan_grad_op.py         | 46 ++++++++++++
 tests/st/ops/cpu/test_atan_op.py              | 46 ++++++++++++
 tests/st/ops/cpu/test_cos_op.py               | 46 ++++++++++++
 tests/st/ops/cpu/test_sin_op.py               | 46 ++++++++++++
 tests/st/ops/cpu/test_tan_op.py               | 46 ++++++++++++
 12 files changed, 339 insertions(+), 29 deletions(-)
 create mode 100644 tests/st/ops/cpu/test_atan_grad_op.py
 create mode 100644 tests/st/ops/cpu/test_atan_op.py
 create mode 100644 tests/st/ops/cpu/test_cos_op.py
 create mode 100644 tests/st/ops/cpu/test_sin_op.py
 create mode 100644 tests/st/ops/cpu/test_tan_op.py

diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/arithmetic_self_cpu_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/cpu/arithmetic_self_cpu_kernel.cc
index 90fed15b5d4..a779ce551c7 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/cpu/arithmetic_self_cpu_kernel.cc
+++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/arithmetic_self_cpu_kernel.cc
@@ -16,6 +16,7 @@
 #include <cmath>
 #include <string>
 #include <thread>
+#include <map>
 #include "backend/kernel_compiler/cpu/arithmetic_self_cpu_kernel.h"
 #include "runtime/device/cpu/cpu_device_address.h"
 
@@ -107,6 +108,34 @@ void ACos(const T *in, T *out, size_t start, size_t end) {
     out[i] = acos(in[i]);
   }
 }
+
+template <typename T>
+void Atan(const T *in, T *out, size_t start, size_t end) {
+  for (size_t i = start; i < end; i++) {
+    out[i] = atan(in[i]);
+  }
+}
+
+template <typename T>
+void Sin(const T *in, T *out, size_t start, size_t end) {
+  for (size_t i = start; i < end; i++) {
+    out[i] = sin(in[i]);
+  }
+}
+
+template <typename T>
+void Cos(const T *in, T *out, size_t start, size_t end) {
+  for (size_t i = start; i < end; i++) {
+    out[i] = cos(in[i]);
+  }
+}
+
+template <typename T>
+void Tan(const T *in, T *out, size_t start, size_t end) {
+  for (size_t i = start; i < end; i++) {
+    out[i] = tan(in[i]);
+  }
+}
 }  // namespace
 
 void ArithmeticSelfCPUKernel::InitKernel(const CNodePtr &kernel_node) {
@@ -134,6 +163,14 @@ void ArithmeticSelfCPUKernel::InitKernel(const CNodePtr &kernel_node) {
     operate_type_ = ASIN;
   } else if (kernel_name == prim::kPrimACos->name()) {
     operate_type_ = ACOS;
+  } else if (kernel_name == prim::kPrimAtan->name()) {
+    operate_type_ = ATAN;
+  } else if (kernel_name == prim::kPrimSin->name()) {
+    operate_type_ = SIN;
+  } else if (kernel_name == prim::kPrimCos->name()) {
+    operate_type_ = COS;
+  } else if (kernel_name == prim::kPrimTan->name()) {
+    operate_type_ = TAN;
   }
   dtype_ = AnfAlgo::GetPrevNodeOutputInferDataType(kernel_node, 0);
   target_dtype_ = AnfAlgo::GetOutputInferDataType(kernel_node, 0);
@@ -214,31 +251,18 @@ void ArithmeticSelfCPUKernel::LaunchKernel(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &outputs) {
     MS_LOG(ERROR) << "Invalid value: once_compute_size " << once_compute_size;
     return;
   }
+  static const std::map<OperateType, std::function<void(const T *, T *, size_t, size_t)>>
+    kArithmeticOpFuncMap = {{SQUARE, Square<T>},     {SIGN, Sign<T>},
+                            {NEG, Neg<T>},           {LOGICALNOT, LogicalNot<T>},
+                            {ONESLIKE, OnesLike<T>}, {ZEROSLIKE, ZerosLike<T>},
+                            {FLOOR, Floor<T>},       {RECIPROCAL, Reciprocal<T>},
+                            {GELU, Gelu<T>},         {SIN, Sin<T>},
+                            {COS, Cos<T>},           {TAN, Tan<T>},
+                            {ASIN, Asin<T>},         {ACOS, ACos<T>},
+                            {ATAN, Atan<T>}};
   while (start < lens) {
     size_t end = (start + once_compute_size) > lens ? lens : (start + once_compute_size);
-    if (operate_type_ == SQUARE) {
-      threads.emplace_back(std::thread(Square<T>, input, output, start, end));
-    } else if (operate_type_ == NEG) {
-      threads.emplace_back(std::thread(Neg<T>, input, output, start, end));
-    } else if (operate_type_ == LOGICALNOT) {
-      threads.emplace_back(std::thread(LogicalNot<T>, input, output, start, end));
-    } else if (operate_type_ == ONESLIKE) {
-      threads.emplace_back(std::thread(OnesLike<T>, input, output, start, end));
-    } else if (operate_type_ == ZEROSLIKE) {
-      threads.emplace_back(std::thread(ZerosLike<T>, input, output, start, end));
-    } else if (operate_type_ == SIGN) {
-      threads.emplace_back(std::thread(Sign<T>, input, output, start, end));
-    } else if (operate_type_ == FLOOR) {
-      threads.emplace_back(std::thread(Floor<T>, input, output, start, end));
-    } else if (operate_type_ == RECIPROCAL) {
-      threads.emplace_back(std::thread(Reciprocal<T>, input, output, start, end));
-    } else if (operate_type_ == GELU) {
-      threads.emplace_back(std::thread(Gelu<T>, input, output, start, end));
-    } else if (operate_type_ == ASIN) {
-      threads.emplace_back(std::thread(Asin<T>, input, output, start, end));
-    } else if (operate_type_ == ACOS) {
-      threads.emplace_back(std::thread(ACos<T>, input, output, start, end));
-    }
+    threads.emplace_back(std::thread(kArithmeticOpFuncMap.at(operate_type_), input, output, start, end));
     start += once_compute_size;
   }
   for (size_t i = 0; i < threads.size(); ++i) {
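The LaunchKernel hunk above replaces the per-operator if/else chain with a function table keyed by OperateType and dispatches every worker thread through kArithmeticOpFuncMap.at(). The standalone Python sketch below illustrates the same table-plus-chunking pattern; it is an illustration only (OP_FUNCS and launch are invented names, not MindSpore code):

import math
from concurrent.futures import ThreadPoolExecutor

# A table keyed by operator name stands in for kArithmeticOpFuncMap keyed by OperateType.
OP_FUNCS = {"Sin": math.sin, "Cos": math.cos, "Tan": math.tan, "Atan": math.atan}

def launch(op_name, data, num_threads=4):
    func = OP_FUNCS[op_name]  # like .at(): raises KeyError for an unregistered operator
    out = [0.0] * len(data)
    chunk = max(1, (len(data) + num_threads - 1) // num_threads)

    def work(start, end):
        # Each worker fills its own [start, end) slice, mirroring the C++ threads.
        for i in range(start, end):
            out[i] = func(data[i])

    with ThreadPoolExecutor(num_threads) as pool:
        for s in range(0, len(data), chunk):
            pool.submit(work, s, min(s + chunk, len(data)))
    return out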
diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/arithmetic_self_cpu_kernel.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/arithmetic_self_cpu_kernel.h
index c2b972d92d4..5922294976c 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/cpu/arithmetic_self_cpu_kernel.h
+++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/arithmetic_self_cpu_kernel.h
@@ -78,6 +78,22 @@ MS_REG_CPU_KERNEL(ACos, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
                   ArithmeticSelfCPUKernel);
 MS_REG_CPU_KERNEL(ACos, KernelAttr().AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeInt32),
                   ArithmeticSelfCPUKernel);
+MS_REG_CPU_KERNEL(Atan, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
+                  ArithmeticSelfCPUKernel);
+MS_REG_CPU_KERNEL(Atan, KernelAttr().AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeInt32),
+                  ArithmeticSelfCPUKernel);
+MS_REG_CPU_KERNEL(Sin, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
+                  ArithmeticSelfCPUKernel);
+MS_REG_CPU_KERNEL(Sin, KernelAttr().AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeInt32),
+                  ArithmeticSelfCPUKernel);
+MS_REG_CPU_KERNEL(Cos, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
+                  ArithmeticSelfCPUKernel);
+MS_REG_CPU_KERNEL(Cos, KernelAttr().AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeInt32),
+                  ArithmeticSelfCPUKernel);
+MS_REG_CPU_KERNEL(Tan, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
+                  ArithmeticSelfCPUKernel);
+MS_REG_CPU_KERNEL(Tan, KernelAttr().AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeInt32),
+                  ArithmeticSelfCPUKernel);
 }  // namespace kernel
 }  // namespace mindspore
diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/cpu_kernel.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/cpu_kernel.h
index e614a627357..a24ce767a79 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/cpu/cpu_kernel.h
+++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/cpu_kernel.h
@@ -95,8 +95,13 @@ enum OperateType {
   GELUGRAD,
   ASIN,
   ACOS,
+  ATAN,
   ASINGRAD,
   ACOSGRAD,
+  ATANGRAD,
+  SIN,
+  COS,
+  TAN,
 };
 
 class CPUKernel : public kernel::KernelMod {
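Each MS_REG_CPU_KERNEL entry above binds one (operator name, dtype signature) pair to a kernel class, which is why every new operator is registered twice, once for float32 and once for int32, while ATAN/ATANGRAD/SIN/COS/TAN extend the shared OperateType enum. A rough Python analogue of such a registry, with hypothetical names, for illustration only:

# A registry keyed by (op, input dtype, output dtype), in the spirit of MS_REG_CPU_KERNEL.
KERNEL_REGISTRY = {}

def register_kernel(op_name, in_dtype, out_dtype, kernel_cls):
    KERNEL_REGISTRY[(op_name, in_dtype, out_dtype)] = kernel_cls

for op in ("Atan", "Sin", "Cos", "Tan"):
    for dtype in ("float32", "int32"):
        register_kernel(op, dtype, dtype, "ArithmeticSelfCPUKernel")

assert ("Atan", "float32", "float32") in KERNEL_REGISTRY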
diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/eltwise_grad_cpu_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/cpu/eltwise_grad_cpu_kernel.cc
index 40baef0a128..f7b6341963e 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/cpu/eltwise_grad_cpu_kernel.cc
+++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/eltwise_grad_cpu_kernel.cc
@@ -132,6 +132,27 @@ void EltWiseGradCPUKernel::ACosGrad(const T *input1, const T *input2, T *out, size_t start, size_t end) {
   }
 }
 
+template <typename T>
+void EltWiseGradCPUKernel::AtanGrad(const T *input1, const T *input2, T *out, size_t start, size_t end) {
+  for (size_t i = start; i < end; i++) {
+    T dividend = input2[i];
+    T divisor = 1 + input1[i] * input1[i];
+    if (divisor == 0) {
+      if (dividend == 0) {
+        out[i] = std::numeric_limits<T>::quiet_NaN();
+        continue;
+      }
+      if (std::numeric_limits<T>::has_infinity) {
+        out[i] = dividend > 0 ? std::numeric_limits<T>::infinity() : -std::numeric_limits<T>::infinity();
+      } else {
+        out[i] = dividend > 0 ? std::numeric_limits<T>::max() : std::numeric_limits<T>::min();
+      }
+      continue;
+    }
+    out[i] = dividend / divisor;
+  }
+}
+
 void EltWiseGradCPUKernel::InitKernel(const CNodePtr &kernel_node) {
   MS_EXCEPTION_IF_NULL(kernel_node);
   std::string kernel_name = AnfAlgo::GetCNodeName(kernel_node);
@@ -153,6 +174,8 @@ void EltWiseGradCPUKernel::InitKernel(const CNodePtr &kernel_node) {
     operate_type_ = ASINGRAD;
   } else if (kernel_name == "ACosGrad") {
     operate_type_ = ACOSGRAD;
+  } else if (kernel_name == "AtanGrad") {
+    operate_type_ = ATANGRAD;
   } else {
     MS_LOG(EXCEPTION) << "Not support " << kernel_name;
   }
@@ -238,6 +261,8 @@ void EltWiseGradCPUKernel::LaunchKernel(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &outputs) {
       threads.emplace_back(std::thread(&EltWiseGradCPUKernel::AsinGrad<T>, this, input1, input2, output, start, end));
     } else if (operate_type_ == ACOSGRAD) {
       threads.emplace_back(std::thread(&EltWiseGradCPUKernel::ACosGrad<T>, this, input1, input2, output, start, end));
+    } else if (operate_type_ == ATANGRAD) {
+      threads.emplace_back(std::thread(&EltWiseGradCPUKernel::AtanGrad<T>, this, input1, input2, output, start, end));
     } else {
       MS_LOG(EXCEPTION) << "Not support " << operate_type_;
     }
diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/eltwise_grad_cpu_kernel.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/eltwise_grad_cpu_kernel.h
index 02ab749c469..d3211d28a21 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/cpu/eltwise_grad_cpu_kernel.h
+++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/eltwise_grad_cpu_kernel.h
@@ -54,6 +54,8 @@ class EltWiseGradCPUKernel : public CPUKernel {
   void AsinGrad(const T *input1, const T *input2, T *out, size_t start, size_t end);
   template <typename T>
   void ACosGrad(const T *input1, const T *input2, T *out, size_t start, size_t end);
+  template <typename T>
+  void AtanGrad(const T *input1, const T *input2, T *out, size_t start, size_t end);
   std::vector<size_t> input_shape0_;
   std::vector<size_t> input_shape1_;
   std::vector<size_t> input_element_num0_;
@@ -109,6 +111,13 @@ MS_REG_CPU_KERNEL(
 MS_REG_CPU_KERNEL(
   ACosGrad, KernelAttr().AddInputAttr(kNumberTypeInt32).AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeInt32),
   EltWiseGradCPUKernel);
+MS_REG_CPU_KERNEL(
+  AtanGrad,
+  KernelAttr().AddInputAttr(kNumberTypeFloat32).AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
+  EltWiseGradCPUKernel);
+MS_REG_CPU_KERNEL(
+  AtanGrad, KernelAttr().AddInputAttr(kNumberTypeInt32).AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeInt32),
+  EltWiseGradCPUKernel);
 }  // namespace kernel
 }  // namespace mindspore
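AtanGrad above implements dy * d/dx atan(x) = dy / (1 + x^2). For floating-point T the divisor 1 + x*x is never zero, so the divisor == 0 branches are defensive and only matter for the integer instantiations, where has_infinity is false. A standalone NumPy check of the formula, not part of the patch:

import numpy as np

x = np.array([-0.5, 0.0, 0.5])
dy = np.array([1.0, 0.0, -1.0])
analytic = dy / (1 + x * x)  # the AtanGrad formula

eps = 1e-3  # central difference approximates dy * atan'(x)
numeric = dy * (np.arctan(x + eps) - np.arctan(x - eps)) / (2 * eps)
assert np.allclose(analytic, numeric, atol=1e-4)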
diff --git a/mindspore/core/base/core_ops.h b/mindspore/core/base/core_ops.h
index d8f6c44a64d..93d826681ec 100644
--- a/mindspore/core/base/core_ops.h
+++ b/mindspore/core/base/core_ops.h
@@ -387,6 +387,7 @@ inline const PrimitivePtr kPrimSign = std::make_shared<Primitive>("Sign");
 inline const PrimitivePtr kPrimACos = std::make_shared<Primitive>("ACos");
 inline const PrimitivePtr kPrimAsinGrad = std::make_shared<Primitive>("AsinGrad");
 inline const PrimitivePtr kPrimACosGrad = std::make_shared<Primitive>("ACosGrad");
+inline const PrimitivePtr kPrimAtanGrad = std::make_shared<Primitive>("AtanGrad");
 inline const PrimitivePtr kPrimFloorMod = std::make_shared<Primitive>("FloorMod");
 inline const PrimitivePtr kPrimWhere = std::make_shared<Primitive>("Where");
diff --git a/mindspore/ops/operations/math_ops.py b/mindspore/ops/operations/math_ops.py
index f15ae1fab8d..93ea6323ad7 100644
--- a/mindspore/ops/operations/math_ops.py
+++ b/mindspore/ops/operations/math_ops.py
@@ -3341,7 +3341,7 @@ class Cos(PrimitiveWithInfer):
         Tensor, has the same shape as `input_x`.
 
     Supported Platforms:
-        ``Ascend`` ``GPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> cos = ops.Cos()
@@ -3378,7 +3378,7 @@ class ACos(PrimitiveWithInfer):
         Tensor, has the same shape as `input_x`.
 
     Supported Platforms:
-        ``Ascend`` ``GPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> acos = ops.ACos()
@@ -3411,7 +3411,7 @@ class Sin(PrimitiveWithInfer):
         Tensor, has the same shape as `input_x`.
 
     Supported Platforms:
-        ``Ascend`` ``GPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> sin = ops.Sin()
@@ -3448,7 +3448,7 @@ class Asin(PrimitiveWithInfer):
         Tensor, has the same shape as `input_x`.
 
     Supported Platforms:
-        ``Ascend`` ``GPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> asin = ops.Asin()
@@ -3665,7 +3665,7 @@ class Tan(PrimitiveWithInfer):
         Tensor, has the same shape as `input_x`.
 
     Supported Platforms:
-        ``Ascend``
+        ``Ascend`` ``CPU``
 
     Examples:
         >>> tan = ops.Tan()
@@ -3703,7 +3703,7 @@ class Atan(PrimitiveWithInfer):
         A Tensor, has the same type as the input.
 
     Supported Platforms:
-        ``Ascend`` ``GPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> input_x = Tensor(np.array([1.0, 0.0]), mindspore.float32)
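With the kernels registered and the docstrings above updated, the trigonometric primitives advertise CPU support. A hedged usage sketch (assumes a MindSpore build containing this patch; printed values are approximate):

import numpy as np
import mindspore
from mindspore import Tensor, context
from mindspore.ops import operations as P

context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
x = Tensor(np.array([0.5, 1.0]), mindspore.float32)
print(P.Sin()(x))   # ~[0.4794, 0.8415]
print(P.Cos()(x))   # ~[0.8776, 0.5403]
print(P.Tan()(x))   # ~[0.5463, 1.5574]
print(P.Atan()(x))  # ~[0.4636, 0.7854]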
diff --git a/tests/st/ops/cpu/test_atan_grad_op.py b/tests/st/ops/cpu/test_atan_grad_op.py
new file mode 100644
index 00000000000..4635b5c9b04
--- /dev/null
+++ b/tests/st/ops/cpu/test_atan_grad_op.py
@@ -0,0 +1,46 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+import numpy as np
+import pytest
+
+import mindspore.nn as nn
+from mindspore import Tensor
+from mindspore import context
+from mindspore.ops.operations import _grad_ops as G
+
+context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
+
+
+class NetAtanGrad(nn.Cell):
+    def __init__(self):
+        super(NetAtanGrad, self).__init__()
+        self.atanGrad = G.AtanGrad()
+
+    def construct(self, x, dy):
+        return self.atanGrad(x, dy)
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_cpu
+@pytest.mark.env_onecard
+def test_atan_grad():
+    x = np.array([-0.5, 0, 0.5]).astype('float32')
+    dy = np.array([1, 0, -1]).astype('float32')
+    atan_grad = NetAtanGrad()
+    output = atan_grad(Tensor(x), Tensor(dy))
+    print(output)
+    expect = dy / (1 + x * x)
+    assert np.allclose(output.asnumpy(), expect)
diff --git a/tests/st/ops/cpu/test_atan_op.py b/tests/st/ops/cpu/test_atan_op.py
new file mode 100644
index 00000000000..9ae986226f4
--- /dev/null
+++ b/tests/st/ops/cpu/test_atan_op.py
@@ -0,0 +1,46 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+import numpy as np
+import pytest
+
+import mindspore.nn as nn
+from mindspore import Tensor
+from mindspore import context
+from mindspore.ops import operations as P
+
+context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
+
+
+class NetAtan(nn.Cell):
+    def __init__(self):
+        super(NetAtan, self).__init__()
+        self.atan = P.Atan()
+
+    def construct(self, x):
+        return self.atan(x)
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_cpu
+@pytest.mark.env_onecard
+def test_atan():
+    np_array = np.array([-1, -0.5, 0, 0.5, 1]).astype('float32')
+    input_x = Tensor(np_array)
+    net = NetAtan()
+    output = net(input_x)
+    print(output)
+    expect = np.arctan(np_array)
+    assert np.allclose(output.asnumpy(), expect)
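The remaining test files repeat the same template: wrap a single primitive in an nn.Cell, run it on the CPU target, and compare against the NumPy reference with np.allclose. Any of them can also be run standalone through pytest, for example:

import pytest
pytest.main(["-v", "tests/st/ops/cpu/test_cos_op.py"])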
diff --git a/tests/st/ops/cpu/test_cos_op.py b/tests/st/ops/cpu/test_cos_op.py
new file mode 100644
index 00000000000..ec0f7456a39
--- /dev/null
+++ b/tests/st/ops/cpu/test_cos_op.py
@@ -0,0 +1,46 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+import numpy as np
+import pytest
+
+import mindspore.nn as nn
+from mindspore import Tensor
+from mindspore import context
+from mindspore.ops import operations as P
+
+context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
+
+
+class NetCos(nn.Cell):
+    def __init__(self):
+        super(NetCos, self).__init__()
+        self.cos = P.Cos()
+
+    def construct(self, x):
+        return self.cos(x)
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_cpu
+@pytest.mark.env_onecard
+def test_cos():
+    np_array = np.array([-1, -0.5, 0, 0.5, 1]).astype('float32')
+    input_x = Tensor(np_array)
+    net = NetCos()
+    output = net(input_x)
+    print(output)
+    expect = np.cos(np_array)
+    assert np.allclose(output.asnumpy(), expect)
diff --git a/tests/st/ops/cpu/test_sin_op.py b/tests/st/ops/cpu/test_sin_op.py
new file mode 100644
index 00000000000..5da7f3ad827
--- /dev/null
+++ b/tests/st/ops/cpu/test_sin_op.py
@@ -0,0 +1,46 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+import numpy as np
+import pytest
+
+import mindspore.nn as nn
+from mindspore import Tensor
+from mindspore import context
+from mindspore.ops import operations as P
+
+context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
+
+
+class NetSin(nn.Cell):
+    def __init__(self):
+        super(NetSin, self).__init__()
+        self.sin = P.Sin()
+
+    def construct(self, x):
+        return self.sin(x)
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_cpu
+@pytest.mark.env_onecard
+def test_sin():
+    np_array = np.array([-1, -0.5, 0, 0.5, 1]).astype('float32')
+    input_x = Tensor(np_array)
+    net = NetSin()
+    output = net(input_x)
+    print(output)
+    expect = np.sin(np_array)
+    assert np.allclose(output.asnumpy(), expect)
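One note on the tan test that follows: tan has poles at ±pi/2, so the test inputs are kept well inside (-pi/2, pi/2), where np.allclose's default tolerances (rtol=1e-05, atol=1e-08) remain meaningful. Near a pole, tiny input rounding shifts the result dramatically:

import numpy as np
print(np.tan(np.float32(1.57)))  # ~1255.8, and highly sensitive to the float32 rounding of 1.57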
diff --git a/tests/st/ops/cpu/test_tan_op.py b/tests/st/ops/cpu/test_tan_op.py
new file mode 100644
index 00000000000..416208476ba
--- /dev/null
+++ b/tests/st/ops/cpu/test_tan_op.py
@@ -0,0 +1,46 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+import numpy as np
+import pytest
+
+import mindspore.nn as nn
+from mindspore import Tensor
+from mindspore import context
+from mindspore.ops import operations as P
+
+context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
+
+
+class NetTan(nn.Cell):
+    def __init__(self):
+        super(NetTan, self).__init__()
+        self.tan = P.Tan()
+
+    def construct(self, x):
+        return self.tan(x)
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_cpu
+@pytest.mark.env_onecard
+def test_tan():
+    np_array = np.array([-1, -0.5, 0, 0.5, 1]).astype('float32')
+    input_x = Tensor(np_array)
+    net = NetTan()
+    output = net(input_x)
+    print(output)
+    expect = np.tan(np_array)
+    assert np.allclose(output.asnumpy(), expect)