!34207 [ops] Add the bessel_y0 and bessel_y1 CPU operators

Merge pull request !34207 from maoyaomin/mym_bessel_0507
This commit is contained in:
i-robot 2022-05-12 01:18:46 +00:00 committed by Gitee
commit 708188ac27
No known key found for this signature in database
GPG Key ID: 173E9B9CA92EEF8F
19 changed files with 959 additions and 3 deletions

View File

@@ -73,6 +73,8 @@ functional operators are Primitives that have been initialized and can be called directly as functions
mindspore.ops.bessel_j1
mindspore.ops.bessel_k0
mindspore.ops.bessel_k0e
mindspore.ops.bessel_y0
mindspore.ops.bessel_y1
mindspore.ops.bitwise_and
mindspore.ops.bitwise_or
mindspore.ops.bitwise_xor

View File

@@ -0,0 +1,19 @@
mindspore.ops.bessel_y0
=======================

.. py:function:: mindspore.ops.bessel_y0(x)

    Computes the Bessel y0 function of the input Tensor element-wise.

    **Inputs:**

    - **x** (Tensor) - Tensor of arbitrary dimensions. The data type must be float16, float32 or float64.

    **Outputs:**

    Tensor, with the same shape and dtype as `x`.

    **Raises:**

    - **TypeError** - If `x` is not a Tensor.
    - **TypeError** - If the dtype of `x` is not float16, float32 or float64.

View File

@@ -0,0 +1,19 @@
mindspore.ops.bessel_y1
=======================

.. py:function:: mindspore.ops.bessel_y1(x)

    Computes the Bessel y1 function of the input Tensor element-wise.

    **Inputs:**

    - **x** (Tensor) - Tensor of arbitrary dimensions. The data type must be float16, float32 or float64.

    **Outputs:**

    Tensor, with the same shape and dtype as `x`.

    **Raises:**

    - **TypeError** - If `x` is not a Tensor.
    - **TypeError** - If the dtype of `x` is not float16, float32 or float64.

View File

@@ -73,6 +73,8 @@ Element-by-Element Operations
mindspore.ops.bessel_j1
mindspore.ops.bessel_k0
mindspore.ops.bessel_k0e
mindspore.ops.bessel_y0
mindspore.ops.bessel_y1
mindspore.ops.bitwise_and
mindspore.ops.bitwise_or
mindspore.ops.bitwise_xor

View File

@@ -0,0 +1,205 @@
/**
* Copyright 2019-2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cmath>
#include <functional>
#include <map>
#include "plugin/device/cpu/kernel/bessel_y0_cpu_kernel.h"
#include "plugin/device/cpu/hal/device/cpu_device_address.h"
#include "plugin/device/cpu/kernel/bessel_j0_cpu_kernel.h"
#include "mindspore/core/ops/bessel_y0.h"
#include "abstract/utils.h"
namespace mindspore {
namespace kernel {
namespace {
constexpr size_t kBesselY0InputsNum = 1;
constexpr size_t kBesselY0OutputsNum = 1;
} // namespace
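// polevl evaluates coef[0]*x^N + coef[1]*x^(N-1) + ... + coef[N] by Horner's method,
// consuming N + 1 coefficients. p1evl evaluates the same form for a monic polynomial
// whose leading coefficient 1.0 is implicit and omitted from coef[], consuming only N
// coefficients; the two must not be interchanged for a given coefficient table.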
double BesselY0CpuKernelMod::polevl(double x, const double coef[], int N) {
double ans;
int i;
const double *p;
p = coef;
ans = *p++;
i = N;
do {
ans = ans * x + *p++;
} while (--i);
return (ans);
}
double BesselY0CpuKernelMod::p1evl(double x, const double coef[], int N) {
double ans;
const double *p;
int i;
p = coef;
ans = x + *p++;
i = N - 1;
do {
ans = ans * x + *p++;
} while (--i);
return (ans);
}
double BesselY0CpuKernelMod::y0(double x) {
const double PP[] = {
7.96936729297347051624E-4, 8.28352392107440799803E-2, 1.23953371646414299388E0, 5.44725003058768775090E0,
8.74716500199817011941E0, 5.30324038235394892183E0, 9.99999999999999997821E-1,
};
const double PQ[] = {
9.24408810558863637013E-4, 8.56288474354474431428E-2, 1.25352743901058953537E0, 5.47097740330417105182E0,
8.76190883237069594232E0, 5.30605288235394617618E0, 1.00000000000000000218E0,
};
const double QP[] = {
-1.13663838898469149931E-2, -1.28252718670509318512E0, -1.95539544257735972385E1, -9.32060152123768231369E1,
-1.77681167980488050595E2, -1.47077505154951170175E2, -5.14105326766599330220E1, -6.05014350600728481186E0,
};
const double QQ[] = {
6.43178256118178023184E1, 8.56430025976980587198E2, 3.88240183605401609683E3, 7.24046774195652478189E3,
5.93072701187316984827E3, 2.06209331660327847417E3, 2.42005740240291393179E2,
};
const double YP[] = {
1.55924367855235737965E4, -1.46639295903971606143E7, 5.43526477051876500413E9, -9.82136065717911466409E11,
8.75906394395366999549E13, -3.46628303384729719441E15, 4.42733268572569800351E16, -1.84950800436986690637E16,
};
const double YQ[] = {
1.04128353664259848412E3, 6.26107330137134956842E5, 2.68919633393814121987E8, 8.64002487103935000337E10,
2.02979612750105546709E13, 3.17157752842975028269E15, 2.50596256172653059228E17,
};
const double PIO4 = .78539816339744830962;
const double SQ2OPI = .79788456080286535588;
const double NPY_2_PI = 0.6366197723675814;
const double BAR = 5.0;
const double ZERO = 0.0;
const double FIVE = 5.0;
const double FIVE_SQUARED = 25.0;
const int DEG_P = 6;
const int DEG_Q = 7;
double z, p, w, q, xn;
if (x <= BAR) {
if (x == ZERO) {
return -INFINITY;
} else if (x < ZERO) {
return NAN;
}
z = x * x;
w = polevl(z, YP, DEG_Q) / p1evl(z, YQ, DEG_Q);
w += NPY_2_PI * log(x) * BesselJ0CpuKernelMod::j0(x);
return (w);
}
w = FIVE / x;
z = FIVE_SQUARED / (x * x);
p = polevl(z, PP, DEG_P) / polevl(z, PQ, DEG_P);
q = polevl(z, QP, DEG_Q) / p1evl(z, QQ, DEG_Q);
xn = x - PIO4;
p = p * sin(xn) + w * q * cos(xn);
return (p * SQ2OPI / sqrt(x));
}
template <typename T>
void BesselY0CpuKernelMod::BesselY0Func(const T *input, T *output, size_t start, size_t end) {
for (size_t i = start; i < end; i++) {
double input_ = static_cast<double>(input[i]);
double output_ = y0(input_);
output[i] = static_cast<T>(output_);
}
}
bool BesselY0CpuKernelMod::Init(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs) {
auto kernel_ptr = std::dynamic_pointer_cast<ops::BesselY0>(base_operator);
if (!kernel_ptr) {
MS_LOG(ERROR) << "For 'BesselY0CpuKernelMod', BaseOperatorPtr can not dynamic cast to BesselY0 before initialize!";
return false;
}
kernel_name_ = kernel_ptr->name();
if (inputs.size() != kBesselY0InputsNum || outputs.size() != kBesselY0OutputsNum) {
MS_LOG(ERROR) << "For '" << kernel_name_ << "': input and output size should be " << kBesselY0InputsNum << " and "
<< kBesselY0OutputsNum << ", but get " << inputs.size() << " and " << outputs.size();
return false;
}
input_shape_ = inputs[0]->GetShapeVector();
output_shape_ = outputs[0]->GetShapeVector();
input_dtype_ = inputs[0]->GetDtype();
input_size_ = std::accumulate(input_shape_.begin(), input_shape_.end(), size_t(1), std::multiplies<size_t>());
switch (input_dtype_) {
case kNumberTypeFloat64:
kernel_func_ = &BesselY0CpuKernelMod::LaunchKernel<double>;
break;
case kNumberTypeFloat32:
kernel_func_ = &BesselY0CpuKernelMod::LaunchKernel<float>;
break;
case kNumberTypeFloat16:
kernel_func_ = &BesselY0CpuKernelMod::LaunchKernel<float16>;
break;
default:
MS_LOG(ERROR) << "BesselY0 kernel does not support " << TypeIdToString(input_dtype_);
return false;
}
return true;
}
int BesselY0CpuKernelMod::Resize(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs,
const std::map<uint32_t, tensor::TensorPtr> &others) {
int ret = 0;
if ((ret = NativeCpuKernelMod::Resize(base_operator, inputs, outputs, others)) != 0) {
MS_LOG(WARNING) << kernel_name_ << " reinit failed.";
return ret;
}
return 0;
}
template <typename T>
bool BesselY0CpuKernelMod::LaunchKernel(const std::vector<kernel::AddressPtr> &inputs,
const std::vector<kernel::AddressPtr> &outputs) {
const auto *input = reinterpret_cast<T *>(inputs[0]->addr);
auto output = reinterpret_cast<T *>(outputs[0]->addr);
// Bind only the data pointers; ParallelLaunchAutoSearch partitions [0, input_size_) and
// passes each chunk's [start, end) range through the placeholders.
auto task = std::bind(BesselY0Func<T>, input, output, std::placeholders::_1, std::placeholders::_2);
ParallelLaunchAutoSearch(task, input_size_, this, &parallel_search_info_);
return true;
}
std::vector<KernelAttr> BesselY0CpuKernelMod::GetOpSupport() {
std::vector<KernelAttr> support_list = {
KernelAttr().AddInputAttr(kNumberTypeFloat64).AddOutputAttr(kNumberTypeFloat64),
KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
KernelAttr().AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumberTypeFloat16)};
return support_list;
}
MS_KERNEL_FACTORY_REG(NativeCpuKernelMod, BesselY0, BesselY0CpuKernelMod);
} // namespace kernel
} // namespace mindspore
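For reference, the two branches above (a rational approximation plus a NPY_2_PI * log(x) * J0(x) term for x <= 5, and the asymptotic phase/amplitude form beyond) can be sanity-checked against scipy. A minimal sketch, assuming scipy and numpy are installed; check_y0_branches is our name, not part of this PR:

import numpy as np
from scipy import special

def check_y0_branches():
    # Points on both sides of the BAR = 5.0 branch split used by the kernel.
    xs = np.array([0.5, 1.0, 2.0, 4.0, 6.0, 50.0], dtype=np.float64)
    print(special.y0(xs))
    # Edge cases the kernel handles explicitly before the series evaluation:
    assert np.isneginf(special.y0(0.0))  # y0(0)   -> -inf
    assert np.isnan(special.y0(-1.0))    # y0(x<0) -> nan

check_y0_branches()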

View File

@@ -0,0 +1,65 @@
/**
* Copyright 2019-2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_BESSEL_Y0_CPU_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_BESSEL_Y0_CPU_KERNEL_H_
#include <vector>
#include <memory>
#include <map>
#include "plugin/device/cpu/kernel/cpu_kernel.h"
#include "plugin/factory/ms_factory.h"
namespace mindspore {
namespace kernel {
class BesselY0CpuKernelMod : public NativeCpuKernelMod {
public:
BesselY0CpuKernelMod() = default;
~BesselY0CpuKernelMod() override = default;
bool Init(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs) override;
int Resize(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs,
const std::map<uint32_t, tensor::TensorPtr> &others = std::map<uint32_t, tensor::TensorPtr>()) override;
bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
const std::vector<AddressPtr> &outputs) override {
return kernel_func_(this, inputs, outputs);
}
static double polevl(double x, const double coef[], int N);
static double p1evl(double x, const double coef[], int N);
static double y0(double x);
template <typename T>
static void BesselY0Func(const T *input, T *output, size_t start, size_t end);
protected:
std::vector<KernelAttr> GetOpSupport() override;
private:
template <typename T>
bool LaunchKernel(const std::vector<kernel::AddressPtr> &inputs, const std::vector<kernel::AddressPtr> &outputs);
using BesselKernel = std::function<bool(BesselY0CpuKernelMod *, const std::vector<kernel::AddressPtr> &,
const std::vector<kernel::AddressPtr> &)>;
BesselKernel kernel_func_;
size_t input_size_;
std::vector<int64_t> input_shape_;
std::vector<int64_t> output_shape_;
TypeId input_dtype_;
};
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_BESSEL_Y0_CPU_KERNEL_H_

View File

@@ -0,0 +1,209 @@
/**
* Copyright 2019-2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cmath>
#include <functional>
#include <map>
#include "plugin/device/cpu/kernel/bessel_y1_cpu_kernel.h"
#include "plugin/device/cpu/kernel/bessel_j1_cpu_kernel.h"
#include "plugin/device/cpu/hal/device/cpu_device_address.h"
#include "mindspore/core/ops/bessel_y1.h"
#include "abstract/utils.h"
namespace mindspore {
namespace kernel {
namespace {
constexpr size_t kBesselY1InputsNum = 1;
constexpr size_t kBesselY1OutputsNum = 1;
} // namespace
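// polevl/p1evl follow the Horner-evaluation conventions documented in
// bessel_y0_cpu_kernel.cc: p1evl assumes the leading coefficient 1.0 is omitted from coef[].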
double BesselY1CpuKernelMod::polevl(double x, const double coef[], int N) {
double ans;
int i;
const double *p;
p = coef;
ans = *p++;
i = N;
do {
ans = ans * x + *p++;
} while (--i);
return (ans);
}
double BesselY1CpuKernelMod::p1evl(double x, const double coef[], int N) {
double ans;
const double *p;
int i;
p = coef;
ans = x + *p++;
i = N - 1;
do {
ans = ans * x + *p++;
} while (--i);
return (ans);
}
double BesselY1CpuKernelMod::y1(double x) {
const double PP[7] = {
7.62125616208173112003E-4, 7.31397056940917570436E-2, 1.12719608129684925192E0, 5.11207951146807644818E0,
8.42404590141772420927E0, 5.21451598682361504063E0, 1.00000000000000000254E0,
};
const double PQ[7] = {
5.71323128072548699714E-4, 6.88455908754495404082E-2, 1.10514232634061696926E0, 5.07386386128601488557E0,
8.39985554327604159757E0, 5.20982848682361821619E0, 9.99999999999999997461E-1,
};
const double QP[8] = {
5.10862594750176621635E-2, 4.98213872951233449420E0, 7.58238284132545283818E1, 3.66779609360150777800E2,
7.10856304998926107277E2, 5.97489612400613639965E2, 2.11688757100572135698E2, 2.52070205858023719784E1,
};
const double QQ[8] = {
1.00000000000000000000E0, 7.42373277035675149943E1, 1.05644886038262816351E3, 4.98641058337653607651E3,
9.56231892404756170795E3, 7.99704160447350683650E3, 2.82619278517639096600E3, 3.36093607810698293419E2,
};
const double YP[6] = {
1.26320474790178026440E9, -6.47355876379160291031E11, 1.14509511541823727583E14,
-8.12770255501325109621E15, 2.02439475713594898196E17, -7.78877196265950026825E17,
};
const double YQ[9] = {
1.00000000000000000000E0, 5.94301592346128195359E2, 2.35564092943068577943E5,
7.34811944459721705660E7, 1.87601316108706159478E10, 3.88231277496238566008E12,
6.20557727146953693363E14, 6.87141087355300489866E16, 3.97270608116560655612E18,
};
const double NPY_2_PI = 0.6366197723675814;
const double THPIO4 = 2.35619449019234492885;
const double SQ2OPI = .79788456080286535588;
const double BAR = 5.0;
const double ZERO = 0.0;
const double ONE = 1.0;
const int DEG_P = 6;
const int DEG_Q = 7;
const int DEG_5 = 5;
const int DEG_8 = 8;
double w, z, p, q, xn;
if (x <= BAR) {
if (x == ZERO) {
return -INFINITY;
} else if (x < ZERO) {
return NAN;
}
z = x * x;
w = x * (polevl(z, YP, DEG_5) / polevl(z, YQ, DEG_8));
w += NPY_2_PI * (BesselJ1CpuKernelMod::j1(x) * log(x) - ONE / x);
return (w);
}
w = BAR / x;
z = w * w;
p = polevl(z, PP, DEG_P) / polevl(z, PQ, DEG_P);
// QQ above carries its leading 1.0 explicitly (unlike QQ in the y0 kernel), so it must be
// evaluated with polevl; p1evl would prepend a second implicit 1.0 and drop QQ[7].
q = polevl(z, QP, DEG_Q) / polevl(z, QQ, DEG_Q);
xn = x - THPIO4;
p = p * sin(xn) + w * q * cos(xn);
return (p * SQ2OPI / sqrt(x));
}
template <typename T>
void BesselY1CpuKernelMod::BesselY1Func(const T *input, T *output, size_t start, size_t end) {
for (size_t i = start; i < end; i++) {
double input_ = static_cast<double>(input[i]);
double output_ = y1(input_);
output[i] = static_cast<T>(output_);
}
}
bool BesselY1CpuKernelMod::Init(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs) {
auto kernel_ptr = std::dynamic_pointer_cast<ops::BesselY1>(base_operator);
if (!kernel_ptr) {
MS_LOG(ERROR) << "For 'BesselY1CpuKernelMod', BaseOperatorPtr can not dynamic cast to BesselY1 before initialize!";
return false;
}
kernel_name_ = kernel_ptr->name();
if (inputs.size() != kBesselY1InputsNum || outputs.size() != kBesselY1OutputsNum) {
MS_LOG(ERROR) << "For '" << kernel_name_ << "': input and output size should be " << kBesselY1InputsNum << " and "
<< kBesselY1OutputsNum << ", but get " << inputs.size() << " and " << outputs.size();
return false;
}
input_shape_ = inputs[0]->GetShapeVector();
output_shape_ = outputs[0]->GetShapeVector();
input_dtype_ = inputs[0]->GetDtype();
input_size_ = std::accumulate(input_shape_.begin(), input_shape_.end(), size_t(1), std::multiplies<size_t>());
switch (input_dtype_) {
case kNumberTypeFloat64:
kernel_func_ = &BesselY1CpuKernelMod::LaunchKernel<double>;
break;
case kNumberTypeFloat32:
kernel_func_ = &BesselY1CpuKernelMod::LaunchKernel<float>;
break;
case kNumberTypeFloat16:
kernel_func_ = &BesselY1CpuKernelMod::LaunchKernel<float16>;
break;
default:
MS_LOG(ERROR) << "BesselY1 kernel does not support " << TypeIdToString(input_dtype_);
return false;
}
return true;
}
int BesselY1CpuKernelMod::Resize(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs,
const std::map<uint32_t, tensor::TensorPtr> &others) {
int ret = 0;
if ((ret = NativeCpuKernelMod::Resize(base_operator, inputs, outputs, others)) != 0) {
MS_LOG(WARNING) << kernel_name_ << " reinit failed.";
return ret;
}
return 0;
}
template <typename T>
bool BesselY1CpuKernelMod::LaunchKernel(const std::vector<kernel::AddressPtr> &inputs,
const std::vector<kernel::AddressPtr> &outputs) {
const auto *input = reinterpret_cast<T *>(inputs[0]->addr);
auto output = reinterpret_cast<T *>(outputs[0]->addr);
// Bind only the data pointers; ParallelLaunchAutoSearch partitions [0, input_size_) and
// passes each chunk's [start, end) range through the placeholders.
auto task = std::bind(BesselY1Func<T>, input, output, std::placeholders::_1, std::placeholders::_2);
ParallelLaunchAutoSearch(task, input_size_, this, &parallel_search_info_);
return true;
}
std::vector<KernelAttr> BesselY1CpuKernelMod::GetOpSupport() {
std::vector<KernelAttr> support_list = {
KernelAttr().AddInputAttr(kNumberTypeFloat64).AddOutputAttr(kNumberTypeFloat64),
KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
KernelAttr().AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumberTypeFloat16)};
return support_list;
}
MS_KERNEL_FACTORY_REG(NativeCpuKernelMod, BesselY1, BesselY1CpuKernelMod);
} // namespace kernel
} // namespace mindspore
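Because the QQ table above keeps its leading 1.0, polevl is the correct evaluator there. The convention difference is easy to demonstrate in a few lines; a sketch assuming numpy, with all names illustrative:

import numpy as np

def polevl(x, coef):
    # Horner's method: coef[0]*x^n + ... + coef[n].
    ans = coef[0]
    for c in coef[1:]:
        ans = ans * x + c
    return ans

def p1evl(x, coef):
    # Monic variant: the leading coefficient 1.0 is implicit.
    return polevl(x, [1.0] + list(coef))

rng = np.random.default_rng(0)
tail = rng.normal(size=7).tolist()   # cephes-style table: leading 1.0 omitted
z = 0.04                             # z = (BAR / x)^2 for x = 25
assert np.isclose(p1evl(z, tail), polevl(z, [1.0] + tail))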

View File

@@ -0,0 +1,65 @@
/**
* Copyright 2019-2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_BESSEL_Y1_CPU_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_BESSEL_Y1_CPU_KERNEL_H_
#include <vector>
#include <memory>
#include <map>
#include "plugin/device/cpu/kernel/cpu_kernel.h"
#include "plugin/factory/ms_factory.h"
namespace mindspore {
namespace kernel {
class BesselY1CpuKernelMod : public NativeCpuKernelMod {
public:
BesselY1CpuKernelMod() = default;
~BesselY1CpuKernelMod() override = default;
bool Init(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs) override;
int Resize(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs,
const std::map<uint32_t, tensor::TensorPtr> &others = std::map<uint32_t, tensor::TensorPtr>()) override;
bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
const std::vector<AddressPtr> &outputs) override {
return kernel_func_(this, inputs, outputs);
}
static double polevl(double x, const double coef[], int N);
static double p1evl(double x, const double coef[], int N);
static double y1(double x);
template <typename T>
static void BesselY1Func(const T *input, T *output, size_t start, size_t end);
protected:
std::vector<KernelAttr> GetOpSupport() override;
private:
template <typename T>
bool LaunchKernel(const std::vector<kernel::AddressPtr> &inputs, const std::vector<kernel::AddressPtr> &outputs);
using BesselKernel = std::function<bool(BesselY1CpuKernelMod *, const std::vector<kernel::AddressPtr> &,
const std::vector<kernel::AddressPtr> &)>;
BesselKernel kernel_func_;
size_t input_size_;
std::vector<int64_t> input_shape_;
std::vector<int64_t> output_shape_;
TypeId input_dtype_;
};
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_BESSEL_Y1_CPU_KERNEL_H_

View File

@@ -0,0 +1,58 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ops/bessel_y0.h"
#include <algorithm>
#include <set>
#include "ops/op_utils.h"
#include "utils/check_convert_utils.h"
#include "abstract/ops/primitive_infer_map.h"
#include "mindapi/src/helper.h"
namespace mindspore {
namespace ops {
namespace {
abstract::ShapePtr BesselY0InferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
auto in_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->GetShapeTrack())[kShape];
return std::make_shared<abstract::Shape>(in_shape);
}
TypePtr BesselY0InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
auto x_type = input_args[kInputIndex0]->BuildType();
const std::set<TypePtr> valid_types = {kFloat16, kFloat32, kFloat64};
(void)CheckAndConvertUtils::CheckTensorTypeValid("x", x_type, valid_types, prim->name());
return x_type;
}
} // namespace
MIND_API_OPERATOR_IMPL(BesselY0, BaseOperator);
AbstractBasePtr BesselY0Infer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(primitive);
const int64_t kInputNum = 1;
(void)CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, kInputNum,
primitive->name());
for (const auto &item : input_args) {
MS_EXCEPTION_IF_NULL(item);
}
auto infer_type = BesselY0InferType(primitive, input_args);
auto infer_shape = BesselY0InferShape(primitive, input_args);
return abstract::MakeAbstract(infer_shape, infer_type);
}
REGISTER_PRIMITIVE_EVAL_IMPL(BesselY0, prim::kPrimBesselY0, BesselY0Infer, nullptr, true);
} // namespace ops
} // namespace mindspore
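The dtype check in BesselY0InferType is what surfaces to users as a TypeError for unsupported inputs. A quick illustration, assuming a MindSpore build that includes this PR:

import numpy as np
import mindspore as ms
from mindspore import Tensor, ops

x = Tensor(np.array([1, 2, 3]), ms.int32)
try:
    ops.bessel_y0(x)   # int32 is rejected by the {float16, float32, float64} check
except TypeError as err:
    print(err)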

View File

@@ -0,0 +1,44 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CORE_OPS_BESSEL_Y0_H_
#define MINDSPORE_CORE_OPS_BESSEL_Y0_H_
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ops/base_operator.h"
#include "mindapi/base/types.h"
namespace mindspore {
namespace ops {
constexpr auto kNameBesselY0 = "BesselY0";
/// \brief BesselY0 is used to compute bessel y0 value for input tensor.
/// \note Param x type must be float16, float32 or float64.
class MIND_API BesselY0 : public BaseOperator {
public:
MIND_API_BASE_MEMBER(BesselY0);
/// \brief Constructor.
BesselY0() : BaseOperator(kNameBesselY0) { InitIOName({"x"}, {"output"}); }
};
abstract::AbstractBasePtr BesselY0Infer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
const std::vector<abstract::AbstractBasePtr> &input_args);
} // namespace ops
} // namespace mindspore
#endif  // MINDSPORE_CORE_OPS_BESSEL_Y0_H_

View File

@@ -0,0 +1,58 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ops/bessel_y1.h"
#include <algorithm>
#include <set>
#include "ops/op_utils.h"
#include "utils/check_convert_utils.h"
#include "abstract/ops/primitive_infer_map.h"
#include "mindapi/src/helper.h"
namespace mindspore {
namespace ops {
namespace {
abstract::ShapePtr BesselY1InferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
auto in_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->GetShapeTrack())[kShape];
return std::make_shared<abstract::Shape>(in_shape);
}
TypePtr BesselY1InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
auto x_type = input_args[kInputIndex0]->BuildType();
const std::set<TypePtr> valid_types = {kFloat16, kFloat32, kFloat64};
(void)CheckAndConvertUtils::CheckTensorTypeValid("x", x_type, valid_types, prim->name());
return x_type;
}
} // namespace
MIND_API_OPERATOR_IMPL(BesselY1, BaseOperator);
AbstractBasePtr BesselY1Infer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(primitive);
const int64_t kInputNum = 1;
(void)CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, kInputNum,
primitive->name());
for (const auto &item : input_args) {
MS_EXCEPTION_IF_NULL(item);
}
auto infer_type = BesselY1InferType(primitive, input_args);
auto infer_shape = BesselY1InferShape(primitive, input_args);
return abstract::MakeAbstract(infer_shape, infer_type);
}
REGISTER_PRIMITIVE_EVAL_IMPL(BesselY1, prim::kPrimBesselY1, BesselY1Infer, nullptr, true);
} // namespace ops
} // namespace mindspore

View File

@@ -0,0 +1,44 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CORE_OPS_BESSEL_Y1_H_
#define MINDSPORE_CORE_OPS_BESSEL_Y1_H_
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ops/base_operator.h"
#include "mindapi/base/types.h"
namespace mindspore {
namespace ops {
constexpr auto kNameBesselY1 = "BesselY1";
/// \brief BesselY1 is used to compute bessel y1 value for input tensor.
/// \note Param x type must be float16, float32 or float64.
class MIND_API BesselY1 : public BaseOperator {
public:
MIND_API_BASE_MEMBER(BesselY1);
/// \brief Constructor.
BesselY1() : BaseOperator(kNameBesselY1) { InitIOName({"x"}, {"output"}); }
};
abstract::AbstractBasePtr BesselY1Infer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
const std::vector<abstract::AbstractBasePtr> &input_args);
} // namespace ops
} // namespace mindspore
#endif  // MINDSPORE_CORE_OPS_BESSEL_Y1_H_

View File

@@ -458,6 +458,8 @@ GVAR_DEF(PrimitivePtr, kPrimBesselI0e, std::make_shared<Primitive>("BesselI0e"))
GVAR_DEF(PrimitivePtr, kPrimBesselI1e, std::make_shared<Primitive>("BesselI1e"));
GVAR_DEF(PrimitivePtr, kPrimBesselJ0, std::make_shared<Primitive>("BesselJ0"));
GVAR_DEF(PrimitivePtr, kPrimBesselJ1, std::make_shared<Primitive>("BesselJ1"));
GVAR_DEF(PrimitivePtr, kPrimBesselY0, std::make_shared<Primitive>("BesselY0"));
GVAR_DEF(PrimitivePtr, kPrimBesselY1, std::make_shared<Primitive>("BesselY1"));
GVAR_DEF(PrimitivePtr, kPrimTanhGrad, std::make_shared<Primitive>("TanhGrad"));
GVAR_DEF(PrimitivePtr, kPrimPooling, std::make_shared<Primitive>("Pooling"));
GVAR_DEF(PrimitivePtr, kPrimPoolingGrad, std::make_shared<Primitive>("PoolingGrad"));

View File

@@ -303,3 +303,5 @@ get_unop_vmap_rule = vmap_rules_getters.register(P.BesselI0)(get_unop_vmap_rule)
get_unop_vmap_rule = vmap_rules_getters.register(P.BesselI0e)(get_unop_vmap_rule)
get_unop_vmap_rule = vmap_rules_getters.register(P.BesselK0)(get_unop_vmap_rule)
get_unop_vmap_rule = vmap_rules_getters.register(P.BesselK0e)(get_unop_vmap_rule)
get_unop_vmap_rule = vmap_rules_getters.register(P.BesselY0)(get_unop_vmap_rule)
get_unop_vmap_rule = vmap_rules_getters.register(P.BesselY1)(get_unop_vmap_rule)

View File

@@ -34,7 +34,7 @@ from .math_func import (addn, absolute, abs, tensor_add, add, neg_tensor, neg, t
log, maximum, invert, minimum, floor, logical_not, logical_or, logical_and, sin, cos, tan,
asin, acos, atan, sinh, cosh, tanh, asinh, acosh, atanh, atan2, bitwise_and, bitwise_or,
bitwise_xor, erf, erfc, cdist, bessel_i0, bessel_i0e, bessel_j0, bessel_j1, bessel_k0,
bessel_k0e)
bessel_k0e, bessel_y0, bessel_y1)
__all__ = []
__all__.extend(array_func.__all__)

View File

@@ -1536,6 +1536,62 @@ def bessel_k0e(x):
return bessel_k0e_(x)
bessel_y0_ = P.BesselY0()
def bessel_y0(x):
r"""
Computes the Bessel y0 function of x element-wise.
Args:
x (Tensor): The input tensor of shape :math:`(N,*)`, where :math:`*` means
any number of additional dimensions. The data type must be float16, float32 or float64.
Returns:
Tensor, has the same shape and dtype as `x`.
Raises:
TypeError: If `x` is not a Tensor.
TypeError: If dtype of `x` is not float16, float32 or float64.
Supported Platforms:
``CPU``
Examples:
>>> x = Tensor(np.array([0.5, 1., 2., 4.]), mindspore.float32)
>>> output = ops.bessel_y0(x)
>>> print(output)
[-0.44451874 0.08825696 0.51037567 -0.01694074]
"""
return bessel_y0_(x)
bessel_y1_ = P.BesselY1()
def bessel_y1(x):
r"""
Computes the Bessel y1 function of x element-wise.
Args:
x (Tensor): The input tensor of shape :math:`(N,*)`, where :math:`*` means
any number of additional dimensions. The data type must be float16, float32 or float64.
Returns:
Tensor, has the same shape and dtype as `x`.
Raises:
TypeError: If `x` is not a Tensor.
TypeError: If dtype of `x` is not float16, float32 or float64.
Supported Platforms:
``CPU``
Examples:
>>> x = Tensor(np.array([0.5, 1., 2., 4.]), mindspore.float32)
>>> output = ops.bessel_y1(x)
>>> print(output)
[-1.47147239 -0.78121282 -0.10703243 0.39792571]
"""
return bessel_y1_(x)
#####################################
# Comparison Operation Functions.
#####################################
@@ -2198,6 +2254,8 @@ __all__ = [
'bessel_i0',
'bessel_i0e',
'bessel_k0',
'bessel_k0e'
'bessel_k0e',
'bessel_y0',
'bessel_y1'
]
__all__.sort()

View File

@@ -75,7 +75,7 @@ from .math_ops import (Abs, ACos, Asin, Asinh, AddN, AccumulateNV2, AssignAdd, A
Sin, Sqrt, Rsqrt, BesselI0, BesselI1, BesselI0e, BesselI1e, TruncateDiv, TruncateMod, Addcdiv,
Addcmul, Square, Sub, TensorAdd, Add, Sign, Round, SquareSumAll, Atan, Atanh, Cosh, Sinh, Eps,
Tan, MatrixInverse, IndexAdd, Erfinv, Conj, Real, Imag, Complex, Trunc, IsClose, LuSolve,
CholeskyInverse, BesselJ0, BesselJ1, BesselK0, BesselK0e)
CholeskyInverse, BesselJ0, BesselJ1, BesselK0, BesselK0e, BesselY0, BesselY1)
from .nn_ops import (LSTM, SGD, Adam, AdamWeightDecay, FusedSparseAdam, FusedSparseLazyAdam, AdamNoUpdateParam,
ApplyMomentum, BatchNorm, BiasAdd, Conv2D, Conv3D, Conv2DTranspose, Conv3DTranspose,
DepthwiseConv2dNative,

View File

@@ -4769,6 +4769,70 @@ class BesselJ1(Primitive):
self.init_prim_io_names(inputs=['x'], outputs=['output'])
class BesselY0(Primitive):
"""
Computes BesselY0 of input element-wise.
Inputs:
- **x** (Tensor) - The shape of tensor is
:math:`(N,*)` where :math:`*` means, any number of additional dimensions.
Data type must be float16, float32 or float64.
Outputs:
Tensor, has the same shape as `x`.
Raises:
TypeError: If `x` is not a Tensor of float16, float32 or float64.
Supported Platforms:
``CPU``
Examples:
>>> bessel_y0 = ops.BesselY0()
>>> x = Tensor(np.array([0.5, 1., 2., 4.]), mindspore.float32)
>>> output = bessel_y0(x)
>>> print(output)
[-0.44451874 0.08825696 0.51037567 -0.01694074]
"""
@prim_attr_register
def __init__(self):
"""Initialize BesselY0"""
self.init_prim_io_names(inputs=['x'], outputs=['output'])
class BesselY1(Primitive):
"""
Computes BesselY1 of input element-wise.
Inputs:
- **x** (Tensor) - The shape of tensor is
:math:`(N,*)` where :math:`*` means, any number of additional dimensions.
Data type must be float16, float32 or float64.
Outputs:
Tensor, has the same shape as `x`.
Raises:
TypeError: If `x` is not a Tensor of float16, float32 or float64.
Supported Platforms:
``CPU``
Examples:
>>> bessel_y1 = ops.BesselY1()
>>> x = Tensor(np.array([0.5, 1., 2., 4.]), mindspore.float32)
>>> output = bessel_y1(x)
>>> print(output)
[-1.47147239 -0.78121282 -0.10703243 0.39792571]
"""
@prim_attr_register
def __init__(self):
"""Initialize BesselY1"""
self.init_prim_io_names(inputs=['x'], outputs=['output'])
class Inv(Primitive):
r"""
Computes Reciprocal of input tensor element-wise.

View File

@@ -139,3 +139,43 @@ def test_bessel_k0e(dtype, eps):
output = F.bessel_k0e(x)
diff = output.asnumpy() - expect
assert np.all(np.abs(diff) < error)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('dtype, eps', [(np.float16, 1.0e-3), (np.float32, 1.0e-6), (np.float64, 1.0e-6)])
def test_bessel_y0(dtype, eps):
"""
Feature: bessel y0 function
Description: test cases for BesselY0
Expectation: the result matches scipy
"""
x = Tensor(np.array([0.5, 1., 2., 4.]).astype(dtype))
expect = np.array([-0.44451874, 0.08825696, 0.51037567, -0.01694074]).astype(dtype)
error = np.ones(shape=[4]) * eps
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
output = F.bessel_y0(x)
diff = output.asnumpy() - expect
assert np.all(np.abs(diff) < error)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('dtype, eps', [(np.float16, 1.0e-3), (np.float32, 1.0e-6), (np.float64, 1.0e-6)])
def test_bessel_y1(dtype, eps):
"""
Feature: bessel y1 function
Description: test cases for BesselY1
Expectation: the result matches scipy
"""
x = Tensor(np.array([0.5, 1., 2., 4.]).astype(dtype))
expect = np.array([-1.47147239, -0.78121282, -0.10703243, 0.39792571]).astype(dtype)
error = np.ones(shape=[4]) * eps
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
output = F.bessel_y1(x)
diff = output.asnumpy() - expect
assert np.all(np.abs(diff) < error)
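For maintenance, the expect arrays in both tests can be regenerated from the reference implementation; a sketch, assuming scipy is installed:

import numpy as np
from scipy import special

x = np.array([0.5, 1., 2., 4.])
print(special.y0(x))  # matches test_bessel_y0's expect array to float32 precision
print(special.y1(x))  # matches test_bessel_y1's expect array to float32 precision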