[feat][assistant][I48O73] add new operator log1p

zheng_pengfei 2022-04-24 12:20:31 +08:00
parent 2cf5fec6d9
commit 1c7707eb4c
7 changed files with 177 additions and 2 deletions

@@ -0,0 +1,75 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/cpu/kernel/log1p_cpu_kernel.h"
#include <cmath>
#include "plugin/device/cpu/hal/device/cpu_device_address.h"
namespace mindspore {
namespace kernel {
namespace {
constexpr size_t kLog1pInputsNum = 1;
constexpr size_t kLog1pOutputsNum = 1;
} // namespace
void Log1pCpuKernelMod::InitKernel(const CNodePtr &kernel_node) {
  MS_EXCEPTION_IF_NULL(kernel_node);
  kernel_name_ = common::AnfAlgo::GetCNodeName(kernel_node);
  input_dtype_ = AnfAlgo::GetInputDeviceDataType(kernel_node, 0);
  if (input_dtype_ != kNumberTypeFloat16 && input_dtype_ != kNumberTypeFloat32 && input_dtype_ != kNumberTypeFloat64 &&
      input_dtype_ != kNumberTypeComplex64 && input_dtype_ != kNumberTypeComplex128) {
    MS_LOG(EXCEPTION) << "For '" << kernel_name_
                      << "', the dtype of input should be Float16, Float32, Float64, Complex64 or Complex128, but got: "
                      << TypeIdLabel(input_dtype_);
  }
}

bool Log1pCpuKernelMod::Launch(const std::vector<kernel::AddressPtr> &inputs, const std::vector<kernel::AddressPtr> &,
                               const std::vector<kernel::AddressPtr> &outputs) {
  CHECK_KERNEL_INPUTS_NUM(inputs.size(), kLog1pInputsNum, kernel_name_);
  CHECK_KERNEL_OUTPUTS_NUM(outputs.size(), kLog1pOutputsNum, kernel_name_);
  if (input_dtype_ == kNumberTypeFloat16) {
    LaunchKernel<float16>(inputs, outputs);
  } else if (input_dtype_ == kNumberTypeFloat32) {
    LaunchKernel<float>(inputs, outputs);
  } else if (input_dtype_ == kNumberTypeFloat64) {
    LaunchKernel<double>(inputs, outputs);
  } else if (input_dtype_ == kNumberTypeComplex64) {
    LaunchKernel<std::complex<float>>(inputs, outputs);
  } else if (input_dtype_ == kNumberTypeComplex128) {
    LaunchKernel<std::complex<double>>(inputs, outputs);
  } else {
    MS_LOG(EXCEPTION) << "For '" << kernel_name_
                      << "', the dtype of input should be Float16, Float32, Float64, Complex64 or Complex128, but got: "
                      << TypeIdLabel(input_dtype_);
  }
  return true;
}

template <typename T>
void Log1pCpuKernelMod::LaunchKernel(const std::vector<AddressPtr> &inputs,
                                     const std::vector<kernel::AddressPtr> &outputs) {
  const auto *input = reinterpret_cast<T *>(inputs[0]->addr);
  auto *output = reinterpret_cast<T *>(outputs[0]->addr);
  size_t elem_num = inputs[0]->size / sizeof(T);
  // Element-wise log1p computed as log(1 + x); for complex T this resolves to the complex logarithm.
  for (size_t i = 0; i < elem_num; i++) {
    output[i] = log(input[i] + T(1));
  }
}

MS_KERNEL_FACTORY_REG(NativeCpuKernelMod, Log1p, Log1pCpuKernelMod);
} // namespace kernel
} // namespace mindspore
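The element-wise formula above, log(input[i] + T(1)), can be sanity-checked against NumPy's reference log1p; a minimal sketch (NumPy only, not part of this commit):

import numpy as np

# The kernel computes log(1 + x) element-wise; complex inputs take the
# std::complex<float>/std::complex<double> branches above.
x = np.array([0.0, 1.0, 3.0], dtype=np.float32)
np.testing.assert_allclose(np.log(x + 1.0), np.log1p(x), rtol=1e-6)

# Caveat of the log(x + 1) form: for x far below the dtype's epsilon,
# x + 1 rounds to exactly 1, so the result collapses to 0, while a fused
# log1p keeps the leading term.
tiny = np.array([1e-8], dtype=np.float32)
print(np.log(tiny + 1.0))  # [0.]
print(np.log1p(tiny))      # [1.e-08]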

@@ -0,0 +1,61 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_LOG1P_CPU_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_LOG1P_CPU_KERNEL_H_
#include <complex>
#include <vector>
#include "plugin/device/cpu/kernel/cpu_kernel.h"
#include "plugin/factory/ms_factory.h"
using complex64 = std::complex<float>;
using complex128 = std::complex<double>;
namespace mindspore {
namespace kernel {
class Log1pCpuKernelMod : public DeprecatedNativeCpuKernelMod {
 public:
  Log1pCpuKernelMod() = default;
  ~Log1pCpuKernelMod() override = default;

  void InitKernel(const CNodePtr &kernel_node) override;

  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
              const std::vector<AddressPtr> &outputs) override;

 protected:
  std::vector<KernelAttr> GetOpSupport() override {
    static std::vector<KernelAttr> support_list = {
      KernelAttr().AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumberTypeFloat16),
      KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
      KernelAttr().AddInputAttr(kNumberTypeFloat64).AddOutputAttr(kNumberTypeFloat64),
      KernelAttr().AddInputAttr(kNumberTypeComplex64).AddOutputAttr(kNumberTypeComplex64),
      KernelAttr().AddInputAttr(kNumberTypeComplex128).AddOutputAttr(kNumberTypeComplex128)};
    return support_list;
  }

 private:
  template <typename T>
  void LaunchKernel(const std::vector<AddressPtr> &inputs, const std::vector<kernel::AddressPtr> &outputs);

  TypeId input_dtype_{kTypeUnknown};
};
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_LOG1P_CPU_KERNEL_H_
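The GetOpSupport list pairs every input dtype with the identical output dtype. A hedged sketch of that contract from the Python side, assuming a MindSpore build that contains this commit and a CPU device target:

import numpy as np
import mindspore as ms
from mindspore import Tensor
import mindspore.ops.operations as P

ms.context.set_context(device_target="CPU")
log1p = P.Log1p()
# Each dtype in the support list maps to itself, float16 -> float16
# through complex128 -> complex128.
for np_dtype in (np.float16, np.float32, np.float64, np.complex64, np.complex128):
    y = log1p(Tensor(np.ones((2, 2)).astype(np_dtype)))
    assert y.asnumpy().dtype == np_dtype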

@@ -40,7 +40,7 @@ TypePtr Log1pInferType(const PrimitivePtr &prim, const std::vector<AbstractBaseP
  MS_EXCEPTION_IF_NULL(prim);
  auto prim_name = prim->name();
  // check
-  std::set<TypePtr> valid_index_types = {kFloat16, kFloat32};
+  std::set<TypePtr> valid_index_types = {kFloat16, kFloat32, kFloat64, kComplex64, kComplex128};
  auto x_type = input_args[0]->BuildType();
  (void)CheckAndConvertUtils::CheckTensorTypeValid("x", input_args[0]->BuildType(), valid_index_types, prim_name);
  return x_type;
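With the expanded valid_index_types set, Log1pInferType now accepts float64 and complex tensors while still rejecting everything else with a TypeError. A small sketch of the visible behavior (exact error text varies across versions):

import numpy as np
from mindspore import Tensor
import mindspore.ops.operations as P

log1p = P.Log1p()
# float64 now passes the CheckTensorTypeValid call above.
print(log1p(Tensor(np.array([0.5], dtype=np.float64))))
# int32 is outside the valid set and is rejected at infer time.
try:
    log1p(Tensor(np.array([1], dtype=np.int32)))
except TypeError as e:
    print("rejected:", e)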

@@ -41,6 +41,7 @@ from .get_next import _get_next_aicpu
from .print_tensor import _print_aicpu
from .topk import _top_k_aicpu
from .logical_xor import _logical_xor_aicpu
+from .log1p import _log1p_aicpu
from .asin import _asin_aicpu
from .asin_grad import _asin_grad_aicpu
from .is_finite import _is_finite_aicpu

@@ -0,0 +1,34 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Log1p op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType

log1p_op_info = AiCPURegOp("Log1p") \
    .fusion_type("OPAQUE") \
    .input(0, "x", "required") \
    .output(0, "y", "required") \
    .dtype_format(DataType.F16_Default, DataType.F16_Default) \
    .dtype_format(DataType.F32_Default, DataType.F32_Default) \
    .dtype_format(DataType.F64_Default, DataType.F64_Default) \
    .dtype_format(DataType.C64_Default, DataType.C64_Default) \
    .dtype_format(DataType.C128_Default, DataType.C128_Default) \
    .get_op_info()


@op_info_register(log1p_op_info)
def _log1p_aicpu():
    """Log1p AiCPU register"""
    return

@@ -2622,7 +2622,7 @@ class Log1p(Primitive):
        TypeError: If dtype of `x` is neither float16 nor float32.

    Supported Platforms:
-        ``Ascend`` ``GPU``
+        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
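With the CPU kernel registered, the docstring example now also runs under device_target="CPU"; a minimal sketch (expected values are log(1 + x)):

import numpy as np
import mindspore
from mindspore import Tensor, context
import mindspore.ops.operations as P

context.set_context(device_target="CPU")
x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
print(P.Log1p()(x))  # approximately [0.6931472 1.0986123 1.609438 ]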

@@ -1266,6 +1266,10 @@ test_case_math_ops = [
        'block': P.Expm1(),
        'desc_inputs': [[2, 3]],
        'desc_bprop': [[2, 3]]}),
+    ('Log1p', {
+        'block': P.Log1p(),
+        'desc_inputs': [[1, 2, 3]],
+        'desc_bprop': [[1, 2, 3]]}),
    ('Erf', {
        'block': P.Erf(),
        'desc_inputs': [Tensor(np.array([-2, -1, 0, 1, 2]).astype(np.float16))],
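The ('Log1p', ...) entry feeds the generic forward and backprop checks driven by desc_inputs and desc_bprop. Analytically, d/dx log1p(x) = 1/(1 + x), which can also be checked directly; a sketch assuming PyNative mode on CPU (GradOperation usage may differ slightly across MindSpore versions):

import numpy as np
import mindspore as ms
from mindspore import Tensor
import mindspore.ops as ops
import mindspore.ops.operations as P

ms.context.set_context(mode=ms.context.PYNATIVE_MODE, device_target="CPU")
log1p = P.Log1p()
grad = ops.GradOperation()

def forward(t):
    return log1p(t)

x = Tensor(np.array([0.0, 1.0, 3.0], dtype=np.float32))
dx = grad(forward)(x)
# d/dx log(1 + x) = 1 / (1 + x)
np.testing.assert_allclose(dx.asnumpy(), 1.0 / (1.0 + x.asnumpy()), rtol=1e-5)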