From 7fd8fe6ce7f79e28fecef5fa2cf012cac8b006dc Mon Sep 17 00:00:00 2001 From: zheng_pengfei <18865382565@163.com> Date: Sat, 14 May 2022 17:01:50 +0800 Subject: [PATCH] [feat][assistant][I48O8Z] aicpu add new operator log --- .../cpu/kernel/arithmetic_self_cpu_kernel.cc | 97 ++++++++---- .../cpu/kernel/mkldnn/eltwise_cpu_kernel.cc | 3 +- .../cpu/kernel/mkldnn/eltwise_cpu_kernel.h | 1 - mindspore/core/ops/log.cc | 142 ++++++++++++++++-- .../mindspore/ops/_op_impl/aicpu/__init__.py | 1 + .../mindspore/ops/_op_impl/aicpu/log.py | 37 +++++ .../mindspore/ops/operations/math_ops.py | 21 +-- tests/ut/python/ops/test_ops.py | 5 + 8 files changed, 245 insertions(+), 62 deletions(-) create mode 100644 mindspore/python/mindspore/ops/_op_impl/aicpu/log.py diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/arithmetic_self_cpu_kernel.cc b/mindspore/ccsrc/plugin/device/cpu/kernel/arithmetic_self_cpu_kernel.cc index b25716ddead..6715571717b 100644 --- a/mindspore/ccsrc/plugin/device/cpu/kernel/arithmetic_self_cpu_kernel.cc +++ b/mindspore/ccsrc/plugin/device/cpu/kernel/arithmetic_self_cpu_kernel.cc @@ -53,6 +53,7 @@ constexpr auto kAtan = "Atan"; constexpr auto kSin = "Sin"; constexpr auto kCos = "Cos"; constexpr auto kTan = "Tan"; +constexpr auto kLog = "Log"; constexpr auto kSinh = "Sinh"; constexpr auto kCosh = "Cosh"; constexpr auto kAsinh = "Asinh"; @@ -432,6 +433,26 @@ void Abs(ArithmeticSelfCpuKernelFunc *content, const T *in, T *out, size_t size) } } +template +void Log(ArithmeticSelfCpuKernelFunc *content, const T *in, T *out, size_t size) { + auto task = [&in, &out](size_t start, size_t end) { + for (size_t i = start; i < end; i++) { + out[i] = static_cast(log(static_cast(in[i]))); + } + }; + ParallelLaunchAutoSearch(task, size, content, &content->parallel_search_info_); +} + +template +void ComplexLog(ArithmeticSelfCpuKernelFunc *content, const T *in, T *out, size_t size) { + auto task = [&in, &out](size_t start, size_t end) { + for (size_t i = start; i < end; i++) { + out[i] = static_cast(log(in[i])); + } + }; + ParallelLaunchAutoSearch(task, size, content, &content->parallel_search_info_); +} + template void Sqrt(ArithmeticSelfCpuKernelFunc *content, const T *in, T *out, size_t size) { auto task = [&in, &out](size_t start, size_t end) { @@ -567,33 +588,20 @@ void ArithmeticSelfCpuKernelFunc::LaunchKernel(const std::vector &in const size_t lens = outputs[0]->size / sizeof(T); static const std::unordered_map> - arithmeticSelfFuncMap{{prim::kPrimSquare->name(), Square}, - {prim::kPrimSign->name(), Sign}, - {prim::kPrimNeg->name(), Neg}, - {prim::kPrimAtanh->name(), Atanh}, - {prim::kPrimAcosh->name(), Acosh}, - {prim::kPrimFloor->name(), Floor}, - {prim::kPrimSin->name(), Sin}, - {prim::kPrimGeLU->name(), Gelu}, - {prim::kPrimCos->name(), Cos}, - {prim::kPrimTan->name(), Tan}, - {prim::kPrimAsin->name(), Asin}, - {prim::kPrimACos->name(), ACos}, - {prim::kPrimAtan->name(), Atan}, - {prim::kPrimSinh->name(), Sinh}, - {prim::kPrimCosh->name(), Cosh}, - {prim::kPrimAsinh->name(), Asinh}, - {prim::kPrimReciprocal->name(), Reciprocal}, - {prim::kPrimInv->name(), Inv}, - {prim::kPrimInvert->name(), Invert}, - {prim::kPrimRint->name(), Rint}, - {prim::kPrimRound->name(), Round}, - {prim::kPrimAbs->name(), Abs}, - {prim::kPrimSqrt->name(), Sqrt}, - {prim::kPrimRsqrt->name(), Rsqrt}, - {prim::kPrimErf->name(), Erf}, - {prim::kPrimErfc->name(), Erfc}, - {prim::kPrimSoftsign->name(), Softsign}, + arithmeticSelfFuncMap{{prim::kPrimSquare->name(), Square}, {prim::kPrimSign->name(), Sign}, + 
{prim::kPrimNeg->name(), Neg}, {prim::kPrimAtanh->name(), Atanh}, + {prim::kPrimAcosh->name(), Acosh}, {prim::kPrimFloor->name(), Floor}, + {prim::kPrimSin->name(), Sin}, {prim::kPrimGeLU->name(), Gelu}, + {prim::kPrimCos->name(), Cos}, {prim::kPrimLog->name(), Log}, + {prim::kPrimTan->name(), Tan}, {prim::kPrimAsin->name(), Asin}, + {prim::kPrimACos->name(), ACos}, {prim::kPrimAtan->name(), Atan}, + {prim::kPrimSinh->name(), Sinh}, {prim::kPrimCosh->name(), Cosh}, + {prim::kPrimAsinh->name(), Asinh}, {prim::kPrimReciprocal->name(), Reciprocal}, + {prim::kPrimInv->name(), Inv}, {prim::kPrimInvert->name(), Invert}, + {prim::kPrimRint->name(), Rint}, {prim::kPrimRound->name(), Round}, + {prim::kPrimAbs->name(), Abs}, {prim::kPrimSqrt->name(), Sqrt}, + {prim::kPrimRsqrt->name(), Rsqrt}, {prim::kPrimErf->name(), Erf}, + {prim::kPrimErfc->name(), Erfc}, {prim::kPrimSoftsign->name(), Softsign}, {prim::kPrimRelu->name(), Relu}}; const auto func_pair = arithmeticSelfFuncMap.find(kernel_name_); @@ -617,7 +625,8 @@ void ArithmeticSelfCpuKernelFunc::LaunchKernelComplex(const std::vectorname(), ComplexSinh}, {prim::kPrimCosh->name(), ComplexCosh}, {prim::kPrimSin->name(), ComplexSin}, {prim::kPrimCos->name(), ComplexCos}, {prim::kPrimRsqrt->name(), Rsqrt}, {prim::kPrimTan->name(), Tan}, - {prim::kPrimAtanh->name(), Atanh}, {prim::kPrimSign->name(), ComplexSign}}; + {prim::kPrimAtanh->name(), Atanh}, {prim::kPrimSign->name(), ComplexSign}, + {prim::kPrimLog->name(), ComplexLog}}; const auto func_pair = arithmeticSelfFuncMap.find(kernel_name_); if (arithmeticSelfFuncMap.find(kernel_name_) == arithmeticSelfFuncMap.end()) { MS_LOG(EXCEPTION) << "For 'ArithmeticSelf', it does not support " << kernel_name_ << " with complex as input. "; @@ -649,6 +658,30 @@ class SqrtMKLKernelFunc : public CpuKernelFunc, private EltWiseCpuKernelMod { } }; +// MKLDNN Log +class LogMKLKernelFunc : public CpuKernelFunc, private EltWiseCpuKernelMod { + public: + LogMKLKernelFunc() : EltWiseCpuKernelMod(kLog) {} + ~LogMKLKernelFunc() override = default; + + void InitFunc(const BaseOperatorPtr &base_operator, const std::vector &inputs, + const std::vector &outputs) override { + EltWiseCpuKernelMod::Init(base_operator, inputs, outputs); + } + + int Resize(const BaseOperatorPtr &base_operator, const std::vector &inputs, + const std::vector &outputs, + const std::map &inputsOnHost) override { + // The Resize of EltWiseCpuKernelMod must be called here. 
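+    // Presumably this is what lets the base class rebuild its oneDNN eltwise primitive and memory
+    // descriptors for the new input shape; skipping that call would leave the kernel configured for
+    // the previous shape.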
+ return EltWiseCpuKernelMod::Resize(base_operator, inputs, outputs, inputsOnHost); + } + + bool RunFunc(const std::vector &inputs, const std::vector &workspace, + const std::vector &outputs) override { + return EltWiseCpuKernelMod::Launch(inputs, workspace, outputs); + } +}; + std::shared_ptr CreateArithSelfFunc() { return std::make_shared(); } using ArithFuncCreator = std::function()>; static std::map>> arith_kernel_attr_list_map = { @@ -766,6 +799,12 @@ static std::map {{KernelAttr().AddInputAttr(kNumberTypeFloat64).AddOutputAttr(kNumberTypeFloat64), CreateArithSelfFunc}, {KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32), []() { return std::make_shared(); }}}}, + {kLog, + {{KernelAttr().AddInputAttr(kNumberTypeFloat64).AddOutputAttr(kNumberTypeFloat64), CreateArithSelfFunc}, + {KernelAttr().AddInputAttr(kNumberTypeComplex64).AddOutputAttr(kNumberTypeComplex64), CreateArithSelfFunc}, + {KernelAttr().AddInputAttr(kNumberTypeComplex128).AddOutputAttr(kNumberTypeComplex128), CreateArithSelfFunc}, + {KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32), + []() { return std::make_shared(); }}}}, {kErf, {{KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32), CreateArithSelfFunc}, {KernelAttr().AddInputAttr(kNumberTypeFloat64).AddOutputAttr(kNumberTypeFloat64), CreateArithSelfFunc}}}, @@ -927,6 +966,8 @@ MS_KERNEL_FACTORY_REG_BY_CREATOR(NativeCpuKernelMod, Abs, []() { return std::make_shared(kAbs); }); MS_KERNEL_FACTORY_REG_BY_CREATOR(NativeCpuKernelMod, Sqrt, []() { return std::make_shared(kSqrt); }); +MS_KERNEL_FACTORY_REG_BY_CREATOR(NativeCpuKernelMod, Log, + []() { return std::make_shared(kLog); }); MS_KERNEL_FACTORY_REG_BY_CREATOR(NativeCpuKernelMod, Erf, []() { return std::make_shared(kErf); }); MS_KERNEL_FACTORY_REG_BY_CREATOR(NativeCpuKernelMod, Erfc, diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/mkldnn/eltwise_cpu_kernel.cc b/mindspore/ccsrc/plugin/device/cpu/kernel/mkldnn/eltwise_cpu_kernel.cc index c48076aebb7..41c1d4eb718 100644 --- a/mindspore/ccsrc/plugin/device/cpu/kernel/mkldnn/eltwise_cpu_kernel.cc +++ b/mindspore/ccsrc/plugin/device/cpu/kernel/mkldnn/eltwise_cpu_kernel.cc @@ -146,7 +146,7 @@ std::mapname(), {{KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32), &EltWiseCpuKernelMod::LaunchKernel}}}, {kSigmoid, @@ -169,7 +169,6 @@ MS_KERNEL_FACTORY_REG_BY_CREATOR(NativeCpuKernelMod, Elu, []() { return std::mak MS_KERNEL_FACTORY_REG_BY_CREATOR(NativeCpuKernelMod, ReLU6, []() { return std::make_shared(kReLU6); }); MS_KERNEL_FACTORY_REG_BY_CREATOR(NativeCpuKernelMod, Exp, []() { return std::make_shared(kExp); }); -MS_KERNEL_FACTORY_REG_BY_CREATOR(NativeCpuKernelMod, Log, []() { return std::make_shared(kLog); }); MS_KERNEL_FACTORY_REG_BY_CREATOR(NativeCpuKernelMod, Sigmoid, []() { return std::make_shared(kSigmoid); }); MS_KERNEL_FACTORY_REG_BY_CREATOR(NativeCpuKernelMod, Tanh, diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/mkldnn/eltwise_cpu_kernel.h b/mindspore/ccsrc/plugin/device/cpu/kernel/mkldnn/eltwise_cpu_kernel.h index e699cc47782..b82948ac24e 100644 --- a/mindspore/ccsrc/plugin/device/cpu/kernel/mkldnn/eltwise_cpu_kernel.h +++ b/mindspore/ccsrc/plugin/device/cpu/kernel/mkldnn/eltwise_cpu_kernel.h @@ -30,7 +30,6 @@ constexpr auto kElu = "Elu"; constexpr auto kReLU = "ReLU"; constexpr auto kReLU6 = "ReLU6"; constexpr auto kExp = "Exp"; -constexpr auto kLog = "Log"; constexpr auto kSigmoid = "Sigmoid"; constexpr auto kTanh = "Tanh"; constexpr auto 
kSoftplus = "Softplus"; diff --git a/mindspore/core/ops/log.cc b/mindspore/core/ops/log.cc index ee4526643c6..47d8aa81f7e 100644 --- a/mindspore/core/ops/log.cc +++ b/mindspore/core/ops/log.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020-2021 Huawei Technologies Co., Ltd + * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,6 +20,8 @@ #include #include #include +#include +#include #include "ops/op_utils.h" #include "utils/check_convert_utils.h" #include "abstract/ops/primitive_infer_map.h" @@ -28,21 +30,40 @@ namespace mindspore { namespace ops { namespace { +using complex64 = std::complex; +using complex128 = std::complex; + +template +void ImpleLog(void *origin, void *target, size_t size) { + MS_EXCEPTION_IF_NULL(origin); + MS_EXCEPTION_IF_NULL(target); + auto origin_data = reinterpret_cast(origin); + auto target_data = reinterpret_cast(target); + for (size_t i = 0; i < size; ++i) { + target_data[i] = static_cast(log(static_cast(origin_data[i]))); + } +} + +template +void ImpleComplexLog(void *origin, void *target, size_t size) { + MS_EXCEPTION_IF_NULL(origin); + MS_EXCEPTION_IF_NULL(target); + auto origin_data = reinterpret_cast(origin); + auto target_data = reinterpret_cast(target); + for (size_t i = 0; i < size; ++i) { + target_data[i] = static_cast(log(origin_data[i])); + } +} + abstract::ShapePtr LogInferShape(const PrimitivePtr &primitive, const std::vector &input_args) { MS_EXCEPTION_IF_NULL(primitive); auto prim_name = primitive->name(); - (void)CheckAndConvertUtils::CheckInteger("input numbers", int64_t(input_args.size()), kEqual, 1, prim_name); - for (const auto &item : input_args) { - MS_EXCEPTION_IF_NULL(item); - } - auto shape_map = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape()); - auto in_shape = shape_map[kShape]; - auto min_shape = shape_map[kMinShape]; - auto max_shape = shape_map[kMaxShape]; - if (min_shape.size() != 0 && max_shape.size() != 0) { - return std::make_shared(in_shape, min_shape, max_shape); - } - return std::make_shared(in_shape); + (void)CheckAndConvertUtils::CheckArgs(prim_name, input_args, 0); + auto x = input_args[kInputIndex0]->BuildShape(); + MS_EXCEPTION_IF_NULL(x); + auto shape_element = x->cast(); + MS_EXCEPTION_IF_NULL(shape_element); + return shape_element; } TypePtr LogInferType(const PrimitivePtr &prim, const std::vector &input_args) { @@ -56,7 +77,98 @@ TypePtr LogInferType(const PrimitivePtr &prim, const std::vectorBuildType()); std::set valid_params_types = {kTensorType}; (void)CheckAndConvertUtils::CheckSubClass("x_type", input_args[0]->BuildType(), valid_params_types, op_name); - return CheckAndConvertUtils::CheckTensorTypeSame(types, common_valid_types, prim->name()); + (void)CheckAndConvertUtils::CheckTensorTypeSame(types, common_valid_types_with_complex, prim->name()); + return input_args[0]->BuildType(); +} +ValuePtr LogInferValue(const PrimitivePtr &prim, const std::vector &input_args) { + MS_EXCEPTION_IF_NULL(prim); + if (input_args.empty()) { + return nullptr; + } + for (const auto &item : input_args) { + MS_EXCEPTION_IF_NULL(item); + } + auto x = input_args[kInputIndex0]->BuildValue(); + if (x == nullptr) { + return nullptr; + } + MS_EXCEPTION_IF_NULL(x); + auto x_tensor = x->cast(); + if (x_tensor == nullptr) { + return nullptr; + } + MS_EXCEPTION_IF_NULL(x_tensor); + auto data_size = x_tensor->DataSize(); + auto dtype = x_tensor->data_type(); + auto infer_shape = 
LogInferShape(prim, input_args); + MS_EXCEPTION_IF_NULL(infer_shape); + auto shape = infer_shape->shape(); + auto result_tensor = std::make_shared(dtype, shape); // same shape and dtype + auto x_datac = x_tensor->data_c(); + MS_EXCEPTION_IF_NULL(result_tensor); + auto result_datac = result_tensor->data_c(); + switch (dtype) { + case kNumberTypeInt8: { + ImpleLog(x_datac, result_datac, data_size); + break; + } + case kNumberTypeInt16: { + ImpleLog(x_datac, result_datac, data_size); + break; + } + case kNumberTypeInt32: { + ImpleLog(x_datac, result_datac, data_size); + break; + } + case kNumberTypeInt64: { + ImpleLog(x_datac, result_datac, data_size); + break; + } + case kNumberTypeUInt8: { + ImpleLog(x_datac, result_datac, data_size); + break; + } + case kNumberTypeUInt16: { + ImpleLog(x_datac, result_datac, data_size); + break; + } + case kNumberTypeUInt32: { + ImpleLog(x_datac, result_datac, data_size); + break; + } + case kNumberTypeUInt64: { + ImpleLog(x_datac, result_datac, data_size); + break; + } + case kNumberTypeFloat16: { + ImpleLog(x_datac, result_datac, data_size); + break; + } + case kNumberTypeFloat32: { + ImpleLog(x_datac, result_datac, data_size); + break; + } + case kNumberTypeFloat64: { + ImpleLog(x_datac, result_datac, data_size); + break; + } + case kNumberTypeComplex64: { + ImpleComplexLog>(x_datac, result_datac, data_size); + break; + } + case kNumberTypeComplex128: { + ImpleComplexLog>(x_datac, result_datac, data_size); + break; + } + default: { + MS_EXCEPTION(TypeError) + << "For '" << prim->name() + << "', the supported data type is ['int8', 'int16', 'int32', 'int64', 'uint8', " + "'uint16','uint32', 'uint64','float16', 'float32', 'float64', 'complex64', 'complex128'], but got " + << x_tensor->ToString(); + } + } + return result_tensor; } } // namespace @@ -65,6 +177,6 @@ AbstractBasePtr LogInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr const std::vector &input_args) { return abstract::MakeAbstract(LogInferShape(primitive, input_args), LogInferType(primitive, input_args)); } -REGISTER_PRIMITIVE_C(kNameLog, Log); +REGISTER_PRIMITIVE_EVAL_IMPL(Log, prim::kPrimLog, LogInfer, LogInferValue, true); } // namespace ops } // namespace mindspore diff --git a/mindspore/python/mindspore/ops/_op_impl/aicpu/__init__.py b/mindspore/python/mindspore/ops/_op_impl/aicpu/__init__.py index 13328da027b..61aaf7f7b65 100644 --- a/mindspore/python/mindspore/ops/_op_impl/aicpu/__init__.py +++ b/mindspore/python/mindspore/ops/_op_impl/aicpu/__init__.py @@ -25,6 +25,7 @@ from .blackman_window import _blackman_window_aicpu from .no_repeat_ngram import _no_repeat_ngram_aicpu from .init_data_set_queue import _init_data_set_queue_aicpu from .embedding_lookup import _embedding_lookup_aicpu +from .log import _log_aicpu from .padding import _padding_aicpu from .gather import _gather_aicpu from .gather_grad import _gather_grad_aicpu diff --git a/mindspore/python/mindspore/ops/_op_impl/aicpu/log.py b/mindspore/python/mindspore/ops/_op_impl/aicpu/log.py new file mode 100644 index 00000000000..b64664b61e5 --- /dev/null +++ b/mindspore/python/mindspore/ops/_op_impl/aicpu/log.py @@ -0,0 +1,37 @@ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""Log op""" +from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType + +log_op_info = AiCPURegOp("Log") \ + .fusion_type("OPAQUE") \ + .input(0, "x", "required") \ + .output(0, "y", "required") \ + .attr("base", "float") \ + .attr("scale", "float") \ + .attr("shift", "float") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F64_Default, DataType.F64_Default) \ + .dtype_format(DataType.C64_Default, DataType.C64_Default) \ + .dtype_format(DataType.C128_Default, DataType.C128_Default) \ + .get_op_info() + + +@op_info_register(log_op_info) +def _log_aicpu(): + """Log AiCPU register""" + return diff --git a/mindspore/python/mindspore/ops/operations/math_ops.py b/mindspore/python/mindspore/ops/operations/math_ops.py index 530d7389096..d70a88b9299 100644 --- a/mindspore/python/mindspore/ops/operations/math_ops.py +++ b/mindspore/python/mindspore/ops/operations/math_ops.py @@ -2413,7 +2413,7 @@ class HistogramFixedWidth(PrimitiveWithInfer): return y_dtype -class Log(PrimitiveWithInfer): +class Log(Primitive): """ Returns the natural logarithm of a tensor element-wise. @@ -2434,21 +2434,10 @@ class Log(PrimitiveWithInfer): def __init__(self): """Initialize Log.""" self.init_prim_io_names(inputs=['x'], outputs=['y']) - - def infer_shape(self, x): - return x - - def infer_dtype(self, x): - validator.check_subclass("x", x, mstype.tensor, self.name) - return x - - def infer_value(self, x): - if x is not None: - x = x.asnumpy() - out = np.log(x) - out = np.array(out, x.dtype) - return Tensor(out) - return None + self.add_prim_attr("cust_aicpu", self.name) + self.add_prim_attr('base', -1.0) + self.add_prim_attr('scale', 1.0) + self.add_prim_attr('shift', 0.0) class Log1p(Primitive): diff --git a/tests/ut/python/ops/test_ops.py b/tests/ut/python/ops/test_ops.py index ab1796ecd96..52a573109db 100755 --- a/tests/ut/python/ops/test_ops.py +++ b/tests/ut/python/ops/test_ops.py @@ -1739,6 +1739,11 @@ test_case_math_ops = [ 'block': P.GreaterEqual(), 'desc_inputs': [[2, 3, 4, 1], [4, 5]], 'desc_bprop': [Tensor(np.ones((2, 3, 4, 5), np.bool_))]}), + ('Log', { + 'block': P.Log(), + 'desc_inputs': [Tensor(np.array([1.0, 2.0, 4.0], np.float32))], + 'desc_bprop': [Tensor(np.array([1.0, 2.0, 4.0], np.float32))], + 'skip': ['backward']}), ('LogicalNot', { 'block': P.LogicalNot(), 'desc_inputs': [Tensor(np.zeros((3, 4, 5), np.bool_))],
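
For context, a minimal usage sketch of the reworked Log primitive (illustrative only, not part of the patch; it assumes a CPU build with this change applied and the standard mindspore import paths). The base/scale/shift attributes seeded in math_ops.py appear to follow the usual convention for this operator family, where base = -1.0 stands for the natural base e, so the defaults reduce to a plain natural logarithm:

import numpy as np
import mindspore as ms
from mindspore import Tensor, ops

# Natural log of a small float32 tensor, mirroring the values used in the new test_ops.py case.
x = Tensor(np.array([1.0, 2.0, 4.0]), ms.float32)
print(ops.Log()(x))  # expected to be close to np.log([1.0, 2.0, 4.0]) = [0.0, 0.6931, 1.3863]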
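
The patch also threads complex64/complex128 through the CPU kernel (ComplexLog), the type check (common_valid_types_with_complex) and the AiCPU registration, so a call along these lines is expected to work as well — assuming the installed build exposes complex tensors on this backend:

import numpy as np
import mindspore as ms
from mindspore import Tensor, ops

# Elementwise complex logarithm; log(-1) should come out as roughly 0 + 3.14159j.
xc = Tensor(np.array([1.0 + 1.0j, -1.0 + 0.0j]), ms.complex64)
print(ops.Log()(xc))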