forked from mindspore-Ecosystem/mindspore
!33167 [feat][assistant][I48O8Z] aicpu add new operator log
Merge pull request !33167 from 郑鹏飞/log
commit e5c2dcbabb
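For context, a minimal usage sketch of the operator this pull request wires up (it mirrors the test case added at the end of the diff; outputs are rounded, and the import style follows the repository's test code):

import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P

x = Tensor(np.array([1.0, 2.0, 4.0], np.float32))
log = P.Log()   # element-wise natural logarithm; float64, complex64 and complex128 are also registered below
y = log(x)      # roughly [0.0, 0.6931, 1.3863]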
@@ -53,6 +53,7 @@ constexpr auto kAtan = "Atan";
constexpr auto kSin = "Sin";
constexpr auto kCos = "Cos";
constexpr auto kTan = "Tan";
constexpr auto kLog = "Log";
constexpr auto kSinh = "Sinh";
constexpr auto kCosh = "Cosh";
constexpr auto kAsinh = "Asinh";

@@ -432,6 +433,26 @@ void Abs(ArithmeticSelfCpuKernelFunc *content, const T *in, T *out, size_t size)
  }
}

template <typename T>
void Log(ArithmeticSelfCpuKernelFunc *content, const T *in, T *out, size_t size) {
  auto task = [&in, &out](size_t start, size_t end) {
    for (size_t i = start; i < end; i++) {
      out[i] = static_cast<T>(log(static_cast<double>(in[i])));
    }
  };
  ParallelLaunchAutoSearch(task, size, content, &content->parallel_search_info_);
}

template <typename T>
void ComplexLog(ArithmeticSelfCpuKernelFunc *content, const T *in, T *out, size_t size) {
  auto task = [&in, &out](size_t start, size_t end) {
    for (size_t i = start; i < end; i++) {
      out[i] = static_cast<T>(log(in[i]));
    }
  };
  ParallelLaunchAutoSearch(task, size, content, &content->parallel_search_info_);
}

template <typename T>
void Sqrt(ArithmeticSelfCpuKernelFunc *content, const T *in, T *out, size_t size) {
  auto task = [&in, &out](size_t start, size_t end) {
@@ -567,33 +588,20 @@ void ArithmeticSelfCpuKernelFunc::LaunchKernel(const std::vector<AddressPtr> &in
  const size_t lens = outputs[0]->size / sizeof(T);
  static const std::unordered_map<std::string,
                                  std::function<void(ArithmeticSelfCpuKernelFunc *, const T *, T *, size_t)>>
    arithmeticSelfFuncMap{{prim::kPrimSquare->name(), Square<T>},
                          {prim::kPrimSign->name(), Sign<T>},
                          {prim::kPrimNeg->name(), Neg<T>},
                          {prim::kPrimAtanh->name(), Atanh<T>},
                          {prim::kPrimAcosh->name(), Acosh<T>},
                          {prim::kPrimFloor->name(), Floor<T>},
                          {prim::kPrimSin->name(), Sin<T>},
                          {prim::kPrimGeLU->name(), Gelu<T>},
                          {prim::kPrimCos->name(), Cos<T>},
                          {prim::kPrimTan->name(), Tan<T>},
                          {prim::kPrimAsin->name(), Asin<T>},
                          {prim::kPrimACos->name(), ACos<T>},
                          {prim::kPrimAtan->name(), Atan<T>},
                          {prim::kPrimSinh->name(), Sinh<T>},
                          {prim::kPrimCosh->name(), Cosh<T>},
                          {prim::kPrimAsinh->name(), Asinh<T>},
                          {prim::kPrimReciprocal->name(), Reciprocal<T>},
                          {prim::kPrimInv->name(), Inv<T>},
                          {prim::kPrimInvert->name(), Invert<T>},
                          {prim::kPrimRint->name(), Rint<T>},
                          {prim::kPrimRound->name(), Round<T>},
                          {prim::kPrimAbs->name(), Abs<T>},
                          {prim::kPrimSqrt->name(), Sqrt<T>},
                          {prim::kPrimRsqrt->name(), Rsqrt<T>},
                          {prim::kPrimErf->name(), Erf<T>},
                          {prim::kPrimErfc->name(), Erfc<T>},
                          {prim::kPrimSoftsign->name(), Softsign<T>},
    arithmeticSelfFuncMap{{prim::kPrimSquare->name(), Square<T>}, {prim::kPrimSign->name(), Sign<T>},
                          {prim::kPrimNeg->name(), Neg<T>}, {prim::kPrimAtanh->name(), Atanh<T>},
                          {prim::kPrimAcosh->name(), Acosh<T>}, {prim::kPrimFloor->name(), Floor<T>},
                          {prim::kPrimSin->name(), Sin<T>}, {prim::kPrimGeLU->name(), Gelu<T>},
                          {prim::kPrimCos->name(), Cos<T>}, {prim::kPrimLog->name(), Log<T>},
                          {prim::kPrimTan->name(), Tan<T>}, {prim::kPrimAsin->name(), Asin<T>},
                          {prim::kPrimACos->name(), ACos<T>}, {prim::kPrimAtan->name(), Atan<T>},
                          {prim::kPrimSinh->name(), Sinh<T>}, {prim::kPrimCosh->name(), Cosh<T>},
                          {prim::kPrimAsinh->name(), Asinh<T>}, {prim::kPrimReciprocal->name(), Reciprocal<T>},
                          {prim::kPrimInv->name(), Inv<T>}, {prim::kPrimInvert->name(), Invert<T>},
                          {prim::kPrimRint->name(), Rint<T>}, {prim::kPrimRound->name(), Round<T>},
                          {prim::kPrimAbs->name(), Abs<T>}, {prim::kPrimSqrt->name(), Sqrt<T>},
                          {prim::kPrimRsqrt->name(), Rsqrt<T>}, {prim::kPrimErf->name(), Erf<T>},
                          {prim::kPrimErfc->name(), Erfc<T>}, {prim::kPrimSoftsign->name(), Softsign<T>},
                          {prim::kPrimRelu->name(), Relu<T>}};

  const auto func_pair = arithmeticSelfFuncMap.find(kernel_name_);
@@ -617,7 +625,8 @@ void ArithmeticSelfCpuKernelFunc::LaunchKernelComplex(const std::vector<AddressP
    {prim::kPrimSinh->name(), ComplexSinh<T>}, {prim::kPrimCosh->name(), ComplexCosh<T>},
    {prim::kPrimSin->name(), ComplexSin<T>}, {prim::kPrimCos->name(), ComplexCos<T>},
    {prim::kPrimRsqrt->name(), Rsqrt<T>}, {prim::kPrimTan->name(), Tan<T>},
    {prim::kPrimAtanh->name(), Atanh<T>}, {prim::kPrimSign->name(), ComplexSign<T>}};
    {prim::kPrimAtanh->name(), Atanh<T>}, {prim::kPrimSign->name(), ComplexSign<T>},
    {prim::kPrimLog->name(), ComplexLog<T>}};
  const auto func_pair = arithmeticSelfFuncMap.find(kernel_name_);
  if (arithmeticSelfFuncMap.find(kernel_name_) == arithmeticSelfFuncMap.end()) {
    MS_LOG(EXCEPTION) << "For 'ArithmeticSelf', it does not support " << kernel_name_ << " with complex as input. ";
@@ -649,6 +658,30 @@ class SqrtMKLKernelFunc : public CpuKernelFunc, private EltWiseCpuKernelMod {
  }
};

// MKLDNN Log
class LogMKLKernelFunc : public CpuKernelFunc, private EltWiseCpuKernelMod {
 public:
  LogMKLKernelFunc() : EltWiseCpuKernelMod(kLog) {}
  ~LogMKLKernelFunc() override = default;

  void InitFunc(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
                const std::vector<KernelTensorPtr> &outputs) override {
    EltWiseCpuKernelMod::Init(base_operator, inputs, outputs);
  }

  int Resize(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
             const std::vector<KernelTensorPtr> &outputs,
             const std::map<uint32_t, tensor::TensorPtr> &inputsOnHost) override {
    // The Resize of EltWiseCpuKernelMod must be called here.
    return EltWiseCpuKernelMod::Resize(base_operator, inputs, outputs, inputsOnHost);
  }

  bool RunFunc(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
               const std::vector<AddressPtr> &outputs) override {
    return EltWiseCpuKernelMod::Launch(inputs, workspace, outputs);
  }
};

std::shared_ptr<CpuKernelFunc> CreateArithSelfFunc() { return std::make_shared<ArithmeticSelfCpuKernelFunc>(); }
using ArithFuncCreator = std::function<std::shared_ptr<CpuKernelFunc>()>;
static std::map<std::string, std::vector<std::pair<KernelAttr, ArithFuncCreator>>> arith_kernel_attr_list_map = {

@@ -766,6 +799,12 @@ static std::map<std::string, std::vector<std::pair<KernelAttr, ArithFuncCreator>
   {{KernelAttr().AddInputAttr(kNumberTypeFloat64).AddOutputAttr(kNumberTypeFloat64), CreateArithSelfFunc},
    {KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
     []() { return std::make_shared<SqrtMKLKernelFunc>(); }}}},
  {kLog,
   {{KernelAttr().AddInputAttr(kNumberTypeFloat64).AddOutputAttr(kNumberTypeFloat64), CreateArithSelfFunc},
    {KernelAttr().AddInputAttr(kNumberTypeComplex64).AddOutputAttr(kNumberTypeComplex64), CreateArithSelfFunc},
    {KernelAttr().AddInputAttr(kNumberTypeComplex128).AddOutputAttr(kNumberTypeComplex128), CreateArithSelfFunc},
    {KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
     []() { return std::make_shared<LogMKLKernelFunc>(); }}}},
  {kErf,
   {{KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32), CreateArithSelfFunc},
    {KernelAttr().AddInputAttr(kNumberTypeFloat64).AddOutputAttr(kNumberTypeFloat64), CreateArithSelfFunc}}},

@@ -927,6 +966,8 @@ MS_KERNEL_FACTORY_REG_BY_CREATOR(NativeCpuKernelMod, Abs,
                                 []() { return std::make_shared<ArithmeticSelfCpuKernelMod>(kAbs); });
MS_KERNEL_FACTORY_REG_BY_CREATOR(NativeCpuKernelMod, Sqrt,
                                 []() { return std::make_shared<ArithmeticSelfCpuKernelMod>(kSqrt); });
MS_KERNEL_FACTORY_REG_BY_CREATOR(NativeCpuKernelMod, Log,
                                 []() { return std::make_shared<ArithmeticSelfCpuKernelMod>(kLog); });
MS_KERNEL_FACTORY_REG_BY_CREATOR(NativeCpuKernelMod, Erf,
                                 []() { return std::make_shared<ArithmeticSelfCpuKernelMod>(kErf); });
MS_KERNEL_FACTORY_REG_BY_CREATOR(NativeCpuKernelMod, Erfc,
@@ -146,7 +146,7 @@ std::map<std::string, std::vector<std::pair<KernelAttr, EltWiseCpuKernelMod::Elt
  {kExp,
   {{KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
     &EltWiseCpuKernelMod::LaunchKernel}}},
  {kLog,
  {prim::kPrimLog->name(),
   {{KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
     &EltWiseCpuKernelMod::LaunchKernel}}},
  {kSigmoid,

@@ -169,7 +169,6 @@ MS_KERNEL_FACTORY_REG_BY_CREATOR(NativeCpuKernelMod, Elu, []() { return std::mak
MS_KERNEL_FACTORY_REG_BY_CREATOR(NativeCpuKernelMod, ReLU6,
                                 []() { return std::make_shared<EltWiseCpuKernelMod>(kReLU6); });
MS_KERNEL_FACTORY_REG_BY_CREATOR(NativeCpuKernelMod, Exp, []() { return std::make_shared<EltWiseCpuKernelMod>(kExp); });
MS_KERNEL_FACTORY_REG_BY_CREATOR(NativeCpuKernelMod, Log, []() { return std::make_shared<EltWiseCpuKernelMod>(kLog); });
MS_KERNEL_FACTORY_REG_BY_CREATOR(NativeCpuKernelMod, Sigmoid,
                                 []() { return std::make_shared<EltWiseCpuKernelMod>(kSigmoid); });
MS_KERNEL_FACTORY_REG_BY_CREATOR(NativeCpuKernelMod, Tanh,
@@ -30,7 +30,6 @@ constexpr auto kElu = "Elu";
constexpr auto kReLU = "ReLU";
constexpr auto kReLU6 = "ReLU6";
constexpr auto kExp = "Exp";
constexpr auto kLog = "Log";
constexpr auto kSigmoid = "Sigmoid";
constexpr auto kTanh = "Tanh";
constexpr auto kSoftplus = "Softplus";
@@ -1,5 +1,5 @@
/**
 * Copyright 2020-2021 Huawei Technologies Co., Ltd
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.

@@ -20,6 +20,8 @@
#include <set>
#include <map>
#include <memory>
#include <complex>
#include <cmath>
#include "ops/op_utils.h"
#include "utils/check_convert_utils.h"
#include "abstract/ops/primitive_infer_map.h"

@@ -28,21 +30,40 @@
namespace mindspore {
namespace ops {
namespace {
using complex64 = std::complex<float>;
using complex128 = std::complex<double>;

template <typename T>
void ImpleLog(void *origin, void *target, size_t size) {
  MS_EXCEPTION_IF_NULL(origin);
  MS_EXCEPTION_IF_NULL(target);
  auto origin_data = reinterpret_cast<T *>(origin);
  auto target_data = reinterpret_cast<T *>(target);
  for (size_t i = 0; i < size; ++i) {
    target_data[i] = static_cast<T>(log(static_cast<double>(origin_data[i])));
  }
}

template <typename T>
void ImpleComplexLog(void *origin, void *target, size_t size) {
  MS_EXCEPTION_IF_NULL(origin);
  MS_EXCEPTION_IF_NULL(target);
  auto origin_data = reinterpret_cast<T *>(origin);
  auto target_data = reinterpret_cast<T *>(target);
  for (size_t i = 0; i < size; ++i) {
    target_data[i] = static_cast<T>(log(origin_data[i]));
  }
}

abstract::ShapePtr LogInferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
  MS_EXCEPTION_IF_NULL(primitive);
  auto prim_name = primitive->name();
  (void)CheckAndConvertUtils::CheckInteger("input numbers", int64_t(input_args.size()), kEqual, 1, prim_name);
  for (const auto &item : input_args) {
    MS_EXCEPTION_IF_NULL(item);
  }
  auto shape_map = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape());
  auto in_shape = shape_map[kShape];
  auto min_shape = shape_map[kMinShape];
  auto max_shape = shape_map[kMaxShape];
  if (min_shape.size() != 0 && max_shape.size() != 0) {
    return std::make_shared<abstract::Shape>(in_shape, min_shape, max_shape);
  }
  return std::make_shared<abstract::Shape>(in_shape);
  (void)CheckAndConvertUtils::CheckArgs<abstract::AbstractTensor>(prim_name, input_args, 0);
  auto x = input_args[kInputIndex0]->BuildShape();
  MS_EXCEPTION_IF_NULL(x);
  auto shape_element = x->cast<abstract::ShapePtr>();
  MS_EXCEPTION_IF_NULL(shape_element);
  return shape_element;
}

TypePtr LogInferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
@@ -56,7 +77,98 @@ TypePtr LogInferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr
  (void)types.emplace("x", input_args[0]->BuildType());
  std::set<TypePtr> valid_params_types = {kTensorType};
  (void)CheckAndConvertUtils::CheckSubClass("x_type", input_args[0]->BuildType(), valid_params_types, op_name);
  return CheckAndConvertUtils::CheckTensorTypeSame(types, common_valid_types, prim->name());
  (void)CheckAndConvertUtils::CheckTensorTypeSame(types, common_valid_types_with_complex, prim->name());
  return input_args[0]->BuildType();
}
ValuePtr LogInferValue(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
  MS_EXCEPTION_IF_NULL(prim);
  if (input_args.empty()) {
    return nullptr;
  }
  for (const auto &item : input_args) {
    MS_EXCEPTION_IF_NULL(item);
  }
  auto x = input_args[kInputIndex0]->BuildValue();
  if (x == nullptr) {
    return nullptr;
  }
  MS_EXCEPTION_IF_NULL(x);
  auto x_tensor = x->cast<tensor::TensorPtr>();
  if (x_tensor == nullptr) {
    return nullptr;
  }
  MS_EXCEPTION_IF_NULL(x_tensor);
  auto data_size = x_tensor->DataSize();
  auto dtype = x_tensor->data_type();
  auto infer_shape = LogInferShape(prim, input_args);
  MS_EXCEPTION_IF_NULL(infer_shape);
  auto shape = infer_shape->shape();
  auto result_tensor = std::make_shared<tensor::Tensor>(dtype, shape);  // same shape and dtype
  auto x_datac = x_tensor->data_c();
  MS_EXCEPTION_IF_NULL(result_tensor);
  auto result_datac = result_tensor->data_c();
  switch (dtype) {
    case kNumberTypeInt8: {
      ImpleLog<int8_t>(x_datac, result_datac, data_size);
      break;
    }
    case kNumberTypeInt16: {
      ImpleLog<int16_t>(x_datac, result_datac, data_size);
      break;
    }
    case kNumberTypeInt32: {
      ImpleLog<int32_t>(x_datac, result_datac, data_size);
      break;
    }
    case kNumberTypeInt64: {
      ImpleLog<int64_t>(x_datac, result_datac, data_size);
      break;
    }
    case kNumberTypeUInt8: {
      ImpleLog<uint8_t>(x_datac, result_datac, data_size);
      break;
    }
    case kNumberTypeUInt16: {
      ImpleLog<uint16_t>(x_datac, result_datac, data_size);
      break;
    }
    case kNumberTypeUInt32: {
      ImpleLog<uint32_t>(x_datac, result_datac, data_size);
      break;
    }
    case kNumberTypeUInt64: {
      ImpleLog<uint64_t>(x_datac, result_datac, data_size);
      break;
    }
    case kNumberTypeFloat16: {
      ImpleLog<float16>(x_datac, result_datac, data_size);
      break;
    }
    case kNumberTypeFloat32: {
      ImpleLog<float>(x_datac, result_datac, data_size);
      break;
    }
    case kNumberTypeFloat64: {
      ImpleLog<double>(x_datac, result_datac, data_size);
      break;
    }
    case kNumberTypeComplex64: {
      ImpleComplexLog<std::complex<float>>(x_datac, result_datac, data_size);
      break;
    }
    case kNumberTypeComplex128: {
      ImpleComplexLog<std::complex<double>>(x_datac, result_datac, data_size);
      break;
    }
    default: {
      MS_EXCEPTION(TypeError)
        << "For '" << prim->name()
        << "', the supported data type is ['int8', 'int16', 'int32', 'int64', 'uint8', "
           "'uint16','uint32', 'uint64','float16', 'float32', 'float64', 'complex64', 'complex128'], but got "
        << x_tensor->ToString();
    }
  }
  return result_tensor;
}
}  // namespace

@@ -65,6 +177,6 @@ AbstractBasePtr LogInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr
                         const std::vector<AbstractBasePtr> &input_args) {
  return abstract::MakeAbstract(LogInferShape(primitive, input_args), LogInferType(primitive, input_args));
}
REGISTER_PRIMITIVE_C(kNameLog, Log);
REGISTER_PRIMITIVE_EVAL_IMPL(Log, prim::kPrimLog, LogInfer, LogInferValue, true);
}  // namespace ops
}  // namespace mindspore
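For intuition, the constant-folding path above (LogInferValue dispatching to ImpleLog / ImpleComplexLog) computes on the host what the following NumPy sketch would produce; this is an illustrative equivalence, not code from the patch:

import numpy as np

x = np.array([1.0, np.e, 4.0], dtype=np.float32)
# ImpleLog evaluates log in double precision and casts back to the input type T.
folded = np.log(x.astype(np.float64)).astype(x.dtype)   # [0.0, 1.0, 1.3862944]

z = np.array([1 + 1j, -1 + 0j], dtype=np.complex64)
# ImpleComplexLog applies the complex logarithm directly (std::log on std::complex).
folded_z = np.log(z).astype(z.dtype)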
@@ -27,6 +27,7 @@ from .blackman_window import _blackman_window_aicpu
from .no_repeat_ngram import _no_repeat_ngram_aicpu
from .init_data_set_queue import _init_data_set_queue_aicpu
from .embedding_lookup import _embedding_lookup_aicpu
from .log import _log_aicpu
from .padding import _padding_aicpu
from .gather import _gather_aicpu
from .gather_grad import _gather_grad_aicpu
@@ -0,0 +1,37 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

"""Log op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType

log_op_info = AiCPURegOp("Log") \
    .fusion_type("OPAQUE") \
    .input(0, "x", "required") \
    .output(0, "y", "required") \
    .attr("base", "float") \
    .attr("scale", "float") \
    .attr("shift", "float") \
    .dtype_format(DataType.F16_Default, DataType.F16_Default) \
    .dtype_format(DataType.F32_Default, DataType.F32_Default) \
    .dtype_format(DataType.F64_Default, DataType.F64_Default) \
    .dtype_format(DataType.C64_Default, DataType.C64_Default) \
    .dtype_format(DataType.C128_Default, DataType.C128_Default) \
    .get_op_info()


@op_info_register(log_op_info)
def _log_aicpu():
    """Log AiCPU register"""
    return
@@ -2481,7 +2481,7 @@ class HistogramFixedWidth(PrimitiveWithInfer):
        return y_dtype


class Log(PrimitiveWithInfer):
class Log(Primitive):
    """
    Returns the natural logarithm of a tensor element-wise.


@@ -2502,21 +2502,10 @@ class Log(PrimitiveWithInfer):
    def __init__(self):
        """Initialize Log."""
        self.init_prim_io_names(inputs=['x'], outputs=['y'])

    def infer_shape(self, x):
        return x

    def infer_dtype(self, x):
        validator.check_subclass("x", x, mstype.tensor, self.name)
        return x

    def infer_value(self, x):
        if x is not None:
            x = x.asnumpy()
            out = np.log(x)
            out = np.array(out, x.dtype)
            return Tensor(out)
        return None
        self.add_prim_attr("cust_aicpu", self.name)
        self.add_prim_attr('base', -1.0)
        self.add_prim_attr('scale', 1.0)
        self.add_prim_attr('shift', 0.0)


class Log1p(Primitive):
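A note on the attributes set in __init__ above: base, scale and shift match the attr declarations in the new AiCPU registration (log.py earlier in this diff). By the usual Ascend Log-operator convention (an assumption on my part, not something stated in this patch), they parameterize log_base(scale * x + shift), with base = -1.0 meaning the natural base e, so the defaults reduce to plain ln(x). A hypothetical NumPy reading:

import numpy as np

def log_with_attrs(x, base=-1.0, scale=1.0, shift=0.0):
    """Hypothetical reference for Log's base/scale/shift attributes (assumed semantics)."""
    y = np.log(scale * x + shift)
    if base > 0:
        y = y / np.log(base)   # change of base; base = -1.0 is treated as natural log
    return y

log_with_attrs(np.array([1.0, 2.0, 4.0], np.float32))   # defaults give ln(x)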
@@ -1793,6 +1793,11 @@ test_case_math_ops = [
        'block': P.GreaterEqual(),
        'desc_inputs': [[2, 3, 4, 1], [4, 5]],
        'desc_bprop': [Tensor(np.ones((2, 3, 4, 5), np.bool_))]}),
    ('Log', {
        'block': P.Log(),
        'desc_inputs': [Tensor(np.array([1.0, 2.0, 4.0], np.float32))],
        'desc_bprop': [Tensor(np.array([1.0, 2.0, 4.0], np.float32))],
        'skip': ['backward']}),
    ('LogicalNot', {
        'block': P.LogicalNot(),
        'desc_inputs': [Tensor(np.zeros((3, 4, 5), np.bool_))],