[feat] [assistant] [I48OB2] add new operator Pow
This commit is contained in:
windhxs 2021-11-02 14:11:07 +08:00
parent 4daacb6f7f
commit 2028aff643
5 changed files with 103 additions and 14 deletions

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -15,6 +15,9 @@
*/
#include "ops/pow.h"
#include <set>
#include <utility>
#include "ops/op_utils.h"
#include "utils/check_convert_utils.h"
#include "abstract/primitive_infer_map.h"
@ -22,28 +25,63 @@
namespace mindspore {
namespace ops {
namespace {
// Infers the output shape of Pow by broadcasting the shapes of its two inputs.
// Min/max shapes are propagated as well so dynamic-shape graphs keep their bounds.
abstract::ShapePtr PowInferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
  MS_EXCEPTION_IF_NULL(primitive);
  auto prim_name = primitive->name();
  auto x1_shape_map = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kInputIndex0]->BuildShape());
  auto x2_shape_map = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kInputIndex1]->BuildShape());
  auto x1_shape = x1_shape_map[kShape];
  auto x2_shape = x2_shape_map[kShape];
  auto x1_min_shape = x1_shape_map[kMinShape];
  auto x1_max_shape = x1_shape_map[kMaxShape];
  auto x2_min_shape = x2_shape_map[kMinShape];
  auto x2_max_shape = x2_shape_map[kMaxShape];
  // Fast path: identical shapes need no broadcast computation.
  if (x1_shape == x2_shape) {
    return std::make_shared<abstract::Shape>(x1_shape, x1_min_shape, x1_max_shape);
  }
  // Broadcast the static shape and both dynamic-shape bounds independently.
  auto broadcast_shape = CalBroadCastShape(x1_shape, x2_shape, prim_name);
  auto min_broadcast_shape = CalBroadCastShape(x1_min_shape, x2_min_shape, prim_name);
  auto max_broadcast_shape = CalBroadCastShape(x1_max_shape, x2_max_shape, prim_name);
  return std::make_shared<abstract::Shape>(broadcast_shape, min_broadcast_shape, max_broadcast_shape);
}
// Infers the output dtype of Pow. Complex inputs are resolved through an explicit
// promotion table; all other inputs must share one of the common valid tensor types.
TypePtr PowInferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
  for (const auto &item : input_args) {
    MS_EXCEPTION_IF_NULL(item);
  }
  TypePtr x1_type = input_args[kInputIndex0]->BuildType();
  TypePtr x2_type = input_args[kInputIndex1]->BuildType();
  // NOTE(review): set/map of TypePtr compare shared_ptr identity, so this relies on
  // kComplex64/kComplex128 being process-wide singletons returned by BuildType() — confirm.
  const std::set<TypePtr> complex_valid_types = {kComplex64, kComplex128};
  if (complex_valid_types.count(x1_type) != 0 || complex_valid_types.count(x2_type) != 0) {
    std::map<std::pair<TypePtr, TypePtr>, TypePtr> type_infer_dict;
    (void)type_infer_dict.emplace(std::make_pair(kComplex64, kComplex64), kComplex64);
    (void)type_infer_dict.emplace(std::make_pair(kComplex128, kComplex128), kComplex128);
    (void)type_infer_dict.emplace(std::make_pair(kComplex128, kComplex64), kComplex128);
    (void)type_infer_dict.emplace(std::make_pair(kComplex64, kComplex128), kComplex128);
    auto iter = type_infer_dict.find(std::make_pair(x1_type, x2_type));
    if (iter == type_infer_dict.end()) {
      // NOTE(review): the message advertises complex/float mixes that the table above
      // does not actually contain — verify the intended supported pairs.
      MS_EXCEPTION(TypeError) << "Complex math binary op expecting Tensor [complex64, complex64],"
                              << "[complex64, float32], [float32, complex64], [complex128, complex128],"
                              << "[complex128, float64], [float64, complex128],"
                              << "but got : " << x1_type->meta_type() << "," << x2_type->meta_type();
    }
    // Bug fix: the return was previously inside the error branch, directly after
    // MS_EXCEPTION, making it unreachable — valid complex pairs then fell through to
    // CheckTensorTypeSame below and were rejected. Return the promoted type here.
    return iter->second;
  }
  std::map<std::string, TypePtr> types;
  (void)types.emplace("x1", x1_type);
  (void)types.emplace("x2", x2_type);
  // Both inputs must agree and belong to the common valid tensor types.
  (void)CheckAndConvertUtils::CheckTensorTypeSame(types, common_valid_types, prim->name());
  return x1_type;
}
} // namespace
// Top-level infer entry for Pow: validates the argument count, then combines the
// dtype and shape inference results into a single abstract value.
AbstractBasePtr PowInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
                         const std::vector<AbstractBasePtr> &input_args) {
  MS_EXCEPTION_IF_NULL(primitive);
  auto prim_name = primitive->name();
  const int64_t kInputNum = 2;  // Pow is strictly binary: x1 ** x2.
  (void)CheckAndConvertUtils::CheckInputArgs(input_args, kEqual, kInputNum, prim_name);
  auto infer_type = PowInferType(primitive, input_args);
  auto infer_shape = PowInferShape(primitive, input_args);
  return abstract::MakeAbstract(infer_shape, infer_type);
}
// Register the Pow PrimitiveC constructor and bind PowInfer as its shape/type
// inference implementation (no value-infer function, eager-infer enabled).
REGISTER_PRIMITIVE_C(kNamePow, Pow);
REGISTER_PRIMITIVE_EVAL_IMPL(Pow, prim::kPrimPow, PowInfer, nullptr, true);
} // namespace ops
} // namespace mindspore

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -42,6 +42,7 @@ class MS_CORE_API Pow : public PrimitiveC {
};
AbstractBasePtr PowInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args);
using kPrimPowPtr = std::shared_ptr<Pow>;
} // namespace ops
} // namespace mindspore

View File

@ -233,6 +233,7 @@ from .logsoftmax import _logsoftmax_tbe
from .logsoftmax_ds import _logsoftmax_ds_tbe
from .select import _select_tbe
from .pow import _pow_tbe
from .pow_ds import _pow_ds_tbe
from .maximum import _maximum_tbe
from .minimum import _minimum_tbe
from .minimum_ds import _minimum_ds_tbe

View File

@ -0,0 +1,42 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Pow op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
# TBE op-info for the dynamic-shape ("ds") variant of Pow: an elementwise,
# broadcastable binary kernel backed by pow.so / kernel "pow".
# dynamic_shape(True) is what distinguishes this registration from the static one.
pow_ds_op_info = TBERegOp("Pow") \
    .fusion_type("ELEMWISE") \
    .async_flag(False) \
    .binfile_name("pow.so") \
    .compute_cost(10) \
    .kernel_name("pow") \
    .partial_flag(True) \
    .dynamic_shape(True) \
    .input(0, "x1", False, "required", "all") \
    .input(1, "x2", False, "required", "all") \
    .output(0, "y", False, "required", "all") \
    .op_pattern("broadcast") \
    .dtype_format(DataType.I8_None, DataType.I8_None, DataType.I8_None) \
    .dtype_format(DataType.U8_None, DataType.U8_None, DataType.U8_None) \
    .dtype_format(DataType.I32_None, DataType.I32_None, DataType.I32_None) \
    .dtype_format(DataType.F16_None, DataType.F16_None, DataType.F16_None) \
    .dtype_format(DataType.F32_None, DataType.F32_None, DataType.F32_None) \
    .get_op_info()


@op_info_register(pow_ds_op_info)
def _pow_ds_tbe():
    """Pow TBE register.

    The body is intentionally empty: the @op_info_register decorator performs
    the actual registration of pow_ds_op_info with MindSpore's operator registry.
    """
    return

View File

@ -2065,7 +2065,7 @@ class Reciprocal(PrimitiveWithInfer):
return None
class Pow(_MathBinaryOp):
class Pow(Primitive):
"""
Computes a tensor to the power of the second input.
@ -2113,6 +2113,13 @@ class Pow(_MathBinaryOp):
[ 1. 16. 64.]
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_attr_register
def __init__(self):
"""Initialize _BinaryOp"""
self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['y'])
def infer_value(self, x, power):
if x is not None and power is not None:
x = x.asnumpy()