forked from mindspore-Ecosystem/mindspore
[feat] [assistant] [I3T927] add new math operator Lerp
This commit is contained in:
parent ed6bc4d113
commit bf23333b27
@@ -505,6 +505,7 @@ inline const PrimitivePtr kPrimIdentityMath = std::make_shared<Primitive>("IdentityMath");
 inline const PrimitivePtr kPrimIsNan = std::make_shared<Primitive>("IsNan");
 inline const PrimitivePtr kPrimIsInf = std::make_shared<Primitive>("IsInf");
 inline const PrimitivePtr kPrimIsFinite = std::make_shared<Primitive>("IsFinite");
+inline const PrimitivePtr kPrimLerp = std::make_shared<Primitive>("Lerp");
 inline const PrimitivePtr kPrimSquareSumAll = std::make_shared<Primitive>("SquareSumAll");

 // Statements
@@ -0,0 +1,75 @@
/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <map>
#include <string>
#include <algorithm>
#include "ops/lerp.h"
#include "ops/op_utils.h"
#include "utils/check_convert_utils.h"
#include "abstract/primitive_infer_map.h"

namespace mindspore {
namespace ops {
namespace {
abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
  MS_EXCEPTION_IF_NULL(primitive);
  auto op_name = primitive->name();
  CheckAndConvertUtils::CheckInteger("input numbers", input_args.size(), kEqual, 3, op_name);
  for (const auto &item : input_args) {
    MS_EXCEPTION_IF_NULL(item);
  }
  auto start_shape_map = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape());
  auto start_shape = start_shape_map[kShape];
  auto end_shape_map = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[1]->BuildShape());
  auto end_shape = end_shape_map[kShape];
  auto weight_shape_map = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[2]->BuildShape());
  auto weight_shape = weight_shape_map[kShape];
  auto broadcast_shape = CalBroadCastShape(start_shape, end_shape, op_name, "start", "end");
  if (input_args[2]->isa<abstract::AbstractTensor>()) {
    CalBroadCastShape(start_shape, weight_shape, op_name, "start", "weight");
    CalBroadCastShape(end_shape, weight_shape, op_name, "end", "weight");
    broadcast_shape = CalBroadCastShape(broadcast_shape, weight_shape, op_name);
  }
  return std::make_shared<abstract::Shape>(broadcast_shape);
}

TypePtr InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
  for (const auto &item : input_args) {
    MS_EXCEPTION_IF_NULL(item);
  }
  auto op_name = prim->name();
  CheckAndConvertUtils::CheckInteger("input numbers", input_args.size(), kEqual, 3, op_name);
  std::map<std::string, TypePtr> types;
  types.emplace("start", input_args[0]->BuildType());
  types.emplace("end", input_args[1]->BuildType());
  if (input_args[2]->isa<abstract::AbstractTensor>()) {
    types.emplace("weight", input_args[2]->BuildType());
  } else {
    CheckAndConvertUtils::CheckSubClass("weight", input_args[2]->BuildType(), {kFloat}, op_name);
  }
  return CheckAndConvertUtils::CheckTensorTypeSame(types, {kFloat16, kFloat32}, op_name);
}
}  // namespace

AbstractBasePtr LerpInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
                          const std::vector<AbstractBasePtr> &input_args) {
  return std::make_shared<abstract::AbstractTensor>(InferType(primitive, input_args),
                                                    InferShape(primitive, input_args)->shape());
}
REGISTER_PRIMITIVE_EVAL_IMPL(Lerp, prim::kPrimLerp, LerpInfer, nullptr, true);
}  // namespace ops
}  // namespace mindspore
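As a cross-check on the two infer routines above: InferShape requires `start` and `end` (and `weight`, when it is a tensor) to be mutually broadcastable and returns the broadcast shape, while InferType restricts the inputs to float16/float32 with matching dtypes. A minimal NumPy sketch of that contract (`lerp_reference` is a hypothetical helper for illustration, not part of this commit):

import numpy as np

def lerp_reference(start, end, weight):
    # Same dtype constraint as InferType: float16/float32, start and end must match.
    assert start.dtype == end.dtype and start.dtype in (np.float16, np.float32)
    # Same shape constraint as InferShape: np.broadcast raises if the three
    # arguments (weight may be a Python float or an array) cannot broadcast.
    np.broadcast(start, end, weight)
    return (start + weight * (end - start)).astype(start.dtype)

print(lerp_reference(np.array([1., 2., 3., 4.], np.float32),
                     np.array([10., 10., 10., 10.], np.float32), 0.5))
# [5.5 6.  6.5 7. ]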
@@ -0,0 +1,42 @@
/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_CORE_OPS_LERP_H_
#define MINDSPORE_CORE_OPS_LERP_H_
#include <vector>
#include <memory>

#include "ops/primitive_c.h"
#include "ops/op_utils.h"
#include "abstract/abstract_value.h"
#include "utils/check_convert_utils.h"

namespace mindspore {
namespace ops {
constexpr auto kNameLerp = "Lerp";
class Lerp : public PrimitiveC {
 public:
  Lerp() : PrimitiveC(kNameLerp) { InitIOName({"start", "end", "weight"}, {"output"}); }
  ~Lerp() = default;
  MS_DECLARE_PARENT(Lerp, PrimitiveC);
};

AbstractBasePtr LerpInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
                          const std::vector<AbstractBasePtr> &input_args);
using PrimLerpPtr = std::shared_ptr<Lerp>;
} // namespace ops
} // namespace mindspore
#endif // MINDSPORE_CORE_OPS_LERP_H_
@@ -26,7 +26,8 @@
 namespace mindspore {
 namespace ops {
 std::vector<int64_t> CalBroadCastShape(std::vector<int64_t> x_shape, std::vector<int64_t> y_shape,
-                                       const std::string &op_name) {
+                                       const std::string &op_name, const std::string &op_x_name,
+                                       const std::string &op_y_name) {
   if (x_shape == y_shape) {
     return x_shape;
   }
@@ -47,7 +48,8 @@ std::vector<int64_t> CalBroadCastShape(std::vector<int64_t> x_shape, std::vector<int64_t> y_shape,
     } else if (x_shape[x_length + i] == y_shape[y_length + i]) {
       broadcast_shape.push_back(x_shape[x_length + i]);
     } else {
-      MS_EXCEPTION(ValueError) << "For op " << op_name << ", the two input can not broadcast";
+      MS_EXCEPTION(ValueError) << "For op " << op_name << ", the two input '" << op_x_name << "' and '" << op_y_name
+                               << "' can not broadcast";
     }
   }
   return broadcast_shape;
@@ -260,6 +260,9 @@ const std::set<TypePtr> all_types = {
   kUInt16, kUInt32, kUInt64, kFloat, kFloat16, kFloat32, kFloat64, kComplex64,
 };

+std::vector<int64_t> CalBroadCastShape(std::vector<int64_t> x_shape, std::vector<int64_t> y_shape,
+                                       const std::string &op_name, const std::string &op_x_name = "input1",
+                                       const std::string &op_y_name = "input2");
 abstract::ShapePtr BroadCastInferShape(const std::string &op_name, const std::vector<AbstractBasePtr> &input_args);
 } // namespace mindspore::ops
 #endif // MINDSPORE_CORE_OPS_OP_UTILS_H
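The hunks above extend CalBroadCastShape with caller-supplied operand names so the error message can identify which pair of inputs failed. For readers unfamiliar with the rule it implements, here is a rough Python equivalent, under the assumption that the C++ loop follows NumPy-style right-aligned broadcasting (this function is an illustrative sketch, not the shipped implementation):

def cal_broadcast_shape(x_shape, y_shape, op_name, op_x_name="input1", op_y_name="input2"):
    """Hypothetical Python rendering of CalBroadCastShape (assumes NumPy rules)."""
    if x_shape == y_shape:
        return list(x_shape)
    n = max(len(x_shape), len(y_shape))
    xs = [1] * (n - len(x_shape)) + list(x_shape)  # left-pad the shorter shape with 1s
    ys = [1] * (n - len(y_shape)) + list(y_shape)
    out = []
    for dx, dy in zip(xs, ys):
        if dx == dy or dy == 1:
            out.append(dx)
        elif dx == 1:
            out.append(dy)
        else:
            raise ValueError(f"For op {op_name}, the two input '{op_x_name}' and '{op_y_name}' can not broadcast")
    return out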
@@ -15,6 +15,7 @@

 """grad experimental impl."""
 from .._grad.grad_base import get_bprop_fn
+from . import grad_math_ops
 from . import grad_array_ops
 from . import grad_inner_ops
 from . import grad_nn_ops
@@ -0,0 +1,47 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

"""Define the grad rules of math related operations."""

from mindspore.common import dtype as mstype
from .. import functional as F
from .. import operations as P
from .._grad.grad_base import bprop_getters
from .._grad.grad_math_ops import binop_grad_common
from ..composite.multitype_ops.zeros_like_impl import zeros_like

@bprop_getters.register(P.Lerp)
def get_bprop_index_lerp(self):
    """Generate bprop for Lerp"""
    mul_op = P.Mul()
    sub_op = P.Sub()
    is_instance_op = P.IsInstance()

    def bprop(start, end, weight, out, dout):
        dout = F.cast(dout, mstype.float32)
        dstart = mul_op(dout, 1 - weight)
        dend = mul_op(dout, weight)
        dweight = mul_op(dout, sub_op(end, start))
        dstart, dend = binop_grad_common(start, end, dstart, dend)
        if is_instance_op(weight, mstype.number) is True:
            dweight = zeros_like(weight)
        else:
            _, dweight = binop_grad_common(start, weight, dstart, dweight)
            dweight = F.cast(dweight, F.dtype(weight))
        dstart = F.cast(dstart, F.dtype(start))
        dend = F.cast(dend, F.dtype(end))
        return dstart, dend, dweight

    return bprop
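The bprop above follows directly from the forward formula output = start + weight * (end - start): the partial derivatives are d(output)/d(start) = 1 - weight, d(output)/d(end) = weight, and d(output)/d(weight) = end - start; each is multiplied by the incoming gradient dout, then binop_grad_common reduces over any broadcast axes (and a scalar weight gets a zero gradient). A quick NumPy sanity check of those factors, for the element-wise case with no broadcast reduction (illustrative only):

import numpy as np

start = np.float32([1., 2.])
end = np.float32([10., 10.])
weight = np.float32(0.5)
dout = np.float32([1., 1.])      # incoming gradient

dstart = dout * (1 - weight)     # d(output)/d(start) = 1 - weight
dend = dout * weight             # d(output)/d(end) = weight
dweight = dout * (end - start)   # d(output)/d(weight) = end - start
print(dstart, dend, dweight)     # [0.5 0.5] [0.5 0.5] [9. 8.]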
@@ -142,6 +142,7 @@ from .one_hot import _one_hot_tbe
 from .one_hot_ds import _one_hot_ds_tbe
 from .equal import _equal_tbe
 from .equal_ds import _equal_ds_tbe
+from .lerp import _lerp_tbe
 from .less import _less_tbe
 from .less_equal import _less_equal_tbe
 from .logical_and import _logical_and_tbe
@@ -0,0 +1,38 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

"""Lerp op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

lerp_op_info = TBERegOp("Lerp") \
    .fusion_type("OPAQUE") \
    .async_flag(False) \
    .binfile_name("lerp.so") \
    .compute_cost(10) \
    .kernel_name("lerp") \
    .partial_flag(True) \
    .input(0, "start", False, "required", "all") \
    .input(1, "end", False, "required", "all") \
    .input(2, "weight", False, "required", "all") \
    .output(0, "output", False, "required", "all") \
    .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \
    .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \
    .get_op_info()


@op_info_register(lerp_op_info)
def _lerp_tbe():
    """Lerp TBE register"""
    return
@@ -51,7 +51,7 @@ from .math_ops import (Abs, ACos, Asin, Asinh, AddN, AccumulateNV2, AssignAdd, A
                        BitwiseXor, Inv, Invert, ApproximateEqual, InplaceAdd, InplaceSub,
                        ReduceMax, ReduceMin, ReduceMean, ReduceSum, ReduceAll, ReduceProd, CumProd, ReduceAny,
                        Cos, Div, DivNoNan, Equal, EqualCount, Exp, Expm1, Erf, Erfc, Floor, FloorDiv, FloorMod, Ceil,
-                       Acosh, Greater, GreaterEqual, Less, LessEqual, Log, Log1p, LogicalAnd, Mod,
+                       Acosh, Greater, GreaterEqual, Lerp, Less, LessEqual, Log, Log1p, LogicalAnd, Mod,
                        LogicalNot, LogicalOr, MatMul, Maximum, MulNoNan,
                        Minimum, Mul, Neg, NMSWithMask, NotEqual,
                        NPUAllocFloatStatus, NPUClearFloatStatus, LinSpace,
@@ -218,6 +218,7 @@ __all__ = [
     'ReduceMean',
     'LayerNorm',
     'Rank',
+    'Lerp',
     'Less',
     'LessEqual',
     'RealDiv',
@@ -3529,6 +3529,53 @@ class GreaterEqual(_LogicBinaryOp):
         return None


+class Lerp(Primitive):
+    """
+    Does a linear interpolation of two tensors `start` and `end` based on a float or tensor `weight`.
+
+    If `weight` is a tensor, the shapes of the three inputs need to be broadcastable;
+    if `weight` is a float, the shapes of `start` and `end` need to be broadcastable.
+
+    .. math::
+
+        output_{i} = start_{i} + weight_{i} * (end_{i} - start_{i})
+
+    Inputs:
+        - **start** (Tensor) - The tensor with the starting points. Data type must be float16 or float32.
+        - **end** (Tensor) - The tensor with the ending points. Data type must be float16 or float32.
+        - **weight** (Union[float, Tensor]) - The weight for the interpolation formula. Must be a float
+          or a tensor with float16 or float32 data type.
+
+    Outputs:
+        Tensor, has the same type as `start`; its shape is the broadcast shape of the inputs.
+
+    Raises:
+        TypeError: If `start` or `end` is not a tensor.
+        TypeError: If `weight` is neither float nor tensor.
+        TypeError: If dtype of `start` or `end` is neither float16 nor float32.
+        TypeError: If dtype of `weight` is neither float16 nor float32 when it is a tensor.
+        TypeError: If `start` and `end` have different data types.
+        TypeError: If `start`, `end` and `weight` have different data types when `weight` is a tensor.
+        ValueError: If `end` could not be broadcast to a tensor with shape of `start`.
+        ValueError: If `weight` could not be broadcast to tensors with shapes of `start` and `end` when it is a tensor.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> start = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
+        >>> end = Tensor(np.array([10., 10., 10., 10.]), mindspore.float32)
+        >>> lerp = ops.Lerp()
+        >>> output = lerp(start, end, 0.5)
+        >>> print(output)
+        [5.5 6. 6.5 7. ]
+    """
+
+    @prim_attr_register
+    def __init__(self):
+        self.init_prim_io_names(inputs=['start', 'end', 'weight'], outputs=['output'])
+
+
 class Less(_LogicBinaryOp):
     r"""
     Computes the boolean value of :math:`x < y` element-wise.
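The docstring example only exercises a float `weight`. An illustrative tensor-weight call, added here for clarity (the expected values follow from the formula above; exact print formatting may differ, and this snippet is not part of the committed docstring):

>>> start = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
>>> end = Tensor(np.array([10., 10., 10., 10.]), mindspore.float32)
>>> weight = Tensor(np.array([0., 0.25, 0.5, 1.]), mindspore.float32)
>>> lerp = ops.Lerp()
>>> print(lerp(start, end, weight))
[ 1.  4.  6.5 10. ]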
@@ -1239,6 +1239,12 @@ test_case_math_ops = [
         'desc_inputs': [Tensor(np.random.rand(24000, 4).astype(np.bool_))],
         'desc_bprop': [[256, 4], [256, 4]],
         'skip': ['backward']}),
+    ('Lerp', {
+        'block': P.Lerp(),
+        'desc_inputs': [Tensor(np.array([1., 2., 3., 4.]).astype(np.float32)),
+                        Tensor(np.array([10., 10., 10., 10.]).astype(np.float32)),
+                        Tensor(0.5, mstype.float32)],
+        'desc_bprop': [Tensor(np.array([1., 2., 3., 4.]).astype(np.float32))]}),
     ('LessEqual', {
         'block': P.LessEqual(),
         'desc_inputs': [Tensor(np.random.rand(4).astype(np.float16)),