[feat][assistant][I3PYD4] add new data operators HShrink and HShrinkGrad

This commit is contained in:
zhangjie 2021-06-11 20:53:04 +08:00 committed by danansheng
parent accb87606a
commit b04036e13c
8 changed files with 193 additions and 0 deletions

View File

@@ -376,6 +376,7 @@ inline const PrimitivePtr kFusedMulAdd = std::make_shared<Primitive>("FusedMulAd
inline const PrimitivePtr kPrimSoftShrink = std::make_shared<Primitive>("SoftShrink");
inline const PrimitivePtr kPrimSoftShrinkGrad = std::make_shared<Primitive>("SoftShrinkGrad");
inline const PrimitivePtr kPrimHShrink = std::make_shared<Primitive>("HShrink");
inline const PrimitivePtr kPrimHShrinkGrad = std::make_shared<Primitive>("HShrinkGrad");
// Comm ops
inline const PrimitivePtr kPrimMirror = std::make_shared<Primitive>("_MirrorOperator");

View File

@@ -0,0 +1,63 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ops/grad/hshrink_grad.h"
#include <string>
#include <algorithm>
#include <map>
#include <memory>
#include <set>
#include <vector>
#include "ops/op_utils.h"
#include "utils/check_convert_utils.h"
#include "abstract/primitive_infer_map.h"
namespace mindspore {
namespace ops {
abstract::ShapePtr HShrinkGradInferShape(const PrimitivePtr &primitive,
                                         const std::vector<AbstractBasePtr> &input_args) {
  MS_EXCEPTION_IF_NULL(primitive);
  auto prim_name = primitive->name();
  auto gradients_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape];
  auto features_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[1]->BuildShape())[kShape];
  CheckAndConvertUtils::Check("gradients_shape", gradients_shape, kEqual, "features_shape", features_shape, prim_name,
                              TypeError);
  return std::make_shared<abstract::Shape>(gradients_shape);
}

TypePtr HShrinkGradInferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
  MS_EXCEPTION_IF_NULL(prim);
  CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 2, prim->name());
  for (const auto &item : input_args) {
    MS_EXCEPTION_IF_NULL(item);
  }
  std::map<std::string, TypePtr> types;
  const std::set<TypePtr> valid_types = {kFloat16, kFloat32};
  types.emplace("gradients", input_args[0]->BuildType());
  types.emplace("features", input_args[1]->BuildType());
  return CheckAndConvertUtils::CheckTensorTypeSame(types, valid_types, prim->name());
}

AbstractBasePtr HShrinkGradInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
                                 const std::vector<AbstractBasePtr> &input_args) {
  return std::make_shared<abstract::AbstractTensor>(HShrinkGradInferType(primitive, input_args),
                                                    HShrinkGradInferShape(primitive, input_args)->shape());
}

REGISTER_PRIMITIVE_EVAL_IMPL(HShrinkGrad, prim::kPrimHShrinkGrad, HShrinkGradInfer, nullptr, true);
}  // namespace ops
}  // namespace mindspore

View File

@@ -0,0 +1,43 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CORE_OPS_HSHRINK_GRAD_H_
#define MINDSPORE_CORE_OPS_HSHRINK_GRAD_H_
#include <map>
#include <vector>
#include <string>
#include <memory>
#include "ops/primitive_c.h"
#include "abstract/abstract_value.h"
#include "utils/check_convert_utils.h"
namespace mindspore {
namespace ops {
constexpr auto kNameHShrinkGrad = "HShrinkGrad";
class HShrinkGrad : public PrimitiveC {
 public:
  HShrinkGrad() : PrimitiveC(kNameHShrinkGrad) { InitIOName({"gradients", "features"}, {"backprops"}); }
  ~HShrinkGrad() = default;
  MS_DECLARE_PARENT(HShrinkGrad, PrimitiveC);
};

AbstractBasePtr HShrinkGradInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
                                 const std::vector<AbstractBasePtr> &input_args);
using PrimHShrinkGradPtr = std::shared_ptr<HShrinkGrad>;
} // namespace ops
} // namespace mindspore
#endif  // MINDSPORE_CORE_OPS_HSHRINK_GRAD_H_

View File

@@ -44,3 +44,15 @@ def get_bprop_softshrink(self):
        return (dx,)

    return bprop


@bprop_getters.register(P.HShrink)
def get_bprop_hshrink(self):
    """Grad definition for `HShrink` operation."""
    grad = G.HShrinkGrad()

    def bprop(features, out, gradients):
        dx = grad(gradients, features)
        return (dx,)

    return bprop
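For orientation: Hardshrink passes its input through unchanged where |x| > lambd and outputs zero elsewhere, so its gradient simply masks the incoming gradient with that same condition. A minimal NumPy sketch of the rule the registered bprop ultimately computes (illustrative only; the helper name is invented here, the default lambd of 0.5 is assumed, and the real computation runs in the TBE kernel registered below):

import numpy as np

def hshrink_grad_reference(gradients, features, lambd=0.5):
    """Keep the incoming gradient where |features| > lambd, zero it elsewhere."""
    mask = np.abs(features) > lambd
    return np.where(mask, gradients, np.zeros_like(gradients))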

View File

@@ -395,3 +395,4 @@ from .soft_shrink_grad import _soft_shrink_grad_tbe
from .hsigmoid_grad import _hsigmoid_grad_tbe
from .hsigmoid import _hsigmoid_tbe
from .hshrink import _hshrink_tbe
from .hshrink_grad import _hshrink_grad_tbe

View File

@@ -0,0 +1,37 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""HShrinkGrad op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

hshrink_grad_op_info = TBERegOp("HShrinkGrad") \
    .fusion_type("OPAQUE") \
    .async_flag(False) \
    .binfile_name("hard_shrink_grad.so") \
    .compute_cost(10) \
    .kernel_name("hard_shrink_grad") \
    .partial_flag(True) \
    .attr("lambda", "optional", "float", "all", "0.5") \
    .input(0, "gradients", False, "required", "all") \
    .input(1, "features", False, "required", "all") \
    .output(0, "backprops", False, "required", "all") \
    .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \
    .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \
    .get_op_info()


@op_info_register(hshrink_grad_op_info)
def _hshrink_grad_tbe():
    """HShrinkGrad TBE register"""
    return

View File

@@ -2212,3 +2212,34 @@ class SoftShrinkGrad(Primitive):
        self.init_prim_io_names(inputs=['input_grad', 'input_x'], outputs=['output'])
        validator.check_value_type("lambd", lambd, [float], self.name)
        validator.check_number("lambd", lambd, 0, Rel.GE, self.name)


class HShrinkGrad(Primitive):
    """
    Computes gradients for the HShrink operation.

    Args:
        lambd (float): The λ value for the Hardshrink formulation. Default: 0.5.

    Inputs:
        - **gradients** (Tensor) - The gradients of the loss with respect to the output of the HShrink function.
          Currently, the gradients data type only supports float16 and float32.
        - **features** (Tensor) - Must be the input `input_x` of the forward operator HShrink.
          Currently, the features data type only supports float16 and float32.

    Outputs:
        backprops - Tensor, with the same shape and data type as `features`.

    Raises:
        TypeError: If `lambd` is not a float.
        TypeError: If the shape of `gradients` is not the same as that of `features`.
        TypeError: If the dtype of `gradients` is not the same as that of `features`.
        TypeError: If the dtype of `gradients` or `features` is neither float16 nor float32.

    Supported Platforms:
        ``Ascend``
    """

    @prim_attr_register
    def __init__(self, lambd=0.5):
        validator.check_value_type("lambd", lambd, [float], self.name)
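A hedged usage sketch of the new primitive in PyNative mode (the `_grad_ops` import path mirrors the test case below; running it assumes an Ascend backend, per the supported platforms above):

import numpy as np
import mindspore as ms
from mindspore import Tensor, context
from mindspore.ops.operations import _grad_ops as G

context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")

grad_op = G.HShrinkGrad()  # default lambd of 0.5
gradients = Tensor(np.array([[0.1, 0.2, 0.3]]), ms.float32)
features = Tensor(np.array([[-0.4, 0.6, 2.0]]), ms.float32)
# Only positions where |features| > 0.5 let the gradient through.
backprops = grad_op(gradients, features)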

View File

@@ -2209,6 +2209,11 @@ test_case_nn_ops = [
        'desc_inputs': [Tensor(np.array([[0.5, 1, 2.0], [0.0533, 0.0776, -2.1233]]), mstype.float32)],
        'desc_bprop': [],
        'skip': ['backward']}),
    ('HShrinkGrad', {
        'block': G.HShrinkGrad(),
        'desc_inputs': [Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]), mstype.float16),
                        Tensor(np.array([[-4, -3, -2], [1, 2, 4]]), mstype.float16)],
        'skip': ['backward']}),
]
test_case_array_ops = [