From 33e93e296c38d9cdfc95090e7feed6a38921c236 Mon Sep 17 00:00:00 2001
From: wanglixin
Date: Fri, 23 Jul 2021 16:33:16 +0800
Subject: [PATCH] [fix][assistant][I3PYD0] fix bug in the Ascend operator
 HSigmoid and HSigmoidGrad

---
 .gitignore                               |  3 -
 mindspore/core/ops/grad/hsigmoid_grad.cc |  7 ++-
 mindspore/core/ops/grad/hsigmoid_grad.h  |  1 +
 mindspore/core/ops/hsigmoid.cc           |  9 +--
 mindspore/core/ops/hsigmoid.h            |  5 ++
 mindspore/nn/layer/activation.py         |  8 +--
 mindspore/ops/_op_impl/tbe/hsigmoid.py   | 14 +++--
 mindspore/ops/operations/_grad_ops.py    | 27 +-------
 mindspore/ops/operations/nn_ops.py       | 80 ++++++++++++------------
 tests/ut/python/ops/test_ops.py          |  1 +
 10 files changed, 71 insertions(+), 84 deletions(-)

diff --git a/.gitignore b/.gitignore
index 43b0d1e5a99..9a0fc864723 100644
--- a/.gitignore
+++ b/.gitignore
@@ -104,9 +104,6 @@ mindspore/.commit_id
 # lite test file
 mindspore/lite/test/do_test/
 
-HSigmoid_Test/
-.vs
-
 # lite opencl compile file
 *.cl.inc
 
diff --git a/mindspore/core/ops/grad/hsigmoid_grad.cc b/mindspore/core/ops/grad/hsigmoid_grad.cc
index c129d61988b..657e99a1037 100644
--- a/mindspore/core/ops/grad/hsigmoid_grad.cc
+++ b/mindspore/core/ops/grad/hsigmoid_grad.cc
@@ -32,6 +32,10 @@ namespace ops {
 namespace {
 abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
   MS_EXCEPTION_IF_NULL(primitive);
+  CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 2, primitive->name());
+  for (const auto &item : input_args) {
+    MS_EXCEPTION_IF_NULL(item);
+  }
   auto prim_name = primitive->name();
   auto grads_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape];
   auto input_x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[1]->BuildShape())[kShape];
@@ -45,7 +49,7 @@ TypePtr InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &
   for (const auto &item : input_args) {
     MS_EXCEPTION_IF_NULL(item);
   }
-  const std::set<TypePtr> valid_types = {kFloat16, kFloat32};
+  const std::set<TypePtr> valid_types = {kInt8, kInt16, kInt32, kInt64, kFloat16, kFloat32};
   std::map<std::string, TypePtr> types;
   types.emplace("grads", input_args[0]->BuildType());
   types.emplace("input_x", input_args[1]->BuildType());
@@ -58,6 +62,5 @@ AbstractBasePtr HSigmoidGradInfer(const abstract::AnalysisEnginePtr &, const Pri
   return std::make_shared<abstract::AbstractTensor>(InferType(primitive, input_args),
                                                     InferShape(primitive, input_args)->shape());
 }
-REGISTER_PRIMITIVE_EVAL_IMPL(HSigmoidGrad, prim::kPrimHSigmoidGrad, HSigmoidGradInfer, nullptr, true);
 }  // namespace ops
 }  // namespace mindspore
diff --git a/mindspore/core/ops/grad/hsigmoid_grad.h b/mindspore/core/ops/grad/hsigmoid_grad.h
index 0aebfa4e4eb..eb1ec65a41e 100644
--- a/mindspore/core/ops/grad/hsigmoid_grad.h
+++ b/mindspore/core/ops/grad/hsigmoid_grad.h
@@ -34,6 +34,7 @@ class HSigmoidGrad : public PrimitiveC {
   ~HSigmoidGrad() = default;
   MS_DECLARE_PARENT(HSigmoidGrad, PrimitiveC);
 };
+
 AbstractBasePtr HSigmoidGradInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
                                   const std::vector<AbstractBasePtr> &input_args);
 using PrimHSigmoidGradPtr = std::shared_ptr<HSigmoidGrad>;
diff --git a/mindspore/core/ops/hsigmoid.cc b/mindspore/core/ops/hsigmoid.cc
index 83818886a7f..4b8ffa5c63d 100644
--- a/mindspore/core/ops/hsigmoid.cc
+++ b/mindspore/core/ops/hsigmoid.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -29,24 +29,21 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A
   auto in_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->GetShapeTrack())[kShape];
   return std::make_shared<abstract::Shape>(in_shape);
 }
+
 TypePtr InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
   if (std::any_of(input_args.begin(), input_args.end(), [](const AbstractBasePtr &a) { return a == nullptr; })) {
     MS_LOG(EXCEPTION) << "nullptr";
   }
   std::map<std::string, TypePtr> types;
-  const std::set<TypePtr> valid_types = {kFloat16, kFloat32};
+  const std::set<TypePtr> valid_types = {kInt8, kInt16, kInt32, kInt64, kFloat16, kFloat32};
   types.emplace("input_x", input_args[0]->BuildType());
   return CheckAndConvertUtils::CheckTensorTypeSame(types, valid_types, prim->name());
 }
-
 }  // namespace
 AbstractBasePtr HSigmoidInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
                               const std::vector<AbstractBasePtr> &input_args) {
   return std::make_shared<abstract::AbstractTensor>(InferType(primitive, input_args),
                                                     InferShape(primitive, input_args)->shape());
 }
-
-REGISTER_PRIMITIVE_EVAL_IMPL(HSigmoid, prim::kPrimHSigmoid, HSigmoidInfer, nullptr, true);
-
 }  // namespace ops
 }  // namespace mindspore
diff --git a/mindspore/core/ops/hsigmoid.h b/mindspore/core/ops/hsigmoid.h
index f094e168e3b..e6615c6c934 100644
--- a/mindspore/core/ops/hsigmoid.h
+++ b/mindspore/core/ops/hsigmoid.h
@@ -13,6 +13,9 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+#ifndef MINDSPORE_CORE_OPS_HSIGMOID_H_
+#define MINDSPORE_CORE_OPS_HSIGMOID_H_
 
 #include <vector>
 #include <memory>
@@ -36,3 +39,5 @@ AbstractBasePtr HSigmoidInfer(const abstract::AnalysisEnginePtr &, const Primiti
 using PrimHSigmoidPtr = std::shared_ptr<HSigmoid>;
 }  // namespace ops
 }  // namespace mindspore
+
+#endif  // MINDSPORE_CORE_OPS_HSIGMOID_H_
diff --git a/mindspore/nn/layer/activation.py b/mindspore/nn/layer/activation.py
index fbd28740fe5..b947e5eb873 100644
--- a/mindspore/nn/layer/activation.py
+++ b/mindspore/nn/layer/activation.py
@@ -675,14 +675,14 @@ class HSigmoid(Cell):
     where :math:`x_{i}` is the :math:`i`-th slice in the given dimension of the input Tensor.
 
     Inputs:
-        - **x** (Tensor) - The input of HSigmoid, data type must be float16 or float32.
-          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
+        - **input_x** (Tensor) - The input of HSigmoid. The shape is :math:`(N,*)` where :math:`*` means, any number of
+          additional dimensions.
 
     Outputs:
-        Tensor, with the same type and shape as the `x`.
+        Tensor, with the same type and shape as the `input_x`.
 
     Raises:
-        TypeError: If dtype of `x` is neither float16 nor float32.
+        TypeError: If `input_x` is not a Tensor.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
diff --git a/mindspore/ops/_op_impl/tbe/hsigmoid.py b/mindspore/ops/_op_impl/tbe/hsigmoid.py
index f5c8d44a10d..efa64bfa35d 100644
--- a/mindspore/ops/_op_impl/tbe/hsigmoid.py
+++ b/mindspore/ops/_op_impl/tbe/hsigmoid.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2021 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -27,9 +27,15 @@ hsigmoid_op_info = TBERegOp("HSigmoid") \
     .attr("beta", "optional", "float", "all", "0.5") \
     .input(0, "input_x", False, "required", "all") \
     .output(0, "output_y", False, "required", "all") \
-    .op_pattern("formatAgnostic") \
-    .dtype_format(DataType.F16_None, DataType.F16_None) \
-    .dtype_format(DataType.F32_None, DataType.F32_None) \
+    .dtype_format(DataType.F16_Default, DataType.F16_Default) \
+    .dtype_format(DataType.F16_5HD, DataType.F16_5HD) \
+    .dtype_format(DataType.F16_NHWC, DataType.F16_NHWC) \
+    .dtype_format(DataType.F32_Default, DataType.F32_Default) \
+    .dtype_format(DataType.F32_5HD, DataType.F32_5HD) \
+    .dtype_format(DataType.F32_NHWC, DataType.F32_NHWC) \
+    .dtype_format(DataType.I32_Default, DataType.I32_Default) \
+    .dtype_format(DataType.I32_5HD, DataType.I32_5HD) \
+    .dtype_format(DataType.I32_NHWC, DataType.I32_NHWC) \
     .get_op_info()
 
 
diff --git a/mindspore/ops/operations/_grad_ops.py b/mindspore/ops/operations/_grad_ops.py
index 746cd7c9cf2..3da2deb2403 100644
--- a/mindspore/ops/operations/_grad_ops.py
+++ b/mindspore/ops/operations/_grad_ops.py
@@ -1740,31 +1740,8 @@ class HSwishGrad(_ActivationGrad):
     """Gets the gradient of HSwish operation."""
 
 
-class HSigmoidGrad(Primitive):
-    """
-    Gets the gradient of HSigmoid operation.
-
-    Inputs:
-        - **grads** (Tensor) - The gradients of loss to output of HSigmoid function. Currently
-          grads data type only support float16 and float32.
-        - **input_x** (Tensor) - Must be the input `input_x` of the forward operator HSigmoid.
-          Currentlyinput_x data type only support float16 and float32.
-
-    Outputs:
-        - **output** (Tensor) - With the same shape and data type as `input_x`.
-
-    Raises:
-        TypeError: If shape of `grads` is not the same as `input_x`.
-        TypeError: If dtype of `grads` is not the same as `input_x`.
-        TypeError: If dtype of `grads` or `input_x` is neither float16 nor float32.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        self.init_prim_io_names(inputs=['grads', 'input_x'], outputs=['output'])
+class HSigmoidGrad(_ActivationGrad):
+    """Gets the gradient of HSigmoid operation."""
 
 
 class SigmoidCrossEntropyWithLogitsGrad(PrimitiveWithInfer):
diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py
index cb469ae76f0..1dc6985471c 100755
--- a/mindspore/ops/operations/nn_ops.py
+++ b/mindspore/ops/operations/nn_ops.py
@@ -794,6 +794,46 @@ class Sigmoid(PrimitiveWithInfer):
         return input_x
 
 
+class HSigmoid(Primitive):
+    r"""
+    Hard sigmoid activation function.
+
+    Applies hard sigmoid activation element-wise. The input is a Tensor with any valid shape.
+
+    Hard sigmoid is defined as:
+
+    .. math::
+
+        \text{hsigmoid}(x_{i}) = max(0, min(1, \frac{x_{i} + 3}{6})),
+
+    where :math:`x_i` is an element of the input Tensor.
+
+    Inputs:
+        - **input_x** (Tensor) - Tensor of shape :math:`(N, *)`, where :math:`*` means, any number of
+          additional dimensions.
+
+    Outputs:
+        Tensor, with the same type and shape as the `input_x`.
+
+    Raises:
+        TypeError: If `input_x` is not a Tensor.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> hsigmoid = ops.HSigmoid()
+        >>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mstype.float16)
+        >>> result = hsigmoid(input_x)
+        >>> print(result)
+        [0.3333 0.1666 0.5 0.8335 0.6665]
+    """
+    @prim_attr_register
+    def __init__(self):
+        """Initialize HSigmoid."""
+        self.init_prim_io_names(inputs=['input_x'], outputs=['output'])
+
+
 class Tanh(PrimitiveWithInfer):
     r"""
     Tanh activation function.
@@ -8667,43 +8707,3 @@ class SoftShrink(Primitive):
         """Initialize SoftShrink"""
         validator.check_value_type("lambd", lambd, [float], self.name)
         validator.check_number("lambd", lambd, 0, Rel.GE, self.name)
-
-class HSigmoid(Primitive):
-    r"""
-    Hard sigmoid activation function.
-
-    Applies hard sigmoid activation element-wise. The input is a Tensor with any valid shape.
-
-    Hard sigmoid is defined as:
-
-    .. math::
-
-        \text{hsigmoid}(x_{i}) = max(0, min(1, \frac{x_{i} + 3}{6})),
-
-    where :math:`x_i` is an element of the input Tensor.
-
-    Inputs:
-        - **input_x** (Tensor) - Tensor of shape :math:`(N, *)`, where :math:`*` means, any number of
-          additional dimensions, with float16 or float32 data type.
-
-    Outputs:
-        Tensor, with the same type and shape as the `input_x`.
-
-    Raises:
-        TypeError: If `input_x` is not a Tensor.
-        TypeError: If dtype of `input_x` is neither float16 nor float32.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> hsigmoid = ops.HSigmoid()
-        >>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mstype.float16)
-        >>> result = hsigmoid(input_x)
-        >>> print(result)
-        [0.3333 0.1666 0.5 0.8335 0.6665]
-    """
-    @prim_attr_register
-    def __init__(self):
-        """Initialize HSigmoid."""
-        self.init_prim_io_names(inputs=['input_x'], outputs=['output'])
diff --git a/tests/ut/python/ops/test_ops.py b/tests/ut/python/ops/test_ops.py
index 5820433ea9f..9ba497d8f90 100755
--- a/tests/ut/python/ops/test_ops.py
+++ b/tests/ut/python/ops/test_ops.py
@@ -2154,6 +2154,7 @@ test_case_nn_ops = [
         'desc_inputs': [Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]), mstype.float16),
                         Tensor(np.array([[-3, -2, 0], [1, 2, 4]]), mstype.float16)],
         'desc_bprop': [],
+        'skip': ['backward']}),
     ('HSigmoidGrad', {
         'block': G.HSigmoidGrad(),
         'desc_inputs': [Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]), mstype.float16),