forked from mindspore-Ecosystem/mindspore

[fix][assistant][I3PYD0] fix bug in the Ascend operators HSigmoid and HSigmoidGrad

parent b6c575689d
commit 33e93e296c
@@ -104,9 +104,6 @@ mindspore/.commit_id
 
 # lite test file
 mindspore/lite/test/do_test/
-HSigmoid_Test/
-.vs
-
 # lite opencl compile file
 *.cl.inc
 
@@ -32,6 +32,10 @@ namespace ops {
 namespace {
 abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
   MS_EXCEPTION_IF_NULL(primitive);
+  CheckAndConvertUtils::CheckInteger("input number", input_args.size(), kEqual, 2, primitive->name());
+  for (const auto &item : input_args) {
+    MS_EXCEPTION_IF_NULL(item);
+  }
   auto prim_name = primitive->name();
   auto grads_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape];
   auto input_x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[1]->BuildShape())[kShape];
@@ -45,7 +49,7 @@ TypePtr InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &
   for (const auto &item : input_args) {
     MS_EXCEPTION_IF_NULL(item);
   }
-  const std::set<TypePtr> valid_types = {kFloat16, kFloat32};
+  const std::set<TypePtr> valid_types = {kInt8, kInt16, kInt32, kInt64, kFloat16, kFloat32};
   std::map<std::string, TypePtr> types;
   types.emplace("grads", input_args[0]->BuildType());
   types.emplace("input_x", input_args[1]->BuildType());
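For orientation, the backward rule these checks guard is the piecewise derivative of hard sigmoid: 1/6 on (-3, 3) and 0 elsewhere. A minimal NumPy sketch of that math (an illustration only, not the Ascend kernel):

```python
import numpy as np

def hsigmoid_grad(grads, input_x):
    """Reference backward pass for hsigmoid(x) = max(0, min(1, (x + 3) / 6)).

    The derivative is 1/6 on (-3, 3) and 0 elsewhere, so the incoming
    gradient is masked and scaled element-wise.
    """
    mask = (input_x > -3.0) & (input_x < 3.0)
    return grads * mask.astype(grads.dtype) / 6.0

# Gradient flows only where the input lies in (-3, 3): here, the middle three.
dx = hsigmoid_grad(np.ones(5, dtype=np.float32),
                   np.array([-4.0, -1.0, 0.0, 1.0, 4.0], dtype=np.float32))
# dx ≈ [0, 1/6, 1/6, 1/6, 0]
```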
@@ -58,6 +62,5 @@ AbstractBasePtr HSigmoidGradInfer(const abstract::AnalysisEnginePtr &, const Pri
   return std::make_shared<abstract::AbstractTensor>(InferType(primitive, input_args),
                                                     InferShape(primitive, input_args)->shape());
 }
-REGISTER_PRIMITIVE_EVAL_IMPL(HSigmoidGrad, prim::kPrimHSigmoidGrad, HSigmoidGradInfer, nullptr, true);
 }  // namespace ops
 }  // namespace mindspore
@@ -34,6 +34,7 @@ class HSigmoidGrad : public PrimitiveC {
   ~HSigmoidGrad() = default;
   MS_DECLARE_PARENT(HSigmoidGrad, PrimitiveC);
 };
+
 AbstractBasePtr HSigmoidGradInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
                                   const std::vector<AbstractBasePtr> &input_args);
 using PrimHSigmoidGradPtr = std::shared_ptr<HSigmoidGrad>;
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -29,24 +29,21 @@ abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<A
   auto in_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->GetShapeTrack())[kShape];
   return std::make_shared<abstract::Shape>(in_shape);
 }
 
 TypePtr InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
   if (std::any_of(input_args.begin(), input_args.end(), [](const AbstractBasePtr &a) { return a == nullptr; })) {
     MS_LOG(EXCEPTION) << "nullptr";
   }
   std::map<std::string, TypePtr> types;
-  const std::set<TypePtr> valid_types = {kFloat16, kFloat32};
+  const std::set<TypePtr> valid_types = {kInt8, kInt16, kInt32, kInt64, kFloat16, kFloat32};
   types.emplace("input_x", input_args[0]->BuildType());
   return CheckAndConvertUtils::CheckTensorTypeSame(types, valid_types, prim->name());
 }
 
 }  // namespace
 AbstractBasePtr HSigmoidInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
                               const std::vector<AbstractBasePtr> &input_args) {
   return std::make_shared<abstract::AbstractTensor>(InferType(primitive, input_args),
                                                     InferShape(primitive, input_args)->shape());
 }
-
-REGISTER_PRIMITIVE_EVAL_IMPL(HSigmoid, prim::kPrimHSigmoid, HSigmoidInfer, nullptr, true);
-
 }  // namespace ops
 }  // namespace mindspore
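The forward semantics behind these infer routines, as a reference sketch in NumPy (again an illustration, not the operator implementation):

```python
import numpy as np

def hsigmoid(x):
    """Reference forward pass: max(0, min(1, (x + 3) / 6))."""
    return np.clip((x + 3.0) / 6.0, 0.0, 1.0)

print(hsigmoid(np.array([-1.0, -2.0, 0.0, 2.0, 1.0])))
# ≈ [0.3333 0.1667 0.5 0.8333 0.6667], matching the float16 doctest further down
```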
@@ -13,6 +13,9 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+#ifndef MINDSPORE_CORE_OPS_HSIGMOID_H_
+#define MINDSPORE_CORE_OPS_HSIGMOID_H_
 #include <vector>
 #include <memory>
 
@@ -36,3 +39,5 @@ AbstractBasePtr HSigmoidInfer(const abstract::AnalysisEnginePtr &, const Primiti
 using PrimHSigmoidPtr = std::shared_ptr<HSigmoid>;
 }  // namespace ops
 }  // namespace mindspore
+
+#endif  // MINDSPORE_CORE_OPS_HSIGMOID_H_
@@ -675,14 +675,14 @@ class HSigmoid(Cell):
     where :math:`x_{i}` is the :math:`i`-th slice in the given dimension of the input Tensor.
 
     Inputs:
-        - **x** (Tensor) - The input of HSigmoid, data type must be float16 or float32.
-          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
+        - **input_x** (Tensor) - The input of HSigmoid. The shape is :math:`(N,*)` where :math:`*` means, any number of
+          additional dimensions.
 
     Outputs:
-        Tensor, with the same type and shape as the `x`.
+        Tensor, with the same type and shape as the `input_x`.
 
     Raises:
-        TypeError: If dtype of `x` is neither float16 nor float32.
+        TypeError: If `input_x` is not a Tensor.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
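A minimal usage sketch for the cell documented above, assuming a working MindSpore install (illustrative, not from a recorded run):

```python
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor
from mindspore import dtype as mstype

net = nn.HSigmoid()
input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mstype.float16)
output = net(input_x)  # same shape and dtype as input_x
```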
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2021 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -27,9 +27,15 @@ hsigmoid_op_info = TBERegOp("HSigmoid") \
     .attr("beta", "optional", "float", "all", "0.5") \
     .input(0, "input_x", False, "required", "all") \
     .output(0, "output_y", False, "required", "all") \
-    .op_pattern("formatAgnostic") \
-    .dtype_format(DataType.F16_None, DataType.F16_None) \
-    .dtype_format(DataType.F32_None, DataType.F32_None) \
+    .dtype_format(DataType.F16_Default, DataType.F16_Default) \
+    .dtype_format(DataType.F16_5HD, DataType.F16_5HD) \
+    .dtype_format(DataType.F16_NHWC, DataType.F16_NHWC) \
+    .dtype_format(DataType.F32_Default, DataType.F32_Default) \
+    .dtype_format(DataType.F32_5HD, DataType.F32_5HD) \
+    .dtype_format(DataType.F32_NHWC, DataType.F32_NHWC) \
+    .dtype_format(DataType.I32_Default, DataType.I32_Default) \
+    .dtype_format(DataType.I32_5HD, DataType.I32_5HD) \
+    .dtype_format(DataType.I32_NHWC, DataType.I32_NHWC) \
     .get_op_info()
 
 
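Each `.dtype_format(...)` call pairs an input dtype/format with an output dtype/format, so this rewrite replaces the format-agnostic pattern with an explicit support matrix. A hedged reading of that matrix (format names follow MindSpore's usual conventions, where "5HD" is Ascend's NC1HWC0 layout):

```python
# A hedged reading of the registration above: every listed dtype is supported
# in each of three layouts, and input/output must agree on both.
DTYPES = ("float16", "float32", "int32")
FORMATS = ("DefaultFormat", "NC1HWC0", "NHWC")  # Default / 5HD / NHWC
SUPPORTED = {(d, f) for d in DTYPES for f in FORMATS}

def is_supported(dtype, fmt):
    """True if HSigmoid's TBE kernel was registered for this combination."""
    return (dtype, fmt) in SUPPORTED
```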
@@ -1740,31 +1740,8 @@ class HSwishGrad(_ActivationGrad):
     """Gets the gradient of HSwish operation."""
 
 
-class HSigmoidGrad(Primitive):
-    """
-    Gets the gradient of HSigmoid operation.
-
-    Inputs:
-        - **grads** (Tensor) - The gradients of loss to output of HSigmoid function. Currently
-          grads data type only support float16 and float32.
-        - **input_x** (Tensor) - Must be the input `input_x` of the forward operator HSigmoid.
-          Currentlyinput_x data type only support float16 and float32.
-
-    Outputs:
-        - **output** (Tensor) - With the same shape and data type as `input_x`.
-
-    Raises:
-        TypeError: If shape of `grads` is not the same as `input_x`.
-        TypeError: If dtype of `grads` is not the same as `input_x`.
-        TypeError: If dtype of `grads` or `input_x` is neither float16 nor float32.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        self.init_prim_io_names(inputs=['grads', 'input_x'], outputs=['output'])
+class HSigmoidGrad(_ActivationGrad):
+    """Gets the gradient of HSigmoid operation."""
 
 
 class SigmoidCrossEntropyWithLogitsGrad(PrimitiveWithInfer):
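With `HSigmoidGrad` reduced to a plain `_ActivationGrad`, the natural way to exercise it is by differentiating the forward op. A sketch using the public `ops.GradOperation` API (assumed usage, not verified on a device):

```python
import numpy as np
import mindspore.ops as ops
from mindspore import Tensor
from mindspore import dtype as mstype

grad_all = ops.GradOperation(get_all=True)  # returns grads w.r.t. all inputs
hsigmoid = ops.HSigmoid()

x = Tensor(np.array([-4.0, -1.0, 0.0, 1.0, 4.0]), mstype.float32)
(dx,) = grad_all(hsigmoid)(x)  # expect 0 outside (-3, 3) and 1/6 inside
```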
@@ -794,6 +794,46 @@ class Sigmoid(PrimitiveWithInfer):
         return input_x
 
 
+class HSigmoid(Primitive):
+    r"""
+    Hard sigmoid activation function.
+
+    Applies hard sigmoid activation element-wise. The input is a Tensor with any valid shape.
+
+    Hard sigmoid is defined as:
+
+    .. math::
+
+        \text{hsigmoid}(x_{i}) = max(0, min(1, \frac{x_{i} + 3}{6})),
+
+    where :math:`x_i` is an element of the input Tensor.
+
+    Inputs:
+        - **input_x** (Tensor) - Tensor of shape :math:`(N, *)`, where :math:`*` means, any number of
+          additional dimensions.
+
+    Outputs:
+        Tensor, with the same type and shape as the `input_x`.
+
+    Raises:
+        TypeError: If `input_x` is not a Tensor.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> hsigmoid = ops.HSigmoid()
+        >>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mstype.float16)
+        >>> result = hsigmoid(input_x)
+        >>> print(result)
+        [0.3333 0.1666 0.5 0.8335 0.6665]
+    """
+
+    @prim_attr_register
+    def __init__(self):
+        """Initialize HSigmoid."""
+        self.init_prim_io_names(inputs=['input_x'], outputs=['output'])
+
+
 class Tanh(PrimitiveWithInfer):
     r"""
     Tanh activation function.
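One practical effect of the widened `valid_types` is that the raw op no longer rejects integer tensors at type-inference time; whether an integer result is useful depends on the kernel's rounding. A hedged sketch (illustrative only):

```python
import numpy as np
import mindspore.ops as ops
from mindspore import Tensor
from mindspore import dtype as mstype

hsigmoid = ops.HSigmoid()
x_int = Tensor(np.array([-4, -1, 0, 1, 4]), mstype.int32)
y = hsigmoid(x_int)  # accepted now that int8/16/32/64 are in valid_types
```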
@@ -8667,43 +8707,3 @@ class SoftShrink(Primitive):
         """Initialize SoftShrink"""
         validator.check_value_type("lambd", lambd, [float], self.name)
         validator.check_number("lambd", lambd, 0, Rel.GE, self.name)
-
-
-class HSigmoid(Primitive):
-    r"""
-    Hard sigmoid activation function.
-
-    Applies hard sigmoid activation element-wise. The input is a Tensor with any valid shape.
-
-    Hard sigmoid is defined as:
-
-    .. math::
-
-        \text{hsigmoid}(x_{i}) = max(0, min(1, \frac{x_{i} + 3}{6})),
-
-    where :math:`x_i` is an element of the input Tensor.
-
-    Inputs:
-        - **input_x** (Tensor) - Tensor of shape :math:`(N, *)`, where :math:`*` means, any number of
-          additional dimensions, with float16 or float32 data type.
-
-    Outputs:
-        Tensor, with the same type and shape as the `input_x`.
-
-    Raises:
-        TypeError: If `input_x` is not a Tensor.
-        TypeError: If dtype of `input_x` is neither float16 nor float32.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> hsigmoid = ops.HSigmoid()
-        >>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mstype.float16)
-        >>> result = hsigmoid(input_x)
-        >>> print(result)
-        [0.3333 0.1666 0.5 0.8335 0.6665]
-    """
-    @prim_attr_register
-    def __init__(self):
-        """Initialize HSigmoid."""
-        self.init_prim_io_names(inputs=['input_x'], outputs=['output'])
@@ -2154,6 +2154,7 @@ test_case_nn_ops = [
         'desc_inputs': [Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]), mstype.float16),
                         Tensor(np.array([[-3, -2, 0], [1, 2, 4]]), mstype.float16)],
         'desc_bprop': [],
+        'skip': ['backward']}),
     ('HSigmoidGrad', {
         'block': G.HSigmoidGrad(),
         'desc_inputs': [Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]), mstype.float16),
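For context, entries in `test_case_nn_ops` are consumed by a shared runner: `block` is the primitive under test, `desc_inputs` its forward inputs, `desc_bprop` the sense values for backward, and `skip` opts a phase out, which is why the hunk above adds `'skip': ['backward']`. A schematic sketch of that contract (the helper names here are hypothetical):

```python
def run_backward(block, inputs, sens):
    """Hypothetical stand-in for the suite's real bprop driver."""

def run_case(case):
    """Schematic of how a test_case_nn_ops entry is exercised (hypothetical)."""
    block = case['block']                  # the primitive instance under test
    out = block(*case['desc_inputs'])      # forward smoke test
    if 'backward' not in case.get('skip', []):
        run_backward(block, case['desc_inputs'], case['desc_bprop'])
    return out
```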