From 6f1747045c7fa58ff5ce06ba63ca647b98dbf328 Mon Sep 17 00:00:00 2001
From: zhangz0911gm
Date: Tue, 31 Mar 2020 23:14:21 -0400
Subject: [PATCH] Add FloorMod, Acosh in ME

---
 mindspore/ccsrc/transform/convert.cc    |  4 ++
 mindspore/ccsrc/transform/op_declare.cc | 10 +++++
 mindspore/ccsrc/transform/op_declare.h  |  5 ++-
 mindspore/ops/_grad/grad_math_ops.py    | 25 +++++++++++
 mindspore/ops/operations/__init__.py    |  4 +-
 mindspore/ops/operations/math_ops.py    | 56 +++++++++++++++++++++++++
 mindspore/ops/operations/nn_ops.py      |  2 +-
 tests/ut/python/ops/test_ops.py         |  9 ++++
 8 files changed, 112 insertions(+), 3 deletions(-)

diff --git a/mindspore/ccsrc/transform/convert.cc b/mindspore/ccsrc/transform/convert.cc
index fdacff7ba8a..7c100f0f0ed 100755
--- a/mindspore/ccsrc/transform/convert.cc
+++ b/mindspore/ccsrc/transform/convert.cc
@@ -171,6 +171,8 @@ const char kNameAbsGrad[] = "AbsGrad";
 const char kNameBinaryCrossEntropy[] = "BinaryCrossEntropy";
 const char kNameBinaryCrossEntropyGrad[] = "BinaryCrossEntropyGrad";
 const char kNameSparseApplyAdagrad[] = "SparseApplyAdagrad";
+const char kNameAcosh[] = "Acosh";
+const char kNameFloorMod[] = "FloorMod";
 const char kNameSpaceToDepth[] = "SpaceToDepth";
 const char kNameDepthToSpace[] = "DepthToSpace";
 const char kNameSign[] = "Sign";
@@ -360,6 +362,8 @@ std::unordered_map &DfGraphConvertor::get_adpt_ma
     {string(kNameBinaryCrossEntropy), ADPT_DESC(BinaryCrossEntropy)},
     {string(kNameBinaryCrossEntropyGrad), ADPT_DESC(BinaryCrossEntropyGrad)},
     {string(kNameSparseApplyAdagrad), ADPT_DESC(SparseApplyAdagradD)},
+    {string(kNameAcosh), ADPT_DESC(Acosh)},
+    {string(kNameFloorMod), ADPT_DESC(FloorMod)},
     {string(kNameSpaceToDepth), ADPT_DESC(SpaceToDepth)},
     {string(kNameDepthToSpace), ADPT_DESC(DepthToSpace)},
     {string(kNameSign), ADPT_DESC(Sign)},
diff --git a/mindspore/ccsrc/transform/op_declare.cc b/mindspore/ccsrc/transform/op_declare.cc
index 9258eb08db2..0af2923cc4a 100755
--- a/mindspore/ccsrc/transform/op_declare.cc
+++ b/mindspore/ccsrc/transform/op_declare.cc
@@ -357,6 +357,11 @@ INPUT_MAP(AcosGrad) = {{1, INPUT_DESC(y)}, {2, INPUT_DESC(dy)}};
 ATTR_MAP(AcosGrad) = EMPTY_ATTR_MAP;
 OUTPUT_MAP(AcosGrad) = {{0, OUTPUT_DESC(z)}};
+// Acosh
+INPUT_MAP(Acosh) = {{1, INPUT_DESC(x)}};
+ATTR_MAP(Acosh) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(Acosh) = {{0, OUTPUT_DESC(y)}};
+
 // Floor
 INPUT_MAP(Floor) = {{1, INPUT_DESC(x)}};
 ATTR_MAP(Floor) = EMPTY_ATTR_MAP;
 OUTPUT_MAP(Floor) = {{0, OUTPUT_DESC(y)}};
@@ -367,6 +372,11 @@ INPUT_MAP(FloorDiv) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}};
 ATTR_MAP(FloorDiv) = EMPTY_ATTR_MAP;
 OUTPUT_MAP(FloorDiv) = {{0, OUTPUT_DESC(y)}};
 
+// FloorMod
+INPUT_MAP(FloorMod) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}};
+ATTR_MAP(FloorMod) = EMPTY_ATTR_MAP;
+OUTPUT_MAP(FloorMod) = {{0, OUTPUT_DESC(y)}};
+
 // Sin
 INPUT_MAP(Sin) = {{1, INPUT_DESC(x)}};
 ATTR_MAP(Sin) = EMPTY_ATTR_MAP;
diff --git a/mindspore/ccsrc/transform/op_declare.h b/mindspore/ccsrc/transform/op_declare.h
index 031ce80865e..d120c949892 100755
--- a/mindspore/ccsrc/transform/op_declare.h
+++ b/mindspore/ccsrc/transform/op_declare.h
@@ -324,11 +324,15 @@
 DECLARE_OP_USE_OUTPUT(Acos)
 DECLARE_OP_ADAPTER(AcosGrad)
 DECLARE_OP_USE_OUTPUT(AcosGrad)
+DECLARE_OP_ADAPTER(Acosh)
+DECLARE_OP_USE_OUTPUT(Acosh)
 DECLARE_OP_ADAPTER(Floor)
 DECLARE_OP_USE_OUTPUT(Floor)
 DECLARE_OP_ADAPTER(FloorDiv)
 DECLARE_OP_USE_OUTPUT(FloorDiv)
+DECLARE_OP_ADAPTER(FloorMod)
+DECLARE_OP_USE_OUTPUT(FloorMod)
 DECLARE_OP_ADAPTER(Sin)
 DECLARE_OP_USE_OUTPUT(Sin)
 DECLARE_OP_ADAPTER(Exp)
 DECLARE_OP_USE_OUTPUT(Exp)
@@ -450,7 +454,6 @@ DECLARE_OP_USE_INPUT_ATTR(ApplyRMSPropD)
 DECLARE_OP_USE_OUTPUT(ApplyRMSPropD)
 DECLARE_OP_ADAPTER(ApplyCenteredRMSProp)
 DECLARE_OP_USE_OUTPUT(ApplyCenteredRMSProp)
-
 #ifdef ENABLE_GE
 DECLARE_OP_ADAPTER(Print)
 DECLARE_OP_USE_DYN_INPUT(Print)
diff --git a/mindspore/ops/_grad/grad_math_ops.py b/mindspore/ops/_grad/grad_math_ops.py
index 1675855c88d..1863ac8fdd3 100755
--- a/mindspore/ops/_grad/grad_math_ops.py
+++ b/mindspore/ops/_grad/grad_math_ops.py
@@ -251,6 +251,20 @@ def get_bprop_floordiv(self):
     return bprop
 
 
+@bprop_getters.register(P.FloorMod)
+def get_bprop_floormod(self):
+    """Grad definition for `FloorMod` operation."""
+    floordiv_op = P.FloorDiv()
+    neg = P.Neg()
+    mul_op = P.Mul()
+
+    def bprop(x, y, out, dout):
+        bc_x = dout
+        bc_y = neg(mul_op(dout, floordiv_op(x, y)))
+        return binop_grad_common(x, y, bc_x, bc_y)
+    return bprop
+
+
 @bprop_getters.register(P.Square)
 def get_bprop_square(self):
     """Grad definition for `Square` operation."""
@@ -690,6 +704,17 @@ def get_bprop_acos(self):
     return bprop
 
 
+@bprop_getters.register(P.Acosh)
+def get_bprop_acosh(self):
+    """Grad definition for `Acosh` operation."""
+    input_grad = G.AcoshGrad()
+
+    def bprop(x, out, dout):
+        dx = input_grad(out, dout)
+        return (dx,)
+    return bprop
+
+
 @bprop_getters.register(P.Abs)
 def get_bprop_abs(self):
     """Grad definition for `Abs` operation."""
diff --git a/mindspore/ops/operations/__init__.py b/mindspore/ops/operations/__init__.py
index 727ddaf88f0..846be05c4d7 100644
--- a/mindspore/ops/operations/__init__.py
+++ b/mindspore/ops/operations/__init__.py
@@ -39,7 +39,7 @@ from .control_ops import ControlDepend, GeSwitch, Merge
 from .inner_ops import ScalarCast
 from .math_ops import (Abs, ACos, AddN, AssignAdd, AssignSub, Atan2, BatchMatMul,
                        ReduceMax, ReduceMin, ReduceMean, ReduceSum, ReduceAll, ReduceProd, CumProd,
-                       Cos, Div, Equal, EqualCount, Exp, Floor, FloorDiv,
+                       Cos, Div, Equal, EqualCount, Exp, Floor, FloorDiv, FloorMod, Acosh,
                        Greater, GreaterEqual, Less, LessEqual, Log, LogicalAnd,
                        LogicalNot, LogicalOr, MatMul, Maximum,
                        Minimum, Mul, Neg, NMSWithMask, NotEqual,
@@ -205,6 +205,8 @@ __all__ = [
     'Log',
     'SigmoidCrossEntropyWithLogits',
     'FloorDiv',
+    'FloorMod',
+    'Acosh',
     "PReLU",
     "Cos",
     "ACos",
diff --git a/mindspore/ops/operations/math_ops.py b/mindspore/ops/operations/math_ops.py
index 398a7e6f1a9..9600c736b2a 100644
--- a/mindspore/ops/operations/math_ops.py
+++ b/mindspore/ops/operations/math_ops.py
@@ -1183,6 +1183,62 @@ class Floor(PrimitiveWithInfer):
         return x_dtype
 
 
+class FloorMod(_MathBinaryOp):
+    """
+    Computes the element-wise remainder of division.
+
+    The inputs must be two tensors, or one tensor and one scalar.
+    When the inputs are two tensors, their shapes must be broadcastable
+    and their data types must be the same.
+    When the inputs are one tensor and one scalar, the scalar must be a constant rather than a parameter,
+    and its type must be the same as the data type of the tensor.
+
+    Inputs:
+        - **input_x** (Union[Tensor, Number]) - The first input, a tensor whose data type is number, or a number.
+        - **input_y** (Union[Tensor, Number]) - The second input, a tensor whose data type is the same as that of
+          `input_x`, or a number.
+
+    Outputs:
+        Tensor, with the shape after broadcasting and the same data type as `input_x`.
+
+    Examples:
+        >>> input_x = Tensor(np.array([2, 4, -1]), mindspore.int32)
+        >>> input_y = Tensor(np.array([3, 3, 3]), mindspore.int32)
+        >>> floor_mod = FloorMod()
+        >>> floor_mod(input_x, input_y)
+        [2, 1, 2]
+    """
+
+
+class Acosh(PrimitiveWithInfer):
+    """
+    Computes the inverse hyperbolic cosine of the input element-wise.
+
+    Inputs:
+        - **input_x** (Tensor) - The shape of the tensor is :math:`(x_1, x_2, ..., x_R)`.
+
+    Outputs:
+        Tensor, has the same shape as `input_x`.
+
+    Examples:
+        >>> acosh = Acosh()
+        >>> input_x = Tensor(np.array([1.0, 1.5, 3.0, 100.0]), mindspore.float32)
+        >>> output = acosh(input_x)
+    """
+
+    @prim_attr_register
+    def __init__(self):
+        """init Acosh"""
+
+    def infer_shape(self, x):
+        return x
+
+    def infer_dtype(self, x):
+        validator.check_subclass("x_dtype", x, mstype.tensor)
+        validator.check_typename('x_dtype', x, mstype.number_type)
+        return x
+
+
 class _LogicBinaryOp(_BinaryOp):
     """
     Define logic binary operators.
diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py
index 0410d4a3462..e0f8280514c 100644
--- a/mindspore/ops/operations/nn_ops.py
+++ b/mindspore/ops/operations/nn_ops.py
@@ -2395,4 +2395,4 @@ class ApplyFtrl(PrimitiveWithInfer):
         validator.check_typename("l1", l1_type,[mstype.float16, mstype.float32])
         validator.check_typename("l2", l2_type,[mstype.float16, mstype.float32])
         validator.check_typename("lr_power", lr_power_type,[mstype.float16, mstype.float32])
-        return var_type
\ No newline at end of file
+        return var_type
diff --git a/tests/ut/python/ops/test_ops.py b/tests/ut/python/ops/test_ops.py
index 453ef9a652f..0f5b716e390 100755
--- a/tests/ut/python/ops/test_ops.py
+++ b/tests/ut/python/ops/test_ops.py
@@ -219,6 +219,10 @@ test_case_math_ops = [
         'block': P.ACos(),
         'desc_inputs': [[2, 3]],
         'desc_bprop': [[2, 3]]}),
+    ('Acosh', {
+        'block': P.Acosh(),
+        'desc_inputs': [Tensor(np.random.rand(4).astype(np.float16))],
+        'skip': ['backward']}),
     ('Sin', {
         'block': P.Sin(),
         'desc_inputs': [[2, 3]],
@@ -301,6 +305,11 @@
         'desc_inputs': [Tensor(np.random.rand(4).astype(np.float16)),
                         Tensor(np.random.rand(4).astype(np.float16))],
         'skip': ['backward']}),
+    ('FloorMod', {
+        'block': P.FloorMod(),
+        'desc_inputs': [Tensor(np.random.rand(4).astype(np.float16)),
+                        Tensor(np.random.rand(4).astype(np.float16))],
+        'skip': ['backward']}),
     ('identity', {
         'block': ops.functional.identity,
        'desc_inputs': [[2, 2]],
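
Review note (not part of the patch): the two UT entries above run forward only ('skip': ['backward']), so they do not exercise the new bprops. A minimal PyNative sanity check along the following lines, assuming a build with this patch applied, can confirm the forward semantics; np.mod and np.arccosh serve as references, since FloorMod floors the quotient the same way np.mod does, and Acosh matches np.arccosh on inputs >= 1.

import numpy as np
import mindspore as ms
from mindspore import Tensor, context
from mindspore.ops import operations as P

context.set_context(mode=context.PYNATIVE_MODE)

# FloorMod: remainder whose quotient is floored, so the result takes the
# sign of the divisor (unlike a C-style truncated modulo).
floor_mod = P.FloorMod()
x = Tensor(np.array([2, 4, -1]), ms.int32)
y = Tensor(np.array([3, 3, 3]), ms.int32)
print(floor_mod(x, y))                      # expect [2 1 2]
print(np.mod([2, 4, -1], [3, 3, 3]))        # NumPy reference: [2 1 2]

# Acosh: defined for inputs >= 1; acosh(1.0) == 0.0.
acosh = P.Acosh()
v = Tensor(np.array([1.0, 1.5, 3.0, 100.0]), ms.float32)
print(acosh(v))
print(np.arccosh([1.0, 1.5, 3.0, 100.0]))   # NumPy reference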