forked from mindspore-Ecosystem/mindspore

vm for mod

This commit is contained in:
parent ea475637a1
commit ffd0352162
@@ -306,6 +306,18 @@ def get_bprop_floormod(self):
     return bprop
 
 
+@bprop_getters.register(P.Mod)
+def get_bprop_mod(self):
+    """Grad definition for `Mod` operation."""
+
+    def bprop(x, y, out, dout):
+        bc_x = dout
+        bc_y = -dout * (x // y)
+        return binop_grad_common(x, y, bc_x, bc_y)
+
+    return bprop
+
+
 @bprop_getters.register(P.Square)
 def get_bprop_square(self):
     """Grad definition for `Square` operation."""
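A quick sanity check of the backprop rule added above (a NumPy-only sketch, not part of the commit): with the floor-mod identity x mod y = x - y * floor(x / y) and the floor term treated as locally constant, the gradients are dout for x and -dout * floor(x / y) for y, which is exactly what bc_x and bc_y compute. The sample values below are chosen so x / y never lands on an integer, where floor is discontinuous.

import numpy as np

# Finite-difference check of bc_y = -dout * (x // y), with dout = 1.
x = np.array([-4.0, 5.0, 7.0])
y = np.array([3.0, 2.0, 3.0])
eps = 1e-4

fd = (np.mod(x, y + eps) - np.mod(x, y - eps)) / (2 * eps)  # numeric d(x mod y)/dy
analytic = -np.floor_divide(x, y)                           # the bprop's formula
print(fd)        # approx. [ 2. -2. -2.]
print(analytic)  # [ 2. -2. -2.]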
@@ -276,3 +276,4 @@ from .lrn_grad import _lrn_grad_tbe
 from .scatter_max import _scatter_max_tbe
 from .scatter_min import _scatter_min_tbe
 from .scatter_sub import _scatter_sub_tbe
+from .mod import _mod_tbe
@@ -0,0 +1,45 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""Mod op"""
+from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
+
+mod_op_info = TBERegOp("Mod") \
+    .fusion_type("ELEMWISE") \
+    .async_flag(False) \
+    .binfile_name("mod.so") \
+    .compute_cost(10) \
+    .kernel_name("mod") \
+    .partial_flag(True) \
+    .input(0, "x1", False, "required", "all") \
+    .input(1, "x2", False, "required", "all") \
+    .output(0, "y", False, "required", "all") \
+    .dtype_format(DataType.I8_Default, DataType.I8_Default, DataType.I8_Default) \
+    .dtype_format(DataType.I8_5HD, DataType.I8_5HD, DataType.I8_5HD) \
+    .dtype_format(DataType.U8_Default, DataType.U8_Default, DataType.U8_Default) \
+    .dtype_format(DataType.U8_5HD, DataType.U8_5HD, DataType.U8_5HD) \
+    .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \
+    .dtype_format(DataType.I32_5HD, DataType.I32_5HD, DataType.I32_5HD) \
+    .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \
+    .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD) \
+    .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \
+    .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \
+    .get_op_info()
+
+
+@op_info_register(mod_op_info)
+def _mod_tbe():
+    """Mod TBE register"""
+    return
@@ -45,7 +45,7 @@ from .math_ops import (Abs, ACos, Asin, Asinh, AddN, AccumulateNV2, AssignAdd, A
                        BitwiseXor, Inv, Invert, ApproximateEqual, InplaceAdd, InplaceSub,
                        ReduceMax, ReduceMin, ReduceMean, ReduceSum, ReduceAll, ReduceProd, CumProd,
                        Cos, Div, DivNoNan, Equal, EqualCount, Exp, Expm1, Erf, Erfc, Floor, FloorDiv, FloorMod, Ceil,
-                       Acosh, Greater, GreaterEqual, Less, LessEqual, Log, Log1p, LogicalAnd,
+                       Acosh, Greater, GreaterEqual, Less, LessEqual, Log, Log1p, LogicalAnd, Mod,
                        LogicalNot, LogicalOr, MatMul, Maximum,
                        Minimum, Mul, Neg, NMSWithMask, NotEqual,
                        NPUAllocFloatStatus, NPUClearFloatStatus,
@@ -322,7 +322,8 @@ __all__ = [
     "ApproximateEqual",
     "InplaceUpdate",
     "InTopK",
-    "LRN"
+    "LRN",
+    "Mod"
 ]
 
 __all__.sort()
@@ -1361,7 +1361,7 @@ class HistogramFixedWidth(PrimitiveWithInfer):
     Inputs:
         - **x** (Tensor) - Numeric Tensor. Must be one of the following types: int32, float32, float16.
         - **range** (Tensor) - Must have the same type as x. Shape [2] Tensor of same dtype as x.
-        x <= range[0] will be mapped to hist[0], x >= range[1] will be mapped to hist[-1].
+          x <= range[0] will be mapped to hist[0], x >= range[1] will be mapped to hist[-1].
 
     Outputs:
         Tensor, the type is int32.
@@ -1645,8 +1645,9 @@ class Div(_MathBinaryOp):
     Inputs:
         - **input_x** (Union[Tensor, Number, bool]) - The first input is a number or
           a bool or a tensor whose data type is number or bool.
-        - **input_y** (Union[Tensor, Number, bool]) - The second input is a number or
-          a bool when the first input is a tensor or a tensor whose data type is number or bool.
+        - **input_y** (Union[Tensor, Number, bool]) - When the first input is a tensor, The second input
+          could be a number or a bool, or a tensor whose data type is number or bool. When the first input
+          is a number or a bool, the second input should be a tensor whose data type is number or bool.
 
     Outputs:
         Tensor, the shape is same as the shape after broadcasting,
@@ -1742,6 +1743,42 @@ class FloorDiv(_MathBinaryOp):
     """
 
 
+class Mod(_MathBinaryOp):
+    """
+    Computes the remainder of dividing the first input tensor by the second input tensor element-wise.
+
+    The inputs must be two tensors or one tensor and one scalar. When the inputs are two tensors,
+    both dtypes cannot be bool, and the shapes of them could be broadcast. When the inputs are one tensor
+    and one scalar, the scalar only could be a constant.
+
+    Inputs:
+        - **input_x** (Union[Tensor, Number]) - The first input is a number or a tensor whose data type is number.
+        - **input_y** (Union[Tensor, Number]) - When the first input is a tensor, The second input
+          could be a number or a tensor whose data type is number. When the first input is a number,
+          the second input should be a tensor whose data type is number.
+
+    Outputs:
+        Tensor, the shape is same as the shape after broadcasting,
+        and the data type is the one with high precision or high digits among the two inputs.
+
+    Raises:
+        ValueError: When `input_x` and `input_y` are not the same dtype.
+
+    Examples:
+        >>> input_x = Tensor(np.array([-4.0, 5.0, 6.0]), mindspore.float32)
+        >>> input_y = Tensor(np.array([3.0, 2.0, 3.0]), mindspore.float32)
+        >>> mod = P.Mod()
+        >>> mod(input_x, input_y)
+    """
+
+    def infer_value(self, x, y):
+        if x is not None and y is not None:
+            x = x.asnumpy()
+            y = y.asnumpy()
+            return Tensor(np.fmod(x, y))
+        return None
+
+
 class Floor(PrimitiveWithInfer):
     """
     Round a tensor down to the closest integer element-wise.
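For reference, the infer_value path above folds constants with np.fmod, so evaluating the docstring example that way gives the values below (a NumPy-only sketch; not a claim about the Ascend kernel's output):

import numpy as np

# Same values as the Mod docstring example; np.fmod keeps the sign of the dividend.
input_x = np.array([-4.0, 5.0, 6.0], dtype=np.float32)
input_y = np.array([3.0, 2.0, 3.0], dtype=np.float32)
print(np.fmod(input_x, input_y))  # [-1.  1.  0.]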
@@ -1669,7 +1669,7 @@ class DataFormatDimMap(PrimitiveWithInfer):
 
     Inputs:
         - **input_x** (Tensor) - A Tensor with each element as a dimension index in source data format.
-        Must be in the range [-4, 4). It's type is int32.
+          Must be in the range [-4, 4). It's type is int32.
 
     Outputs:
         Tensor, has the same type as the `input_x`.
@@ -996,6 +996,10 @@ test_case_math_ops = [
         'block': NormalNet((3, 2, 4), 0.0, 1.0, 0),
         'desc_inputs': [],
         'skip': ['backward']}),
+    ('Mod', {
+        'block': P.Mod(),
+        'desc_inputs': [[3, 4, 5], [2, 3, 4, 5]],
+        'desc_bprop': [[2, 3, 4, 5]]}),
 ]
 
 test_case_nn_ops = [
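Assuming the test framework's usual convention that these nested integer lists are tensor shapes, the new 'Mod' case exercises broadcasting of a (3, 4, 5) operand against a (2, 3, 4, 5) operand; a NumPy sketch of the resulting shape:

import numpy as np

# Hypothetical stand-in inputs with the shapes used by the 'Mod' test case above.
a = np.random.randn(3, 4, 5).astype(np.float32)
b = np.random.rand(2, 3, 4, 5).astype(np.float32) + 1.0  # keep the divisor away from zero
print(np.fmod(a, b).shape)  # (2, 3, 4, 5), matching the 'desc_bprop' shape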