!214 modify GPU operator information registration

Merge pull request !214 from Maoweiyong/modify-gpu-op-register
This commit is contained in:
mindspore-ci-bot 2020-04-14 20:56:46 +08:00 committed by Gitee
commit fb19655ea6
16 changed files with 225 additions and 544 deletions

View File

@ -30,7 +30,7 @@ Note:
from .primitive import Primitive, PrimitiveWithInfer, prim_attr_register from .primitive import Primitive, PrimitiveWithInfer, prim_attr_register
from .vm_impl_registry import get_vm_impl_fn, vm_impl_registry from .vm_impl_registry import get_vm_impl_fn, vm_impl_registry
from .op_info_register import op_info_register, AiCPURegOp, TBERegOp, DataType from .op_info_register import op_info_register, AkgRegOp, AiCPURegOp, TBERegOp, DataType
from .primitive import constexpr from .primitive import constexpr
from .._c_expression import signature_rw, signature_kind from .._c_expression import signature_rw, signature_kind
@ -40,6 +40,6 @@ __primitive__ = [
] ]
__all__ = ["get_vm_impl_fn", "vm_impl_registry", __all__ = ["get_vm_impl_fn", "vm_impl_registry",
"op_info_register", "AiCPURegOp", "TBERegOp", "DataType", "op_info_register", "AkgRegOp", "AiCPURegOp", "TBERegOp", "DataType",
"constexpr"] "constexpr"]
__all__.extend(__primitive__) __all__.extend(__primitive__)

View File

@ -13,45 +13,19 @@
# limitations under the License. # limitations under the License.
"""Cast op""" """Cast op"""
from mindspore.ops.op_info_register import op_info_register, AkgRegOp, DataType

# Op info for the Akg (AutoDiff, cuda) Cast kernel: one input "x", one
# output "output", required string attribute "dst_type", supporting
# f16 -> f32 and f32 -> f16 conversions in default format.
cast_op_info = (AkgRegOp("Cast")
                .fusion_type("OPAQUE")
                .input(0, "x")
                .output(0, "output")
                .attr("dst_type", "required", "str")
                .dtype_format(DataType.F16_Default, DataType.F32_Default)
                .dtype_format(DataType.F32_Default, DataType.F16_Default)
                .get_op_info())


@op_info_register(cast_op_info)
def _cast_akg():
    """Cast AutoDiff register"""
    return

View File

@ -13,50 +13,19 @@
# limitations under the License. # limitations under the License.
"""Equal op""" """Equal op"""
from mindspore.ops.op_info_register import op_info_register, AkgRegOp, DataType

# Op info for the Akg (AutoDiff, cuda) Equal kernel: two inputs "x" and
# "y", boolean output, elementwise comparison over f16/f16 and f32/f32.
equal_op_info = (AkgRegOp("Equal")
                 .fusion_type("OPAQUE")
                 .input(0, "x")
                 .input(1, "y")
                 .output(0, "output")
                 .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.BOOL_Default)
                 .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.BOOL_Default)
                 .get_op_info())


@op_info_register(equal_op_info)
def _equal_akg():
    """Equal AutoDiff register"""
    return

View File

@ -13,40 +13,18 @@
# limitations under the License. # limitations under the License.
"""HSigmoid op""" """HSigmoid op"""
from mindspore.ops.op_info_register import op_info_register, AkgRegOp, DataType

# Op info for the Akg (AutoDiff, cuda) HSigmoid kernel: one input "x",
# one output "output", f32->f32 and f16->f16 in default format.
hsigmoid_op_info = AkgRegOp("HSigmoid") \
    .fusion_type("OPAQUE") \
    .input(0, "x") \
    .output(0, "output") \
    .dtype_format(DataType.F32_Default, DataType.F32_Default) \
    .dtype_format(DataType.F16_Default, DataType.F16_Default) \
    .get_op_info()


# Bug fix: the decorator previously referenced `hsigmoidgrad_op_info`, which
# is not defined in this module (copy-paste from the HSigmoidGrad file) and
# would raise NameError at import time. Register the op info defined above.
@op_info_register(hsigmoid_op_info)
def _hsigmoid_akg():
    """HSigmoid AutoDiff register"""
    return

View File

@ -13,50 +13,19 @@
# limitations under the License. # limitations under the License.
"""HSigmoidGrad op""" """HSigmoidGrad op"""
from mindspore.ops.op_info_register import op_info_register, AkgRegOp, DataType

# Op info for the Akg (AutoDiff, cuda) HSigmoidGrad kernel: inputs
# "y_grad" and "x", one output, uniform f32 or f16 dtypes.
hsigmoidgrad_op_info = (AkgRegOp("HSigmoidGrad")
                        .fusion_type("OPAQUE")
                        .input(0, "y_grad")
                        .input(1, "x")
                        .output(0, "output")
                        .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default)
                        .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default)
                        .get_op_info())


@op_info_register(hsigmoidgrad_op_info)
def _hsigmoid_grad_akg():
    """HSigmoidGrad AutoDiff register"""
    return

View File

@ -13,40 +13,18 @@
# limitations under the License. # limitations under the License.
"""HSwish op""" """HSwish op"""
from mindspore.ops.op_info_register import op_info_register, AkgRegOp, DataType

# Op info for the Akg (AutoDiff, cuda) HSwish kernel: one input "x",
# one output "output", f32->f32 and f16->f16 in default format.
hswish_op_info = AkgRegOp("HSwish") \
    .fusion_type("OPAQUE") \
    .input(0, "x") \
    .output(0, "output") \
    .dtype_format(DataType.F32_Default, DataType.F32_Default) \
    .dtype_format(DataType.F16_Default, DataType.F16_Default) \
    .get_op_info()


# Bug fix: the decorator previously referenced `hsigmoidgrad_op_info`, which
# is not defined in this module (copy-paste from the HSigmoidGrad file) and
# would raise NameError at import time. Register the op info defined above.
@op_info_register(hswish_op_info)
def _hswish_akg():
    """HSwish AutoDiff register"""
    return

View File

@ -13,50 +13,19 @@
# limitations under the License. # limitations under the License.
"""HSwishGrad op""" """HSwishGrad op"""
from mindspore.ops.op_info_register import op_info_register, AkgRegOp, DataType

# Op info for the Akg (AutoDiff, cuda) HSwishGrad kernel: inputs
# "y_grad" and "x", one output, uniform f32 or f16 dtypes.
hswishgrad_op_info = AkgRegOp("HSwishGrad") \
    .fusion_type("OPAQUE") \
    .input(0, "y_grad") \
    .input(1, "x") \
    .output(0, "output") \
    .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \
    .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \
    .get_op_info()


# Bug fix: the decorator previously referenced `hsigmoidgrad_op_info`, which
# is not defined in this module (copy-paste from the HSigmoidGrad file) and
# would raise NameError at import time. Register the op info defined above.
@op_info_register(hswishgrad_op_info)
def _hswish_grad_akg():
    """HSwishGrad AutoDiff register"""
    return

View File

@ -13,40 +13,18 @@
# limitations under the License. # limitations under the License.
"""SimpleMean op""" """SimpleMean op"""
from mindspore.ops.op_info_register import op_info_register, AkgRegOp, DataType

# Op info for the Akg (AutoDiff, cuda) SimpleMean kernel: one input "x",
# one output "output", f16->f16 and f32->f32 in default format.
mean_op_info = (AkgRegOp("SimpleMean")
                .fusion_type("OPAQUE")
                .input(0, "x")
                .output(0, "output")
                .dtype_format(DataType.F16_Default, DataType.F16_Default)
                .dtype_format(DataType.F32_Default, DataType.F32_Default)
                .get_op_info())


@op_info_register(mean_op_info)
def _simple_mean_akg():
    """SimpleMean AutoDiff register"""
    return

View File

@ -13,45 +13,19 @@
# limitations under the License. # limitations under the License.
"""SimpleMeanGrad op""" """SimpleMeanGrad op"""
from mindspore.ops.op_info_register import op_info_register, AkgRegOp, DataType

# Op info for the Akg (AutoDiff, cuda) SimpleMeanGrad kernel: one input
# "HEAD", one output, required listInt attribute "input_shape".
mean_grad_op_info = (AkgRegOp("SimpleMeanGrad")
                     .fusion_type("OPAQUE")
                     .input(0, "HEAD")
                     .output(0, "output")
                     .attr("input_shape", "required", "listInt")
                     .dtype_format(DataType.F16_Default, DataType.F16_Default)
                     .dtype_format(DataType.F32_Default, DataType.F32_Default)
                     .get_op_info())


@op_info_register(mean_grad_op_info)
def _simple_mean_grad_akg():
    """SimpleMeanGrad AutoDiff register"""
    return

View File

@ -13,50 +13,19 @@
# limitations under the License. # limitations under the License.
"""Mul op""" """Mul op"""
from mindspore.ops.op_info_register import op_info_register, AkgRegOp, DataType

# Op info for the Akg (AutoDiff, cuda) Mul kernel: inputs "x" and "y",
# one output, elementwise multiply over uniform f16 or f32 dtypes.
mul_op_info = (AkgRegOp("Mul")
               .fusion_type("OPAQUE")
               .input(0, "x")
               .input(1, "y")
               .output(0, "output")
               .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default)
               .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default)
               .get_op_info())


@op_info_register(mul_op_info)
def _mul_akg():
    """Mul AutoDiff register"""
    return

View File

@ -13,40 +13,18 @@
# limitations under the License. # limitations under the License.
"""ReLU6 op""" """ReLU6 op"""
from mindspore.ops.op_info_register import op_info_register, AkgRegOp, DataType

# Op info for the Akg (AutoDiff, cuda) ReLU6 kernel: one input "x",
# one output "output", f16->f16 and f32->f32 in default format.
relu_op_info = (AkgRegOp("ReLU6")
                .fusion_type("OPAQUE")
                .input(0, "x")
                .output(0, "output")
                .dtype_format(DataType.F16_Default, DataType.F16_Default)
                .dtype_format(DataType.F32_Default, DataType.F32_Default)
                .get_op_info())


@op_info_register(relu_op_info)
def _relu6_akg():
    """ReLU6 AutoDiff register"""
    return

View File

@ -13,50 +13,19 @@
# limitations under the License. # limitations under the License.
"""ReLU6Grad op""" """ReLU6Grad op"""
from mindspore.ops.op_info_register import op_info_register, AkgRegOp, DataType

# Op info for the Akg (AutoDiff, cuda) ReLU6Grad kernel: inputs
# "y_grad" and "x", one output, uniform f16 or f32 dtypes.
relu_grad_op_info = (AkgRegOp("ReLU6Grad")
                     .fusion_type("OPAQUE")
                     .input(0, "y_grad")
                     .input(1, "x")
                     .output(0, "output")
                     .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default)
                     .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default)
                     .get_op_info())


@op_info_register(relu_grad_op_info)
def _relu6_grad_akg():
    """ReLU6Grad AutoDiff register"""
    return

View File

@ -13,45 +13,19 @@
# limitations under the License. # limitations under the License.
"""Squeeze op""" """Squeeze op"""
from mindspore.ops.op_info_register import op_info_register, AkgRegOp, DataType

# Op info for the Akg (AutoDiff, cuda) Squeeze kernel: one input "x",
# one output, optional listInt attribute "axis".
#
# Bug fix: the op name passed to AkgRegOp was "SqueezeGrad" (copy-paste from
# the SqueezeGrad file), which would register this kernel under the wrong op
# name. The original JSON registration, the variable name, the function name
# and the docstring all say "Squeeze".
squeeze_op_info = AkgRegOp("Squeeze") \
    .fusion_type("OPAQUE") \
    .input(0, "x") \
    .output(0, "output") \
    .attr("axis", "optional", "listInt") \
    .dtype_format(DataType.F16_Default, DataType.F16_Default) \
    .dtype_format(DataType.F32_Default, DataType.F32_Default) \
    .get_op_info()


@op_info_register(squeeze_op_info)
def _squeeze_akg():
    """Squeeze AutoDiff register"""
    return

View File

@ -13,50 +13,20 @@
# limitations under the License. # limitations under the License.
"""SqueezeGrad op""" """SqueezeGrad op"""
from mindspore.ops.op_info_register import op_info_register, AkgRegOp, DataType

# Op info for the Akg (AutoDiff, cuda) SqueezeGrad kernel: one input
# "y_grad", one output, required "x_shape" and optional "axis" attributes.
squeeze_grad_op_info = (AkgRegOp("SqueezeGrad")
                        .fusion_type("OPAQUE")
                        .input(0, "y_grad")
                        .output(0, "output")
                        .attr("x_shape", "required", "listInt")
                        .attr("axis", "optional", "listInt")
                        .dtype_format(DataType.F16_Default, DataType.F16_Default)
                        .dtype_format(DataType.F32_Default, DataType.F32_Default)
                        .get_op_info())


@op_info_register(squeeze_grad_op_info)
def _squeeze_grad_akg():
    """SqueezeGrad AutoDiff register"""
    return

View File

@ -13,45 +13,19 @@
# limitations under the License. # limitations under the License.
"""Tile op""" """Tile op"""
from mindspore.ops.op_info_register import op_info_register, AkgRegOp, DataType

# Op info for the Akg (AutoDiff, cuda) Tile kernel: one input "x",
# one output, required listInt attribute "multiples".
tile_op_info = (AkgRegOp("Tile")
                .fusion_type("OPAQUE")
                .input(0, "x")
                .output(0, "output")
                .attr("multiples", "required", "listInt")
                .dtype_format(DataType.F16_Default, DataType.F16_Default)
                .dtype_format(DataType.F32_Default, DataType.F32_Default)
                .get_op_info())


@op_info_register(tile_op_info)
def _tile_akg():
    """Tile AutoDiff register"""
    return

View File

@ -205,6 +205,64 @@ class RegOp():
return op_info return op_info
class AkgRegOp(RegOp):
    """Builder for Akg op info registration (imply type "AutoDiff", cuda processor)."""

    def __init__(self, op_name):
        super(AkgRegOp, self).__init__(op_name)
        # Akg kernels are registered for the AutoDiff imply type on cuda.
        self.imply_type = "AutoDiff"
        self.processor = "cuda"

    def input(self, index=None, name=None, **kwargs):
        """
        Register Akg op input information.

        Args:
            index (int): Order of the input. Default: None.
            name (str): Name of the input. Default: None.
            kwargs (dict): Other information for the input.
        """
        checked = self._check_param([index, name],
                                    ["index", "name"],
                                    [self._is_int, self._is_string],
                                    kwargs)
        self.inputs.append(checked)
        return self

    def output(self, index=None, name=None, **kwargs):
        """
        Register Akg op output information.

        Args:
            index (int): Order of the output. Default: None.
            name (str): Name of the output. Default: None.
            kwargs (dict): Other information for the output.
        """
        checked = self._check_param([index, name],
                                    ["index", "name"],
                                    [self._is_int, self._is_string],
                                    kwargs)
        self.outputs.append(checked)
        return self

    def attr(self, name=None, param_type=None, value_type=None, **kwargs):
        """
        Register Akg op attribute information.

        Args:
            name (str): Name of the attribute. Default: None.
            param_type (str): Param type of the attribute. Default: None.
            value_type (str): Value type of the attribute. Default: None.
            kwargs (dict): Other information for the attribute.
        """
        # NOTE(review): only one validator is supplied for three params —
        # presumably _check_param applies the last validator to the remaining
        # params or skips them; confirm against the RegOp base class.
        checked = self._check_param([name, param_type, value_type],
                                    ["name", "param_type", "type"],
                                    [self._is_string],
                                    kwargs)
        self.attr_.append(checked)
        return self
class AiCPURegOp(RegOp): class AiCPURegOp(RegOp):
"""Class for AiCPU op info register""" """Class for AiCPU op info register"""