fix bugs and add register info

This commit is contained in:
lihongkang 2020-09-04 14:37:43 +08:00
parent bc4c5afc1a
commit 8b9c6a3527
5 changed files with 116 additions and 7 deletions

View File

@ -304,3 +304,5 @@ from .max_pool_grad_grad_with_argmax import _max_pool_grad_grad_with_argmax_tbe
from .tensor_move import _tensor_move_tbe
from .population_count import _population_count_tbe
from .parallel_concat import _parallel_concat_tbe
from .adam_apply_one_assign import _adam_apply_one_assign_tbe
from .adam_apply_one_with_decay_assign import _adam_apply_one_with_decay_assign_tbe

View File

@ -0,0 +1,53 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""AdamApplyOneAssign op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
# Build the TBE registration record for the AdamApplyOneAssign fused kernel.
# The fluent builder returns itself from every call, so the chain below is
# expressed as incremental reassignment instead of one backslash-continued chain.
_assign_reg = (TBERegOp("AdamApplyOneAssign")
               .fusion_type("OPAQUE")
               .async_flag(False)
               .binfile_name("adam_apply_one_assign.so")
               .compute_cost(10)
               .kernel_name("adam_apply_one_assign")
               .partial_flag(True))
# Ten required inputs: five data tensors followed by five scalar coefficients.
for _idx, _name in enumerate(("input0", "input1", "input2", "input3", "input4",
                              "mul0_x", "mul1_x", "mul2_x", "mul3_x", "add2_y")):
    _assign_reg = _assign_reg.input(_idx, _name, False, "required", "all")
# Three required outputs.
for _idx, _name in enumerate(("output0", "output1", "output2")):
    _assign_reg = _assign_reg.output(_idx, _name, True, "required", "all")
# One dtype row per supported precision: all 13 slots (10 inputs + 3 outputs)
# share a single dtype/format in each row.
for _dtype in (DataType.F16_Default, DataType.F32_Default):
    _assign_reg = _assign_reg.dtype_format(*([_dtype] * 13))
adam_apply_one_assign_op_info = _assign_reg.get_op_info()


@op_info_register(adam_apply_one_assign_op_info)
def _adam_apply_one_assign_tbe():
    """AdamApplyOneAssign TBE register."""
    return

View File

@ -0,0 +1,54 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""AdamApplyOneWithDecayAssign op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
# Build the TBE registration record for the AdamApplyOneWithDecayAssign fused
# kernel. The fluent builder returns itself from every call, so the chain is
# written as incremental reassignment rather than one backslash-continued chain.
_decay_assign_reg = (TBERegOp("AdamApplyOneWithDecayAssign")
                     .fusion_type("OPAQUE")
                     .async_flag(False)
                     .binfile_name("adam_apply_one_with_decay_assign.so")
                     .compute_cost(10)
                     .kernel_name("adam_apply_one_with_decay_assign")
                     .partial_flag(True))
# Eleven required inputs: five data tensors followed by six scalar coefficients.
for _pos, _label in enumerate(("input0", "input1", "input2", "input3", "input4",
                               "mul0_x", "mul1_x", "mul2_x", "mul3_x", "mul4_x",
                               "add2_y")):
    _decay_assign_reg = _decay_assign_reg.input(_pos, _label, False, "required", "all")
# Three required outputs.
for _pos, _label in enumerate(("output0", "output1", "output2")):
    _decay_assign_reg = _decay_assign_reg.output(_pos, _label, True, "required", "all")
# One dtype row per supported precision: all 14 slots (11 inputs + 3 outputs)
# share a single dtype/format in each row.
for _dtype in (DataType.F16_Default, DataType.F32_Default):
    _decay_assign_reg = _decay_assign_reg.dtype_format(*([_dtype] * 14))
adam_apply_one_with_decay_assign_op_info = _decay_assign_reg.get_op_info()


@op_info_register(adam_apply_one_with_decay_assign_op_info)
def _adam_apply_one_with_decay_assign_tbe():
    """AdamApplyOneWithDecayAssign TBE register."""
    return

View File

@ -2257,10 +2257,10 @@ class Diag(PrimitiveWithInfer):
:math:`output[i_1,..., i_k, i_1,..., i_k] = input_x[i_1,..., i_k]` and 0 everywhere else.
Inputs:
- **input_x** (Tensor) - The input tensor.
- **input_x** (Tensor) - The input tensor. The rank of the input tensor must be less than 5.
Outputs:
Tensor.
Tensor, has the same dtype as the `input_x`.
Examples:
>>> input_x = Tensor([1, 2, 3, 4])

View File

@ -1001,14 +1001,14 @@ class DepthwiseConv2dNative(PrimitiveWithInfer):
:math:`\text{in_channels} * \text{channel_multiplier}` channels.
Args:
channel_multiplier (int): The multipiler for the original output conv.
channel_multiplier (int): The multiplier for the original output conv. Its value must be greater than 0.
kernel_size (Union[int, tuple[int]]): The size of the conv kernel.
mode (int): 0 Math convolution, 1 cross-correlation convolution ,
2 deconvolution, 3 depthwise convolution. Default: 3.
pad_mode (str): "valid", "same", "pad" the mode to fill padding. Default: "valid".
pad (Union[int, tuple[int]]): The pad value to fill. Default: 0. If `pad` is one integer, the padding of
pad (Union[int, tuple[int]]): The pad value to fill. If `pad` is one integer, the padding of
top, bottom, left and right is same, equal to pad. If `pad` is tuple with four integer, the padding
of top, bottom, left and right equal to pad[0], pad[1], pad[2], pad[3] with corresponding.
of top, bottom, left and right equal to pad[0], pad[1], pad[2] and pad[3] correspondingly. Default: 0.
stride (Union[int, tuple[int]]): The stride to apply conv filter. Default: 1.
dilation (Union[int, tuple[int]]): Specifies the dilation rate to use for dilated convolution. Default: 1.
group (int): Splits input into groups. Default: 1.
@ -1026,7 +1026,7 @@ class DepthwiseConv2dNative(PrimitiveWithInfer):
>>> weight = Tensor(np.ones([1, 32, 3, 3]), mindspore.float32)
>>> depthwise_conv2d = P.DepthwiseConv2dNative(channel_multiplier = 3, kernel_size = (3, 3))
>>> output = depthwise_conv2d(input, weight)
>>> assert output.shape == (10, 96, 30, 30)
>>> output.shape == (10, 96, 30, 30)
"""
@prim_attr_register
@ -1850,7 +1850,7 @@ class DataFormatDimMap(PrimitiveWithInfer):
Inputs:
- **input_x** (Tensor) - A Tensor with each element as a dimension index in source data format.
Must be in the range [-4, 4). It's type is int32.
The suggested value is in the range [-4, 4). Its type is int32.
Outputs:
Tensor, has the same type as the `input_x`.