!1289 add op register info for sin and cumsum

Merge pull request !1289 from GongLiyao/add_vm_support_sin_cumsum
commit 817b0e4a59 by mindspore-ci-bot, 2020-05-21 20:25:53 +08:00, committed by Gitee
9 changed files with 131 additions and 12 deletions

View File

@@ -91,7 +91,8 @@ static std::map<string, string> tbe_func_adapter_map = {
{"s_gd", "sgd"},
{"l_ars_update", "lars_v2_update"},
{"n_ms_with_mask", "nms_with_mask"},
-{"square_sum_all", "square_sum_all"}};
+{"square_sum_all", "square_sum_all"},
+{"cum_sum", "cumsum_d"}};
void TbeAdapter::NormalizeFuncName(std::string *func_name) {
if (func_name == nullptr) {
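
The adapter map above remaps MindSpore op names (already lowered to snake_case) to the TBE kernel names when the two differ; the new entry routes "cum_sum" to the cumsum_d kernel. A rough Python illustration of the lookup, not part of this diff (the real logic is the C++ TbeAdapter::NormalizeFuncName; the regex below is an assumed stand-in for its CamelCase-to-snake_case step):

import re

# Assumed miniature of tbe_func_adapter_map, holding only the entry this commit adds.
tbe_func_adapter_map = {"cum_sum": "cumsum_d"}

def normalize_func_name(name):
    # CamelCase -> snake_case, e.g. "CumSum" -> "cum_sum"
    snake = re.sub(r"(?<!^)(?=[A-Z])", "_", name).lower()
    # Remap to the TBE kernel name when an adapter entry exists.
    return tbe_func_adapter_map.get(snake, snake)

print(normalize_func_name("CumSum"))  # -> "cumsum_d"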

View File

@@ -166,6 +166,7 @@ const PrimitivePtr kPrimSquare = std::make_shared<Primitive>("Square");
const PrimitivePtr kPrimEqual = std::make_shared<Primitive>("Equal");
const PrimitivePtr kPrimLess = std::make_shared<Primitive>("Less");
const PrimitivePtr kPrimLessEqual = std::make_shared<Primitive>("LessEqual");
+const PrimitivePtr kPrimCumSum = std::make_shared<Primitive>("CumSum");
// NN
const PrimitivePtr kPrimFlatten = std::make_shared<Primitive>("Flatten");

View File

@@ -172,6 +172,7 @@ extern const PrimitivePtr kPrimSquare;
extern const PrimitivePtr kPrimEqual;
extern const PrimitivePtr kPrimLess;
extern const PrimitivePtr kPrimLessEqual;
+extern const PrimitivePtr kPrimCumSum;
// NN
extern const PrimitivePtr kPrimFlatten;

View File

@@ -40,6 +40,7 @@ ConstInputToAttrInfoRegistry::ConstInputToAttrInfoRegistry() {
Register(prim::kPrimUnsortedSegmentSum->name(), {2});
Register(prim::kPrimOneHot->name(), {1});
Register(prim::kPrimConcat->name(), {0});
+Register(prim::kPrimCumSum->name(), {1});
Register(kUnsortedSegmentProdOpName, {2});
Register(kUnsortedSegmentMinOpName, {2});
Register(kSimpleMeanGradOpName, {1});
@@ -60,7 +61,6 @@ ConstInputToAttrInfoRegistry::ConstInputToAttrInfoRegistry() {
Register(kResizeNearestNeighborV2OpName, {1});
Register(kResizeNearestNeighborV2GradOpName, {1});
Register(kApplyRMSPropOpname, {4, 5, 6});
-Register(kCumsumOpName, {1});
Register(kResizeBilinearV2OpName, {1});
Register(kReduceProdOpName, {1});
Register(kCumprodOpName, {1});
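
Registering kPrimCumSum with {1} asks the const-input-to-attr pass to fold CumSum's second input, the constant axis, into a node attribute, matching the cumsum_d kernel, which takes axis as an attribute rather than a tensor input; the older string-keyed kCumsumOpName registration is dropped as redundant. A minimal plain-Python sketch of the transformation, not MindSpore internals (the attribute name "axis" is taken from the TBE registration later in this diff):

def const_input_to_attr(op_name, inputs, attrs, const_indexes):
    # Move the constant inputs at const_indexes into the attr dict.
    kept, moved = [], {}
    for i, value in enumerate(inputs):
        if i in const_indexes:
            moved["axis"] = value  # attr name assumed for CumSum's input 1
        else:
            kept.append(value)
    return op_name, kept, {**attrs, **moved}

print(const_input_to_attr("CumSum", ["x", 0], {}, {1}))
# -> ('CumSum', ['x'], {'axis': 0})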

View File

@@ -191,3 +191,6 @@ from .prelu import _prelu_tbe
from .prelu_grad import _prelu_grad_tbe
from .binary_cross_entropy import _binary_cross_entropy_tbe
from .binary_cross_entropy_grad import _binary_cross_entropy_grad_tbe
+from .sin import _sin_tbe
+from .cos import _cos_tbe
+from .cum_sum import _cum_sum_tbe

View File

@@ -0,0 +1,37 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Cos op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

cos_op_info = TBERegOp("Cos") \
    .fusion_type("ELEMWISE") \
    .async_flag(False) \
    .binfile_name("cos.so") \
    .compute_cost(10) \
    .kernel_name("cos") \
    .partial_flag(True) \
    .op_pattern("formatAgnostic") \
    .input(0, "x", False, "required", "all") \
    .output(0, "y", False, "required", "all") \
    .dtype_format(DataType.F16_5HD, DataType.F16_5HD) \
    .dtype_format(DataType.F32_5HD, DataType.F32_5HD) \
    .get_op_info()


@op_info_register(cos_op_info)
def _cos_tbe():
    """Cos TBE register"""
    return
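
For reference, a usage sketch of the op this file registers, not part of this diff (assumes an Ascend build where this TBE kernel is selected):

import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P

cos = P.Cos()
x = Tensor(np.array([0.0, np.pi / 2], np.float32))
print(cos(x))  # approximately [1.0, 0.0]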

View File

@@ -0,0 +1,42 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""CumSum op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

cum_sum_op_info = TBERegOp("CumSum") \
    .fusion_type("OPAQUE") \
    .async_flag(False) \
    .binfile_name("cumsum_d.so") \
    .compute_cost(10) \
    .kernel_name("cumsum_d") \
    .partial_flag(True) \
    .attr("axis", "optional", "int", "all", "0") \
    .attr("exclusive", "optional", "bool", "true,false", "false") \
    .attr("reverse", "optional", "bool", "true,false", "false") \
    .input(0, "x", False, "required", "all") \
    .output(0, "y", False, "required", "all") \
    .dtype_format(DataType.I32_Default, DataType.I32_Default) \
    .dtype_format(DataType.F32_Default, DataType.F32_Default) \
    .dtype_format(DataType.F16_Default, DataType.F16_Default) \
    .dtype_format(DataType.I8_Default, DataType.I8_Default) \
    .dtype_format(DataType.U8_Default, DataType.U8_Default) \
    .get_op_info()


@op_info_register(cum_sum_op_info)
def _cum_sum_tbe():
    """CumSum TBE register"""
    return
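
A usage sketch, not part of this diff: at the Python level the axis is still passed as an input, and the const-input-to-attr registration above folds it into the cumsum_d attribute at compile time (assumes an Ascend build where this kernel is selected):

import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P

cumsum = P.CumSum()  # exclusive and reverse default to False
x = Tensor(np.array([[3, 4], [1, 6]], np.float32))
print(cumsum(x, 0))  # axis 0; expected [[3. 4.] [4. 10.]]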

View File

@@ -0,0 +1,37 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sin op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

sin_op_info = TBERegOp("Sin") \
    .fusion_type("ELEMWISE") \
    .async_flag(False) \
    .binfile_name("sin.so") \
    .compute_cost(10) \
    .kernel_name("sin") \
    .partial_flag(True) \
    .op_pattern("formatAgnostic") \
    .input(0, "x", False, "required", "all") \
    .output(0, "y", False, "required", "all") \
    .dtype_format(DataType.F16_5HD, DataType.F16_5HD) \
    .dtype_format(DataType.F32_5HD, DataType.F32_5HD) \
    .get_op_info()


@op_info_register(sin_op_info)
def _sin_tbe():
    """Sin TBE register"""
    return
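
A usage sketch mirroring the Cos example, with numpy as a reference for the expected values (not part of this diff):

import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P

sin = P.Sin()
x = Tensor(np.array([0.0, np.pi / 6, np.pi / 2], np.float32))
print(sin(x))               # approximately [0.0, 0.5, 1.0]
print(np.sin(x.asnumpy()))  # numpy reference for the same inputs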

View File

@@ -504,10 +504,9 @@ test_case_math_ops = [
'desc_inputs': [[4]],
'desc_bprop': [[4]]}),
('CumSum', {
-'block': P.CumSum(),
-'desc_const': [0],
-'desc_inputs': [Tensor(np.array([[3, 4], [1, 6]]).astype(np.float16))],
-'desc_bprop': [Tensor(np.array([[3, 4], [4, 10]]).astype(np.float16))]}),
+'block': CumSumNet(),
+'desc_inputs': [Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float32))],
+'desc_bprop': [Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float32))]}),
('ReduceSum_3', {
'block': P.ReduceSum(),
'desc_const': [0],
@@ -579,6 +578,10 @@ test_case_math_ops = [
'desc_inputs': [Tensor(np.array([0, 1, 4, 5]).astype(np.float32)),
Tensor(np.array([1, 1, 3, 7]).astype(np.float32))],
'skip': ['backward']}),
+('Cos', {
+'block': P.Cos(),
+'desc_inputs': [[2, 3]],
+'desc_bprop': [[2, 3]]}),
]
test_case_nn_ops = [
@@ -885,12 +888,6 @@ test_case_nn_ops = [
'desc_inputs': [Tensor(np.array([[128, 32, 32, 64], [128, 32, 32, 64]]).astype(np.float16))],
'desc_bprop': [Tensor(np.array([[128, 32, 32, 64], [128, 32, 32, 64]]).astype(np.float16))],
'skip': ['backward']}),
-('CumSumNet', {
-'block': CumSumNet(),
-'desc_const': [0],
-'desc_inputs': [Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float16))],
-'desc_bprop': [
-Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float16))]}),
('OneHot', {
'block': P.OneHot(),
'desc_const': [3, Tensor(1.0, mstype.float32), Tensor(0.0, mstype.float32)],
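
CumSumNet is defined elsewhere in the test file and is not shown in this diff; a minimal sketch consistent with the rewritten 'CumSum' case (the axis is baked into the cell at construction, which is why that entry no longer needs desc_const) might look like:

import mindspore.nn as nn
from mindspore.ops import operations as P

class CumSumNet(nn.Cell):
    def __init__(self, axis=0):  # axis value assumed to match the test data
        super(CumSumNet, self).__init__()
        self.axis = axis
        self.op = P.CumSum()

    def construct(self, x):
        return self.op(x, self.axis)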