!19494 add ResizeBilinear ops for aicpu

Merge pull request !19494 from yanzhenxiang2020/add_resize_bilinear_aicpu
i-robot 2021-07-10 08:46:27 +00:00 committed by Gitee
commit cb555f4b6e
5 changed files with 140 additions and 2 deletions


@@ -62,8 +62,10 @@ constexpr auto kMaskedSelect = "MaskedSelect";
 constexpr auto kMaskedSelectGrad = "MaskedSelectGrad";
 constexpr auto kDynamicStitch = "DynamicStitch";
 constexpr auto kSearchSorted = "SearchSorted";
+constexpr auto kResizeBilinear = "ResizeBilinear";
+constexpr auto kResizeBilinearGrad = "ResizeBilinearGrad";
 const std::set<std::string> kCustAiCpuKernelOps{kIdentity, kMaskedSelect, kMaskedSelectGrad, kDynamicStitch,
-                                                kSearchSorted};
+                                                kSearchSorted, kResizeBilinear, kResizeBilinearGrad};
 const std::set<std::string> kCacheKernelOps{kUpdateCache, kCacheSwapTable, kSubAndFilter,
                                             kPadAndShift, kDropout3D, kDropout2D};


@@ -77,3 +77,5 @@ from .stack_push_pop import _stack_push_aicpu
 from .stack_push_pop import _stack_pop_aicpu
 from .stack_push_pop import _stack_destroy_aicpu
 from .ctc_greedy_decoder import _ctc_greedy_decoder_aicpu
+from .resize_bilinear import _resize_bilinear_aicpu
+from .resize_bilinear_grad import _resize_bilinear_grad_aicpu
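For context, these two imports act purely through their side effect: @op_info_register runs when the module is imported, so adding the lines to the package __init__ is what makes the new op info visible to the kernel selector. A minimal sketch of that registration-on-import pattern (simplified and hypothetical, not MindSpore's actual implementation):

# Simplified sketch: the decorator records the op info and hands the function
# back unchanged, so importing the defining module is what performs the
# registration. The registry name below is illustrative only.
_OP_INFO_REGISTRY = {}

def op_info_register(op_info):
    def register_decorator(func):
        _OP_INFO_REGISTRY[func.__name__] = op_info  # record at import time
        return func
    return register_decorator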


@@ -0,0 +1,32 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""ResizeBilinear op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType

resize_bilinear_op_info = AiCPURegOp("ResizeBilinear") \
    .fusion_type("OPAQUE") \
    .input(0, "input", "required") \
    .output(0, "output", "required") \
    .attr("align_corners", "bool") \
    .dtype_format(DataType.F16_Default, DataType.F32_Default) \
    .dtype_format(DataType.F32_Default, DataType.F32_Default) \
    .get_op_info()


@op_info_register(resize_bilinear_op_info)
def _resize_bilinear_aicpu():
    """ResizeBilinear AiCPU register"""
    return
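The expected values in the test at the end of this change follow the legacy index mapping src = dst * (in_size / out_size) when align_corners is false (no half-pixel offset). A self-contained NumPy sketch of the forward computation under that assumption; resize_bilinear_ref is a hypothetical reference helper, not part of this change:

import numpy as np

def resize_bilinear_ref(x, out_h, out_w, align_corners=False):
    """Reference bilinear resize over an NCHW array, assumed legacy mapping."""
    n, c, in_h, in_w = x.shape
    scale_h = (in_h - 1) / (out_h - 1) if align_corners and out_h > 1 else in_h / out_h
    scale_w = (in_w - 1) / (out_w - 1) if align_corners and out_w > 1 else in_w / out_w
    out = np.empty((n, c, out_h, out_w), dtype=np.float32)
    for i in range(out_h):
        sy = i * scale_h
        y0 = int(sy)                   # top source row
        y1 = min(y0 + 1, in_h - 1)     # bottom source row, clamped at the edge
        fy = sy - y0                   # vertical interpolation fraction
        for j in range(out_w):
            sx = j * scale_w
            x0 = int(sx)
            x1 = min(x0 + 1, in_w - 1)
            fx = sx - x0
            top = (1 - fx) * x[:, :, y0, x0] + fx * x[:, :, y0, x1]
            bottom = (1 - fx) * x[:, :, y1, x0] + fx * x[:, :, y1, x1]
            out[:, :, i, j] = (1 - fy) * top + fy * bottom
    return out

# Reproduces y_expect from the test below:
x = np.array([[[[1, 2, 3, 4, 5], [2, 4, 6, 4, 9]]]], dtype=np.float32)
assert np.allclose(resize_bilinear_ref(x, 2, 4),
                   [[[[1.0, 2.25, 3.5, 4.75], [2.0, 4.5, 5.0, 7.75]]]])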


@@ -0,0 +1,33 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""ResizeBilinearGrad op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType

resize_bilinear_grad_op_info = AiCPURegOp("ResizeBilinearGrad") \
    .fusion_type("OPAQUE") \
    .input(0, "output_grad", "required") \
    .input(1, "input", "required") \
    .output(0, "input_grad", "required") \
    .attr("align_corners", "bool") \
    .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \
    .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \
    .get_op_info()


@op_info_register(resize_bilinear_grad_op_info)
def _resize_bilinear_grad_aicpu():
    """ResizeBilinearGrad AiCPU register"""
    return
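The backward kernel is the transpose of the forward interpolation: each output-gradient value is scattered onto the four source pixels with the same bilinear weights. A NumPy sketch under the same assumed mapping as above; resize_bilinear_grad_ref is likewise a hypothetical helper:

import numpy as np

def resize_bilinear_grad_ref(dy, in_h, in_w, align_corners=False):
    """Scatter NCHW output gradients back through the bilinear weights."""
    n, c, out_h, out_w = dy.shape
    scale_h = (in_h - 1) / (out_h - 1) if align_corners and out_h > 1 else in_h / out_h
    scale_w = (in_w - 1) / (out_w - 1) if align_corners and out_w > 1 else in_w / out_w
    dx = np.zeros((n, c, in_h, in_w), dtype=np.float32)
    for i in range(out_h):
        sy = i * scale_h
        y0 = int(sy)
        y1 = min(y0 + 1, in_h - 1)
        fy = sy - y0
        for j in range(out_w):
            sx = j * scale_w
            x0 = int(sx)
            x1 = min(x0 + 1, in_w - 1)
            fx = sx - x0
            g = dy[:, :, i, j]
            # Accumulate onto the four taps used by the forward pass.
            dx[:, :, y0, x0] += (1 - fy) * (1 - fx) * g
            dx[:, :, y0, x1] += (1 - fy) * fx * g
            dx[:, :, y1, x0] += fy * (1 - fx) * g
            dx[:, :, y1, x1] += fy * fx * g
    return dx

# Reproduces dx_expect from the test below:
dy = np.array([[[[1, 2, 3, 4], [2, 4, 6, 4]]]], dtype=np.float32)
assert np.allclose(resize_bilinear_grad_ref(dy, 2, 5),
                   [[[[1.0, 1.5, 2.0, 2.5, 3.0], [2.0, 3.0, 4.0, 4.0, 3.0]]]])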


@@ -0,0 +1,69 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.ops.composite import GradOperation

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        self.resize = P.ResizeBilinear((2, 4), False)

    def construct(self, x):
        return self.resize(x)


class Grad(nn.Cell):
    def __init__(self, network):
        super(Grad, self).__init__()
        self.grad = GradOperation(get_all=True, sens_param=True)
        self.network = network
        self.network.set_train()

    def construct(self, x, y):
        return self.grad(self.network)(x, y)


def net_float16():
    tensor = Tensor([[[[1, 2, 3, 4, 5], [2, 4, 6, 4, 9]]]], mindspore.float16)
    net = Net()
    output = net(tensor)
    return output


def test_net_grad():
    net = Grad(Net())
    x = Tensor([[[[1, 2, 3, 4, 5], [2, 4, 6, 4, 9]]]], mindspore.float16)
    y = net_float16()
    dy = Tensor([[[[1, 2, 3, 4], [2, 4, 6, 4]]]], mindspore.float16)
    # ResizeBilinear promotes a float16 input to a float32 output (see the
    # dtype_format registrations above), so the sens gradient must be float32.
    dy = P.Cast()(dy, mindspore.float32)
    dx = net(x, dy)
    print("forward input: ", x)
    print("forward output: ", y)
    print("backward input: ", dy)
    print("backward output: ", dx)
    y_expect = np.array([[[[1.0, 2.25, 3.5, 4.75],
                           [2.0, 4.5, 5.0, 7.75]]]])
    dx_expect = np.array([[[[1.0, 1.5, 2.0, 2.5, 3.0],
                            [2.0, 3.0, 4.0, 4.0, 3.0]]]])
    assert np.array_equal(y_expect, y.asnumpy())
    assert np.array_equal(dx_expect, dx[0].asnumpy())
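A quick hand check of one entry in each expected array, assuming the legacy mapping sketched above:

# Forward: output column 1 of row 0 samples sx = 1 * (5 / 4) = 1.25,
# i.e. source pixels x[1]=2 and x[2]=3 with fraction 0.25.
assert (1 - 0.25) * 2 + 0.25 * 3 == 2.25   # y_expect[0][0][0][1]
# Backward: the same tap scatters dy = 2 with weight 0.75 onto pixel 1,
# the only contribution that pixel receives.
assert 0.75 * 2 == 1.5                     # dx_expect[0][0][0][1]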