diff --git a/mindspore/ccsrc/backend/kernel_compiler/aicpu/aicpu_util.h b/mindspore/ccsrc/backend/kernel_compiler/aicpu/aicpu_util.h
index b3eaefdc4d7..00afeee1fc2 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/aicpu/aicpu_util.h
+++ b/mindspore/ccsrc/backend/kernel_compiler/aicpu/aicpu_util.h
@@ -62,8 +62,10 @@ constexpr auto kMaskedSelect = "MaskedSelect";
 constexpr auto kMaskedSelectGrad = "MaskedSelectGrad";
 constexpr auto kDynamicStitch = "DynamicStitch";
 constexpr auto kSearchSorted = "SearchSorted";
-const std::set<std::string> kCustAiCpuKernelOps{kIdentity, kMaskedSelect, kMaskedSelectGrad, kDynamicStitch,
-                                                kSearchSorted};
+constexpr auto kResizeBilinear = "ResizeBilinear";
+constexpr auto kResizeBilinearGrad = "ResizeBilinearGrad";
+const std::set<std::string> kCustAiCpuKernelOps{kIdentity, kMaskedSelect, kMaskedSelectGrad, kDynamicStitch,
+                                                kSearchSorted, kResizeBilinear, kResizeBilinearGrad};
 const std::set<std::string> kCacheKernelOps{kUpdateCache, kCacheSwapTable, kSubAndFilter, kPadAndShift, kDropout3D,
                                             kDropout2D};
diff --git a/mindspore/ops/_op_impl/aicpu/__init__.py b/mindspore/ops/_op_impl/aicpu/__init__.py
index 53c6b61a37b..0facef6edf2 100644
--- a/mindspore/ops/_op_impl/aicpu/__init__.py
+++ b/mindspore/ops/_op_impl/aicpu/__init__.py
@@ -77,3 +77,5 @@ from .stack_push_pop import _stack_push_aicpu
 from .stack_push_pop import _stack_pop_aicpu
 from .stack_push_pop import _stack_destroy_aicpu
 from .ctc_greedy_decoder import _ctc_greedy_decoder_aicpu
+from .resize_bilinear import _resize_bilinear_aicpu
+from .resize_bilinear_grad import _resize_bilinear_grad_aicpu
diff --git a/mindspore/ops/_op_impl/aicpu/resize_bilinear.py b/mindspore/ops/_op_impl/aicpu/resize_bilinear.py
new file mode 100644
index 00000000000..7575d943fd0
--- /dev/null
+++ b/mindspore/ops/_op_impl/aicpu/resize_bilinear.py
@@ -0,0 +1,32 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the
License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""ResizeBilinear op"""
+from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType
+
+resize_bilinear_op_info = AiCPURegOp("ResizeBilinear") \
+    .fusion_type("OPAQUE") \
+    .input(0, "input", "required") \
+    .output(0, "output", "required") \
+    .attr("align_corners", "bool") \
+    .dtype_format(DataType.F16_Default, DataType.F32_Default) \
+    .dtype_format(DataType.F32_Default, DataType.F32_Default) \
+    .get_op_info()
+
+
+@op_info_register(resize_bilinear_op_info)
+def _resize_bilinear_aicpu():
+    """ResizeBilinear AiCPU register"""
+    return
diff --git a/mindspore/ops/_op_impl/aicpu/resize_bilinear_grad.py b/mindspore/ops/_op_impl/aicpu/resize_bilinear_grad.py
new file mode 100644
index 00000000000..30dbe9bf50c
--- /dev/null
+++ b/mindspore/ops/_op_impl/aicpu/resize_bilinear_grad.py
@@ -0,0 +1,33 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""ResizeBilinearGrad op"""
+from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType
+
+resize_bilinear_grad_op_info = AiCPURegOp("ResizeBilinearGrad") \
+    .fusion_type("OPAQUE") \
+    .input(0, "output_grad", "required") \
+    .input(1, "input", "required") \
+    .output(0, "input_grad", "required") \
+    .attr("align_corners", "bool") \
+    .dtype_format(DataType.F32_Default, DataType.F16_Default, DataType.F16_Default) \
+    .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \
+    .get_op_info()
+
+
+@op_info_register(resize_bilinear_grad_op_info)
+def _resize_bilinear_grad_aicpu():
+    """ResizeBilinearGrad AiCPU register"""
+    return
diff --git a/tests/st/ops/ascend/test_aicpu_ops/test_resize_bilinear.py b/tests/st/ops/ascend/test_aicpu_ops/test_resize_bilinear.py
new file mode 100644
index 00000000000..6913ad27134
--- /dev/null
+++ b/tests/st/ops/ascend/test_aicpu_ops/test_resize_bilinear.py
@@ -0,0 +1,69 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+import numpy as np
+import mindspore
+import mindspore.context as context
+import mindspore.nn as nn
+from mindspore import Tensor
+from mindspore.ops import operations as P
+from mindspore.ops.composite import GradOperation
+
+context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
+
+class Net(nn.Cell):
+    def __init__(self):
+        super(Net, self).__init__()
+        self.resize = P.ResizeBilinear((2, 4), False)
+
+    def construct(self, x):
+        return self.resize(x)
+
+
+class Grad(nn.Cell):
+    def __init__(self, network):
+        super(Grad, self).__init__()
+        self.grad = GradOperation(get_all=True, sens_param=True)
+        self.network = network
+        self.network.set_train()
+
+    def construct(self, x, y):
+        return self.grad(self.network)(x, y)
+
+
+def net_float16():
+    tensor = Tensor([[[[1, 2, 3, 4, 5], [2, 4, 6, 4, 9]]]], mindspore.float16)
+    net = Net()
+    output = net(tensor)
+    return output
+
+
+def test_net_grad():
+    net = Grad(Net())
+    x = Tensor([[[[1, 2, 3, 4, 5], [2, 4, 6, 4, 9]]]], mindspore.float16)
+    y = net_float16()
+    dy = Tensor([[[[1, 2, 3, 4], [2, 4, 6, 4]]]], mindspore.float16)
+    dy = P.Cast()(dy, mindspore.float32)
+    dx = net(x, dy)
+    print("forward input: ", x)
+    print("forward output: ", y)
+    print("backward input: ", dy)
+    print("backward output: ", dx)
+
+    y_expect = np.array([[[[1.0, 2.25, 3.5, 4.75],
+                           [2.0, 4.5, 5.0, 7.75]]]])
+    dx_expect = np.array([[[[1.0, 1.5, 2.0, 2.5, 3.0],
+                            [2.0, 3.0, 4.0, 4.0, 3.0]]]])
+    assert np.array_equal(y_expect, y.asnumpy())
+    assert np.array_equal(dx_expect, dx[0].asnumpy())