From 64a6769d3feafcd9ce8d5d721b88d7bca085458a Mon Sep 17 00:00:00 2001 From: danishnxt Date: Wed, 25 Nov 2020 14:38:30 -0500 Subject: [PATCH] Update Square Op to support Dynamic Shape --- mindspore/ops/operations/math_ops.py | 9 ++-- tests/st/ops/gpu/test_square_op.py | 74 ++++++++++++++++++++++++++++ 2 files changed, 77 insertions(+), 6 deletions(-) create mode 100644 tests/st/ops/gpu/test_square_op.py diff --git a/mindspore/ops/operations/math_ops.py b/mindspore/ops/operations/math_ops.py index b8cfd8ec733..fcb009955a5 100644 --- a/mindspore/ops/operations/math_ops.py +++ b/mindspore/ops/operations/math_ops.py @@ -1319,7 +1319,7 @@ class SquaredDifference(_MathBinaryOp): return _MathBinaryOp.do_infer_dtype(x_dtype, y_dtype, valid_type, self.name) -class Square(PrimitiveWithInfer): +class Square(PrimitiveWithCheck): """ Returns square of a tensor element-wise. @@ -1345,12 +1345,9 @@ class Square(PrimitiveWithInfer): """Initialize Square""" self.init_prim_io_names(inputs=['input_x'], outputs=['output']) - def infer_shape(self, x_shape): - return x_shape - - def infer_dtype(self, x_dtype): + def __check__(self, x): + x_dtype = x["dtype"] validator.check_tensor_dtype_valid("x", x_dtype, mstype.number_type, self.name) - return x_dtype def infer_value(self, x): if x is not None: diff --git a/tests/st/ops/gpu/test_square_op.py b/tests/st/ops/gpu/test_square_op.py new file mode 100644 index 00000000000..588f1ff62c8 --- /dev/null +++ b/tests/st/ops/gpu/test_square_op.py @@ -0,0 +1,74 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

import numpy as np
import pytest

import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops.operations import _inner_ops as inner
from mindspore.ops import operations as P


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_square_normal():
    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
    x_np = np.random.rand(2, 3, 4, 4).astype(np.float32)
    output_ms = P.Square()(Tensor(x_np))
    output_np = np.square(x_np)
    assert np.allclose(output_ms.asnumpy(), output_np)
    x_np = np.random.rand(2, 3, 1, 5, 4, 4).astype(np.float32)
    output_ms = P.Square()(Tensor(x_np))
    output_np = np.square(x_np)
    assert np.allclose(output_ms.asnumpy(), output_np)
    x_np = np.random.rand(2,).astype(np.float32)
    output_ms = P.Square()(Tensor(x_np))
    output_np = np.square(x_np)
    assert np.allclose(output_ms.asnumpy(), output_np)


# Dynamic Shape Testing
class SquareNetDynamic(nn.Cell):
    def __init__(self):
        super(SquareNetDynamic, self).__init__()
        self.square = P.Square()
        self.gpu_convert_to_dynamic_shape = inner.GpuConvertToDynamicShape()

    def construct(self, x):
        x_dyn = self.gpu_convert_to_dynamic_shape(x)
        return self.square(x_dyn)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_square_dynamic():
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    net = SquareNetDynamic()
    x_np = np.random.rand(1, 3, 4, 4, 1).astype(np.float32)
    output_ms = net(Tensor(x_np))
    output_np = np.square(x_np)
    assert np.allclose(output_ms.asnumpy(), output_np)
    x_np = np.random.rand(2, 3, 4, 4, 8, 9).astype(np.float16)
    output_ms = net(Tensor(x_np))
    output_np = np.square(x_np)
    assert np.allclose(output_ms.asnumpy(), output_np)
    x_np = np.random.rand(1).astype(np.float32)
    output_ms = net(Tensor(x_np))
    output_np = np.square(x_np)
    assert np.allclose(output_ms.asnumpy(), output_np)