From 47756fb031cb736a3b523de386e22e8a65226473 Mon Sep 17 00:00:00 2001
From: lihongkang <lihongkang1@huawei.com>
Date: Tue, 29 Sep 2020 10:50:43 +0800
Subject: [PATCH] fix bugs

---
 mindspore/nn/layer/image.py          |  2 +-
 mindspore/ops/operations/math_ops.py |  6 ------
 mindspore/ops/operations/nn_ops.py   | 10 +++++++---
 3 files changed, 8 insertions(+), 10 deletions(-)

diff --git a/mindspore/nn/layer/image.py b/mindspore/nn/layer/image.py
index d5d9458b26a..159749c9606 100644
--- a/mindspore/nn/layer/image.py
+++ b/mindspore/nn/layer/image.py
@@ -271,7 +271,7 @@ class MSSSIM(Cell):
         - **img2** (Tensor) - The second image batch with format 'NCHW'. It must be the same shape and dtype as img1.
 
     Outputs:
-        Tensor, has the same dtype as img1. It is a 1-D tensor with shape N, where N is the batch num of img1.
+        Tensor, the value is in range [0, 1]. It is a 1-D tensor with shape N, where N is the batch num of img1.
 
     Examples:
         >>> net = nn.MSSSIM(power_factors=(0.033, 0.033, 0.033))
diff --git a/mindspore/ops/operations/math_ops.py b/mindspore/ops/operations/math_ops.py
index 4f7664003c3..47320e514ac 100644
--- a/mindspore/ops/operations/math_ops.py
+++ b/mindspore/ops/operations/math_ops.py
@@ -1763,9 +1763,6 @@ class Div(_MathBinaryOp):
         Tensor, the shape is the same as the one after broadcasting,
         and the data type is the one with higher precision or higher digits among the two inputs.
 
-    Raises:
-        ValueError: When `input_x` and `input_y` do not have the same dtype.
-
     Examples:
         >>> input_x = Tensor(np.array([-4.0, 5.0, 6.0]), mindspore.float32)
         >>> input_y = Tensor(np.array([3.0, 2.0, 3.0]), mindspore.float32)
@@ -1804,9 +1801,6 @@ class DivNoNan(_MathBinaryOp):
         Tensor, the shape is the same as the one after broadcasting,
         and the data type is the one with higher precision or higher digits among the two inputs.
 
-    Raises:
-        ValueError: When `input_x` and `input_y` do not have the same dtype.
-
     Examples:
         >>> input_x = Tensor(np.array([-1.0, 0., 1.0, 5.0, 6.0]), mindspore.float32)
         >>> input_y = Tensor(np.array([0., 0., 0., 2.0, 3.0]), mindspore.float32)
diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py
index e85f2797522..fc0f66c3d6d 100644
--- a/mindspore/ops/operations/nn_ops.py
+++ b/mindspore/ops/operations/nn_ops.py
@@ -2420,8 +2420,8 @@ class DropoutDoMask(PrimitiveWithInfer):
         - **mask** (Tensor) - The mask to be applied on `input_x`, which is the output of `DropoutGenMask`. And the
           shape of `input_x` must be the same as the value of `DropoutGenMask`'s input `shape`. If input wrong `mask`,
          the output of `DropoutDoMask` are unpredictable.
-        - **keep_prob** (Tensor) - The keep rate, greater than 0 and less equal than 1, e.g. keep_prob = 0.9,
-          means dropping out 10% of input units. The value of `keep_prob` is the same as the input `keep_prob` of
+        - **keep_prob** (Union[Tensor, float]) - The keep rate, greater than 0 and less equal than 1, e.g. keep_prob =
+          0.9, means dropping out 10% of input units. The value of `keep_prob` is the same as the input `keep_prob` of
           `DropoutGenMask`.
 
     Outputs:
@@ -2465,7 +2465,11 @@ class DropoutDoMask(PrimitiveWithInfer):
         keep_prob_v = keep_prob['value']
         if keep_prob_v is not None:
-            validator.check_number_range('keep_prob', keep_prob_v.asnumpy(), 0, 1, Rel.INC_BOTH, self.name)
+            if isinstance(keep_prob['dtype'], type(mstype.tensor)):
+                validator.check_number_range('keep_prob', keep_prob_v.asnumpy(), 0, 1, Rel.INC_BOTH, self.name)
+            else:
+                validator.check_value_type("keep_prob", keep_prob_v, [float], self.name)
+                validator.check_number_range('keep_prob', keep_prob_v, 0, 1, Rel.INC_BOTH, self.name)
 
         out = {'shape': input_x_shape,
                'dtype': input_x['dtype'],
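
For reference, a minimal usage sketch of the DropoutDoMask change above. It is not part of the patch: it assumes the mindspore.ops.operations API of MindSpore around this release (DropoutGenMask and DropoutDoMask primitives), and the final float call is only what the relaxed keep_prob validation in __infer__ appears intended to allow.

    import numpy as np
    import mindspore
    from mindspore import Tensor
    from mindspore.ops import operations as P

    # Build a dropout mask for a (2, 2, 3) input with keep probability 0.5.
    input_x = Tensor(np.ones([2, 2, 3]), mindspore.float32)
    shape = (2, 2, 3)
    keep_prob = Tensor(0.5, mindspore.float32)
    mask = P.DropoutGenMask()(shape, keep_prob)

    do_mask = P.DropoutDoMask()
    # Tensor keep_prob: accepted both before and after this patch.
    out_tensor = do_mask(input_x, mask, keep_prob)
    # Plain float keep_prob: the case the new validator branch checks
    # (assumption -- behaviour inferred from the __infer__ change above).
    out_float = do_mask(input_x, mask, 0.5)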