!7006 fix bugs of op DivNoNan, Div, MSSSIM and DropoutDoMask
Merge pull request !7006 from lihongkang/v2_master
commit 77978d0921
@@ -271,7 +271,7 @@ class MSSSIM(Cell):
         - **img2** (Tensor) - The second image batch with format 'NCHW'. It must be the same shape and dtype as img1.
 
     Outputs:
-        Tensor, has the same dtype as img1. It is a 1-D tensor with shape N, where N is the batch num of img1.
+        Tensor, the value is in range [0, 1]. It is a 1-D tensor with shape N, where N is the batch num of img1.
 
     Examples:
         >>> net = nn.MSSSIM(power_factors=(0.033, 0.033, 0.033))
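For reference, a minimal sketch of how the clarified [0, 1] output contract could be exercised, assuming the nn.MSSSIM interface shown in the docstring above; the batch size, image size, and max_val value here are illustrative:

>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, nn
>>> net = nn.MSSSIM(max_val=1.0, power_factors=(0.033, 0.033, 0.033))
>>> img1 = Tensor(np.random.random((8, 3, 128, 128)), mindspore.float32)  # NCHW batch
>>> img2 = Tensor(np.random.random((8, 3, 128, 128)), mindspore.float32)  # same shape and dtype as img1
>>> score = net(img1, img2)  # 1-D tensor of shape (8,), each value expected in [0, 1]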
@@ -1765,9 +1765,6 @@ class Div(_MathBinaryOp):
         Tensor, the shape is the same as the one after broadcasting,
         and the data type is the one with higher precision or higher digits among the two inputs.
 
-    Raises:
-        ValueError: When `input_x` and `input_y` do not have the same dtype.
-
     Examples:
         >>> input_x = Tensor(np.array([-4.0, 5.0, 6.0]), mindspore.float32)
         >>> input_y = Tensor(np.array([3.0, 2.0, 3.0]), mindspore.float32)
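For context, a sketch of running the Div example above end to end; P.Div refers to mindspore.ops.operations, and the commented values are the expected element-wise quotients, subject to backend formatting:

>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor
>>> from mindspore.ops import operations as P
>>> div = P.Div()
>>> input_x = Tensor(np.array([-4.0, 5.0, 6.0]), mindspore.float32)
>>> input_y = Tensor(np.array([3.0, 2.0, 3.0]), mindspore.float32)
>>> div(input_x, input_y)  # element-wise quotient, roughly [-1.3333, 2.5, 2.0]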
@@ -1806,9 +1803,6 @@ class DivNoNan(_MathBinaryOp):
         Tensor, the shape is the same as the one after broadcasting,
         and the data type is the one with higher precision or higher digits among the two inputs.
 
-    Raises:
-        ValueError: When `input_x` and `input_y` do not have the same dtype.
-
     Examples:
         >>> input_x = Tensor(np.array([-1.0, 0., 1.0, 5.0, 6.0]), mindspore.float32)
         >>> input_y = Tensor(np.array([0., 0., 0., 2.0, 3.0]), mindspore.float32)
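Similarly for DivNoNan, whose defining behavior is returning 0 wherever the divisor is 0; a sketch using the docstring's own inputs, with the expected result noted as a comment:

>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor
>>> from mindspore.ops import operations as P
>>> div_no_nan = P.DivNoNan()
>>> input_x = Tensor(np.array([-1.0, 0., 1.0, 5.0, 6.0]), mindspore.float32)
>>> input_y = Tensor(np.array([0., 0., 0., 2.0, 3.0]), mindspore.float32)
>>> div_no_nan(input_x, input_y)  # zero divisors yield 0: roughly [0., 0., 0., 2.5, 2.0]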
@@ -2418,8 +2418,8 @@ class DropoutDoMask(PrimitiveWithInfer):
         - **mask** (Tensor) - The mask to be applied on `input_x`, which is the output of `DropoutGenMask`. And the
           shape of `input_x` must be the same as the value of `DropoutGenMask`'s input `shape`. If input wrong `mask`,
           the output of `DropoutDoMask` are unpredictable.
-        - **keep_prob** (Tensor) - The keep rate, greater than 0 and less equal than 1, e.g. keep_prob = 0.9,
-          means dropping out 10% of input units. The value of `keep_prob` is the same as the input `keep_prob` of
+        - **keep_prob** (Union[Tensor, float]) - The keep rate, greater than 0 and less equal than 1, e.g. keep_prob =
+          0.9, means dropping out 10% of input units. The value of `keep_prob` is the same as the input `keep_prob` of
           `DropoutGenMask`.
 
     Outputs:
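Since the docstring now allows `keep_prob` to be either a Tensor or a float, a minimal usage sketch of both call styles, assuming the usual DropoutGenMask/DropoutDoMask pairing; the shape and the 0.9 keep rate are illustrative:

>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor
>>> from mindspore.ops import operations as P
>>> shape = (2, 2)
>>> keep_prob = Tensor(0.9, mindspore.float32)
>>> mask = P.DropoutGenMask()(shape, keep_prob)
>>> input_x = Tensor(np.ones(shape), mindspore.float32)
>>> out_tensor_kp = P.DropoutDoMask()(input_x, mask, keep_prob)  # keep_prob as Tensor
>>> out_float_kp = P.DropoutDoMask()(input_x, mask, 0.9)         # keep_prob as float, per this change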
@@ -2463,7 +2463,11 @@ class DropoutDoMask(PrimitiveWithInfer):
 
         keep_prob_v = keep_prob['value']
         if keep_prob_v is not None:
-            validator.check_number_range('keep_prob', keep_prob_v.asnumpy(), 0, 1, Rel.INC_BOTH, self.name)
+            if isinstance(keep_prob['dtype'], type(mstype.tensor)):
+                validator.check_number_range('keep_prob', keep_prob_v.asnumpy(), 0, 1, Rel.INC_BOTH, self.name)
+            else:
+                validator.check_value_type("keep_prob", keep_prob_v, [float], self.name)
+                validator.check_number_range('keep_prob', keep_prob_v, 0, 1, Rel.INC_BOTH, self.name)
 
         out = {'shape': input_x_shape,
                'dtype': input_x['dtype'],
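The new infer-time branch dispatches on whether `keep_prob` arrives as a Tensor (its value is unwrapped via asnumpy() before the range check) or as a plain float (type-checked, then range-checked directly). Below is a hypothetical standalone sketch of that dispatch; check_keep_prob and its plain exceptions are illustrative stand-ins, not the MindSpore validator API:

>>> def check_keep_prob(keep_prob_v, is_tensor):
...     # Tensor values are unwrapped to a Python scalar first; plain floats are used as-is
...     value = float(keep_prob_v.asnumpy()) if is_tensor else keep_prob_v
...     if not is_tensor and not isinstance(value, float):
...         raise TypeError("keep_prob must be a float when it is not a Tensor")
...     if not 0.0 <= value <= 1.0:
...         raise ValueError("keep_prob must be in the range [0, 1]")
>>> check_keep_prob(0.9, is_tensor=False)  # passes silently, mirroring the new else branch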