modify func dropout

This commit is contained in:
changzherui 2023-03-02 00:00:58 +08:00
parent a54b91e617
commit 6f791c6b49
4 changed files with 21 additions and 38 deletions

View File

@@ -1,22 +1,19 @@
mindspore.ops.dropout
======================
.. py:function:: mindspore.ops.dropout(x, p=0.5, seed0=0, seed1=0)
.. py:function:: mindspore.ops.dropout(input, p=0.5)
During training, randomly zeroes some values of the input Tensor with probability `p` drawn from a Bernoulli distribution, which reduces the correlation between neurons and helps avoid overfitting. The meaning of this probability is opposite to that in `ops.Dropout` and `nn.Dropout`.
Args:
- **x** (Tensor) - The input of dropout, a Tensor of any shape with data type of float16 or float32.
- **input** (Tensor) - The input of dropout, a Tensor of any shape with data type of float16 or float32.
- **p** (float, optional) - The dropping probability of input units, ranging from 0 to 1. For example, p=0.1 drops 10% of input units. Default: 0.5.
- **seed0** (int, optional) - The operator-level random seed, used to generate random numbers. Default: 0.
- **seed1** (int, optional) - The global random seed, which together with the operator-level random seed determines the final generated random numbers. Default: 0.
Returns:
- **output** (Tensor) - Zeroed Tensor, with the same shape and data type as `x`.
- **output** (Tensor) - Zeroed Tensor, with the same shape and data type as `input`.
- **mask** (Tensor) - Mask used for zeroing; bitwise compression and alignment are performed internally.
Raises:
- **TypeError** - `p` is not a float.
- **TypeError** - `seed0` or `seed1` is not an int.
- **TypeError** - The dtype of `x` is neither float16 nor float32.
- **TypeError** - `x` is not a Tensor.
- **TypeError** - The dtype of `input` is neither float16 nor float32.
- **TypeError** - `input` is not a Tensor.
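
A minimal before/after call sketch of the interface change documented above (assuming only the signatures shown in this diff; the tensor values are illustrative):

import mindspore
from mindspore import Tensor, ops

x = Tensor(((20, 16), (50, 50)), mindspore.float32)

# Old interface: per-op seeds, returns (output, mask).
# output, mask = ops.dropout(x, p=0.5, seed0=0, seed1=0)

# New interface: only the zeroed tensor is returned.
output = ops.dropout(x, p=0.5)
print(output.shape)  # (2, 2)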

View File

@@ -252,10 +252,7 @@ class Dropout1d(Cell):
self.prob = p
def construct(self, x):
if not self.training:
return x
if self.prob == 0:
if not self.training or self.prob == 0:
return x
out = F.dropout1d(x, self.prob)
@@ -299,10 +296,7 @@ class Dropout2d(Cell):
self.dropout2d = P.Dropout2D(self.keep_prob)
def construct(self, x):
if not self.training:
return x
if self.keep_prob == 1:
if not self.training or self.keep_prob == 1:
return x
out, _ = self.dropout2d(x)
@@ -350,10 +344,7 @@ class Dropout3d(Cell):
self.dropout3d = P.Dropout3D(self.keep_prob)
def construct(self, x):
if not self.training:
return x
if self.keep_prob == 1:
if not self.training or self.keep_prob == 1:
return x
out, _ = self.dropout3d(x)
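
The three construct methods above all apply the same simplification: the "not training" and "p == 0 / keep_prob == 1" early returns collapse into a single short-circuit guard. A minimal standalone sketch of that pattern (the class name and its internals are illustrative, not the real Dropout1d):

from mindspore import nn, ops

class InactiveAwareDropout1d(nn.Cell):
    def __init__(self, p=0.5):
        super().__init__()
        self.prob = p

    def construct(self, x):
        # Both conditions make dropout a no-op, so one combined check
        # replaces the two separate early returns.
        if not self.training or self.prob == 0:
            return x
        return ops.dropout1d(x, self.prob)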

View File

@@ -1158,7 +1158,7 @@ def binary_cross_entropy_with_logits(logits, label, weight, pos_weight, reductio
return bce_with_logits_loss_op(logits, label, weight, pos_weight)
def dropout(x, p=0.5, seed0=0, seed1=0):
def dropout(input, p=0.5):
"""
During training, randomly zeroes some of the elements of the input tensor
with probability `p` from a Bernoulli distribution. It plays the role of
@@ -1166,34 +1166,31 @@ def dropout(x, p=0.5, seed0=0, seed1=0):
here is opposite to that in `ops.Dropout` and `nn.Dropout`.
Args:
x (Tensor): The input of Dropout, a Tensor of any shape with data type of float16 or float32.
input (Tensor): The input of Dropout, a Tensor of any shape with data type of float16 or float32.
p (float, optional): The dropping rate, between 0 and 1, e.g. p = 0.1,
means dropping out 10% of input units. Default: 0.5.
seed0 (int, optional): seed0 value for random generating. Default: 0.
seed1 (int, optional): seed1 value for random generating. Default: 0.
Returns:
- **output** (Tensor) - Zeroed tensor, with the same shape and data type as `x`.
- **output** (Tensor) - Zeroed tensor, with the same shape and data type as `input`.
- **mask** (Tensor) - Mask for zeroing, bitwise compression and alignment are performed internally.
Raises:
TypeError: If `p` is not a float.
TypeError: If `seed0` or `seed1` is not an int.
TypeError: If dtype of `x` is neither float16 nor float32.
TypeError: If `x` is not a Tensor.
TypeError: If dtype of `input` is neither float16 nor float32.
TypeError: If `input` is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> x = Tensor(((20, 16), (50, 50)), mindspore.float32)
>>> output, mask = ops.dropout(x, p=0.5)
>>> print(output.shape, mask.shape, mask.dtype)
(2, 2) (16,) UInt8
>>> input = Tensor(((20, 16), (50, 50)), mindspore.float32)
>>> output = ops.dropout(input, p=0.5)
>>> print(output.shape)
(2, 2)
"""
keep_prob = 1 - p
dropout_ = P.Dropout(keep_prob=keep_prob, Seed0=seed0, Seed1=seed1)
return dropout_(x)
out, _ = P.Dropout(keep_prob=keep_prob)(input)
return out
def celu(x, alpha=1.0):
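
Putting the revised body of the dropout function above together, a self-contained sketch of what the new wrapper does (the function name is illustrative; it assumes, as the diff shows, that the underlying P.Dropout primitive still returns an (output, mask) pair):

from mindspore.ops import operations as P

def dropout_sketch(input, p=0.5):
    # Convert the drop probability to the keep probability expected by
    # the primitive, then discard the mask so only the zeroed tensor is
    # returned to the caller.
    keep_prob = 1 - p
    out, _ = P.Dropout(keep_prob=keep_prob)(input)
    return out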

View File

@@ -125,12 +125,10 @@ def test_op1():
Expectation: No exception.
"""
x = Tensor(np.arange(0, 12).reshape(3, 4).astype(np.float16))
output1, mask1 = ops.dropout(x, p=0.5, seed0=1, seed1=100)
output2, mask2 = ops.dropout(x, p=0.5, seed0=1, seed1=100)
output1 = ops.dropout(x, p=0.5)
output2 = ops.dropout(x, p=0.5)
assert mask1.shape == mask2.shape
assert np.allclose(output1.asnumpy(), output2.asnumpy())
assert np.allclose(mask1.asnumpy(), mask2.asnumpy())
@pytest.mark.level0
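
With the per-op seed0/seed1 arguments gone, reproducibility in tests like the one above can no longer be pinned per call. A hedged sketch of one alternative, assuming the global mindspore.set_seed governs the RNG used by dropout (whether the two outputs then match exactly depends on the framework's seeding behaviour):

import numpy as np
import mindspore
from mindspore import Tensor, ops

mindspore.set_seed(1)
x = Tensor(np.arange(0, 12).reshape(3, 4).astype(np.float16))
output1 = ops.dropout(x, p=0.5)
mindspore.set_seed(1)
output2 = ops.dropout(x, p=0.5)
# Holds only under the assumption that set_seed fully determines the dropout RNG.
assert np.allclose(output1.asnumpy(), output2.asnumpy())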