forked from mindspore-Ecosystem/mindspore
!49626 modify dropout
Merge pull request !49626 from changzherui/mod_dropout
commit b678f30a0f
@@ -1,22 +1,19 @@
 mindspore.ops.dropout
 ======================

-.. py:function:: mindspore.ops.dropout(x, p=0.5, seed0=0, seed1=0)
+.. py:function:: mindspore.ops.dropout(input, p=0.5)

     During training, randomly zeroes some values of the input Tensor with probability `p`, following a Bernoulli distribution. This reduces the correlation between neurons and helps avoid overfitting. The meaning of this probability is opposite to that in `ops.Dropout` and `nn.Dropout`.

     Parameters:
-        - **x** (Tensor) - The input of dropout, a Tensor of any shape with data type float16 or float32.
+        - **input** (Tensor) - The input of dropout, a Tensor of any shape with data type float16 or float32.
         - **p** (float, optional) - The dropping probability of input neurons, in the range 0 to 1. For example, p=0.1 drops 10% of the neurons. Default: 0.5.
-        - **seed0** (int, optional) - The operator-level random seed, used to generate random numbers. Default: 0.
-        - **seed1** (int, optional) - The global random seed, which together with the operator-level seed determines the finally generated random numbers. Default: 0.

     Returns:
-        - **output** (Tensor) - The zeroed Tensor, with the same shape and data type as `x`.
-        - **mask** (Tensor) - The mask used for zeroing; bitwise compression and alignment are performed internally.
+        - **output** (Tensor) - The zeroed Tensor, with the same shape and data type as `input`.

     Raises:
         - **TypeError** - `p` is not a float.
-        - **TypeError** - `seed0` or `seed1` is not an int.
-        - **TypeError** - The data type of `x` is neither float16 nor float32.
-        - **TypeError** - `x` is not a Tensor.
+        - **TypeError** - The data type of `input` is neither float16 nor float32.
+        - **TypeError** - `input` is not a Tensor.
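As a quick illustration of the documented API change above, here is a minimal usage sketch that simply restates the updated docstring example (it assumes a MindSpore build that already carries this change):

import mindspore
from mindspore import Tensor, ops

# New-style call: only `input` and `p` are accepted, and a single zeroed
# Tensor comes back (no mask, no per-call seeds).
input = Tensor(((20, 16), (50, 50)), mindspore.float32)
output = ops.dropout(input, p=0.5)
print(output.shape)  # (2, 2)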
@@ -252,10 +252,7 @@ class Dropout1d(Cell):
         self.prob = p

     def construct(self, x):
-        if not self.training:
-            return x
-
-        if self.prob == 0:
+        if not self.training or self.prob == 0:
             return x

         out = F.dropout1d(x, self.prob)
@@ -299,10 +296,7 @@ class Dropout2d(Cell):
         self.dropout2d = P.Dropout2D(self.keep_prob)

     def construct(self, x):
-        if not self.training:
-            return x
-
-        if self.keep_prob == 1:
+        if not self.training or self.keep_prob == 1:
             return x

         out, _ = self.dropout2d(x)
@@ -350,10 +344,7 @@ class Dropout3d(Cell):
         self.dropout3d = P.Dropout3D(self.keep_prob)

     def construct(self, x):
-        if not self.training:
-            return x
-
-        if self.keep_prob == 1:
+        if not self.training or self.keep_prob == 1:
             return x

         out, _ = self.dropout3d(x)
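The three class diffs above apply the same mechanical refactor. As a standalone sketch (plain functions, with `training`, `keep_prob`, and `dropout_op` standing in for the Cell attributes, so not the actual MindSpore code), the guard merge looks like this:

def construct_before(x, training, keep_prob, dropout_op):
    # old control flow: two separate early returns
    if not training:
        return x
    if keep_prob == 1:
        return x
    out, _ = dropout_op(x)
    return out


def construct_after(x, training, keep_prob, dropout_op):
    # new control flow: one combined guard, same behaviour
    if not training or keep_prob == 1:
        return x
    out, _ = dropout_op(x)
    return out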
@@ -1158,7 +1158,7 @@ def binary_cross_entropy_with_logits(logits, label, weight, pos_weight, reductio
     return bce_with_logits_loss_op(logits, label, weight, pos_weight)


-def dropout(x, p=0.5, seed0=0, seed1=0):
+def dropout(input, p=0.5):
     """
     During training, randomly zeroes some of the elements of the input tensor
     with probability `p` from a Bernoulli distribution. It plays the role of
@@ -1166,34 +1166,31 @@ def dropout(x, p=0.5, seed0=0, seed1=0):
     here is opposite to that in `ops.Dropout` and `nn.Dropout`.

     Args:
-        x (Tensor): The input of Dropout, a Tensor of any shape with data type of float16 or float32.
+        input (Tensor): The input of Dropout, a Tensor of any shape with data type of float16 or float32.
         p (float, optional): The dropping rate, between 0 and 1, e.g. p = 0.1,
             means dropping out 10% of input units. Default: 0.5.
-        seed0 (int, optional): seed0 value for random generating. Default: 0.
-        seed1 (int, optional): seed1 value for random generating. Default: 0.

     Returns:
-        - **output** (Tensor) - Zeroed tensor, with the same shape and data type as `x`.
-        - **mask** (Tensor) - Mask for zeroing, bitwise compression and alignment are performed internally.
+        - **output** (Tensor) - Zeroed tensor, with the same shape and data type as `input`.

     Raises:
         TypeError: If `p` is not a float.
-        TypeError: If `seed0` or `seed1` is not an int.
-        TypeError: If dtype of `x` is neither float16 nor float32.
-        TypeError: If `x` is not a Tensor.
+        TypeError: If dtype of `input` is neither float16 nor float32.
+        TypeError: If `input` is not a Tensor.

     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``

     Examples:
-        >>> x = Tensor(((20, 16), (50, 50)), mindspore.float32)
-        >>> output, mask = ops.dropout(x, p=0.5)
-        >>> print(output.shape, mask.shape, mask.dtype)
-        (2, 2) (16,) UInt8
+        >>> input = Tensor(((20, 16), (50, 50)), mindspore.float32)
+        >>> output = ops.dropout(input, p=0.5)
+        >>> print(output.shape)
+        (2, 2)
     """
     keep_prob = 1 - p
-    dropout_ = P.Dropout(keep_prob=keep_prob, Seed0=seed0, Seed1=seed1)
-    return dropout_(x)
+    out, _ = P.Dropout(keep_prob=keep_prob)(input)
+    return out


 def celu(x, alpha=1.0):
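The functional change above drops both the per-call seeds and the returned mask from `ops.dropout`. For call sites that still need the mask, one possible workaround (an assumption on my part, not something this PR prescribes) is to use the `ops.Dropout` primitive that the new wrapper builds on, since it still returns both tensors:

import numpy as np
import mindspore
from mindspore import Tensor, ops

x = Tensor(np.ones((2, 2)).astype(np.float32))

# ops.dropout(input, p) now returns only the zeroed tensor.
out = ops.dropout(x, p=0.5)

# The underlying primitive still yields (output, mask); keep_prob = 1 - p,
# mirroring the body of the new functional wrapper.
out2, mask = ops.Dropout(keep_prob=0.5)(x)
print(out.shape, out2.shape, mask.shape)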
@@ -125,12 +125,10 @@ def test_op1():
     Expectation: No exception.
     """
     x = Tensor(np.arange(0, 12).reshape(3, 4).astype(np.float16))
-    output1, mask1 = ops.dropout(x, p=0.5, seed0=1, seed1=100)
-    output2, mask2 = ops.dropout(x, p=0.5, seed0=1, seed1=100)
+    output1 = ops.dropout(x, p=0.5)
+    output2 = ops.dropout(x, p=0.5)

-    assert mask1.shape == mask2.shape
     assert np.allclose(output1.asnumpy(), output2.asnumpy())
-    assert np.allclose(mask1.asnumpy(), mask2.asnumpy())


 @pytest.mark.level0
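The updated test now only compares the two outputs. A further check one could add (purely hypothetical, not part of this PR, and assuming the common inverted-dropout convention where surviving elements are scaled by 1/(1 - p)) is that every output element is either zero or the correspondingly scaled input:

import numpy as np
from mindspore import Tensor, ops

def check_dropout_values(p=0.5):
    # Hypothetical extra assertion: each element of the result is either
    # exactly zero (dropped) or the input value scaled by 1/(1 - p).
    x = Tensor(np.arange(1, 13).reshape(3, 4).astype(np.float32))
    out = ops.dropout(x, p=p).asnumpy()
    scaled = x.asnumpy() / (1 - p)
    assert np.all((out == 0) | np.isclose(out, scaled))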