forked from mindspore-Ecosystem/mindspore

move dropoutgrad to inner

This commit is contained in:
parent 1abaeaf197
commit 347eb6b07a
@@ -930,7 +930,7 @@ def get_bprop_kl_div_loss(self):
 @bprop_getters.register(P.Dropout)
 def get_bprop_dropout(self):
     """Grad definition for `Dropout` operation."""
-    grad = P.DropoutGrad(self.keep_prob)
+    grad = G.DropoutGrad(self.keep_prob)
 
     def bprop(x, out, dout):
         _, mask = out
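For reference, a minimal sketch of the complete backprop function after this change. It assumes `G` aliases the inner grad-ops module and that the incoming gradient tuple `dout` unpacks the same way as the forward output `(result, mask)`; neither assumption is shown in the hunk itself.

from mindspore.ops import operations as P
from mindspore.ops.operations import _grad_ops as G      # assumed inner location
from mindspore.ops._grad.grad_base import bprop_getters  # assumed helper path

@bprop_getters.register(P.Dropout)
def get_bprop_dropout(self):
    """Grad definition for `Dropout` operation."""
    grad = G.DropoutGrad(self.keep_prob)

    def bprop(x, out, dout):
        _, mask = out    # forward Dropout returns (result, mask)
        dy, _ = dout     # only the result branch carries a gradient
        dx = grad(dy, mask)
        return (dx,)

    return bprop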
@@ -61,7 +61,7 @@ from .random_ops import (RandomChoiceWithMask, StandardNormal, Gamma, Poisson, U
 from .nn_ops import (LSTM, SGD, Adam, FusedSparseAdam, FusedSparseLazyAdam, ApplyMomentum, BatchNorm,
                      BiasAdd, Conv2D,
                      DepthwiseConv2dNative,
-                     DropoutDoMask, DropoutGrad, Dropout,
+                     DropoutDoMask, Dropout,
                      DropoutGenMask, Flatten, FusedBatchNorm, FusedBatchNormEx, BNTrainingReduce, BNTrainingUpdate,
                      Gelu, Elu,
                      GetNext, L2Normalize, LayerNorm, L2Loss, CTCLoss, CTCLossV2, CTCGreedyDecoder,
@@ -211,7 +211,6 @@ __all__ = [
     'DynamicShape',
     'DropoutDoMask',
     'DropoutGenMask',
-    'DropoutGrad',
     'Dropout',
     'Neg',
     'InplaceAdd',
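With the import and the `__all__` entry removed, the primitive is no longer part of the public operations namespace. A hedged illustration, assuming the inner module path `mindspore.ops.operations._grad_ops` (not shown in the diff):

from mindspore.ops import operations as P
from mindspore.ops.operations import _grad_ops as G  # assumed inner location

grad_op = G.DropoutGrad(keep_prob=0.9)  # new home of the gradient primitive
# P.DropoutGrad                         # would now raise AttributeError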
@@ -462,6 +462,42 @@ class DepthwiseConv2dNativeBackpropInput(PrimitiveWithInfer):
         return out
 
 
+class DropoutGrad(PrimitiveWithInfer):
+    """
+    The gradient of Dropout. During training, randomly zeroes some of the elements
+    of the input tensor with probability 1 - `keep_prob`.
+
+    Args:
+        keep_prob (float): The keep rate, between 0 and 1, e.g. keep_prob = 0.9
+            means dropping out 10% of input units.
+
+    Inputs:
+        - **dy** (Tensor) - The gradient of the Dropout output.
+        - **mask** (Tensor) - The mask generated by the forward Dropout.
+
+    Outputs:
+        Tensor, with the same shape and dtype as `dy`.
+
+    Examples:
+        >>> dropout_grad = DropoutGrad(keep_prob=0.5)
+        >>> dx = dropout_grad(dy, mask)
+    """
+
+    @prim_attr_register
+    def __init__(self, keep_prob=0.5):
+        self.keep_prob = validator.check_number_range("keep_prob", keep_prob, 0, 1, Rel.INC_RIGHT, self.name)
+
+    def infer_shape(self, dy_shape, mask_shape):
+        return dy_shape
+
+    def infer_dtype(self, dy_dtype, mask_dtype):
+        valid_types = (mstype.float16, mstype.float32)
+        validator.check_subclass("dy", dy_dtype, mstype.tensor, self.name)
+        validator.check_subclass("mask", mask_dtype, mstype.tensor, self.name)
+        validator.check_tensor_type_same({"dy_dtype": dy_dtype}, valid_types, self.name)
+        return dy_dtype
+
+
 class FlattenGrad(PrimitiveWithInfer):
     """Performs gradients of Flatten."""
 
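A minimal end-to-end sketch of the moved primitive, based on the infer signatures above: `dy` and `mask` are tensors of the same shape, with `mask` taken from the forward Dropout. The module path and PyNative mode are assumptions, not part of the diff.

import numpy as np
import mindspore
from mindspore import Tensor, context
from mindspore.ops import operations as P
from mindspore.ops.operations import _grad_ops as G  # assumed inner location

context.set_context(mode=context.PYNATIVE_MODE)

x = Tensor(np.ones((20, 16, 50, 50)), mindspore.float32)
out, mask = P.Dropout(keep_prob=0.5)(x)       # forward pass returns (output, mask)

dy = Tensor(np.ones((20, 16, 50, 50)), mindspore.float32)
dx = G.DropoutGrad(keep_prob=0.5)(dy, mask)   # same shape and dtype as dy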
@@ -5247,42 +5247,6 @@ class Dropout(PrimitiveWithInfer):
         return x_dtype, x_dtype
 
 
-class DropoutGrad(PrimitiveWithInfer):
-    """
-    The gradient of Dropout. During training, randomly zeroes some of the elements
-    of the input tensor with probability 1 - `keep_prob`.
-
-    Args:
-        keep_prob (float): The keep rate, between 0 and 1, e.g. keep_prob = 0.9
-            means dropping out 10% of input units.
-
-    Inputs:
-        - **dy** (Tensor) - The gradient of the Dropout output.
-        - **mask** (Tensor) - The mask generated by the forward Dropout.
-
-    Outputs:
-        Tensor, with the same shape and dtype as `dy`.
-
-    Examples:
-        >>> dropout_grad = DropoutGrad(keep_prob=0.5)
-        >>> dx = dropout_grad(dy, mask)
-    """
-
-    @prim_attr_register
-    def __init__(self, keep_prob=0.5):
-        self.keep_prob = validator.check_number_range("keep_prob", keep_prob, 0, 1, Rel.INC_RIGHT, self.name)
-
-    def infer_shape(self, dy_shape, mask_shape):
-        return dy_shape
-
-    def infer_dtype(self, dy_dtype, mask_dtype):
-        valid_types = (mstype.float16, mstype.float32)
-        validator.check_subclass("dy", dy_dtype, mstype.tensor, self.name)
-        validator.check_subclass("mask", mask_dtype, mstype.tensor, self.name)
-        validator.check_tensor_type_same({"dy_dtype": dy_dtype}, valid_types, self.name)
-        return dy_dtype
-
-
 class CTCLoss(PrimitiveWithInfer):
     """
     Calculates the CTC (Connectionist Temporal Classification) loss and the gradient.