diff --git a/mindspore/ops/_grad/grad_nn_ops.py b/mindspore/ops/_grad/grad_nn_ops.py
index de93d718502..9e6a8791afb 100755
--- a/mindspore/ops/_grad/grad_nn_ops.py
+++ b/mindspore/ops/_grad/grad_nn_ops.py
@@ -930,7 +930,7 @@ def get_bprop_kl_div_loss(self):
 @bprop_getters.register(P.Dropout)
 def get_bprop_dropout(self):
     """Grad definition for `Dropout` operation."""
-    grad = P.DropoutGrad(self.keep_prob)
+    grad = G.DropoutGrad(self.keep_prob)
 
     def bprop(x, out, dout):
         _, mask = out
diff --git a/mindspore/ops/operations/__init__.py b/mindspore/ops/operations/__init__.py
index 396ece17239..9bc167d91a5 100644
--- a/mindspore/ops/operations/__init__.py
+++ b/mindspore/ops/operations/__init__.py
@@ -61,7 +61,7 @@ from .random_ops import (RandomChoiceWithMask, StandardNormal, Gamma, Poisson, U
 from .nn_ops import (LSTM, SGD, Adam, FusedSparseAdam, FusedSparseLazyAdam, ApplyMomentum, BatchNorm,
                      BiasAdd, Conv2D,
                      DepthwiseConv2dNative,
-                     DropoutDoMask, DropoutGrad, Dropout,
+                     DropoutDoMask, Dropout,
                      DropoutGenMask, Flatten, FusedBatchNorm, FusedBatchNormEx,
                      BNTrainingReduce, BNTrainingUpdate, Gelu, Elu,
                      GetNext, L2Normalize, LayerNorm, L2Loss, CTCLoss, CTCLossV2, CTCGreedyDecoder,
@@ -211,7 +211,6 @@ __all__ = [
     'DynamicShape',
     'DropoutDoMask',
     'DropoutGenMask',
-    'DropoutGrad',
     'Dropout',
     'Neg',
     'InplaceAdd',
diff --git a/mindspore/ops/operations/_grad_ops.py b/mindspore/ops/operations/_grad_ops.py
index af103f0bd5f..b408419c297 100644
--- a/mindspore/ops/operations/_grad_ops.py
+++ b/mindspore/ops/operations/_grad_ops.py
@@ -462,6 +462,44 @@ class DepthwiseConv2dNativeBackpropInput(PrimitiveWithInfer):
         return out
 
 
+class DropoutGrad(PrimitiveWithInfer):
+    """
+    The gradient of Dropout. During training, randomly zeroes some of the elements
+    of the input tensor with probability `1 - keep_prob`.
+
+    Args:
+        keep_prob (float): The keep rate, between 0 and 1, e.g. keep_prob = 0.9
+            means dropping out 10% of input units.
+
+    Inputs:
+        - **dy** (Tensor) - The gradient flowing back from the next operation.
+        - **mask** (Tensor) - The mask produced by the forward Dropout.
+
+    Outputs:
+        Tensor, the gradient of the input of Dropout, with the same shape and dtype as `dy`.
+
+    Examples:
+        >>> dropout_grad = G.DropoutGrad(keep_prob=0.5)
+        >>> dy = Tensor(np.ones([20, 16, 50, 50]), mindspore.float32)
+        >>> mask = Tensor(np.ones([20, 16, 50, 50]), mindspore.float32)
+        >>> output = dropout_grad(dy, mask)
+    """
+
+    @prim_attr_register
+    def __init__(self, keep_prob=0.5):
+        self.keep_prob = validator.check_number_range("keep_prob", keep_prob, 0, 1, Rel.INC_RIGHT, self.name)
+
+    def infer_shape(self, dy_shape, mask_shape):
+        return dy_shape
+
+    def infer_dtype(self, dy_dtype, mask_dtype):
+        valid_types = (mstype.float16, mstype.float32)
+        validator.check_subclass("dy", dy_dtype, mstype.tensor, self.name)
+        validator.check_subclass("mask", mask_dtype, mstype.tensor, self.name)
+        validator.check_tensor_type_same({"dy_dtype": dy_dtype}, valid_types, self.name)
+        return dy_dtype
+
+
 class FlattenGrad(PrimitiveWithInfer):
     """Performs gradients of Flatten."""
diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py
index 11b674dc079..641b3a4f6cb 100644
--- a/mindspore/ops/operations/nn_ops.py
+++ b/mindspore/ops/operations/nn_ops.py
@@ -5247,42 +5247,6 @@ class Dropout(PrimitiveWithInfer):
         return x_dtype, x_dtype
 
 
-class DropoutGrad(PrimitiveWithInfer):
-    """
-    The gradient of Dropout. During training, randomly zeroes some of the elements
-    of the input tensor with probability.
-
-    Args:
-        keep_prob (float): The keep rate, between 0 and 1, e.g. keep_prob = 0.9,
-            means dropping out 10% of input units.
-
-    Inputs:
-        - **shape** (tuple[int]) - The shape of target mask.
-
-    Outputs:
-        Tensor, the value of generated mask for input shape.
-
-    Examples:
-        >>> dropout_grad = P.DropoutGrad(keep_prob=0.5)
-        >>> in = Tensor((20, 16, 50, 50))
-        >>> out = dropout_grad(in)
-    """
-
-    @prim_attr_register
-    def __init__(self, keep_prob=0.5):
-        self.keep_prob = validator.check_number_range("keep_prob", keep_prob, 0, 1, Rel.INC_RIGHT, self.name)
-
-    def infer_shape(self, dy_shape, mask_shape):
-        return dy_shape
-
-    def infer_dtype(self, dy_dtype, mask_dtype):
-        valid_types = (mstype.float16, mstype.float32)
-        validator.check_subclass("dy", dy_dtype, mstype.tensor, self.name)
-        validator.check_subclass("mask", mask_dtype, mstype.tensor, self.name)
-        validator.check_tensor_type_same({"dy_dtype": dy_dtype}, valid_types, self.name)
-        return dy_dtype
-
-
 class CTCLoss(PrimitiveWithInfer):
     """
     Calculates the CTC (Connectionist Temporal Classification) loss and the gradient.
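
A minimal sketch of how the relocated primitive is exercised after this change. This is illustrative only: it assumes PyNative mode, `G` is the `_grad_ops` import alias used in `grad_nn_ops.py` above, and the shapes are arbitrary.

    import numpy as np
    import mindspore
    from mindspore import Tensor, context
    from mindspore.ops import operations as P
    from mindspore.ops.operations import _grad_ops as G

    context.set_context(mode=context.PYNATIVE_MODE)

    # Forward pass: Dropout returns both the dropped output and the mask it applied.
    dropout = P.Dropout(keep_prob=0.5)
    x = Tensor(np.ones([20, 16, 50, 50]), mindspore.float32)
    out, mask = dropout(x)

    # Backward pass: DropoutGrad scales the incoming gradient by the same mask,
    # which is why the bprop in grad_nn_ops.py now builds G.DropoutGrad(self.keep_prob)
    # and calls it with (dout, mask).
    dy = Tensor(np.ones([20, 16, 50, 50]), mindspore.float32)
    dropout_grad = G.DropoutGrad(keep_prob=0.5)
    dx = dropout_grad(dy, mask)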