forked from mindspore-Ecosystem/mindspore
!21983 revert remove_redundant_depend in some network scripts
Merge pull request !21983 from huangbingjian/revert_depend
commit ae24142e05
@@ -23,6 +23,7 @@ from mindspore import ParameterTuple
 from mindspore.common.tensor import Tensor
 from mindspore.nn.wrap.grad_reducer import DistributedGradReducer
 from mindspore.ops import composite as C
+from mindspore.ops import functional as F
 from mindspore.train.callback import Callback
 
 __all__ = ['LossCallBack', 'WithLossCell', 'TrainOneStepCell']
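The import hunks (this one and the two matching ones below) re-add a single line, `from mindspore.ops import functional as F`, which the restored `F.depend` calls in the code hunks require.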
@@ -143,5 +144,4 @@ class TrainOneStepCell(nn.Cell):
         grads = self.grad(self.network, weights)(img, gt_text, gt_kernels, training_mask, self.sens)
         if self.reducer_flag:
             grads = self.grad_reducer(grads)
-        self.optimizer(grads)
-        return loss
+        return F.depend(loss, self.optimizer(grads))
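Every code hunk in this commit is the same one-line revert: the bare `self.optimizer(grads)` call followed by `return loss` goes back to `return F.depend(loss, self.optimizer(grads))`. In graph mode, an operator whose output is never consumed can be pruned or reordered by the compiler; `F.depend(loss, update)` returns `loss` while forcing `update` to execute first, so the optimizer's side effect stays anchored in the graph. Below is a minimal sketch of the restored pattern; the class name and the generic `data, label` inputs are illustrative (the real cells take network-specific inputs such as `img, gt_text, gt_kernels, training_mask`).

# Minimal sketch of the pattern this revert restores; names are illustrative.
import mindspore.nn as nn
from mindspore import ParameterTuple
from mindspore.ops import composite as C
from mindspore.ops import functional as F
from mindspore.ops import operations as P


class SketchTrainOneStepCell(nn.Cell):
    """Runs one forward/backward/update step for a network that returns its loss."""
    def __init__(self, network, optimizer, sens=1.0):
        super(SketchTrainOneStepCell, self).__init__(auto_prefix=False)
        self.network = network
        self.weights = ParameterTuple(network.trainable_params())
        self.optimizer = optimizer
        self.grad = C.GradOperation(get_by_list=True, sens_param=True)
        self.sens = sens

    def construct(self, data, label):
        loss = self.network(data, label)
        # Initial gradient: a tensor of the loss's dtype/shape filled with sens.
        sens = P.Fill()(P.DType()(loss), P.Shape()(loss), self.sens)
        grads = self.grad(self.network, self.weights)(data, label, sens)
        # F.depend ties the returned loss to the update so it cannot be pruned.
        return F.depend(loss, self.optimizer(grads))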
@@ -678,8 +678,7 @@ class TrainingWrapper(nn.Cell):
         if self.reducer_flag:
             # apply grad reducer on grads
             grads = self.grad_reducer(grads)
-        self.optimizer(grads)
-        return loss
+        return F.depend(loss, self.optimizer(grads))
 
 
 class YoloBoxScores(nn.Cell):
@@ -18,6 +18,7 @@ from mindspore.common.parameter import ParameterTuple
 from mindspore import Tensor
 from mindspore.common import dtype as mstype
 from mindspore.ops import composite as C
+from mindspore.ops import functional as F
 from mindspore.ops import operations as P
 
 
@@ -149,8 +150,7 @@ class TrainOneStepCell(nn.Cell):
         loss = self.network(feature, biases)
         sens = P.Fill()(P.DType()(loss), P.Shape()(loss), self.sens)
         grads = self.grad(self.network, weights)(feature, biases, sens)
-        self.optimizer(grads)
-        return loss
+        return F.depend(loss, self.optimizer(grads))
 
 
 class TrainGAT(nn.Cell):
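In the hunks above, `sens` is the sensitivity (initial gradient) fed to `GradOperation(..., sens_param=True)`: `P.Fill()(P.DType()(loss), P.Shape()(loss), self.sens)` builds a tensor with the loss's dtype and shape, filled with the scalar `self.sens` (typically 1.0, or a loss-scale factor), so the starting gradient of the backward pass is supplied explicitly.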
@@ -137,6 +137,4 @@ class FastTextTrainOneStepCell(nn.Cell):
         if self.reducer_flag:
             # apply grad reducer on grads
             grads = self.grad_reducer(grads)
-
-        self.optimizer(grads)
-        return loss
+        return F.depend(loss, self.optimizer(grads))
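In the cells with a `reducer_flag` (the TrainingWrapper and FastTextTrainOneStepCell hunks), the restored `F.depend` line sits after a gradient all-reduce. Here is a sketch of how that branch is typically wired in these model-zoo cells; the setup is an assumption based on the standard data-parallel pattern, not code shown in this PR, and the class name is illustrative.

# Sketch of the data-parallel variant; the reducer setup is an assumption
# based on the usual model-zoo pattern, not code from this PR.
import mindspore.nn as nn
from mindspore import ParameterTuple
from mindspore.context import ParallelMode
from mindspore.communication.management import get_group_size
from mindspore.nn.wrap.grad_reducer import DistributedGradReducer
from mindspore.ops import composite as C
from mindspore.ops import functional as F
from mindspore.ops import operations as P


class SketchParallelTrainCell(nn.Cell):
    def __init__(self, network, optimizer, parallel_mode, sens=1.0):
        super(SketchParallelTrainCell, self).__init__(auto_prefix=False)
        self.network = network
        self.weights = ParameterTuple(network.trainable_params())
        self.optimizer = optimizer
        self.grad = C.GradOperation(get_by_list=True, sens_param=True)
        self.sens = sens
        # Average gradients across devices only in data-parallel modes.
        self.reducer_flag = parallel_mode in (ParallelMode.DATA_PARALLEL,
                                              ParallelMode.HYBRID_PARALLEL)
        self.grad_reducer = F.identity
        if self.reducer_flag:
            self.grad_reducer = DistributedGradReducer(
                optimizer.parameters, mean=True, degree=get_group_size())

    def construct(self, data, label):
        loss = self.network(data, label)
        sens = P.Fill()(P.DType()(loss), P.Shape()(loss), self.sens)
        grads = self.grad(self.network, self.weights)(data, label, sens)
        if self.reducer_flag:
            # apply grad reducer on grads
            grads = self.grad_reducer(grads)
        return F.depend(loss, self.optimizer(grads))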
@@ -19,6 +19,7 @@ import numpy as np
 from sklearn.metrics import roc_auc_score
 import mindspore.common.dtype as mstype
 from mindspore.ops import composite as C
+from mindspore.ops import functional as F
 from mindspore.ops import operations as P
 from mindspore.nn import Dropout
 from mindspore.nn.optim import Adam
@@ -332,8 +333,7 @@ class TrainStepWrap(nn.Cell):
         loss = self.network(batch_ids, batch_wts, label)
         sens = P.Fill()(P.DType()(loss), P.Shape()(loss), self.sens) #
         grads = self.grad(self.network, weights)(batch_ids, batch_wts, label, sens)
-        self.optimizer(grads)
-        return loss
+        return F.depend(loss, self.optimizer(grads))
 
 
 class PredictWithSigmoid(nn.Cell):
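A hypothetical smoke test for the first sketch; the toy network and random inputs are stand-ins, not part of this PR.

import numpy as np
import mindspore.nn as nn
from mindspore import Tensor


class ToyNetWithLoss(nn.Cell):
    """Toy network that returns its own loss, like the wrapped cells above."""
    def __init__(self):
        super(ToyNetWithLoss, self).__init__()
        self.fc = nn.Dense(4, 2)
        self.loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')

    def construct(self, data, label):
        return self.loss(self.fc(data), label)


net = ToyNetWithLoss()
opt = nn.Momentum(net.trainable_params(), learning_rate=0.01, momentum=0.9)
train_net = SketchTrainOneStepCell(net, opt)
train_net.set_train()
data = Tensor(np.random.rand(8, 4).astype(np.float32))
label = Tensor(np.random.randint(0, 2, (8,)).astype(np.int32))
loss = train_net(data, label)  # one forward/backward/update step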