forked from mindspore-Ecosystem/mindspore
!21983 revert remove_redundant_depend in some network scripts
Merge pull request !21983 from huangbingjian/revert_depend
This commit is contained in:
commit ae24142e05
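For context: every hunk below restores the same MindSpore training-step idiom. Instead of invoking the optimizer and returning the loss as two independent statements, the optimizer call is chained to the returned loss with F.depend, which adds an explicit dependency edge so graph compilation cannot prune or reorder the parameter update. A minimal sketch of the restored pattern, assuming a generic one-step training wrapper; the class below is illustrative and its argument handling is generic, not copied from any one of the scripts touched by this PR:

import mindspore.nn as nn
from mindspore import ParameterTuple
from mindspore.ops import composite as C
from mindspore.ops import functional as F
from mindspore.ops import operations as P


class TrainOneStepCell(nn.Cell):
    """Illustrative wrapper: one construct() call runs a full train step."""

    def __init__(self, network, optimizer, sens=1.0):
        super(TrainOneStepCell, self).__init__(auto_prefix=False)
        self.network = network
        self.weights = ParameterTuple(network.trainable_params())
        self.optimizer = optimizer
        self.grad = C.GradOperation(get_by_list=True, sens_param=True)
        self.sens = sens

    def construct(self, *inputs):
        loss = self.network(*inputs)
        # Sensitivity tensor: the scale factor fed to the backward pass,
        # same Fill/DType/Shape idiom as in the hunks below.
        sens = P.Fill()(P.DType()(loss), P.Shape()(loss), self.sens)
        grads = self.grad(self.network, self.weights)(*inputs, sens)
        # F.depend ties the returned loss to the optimizer's side effect,
        # so the parameter update is kept in the compiled graph.
        return F.depend(loss, self.optimizer(grads))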
@@ -23,6 +23,7 @@ from mindspore import ParameterTuple
 from mindspore.common.tensor import Tensor
 from mindspore.nn.wrap.grad_reducer import DistributedGradReducer
 from mindspore.ops import composite as C
+from mindspore.ops import functional as F
 from mindspore.train.callback import Callback
 
 __all__ = ['LossCallBack', 'WithLossCell', 'TrainOneStepCell']
@@ -143,5 +144,4 @@ class TrainOneStepCell(nn.Cell):
         grads = self.grad(self.network, weights)(img, gt_text, gt_kernels, training_mask, self.sens)
         if self.reducer_flag:
             grads = self.grad_reducer(grads)
-        self.optimizer(grads)
-        return loss
+        return F.depend(loss, self.optimizer(grads))
@@ -678,8 +678,7 @@ class TrainingWrapper(nn.Cell):
         if self.reducer_flag:
             # apply grad reducer on grads
             grads = self.grad_reducer(grads)
-        self.optimizer(grads)
-        return loss
+        return F.depend(loss, self.optimizer(grads))
 
 
 class YoloBoxScores(nn.Cell):
@@ -18,6 +18,7 @@ from mindspore.common.parameter import ParameterTuple
 from mindspore import Tensor
 from mindspore.common import dtype as mstype
 from mindspore.ops import composite as C
+from mindspore.ops import functional as F
 from mindspore.ops import operations as P
 
 
@@ -149,8 +150,7 @@ class TrainOneStepCell(nn.Cell):
         loss = self.network(feature, biases)
         sens = P.Fill()(P.DType()(loss), P.Shape()(loss), self.sens)
         grads = self.grad(self.network, weights)(feature, biases, sens)
-        self.optimizer(grads)
-        return loss
+        return F.depend(loss, self.optimizer(grads))
 
 
 class TrainGAT(nn.Cell):
@@ -137,6 +137,4 @@ class FastTextTrainOneStepCell(nn.Cell):
         if self.reducer_flag:
             # apply grad reducer on grads
             grads = self.grad_reducer(grads)
-        self.optimizer(grads)
-
-        return loss
+        return F.depend(loss, self.optimizer(grads))
@@ -19,6 +19,7 @@ import numpy as np
 from sklearn.metrics import roc_auc_score
 import mindspore.common.dtype as mstype
 from mindspore.ops import composite as C
+from mindspore.ops import functional as F
 from mindspore.ops import operations as P
 from mindspore.nn import Dropout
 from mindspore.nn.optim import Adam
@@ -332,8 +333,7 @@ class TrainStepWrap(nn.Cell):
         loss = self.network(batch_ids, batch_wts, label)
         sens = P.Fill()(P.DType()(loss), P.Shape()(loss), self.sens) #
         grads = self.grad(self.network, weights)(batch_ids, batch_wts, label, sens)
-        self.optimizer(grads)
-        return loss
+        return F.depend(loss, self.optimizer(grads))
 
 
 class PredictWithSigmoid(nn.Cell):
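For completeness, a hypothetical usage of a wrapper like the sketch near the top of this page; the backbone, loss, and optimizer here are placeholders chosen only to make the example self-contained, not taken from the reverted scripts:

import numpy as np
import mindspore.nn as nn
from mindspore import Tensor, context

# Graph mode is where the missing depend edge can bite.
context.set_context(mode=context.GRAPH_MODE)

net = nn.Dense(16, 1)                                # placeholder backbone
net_with_loss = nn.WithLossCell(net, nn.MSELoss())   # attach a loss
opt = nn.Momentum(net.trainable_params(), learning_rate=0.01, momentum=0.9)
train_net = TrainOneStepCell(net_with_loss, opt)     # sketch defined above
train_net.set_train()

data = Tensor(np.random.randn(4, 16).astype(np.float32))
label = Tensor(np.random.randn(4, 1).astype(np.float32))
loss = train_net(data, label)  # forward, backward, and update in one call

Because the wrapper returns F.depend(loss, self.optimizer(grads)), the parameter update is guaranteed to execute before the loss value is produced, which is the behavior this revert restores.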