diff --git a/mindspore/nn/wrap/cell_wrapper.py b/mindspore/nn/wrap/cell_wrapper.py index d0073a4929e..c32b7eadd97 100644 --- a/mindspore/nn/wrap/cell_wrapper.py +++ b/mindspore/nn/wrap/cell_wrapper.py @@ -117,7 +117,7 @@ class WithGradCell(Cell): self.network = network self.loss_fn = loss_fn self.weights = ParameterTuple(network.trainable_params()) - self.grad = C.GradOperation('grad', get_by_list=True, sens_param=(sens is not None)) + self.grad = C.GradOperation(get_by_list=True, sens_param=(sens is not None)) self.sens = sens if loss_fn is None: self.network_with_loss = network @@ -182,7 +182,7 @@ class TrainOneStepCell(Cell): self.network.add_flags(defer_inline=True) self.weights = optimizer.parameters self.optimizer = optimizer - self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.sens = sens self.reducer_flag = False self.grad_reducer = None diff --git a/mindspore/nn/wrap/grad_reducer.py b/mindspore/nn/wrap/grad_reducer.py index 68f676ec664..8e8a9ef7566 100644 --- a/mindspore/nn/wrap/grad_reducer.py +++ b/mindspore/nn/wrap/grad_reducer.py @@ -269,7 +269,7 @@ class DistributedGradReducer(Cell): >>> self.network.add_flags(defer_inline=True) >>> self.weights = optimizer.parameters >>> self.optimizer = optimizer - >>> self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) + >>> self.grad = C.GradOperation(get_by_list=True, sens_param=True) >>> self.sens = sens >>> self.reducer_flag = False >>> self.grad_reducer = None diff --git a/mindspore/nn/wrap/loss_scale.py b/mindspore/nn/wrap/loss_scale.py index 3bfc7170f10..88a0f26a34f 100644 --- a/mindspore/nn/wrap/loss_scale.py +++ b/mindspore/nn/wrap/loss_scale.py @@ -210,7 +210,7 @@ class TrainOneStepWithLossScaleCell(Cell): self.network.add_flags(defer_inline=True) self.weights = optimizer.parameters self.optimizer = optimizer - self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.hyper_map = C.HyperMap() if context.get_context("device_target") == "GPU": self.gpu_target = True diff --git a/mindspore/ops/composite/base.py b/mindspore/ops/composite/base.py index aa77f8005a7..43d30128ba9 100644 --- a/mindspore/ops/composite/base.py +++ b/mindspore/ops/composite/base.py @@ -106,12 +106,11 @@ class GradOperation(GradOperation_): a 'ones_like(outputs)' sensitivity will be attached automatically. Default: False. 
""" - def __init__(self, name, - get_all=False, get_by_list=False, sens_param=False): + def __init__(self, get_all=False, get_by_list=False, sens_param=False): self.get_all = get_all self.get_by_list = get_by_list self.sens_param = sens_param - GradOperation_.__init__(self, name, get_all, get_by_list, sens_param) + GradOperation_.__init__(self, 'grad', get_all, get_by_list, sens_param) self.grad_fn = None self.fn = None self.need_forward = False @@ -139,7 +138,7 @@ class GradOperation(GradOperation_): fn.already_run = False def __call__(self, fn, weights=None): - grad_ = GradOperation('grad', self.get_all, self.get_by_list, self.sens_param) + grad_ = GradOperation(self.get_all, self.get_by_list, self.sens_param) if self.grad_fn is None or self.fn != fn: if context.get_context("mode") == context.GRAPH_MODE: if self.get_by_list: diff --git a/mindspore/ops/operations/debug_ops.py b/mindspore/ops/operations/debug_ops.py index e5796ec360e..4123122ea21 100644 --- a/mindspore/ops/operations/debug_ops.py +++ b/mindspore/ops/operations/debug_ops.py @@ -216,7 +216,7 @@ class InsertGradientOf(PrimitiveWithInfer): >>> return ret >>> >>> clip = P.InsertGradientOf(clip_gradient) - >>> grad_all = C.GradOperation('get_all', get_all=True) + >>> grad_all = C.GradOperation(get_all=True) >>> def InsertGradientOfClipDemo(): >>> def clip_test(x, y): >>> x = clip(x) @@ -268,7 +268,7 @@ class HookBackward(PrimitiveWithInfer): >>> def hook_fn(grad_out): >>> print(grad_out) >>> - >>> grad_all = GradOperation('get_all', get_all=True) + >>> grad_all = GradOperation(get_all=True) >>> hook = P.HookBackward(hook_fn) >>> >>> def hook_test(x, y): diff --git a/model_zoo/official/cv/faster_rcnn/src/network_define.py b/model_zoo/official/cv/faster_rcnn/src/network_define.py index 348c72cee53..ae71d46352a 100644 --- a/model_zoo/official/cv/faster_rcnn/src/network_define.py +++ b/model_zoo/official/cv/faster_rcnn/src/network_define.py @@ -163,8 +163,7 @@ class TrainOneStepCell(nn.Cell): self.backbone = network_backbone self.weights = ParameterTuple(network.trainable_params()) self.optimizer = optimizer - self.grad = C.GradOperation('grad', - get_by_list=True, + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.sens = Tensor((np.ones((1,)) * sens).astype(np.float16)) self.reduce_flag = reduce_flag diff --git a/model_zoo/official/cv/maskrcnn/src/network_define.py b/model_zoo/official/cv/maskrcnn/src/network_define.py index 481632667bc..b94262f45cd 100644 --- a/model_zoo/official/cv/maskrcnn/src/network_define.py +++ b/model_zoo/official/cv/maskrcnn/src/network_define.py @@ -171,8 +171,7 @@ class TrainOneStepCell(nn.Cell): self.backbone = network_backbone self.weights = ParameterTuple(network.trainable_params()) self.optimizer = optimizer - self.grad = C.GradOperation('grad', - get_by_list=True, + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.sens = Tensor((np.ones((1,)) * sens).astype(np.float16)) self.reduce_flag = reduce_flag diff --git a/model_zoo/official/cv/resnet_thor/src/grad_reducer_thor.py b/model_zoo/official/cv/resnet_thor/src/grad_reducer_thor.py index b8bbbf29b72..86ee3fcc8f3 100644 --- a/model_zoo/official/cv/resnet_thor/src/grad_reducer_thor.py +++ b/model_zoo/official/cv/resnet_thor/src/grad_reducer_thor.py @@ -119,7 +119,7 @@ class DistributedGradReducerThor(Cell): >>> self.network.add_flags(defer_inline=True) >>> self.weights = ParameterTuple(network.trainable_params()) >>> self.optimizer = optimizer - >>> self.grad = C.GradOperation('grad', get_by_list=True, 
sens_param=True) + >>> self.grad = C.GradOperation(get_by_list=True, sens_param=True) >>> self.sens = sens >>> self.reducer_flag = False >>> self.grad_reducer = None diff --git a/model_zoo/official/cv/ssd/src/ssd.py b/model_zoo/official/cv/ssd/src/ssd.py index d2fb64531ec..fca8a1948d5 100644 --- a/model_zoo/official/cv/ssd/src/ssd.py +++ b/model_zoo/official/cv/ssd/src/ssd.py @@ -383,7 +383,7 @@ class TrainingWrapper(nn.Cell): self.network = network self.weights = ms.ParameterTuple(network.trainable_params()) self.optimizer = optimizer - self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.sens = sens self.reducer_flag = False self.grad_reducer = None diff --git a/model_zoo/official/cv/warpctc/src/warpctc_for_train.py b/model_zoo/official/cv/warpctc/src/warpctc_for_train.py index 5654f05e5df..8391ffe6768 100755 --- a/model_zoo/official/cv/warpctc/src/warpctc_for_train.py +++ b/model_zoo/official/cv/warpctc/src/warpctc_for_train.py @@ -77,7 +77,7 @@ class TrainOneStepCellWithGradClip(Cell): self.network.add_flags(defer_inline=True) self.weights = optimizer.parameters self.optimizer = optimizer - self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.sens = sens self.reducer_flag = False self.grad_reducer = None diff --git a/model_zoo/official/cv/yolov3_darknet53/src/yolo.py b/model_zoo/official/cv/yolov3_darknet53/src/yolo.py index 09cce0c97fa..eefa1e8bfad 100644 --- a/model_zoo/official/cv/yolov3_darknet53/src/yolo.py +++ b/model_zoo/official/cv/yolov3_darknet53/src/yolo.py @@ -412,7 +412,7 @@ class TrainingWrapper(nn.Cell): self.network = network self.weights = optimizer.parameters self.optimizer = optimizer - self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.sens = sens self.reducer_flag = False self.grad_reducer = None diff --git a/model_zoo/official/cv/yolov3_darknet53_quant/src/yolo.py b/model_zoo/official/cv/yolov3_darknet53_quant/src/yolo.py index e010ddef2b8..e7942186361 100644 --- a/model_zoo/official/cv/yolov3_darknet53_quant/src/yolo.py +++ b/model_zoo/official/cv/yolov3_darknet53_quant/src/yolo.py @@ -412,7 +412,7 @@ class TrainingWrapper(nn.Cell): self.network = network self.weights = optimizer.parameters self.optimizer = optimizer - self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.sens = sens self.reducer_flag = False self.grad_reducer = None diff --git a/model_zoo/official/cv/yolov3_resnet18/src/yolov3.py b/model_zoo/official/cv/yolov3_resnet18/src/yolov3.py index 0ac6b21070e..c33ed1a0d3b 100644 --- a/model_zoo/official/cv/yolov3_resnet18/src/yolov3.py +++ b/model_zoo/official/cv/yolov3_resnet18/src/yolov3.py @@ -647,7 +647,7 @@ class TrainingWrapper(nn.Cell): self.network = network self.weights = ms.ParameterTuple(network.trainable_params()) self.optimizer = optimizer - self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.sens = sens self.reducer_flag = False self.grad_reducer = None diff --git a/model_zoo/official/gnn/gat/src/utils.py b/model_zoo/official/gnn/gat/src/utils.py index 06d32529944..8b8a46c76b6 100644 --- a/model_zoo/official/gnn/gat/src/utils.py +++ b/model_zoo/official/gnn/gat/src/utils.py @@ -141,7 +141,7 @@ class 
TrainOneStepCell(nn.Cell): self.network.add_flags(defer_inline=True) self.weights = ParameterTuple(network.trainable_params()) self.optimizer = optimizer - self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.sens = sens def construct(self): diff --git a/model_zoo/official/gnn/gcn/src/metrics.py b/model_zoo/official/gnn/gcn/src/metrics.py index 0d47a9bc6c4..8aa4c3da7e7 100644 --- a/model_zoo/official/gnn/gcn/src/metrics.py +++ b/model_zoo/official/gnn/gcn/src/metrics.py @@ -150,7 +150,7 @@ class TrainOneStepCell(nn.Cell): self.network.add_flags(defer_inline=True) self.weights = ParameterTuple(network.trainable_params()) self.optimizer = optimizer - self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.sens = sens def construct(self): diff --git a/model_zoo/official/nlp/bert/src/bert_for_finetune.py b/model_zoo/official/nlp/bert/src/bert_for_finetune.py index 97262b6faec..1b147982f05 100644 --- a/model_zoo/official/nlp/bert/src/bert_for_finetune.py +++ b/model_zoo/official/nlp/bert/src/bert_for_finetune.py @@ -57,8 +57,7 @@ class BertFinetuneCell(nn.Cell): self.network = network self.weights = optimizer.parameters self.optimizer = optimizer - self.grad = C.GradOperation('grad', - get_by_list=True, + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.reducer_flag = False self.allreduce = P.AllReduce() @@ -160,7 +159,7 @@ class BertSquadCell(nn.Cell): self.network = network self.weights = optimizer.parameters self.optimizer = optimizer - self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.reducer_flag = False self.allreduce = P.AllReduce() self.parallel_mode = context.get_auto_parallel_context("parallel_mode") diff --git a/model_zoo/official/nlp/bert/src/bert_for_pre_training.py b/model_zoo/official/nlp/bert/src/bert_for_pre_training.py index 8607c3ba872..84f442c22c2 100644 --- a/model_zoo/official/nlp/bert/src/bert_for_pre_training.py +++ b/model_zoo/official/nlp/bert/src/bert_for_pre_training.py @@ -274,7 +274,7 @@ class BertTrainOneStepCell(nn.Cell): self.network = network self.weights = optimizer.parameters self.optimizer = optimizer - self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.sens = sens self.reducer_flag = False self.parallel_mode = context.get_auto_parallel_context("parallel_mode") @@ -353,8 +353,7 @@ class BertTrainOneStepWithLossScaleCell(nn.Cell): self.network = network self.weights = optimizer.parameters self.optimizer = optimizer - self.grad = C.GradOperation('grad', - get_by_list=True, + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.reducer_flag = False self.allreduce = P.AllReduce() diff --git a/model_zoo/official/nlp/bert_thor/src/bert_for_pre_training.py b/model_zoo/official/nlp/bert_thor/src/bert_for_pre_training.py index 7ba00146dbd..807d5a5d310 100644 --- a/model_zoo/official/nlp/bert_thor/src/bert_for_pre_training.py +++ b/model_zoo/official/nlp/bert_thor/src/bert_for_pre_training.py @@ -293,7 +293,7 @@ class BertTrainOneStepCell(nn.Cell): self.network = network self.weights = optimizer.parameters self.optimizer = optimizer - self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.sens = sens 
self.reducer_flag = False self.parallel_mode = context.get_auto_parallel_context("parallel_mode") @@ -373,8 +373,7 @@ class BertTrainOneStepWithLossScaleCell(nn.Cell): self.network = network self.weights = optimizer.parameters self.optimizer = optimizer - self.grad = C.GradOperation('grad', - get_by_list=True, + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.reducer_flag = False self.allreduce = P.AllReduce() diff --git a/model_zoo/official/nlp/bert_thor/src/grad_reducer_thor.py b/model_zoo/official/nlp/bert_thor/src/grad_reducer_thor.py index d0316e99b21..47b86a4e65d 100644 --- a/model_zoo/official/nlp/bert_thor/src/grad_reducer_thor.py +++ b/model_zoo/official/nlp/bert_thor/src/grad_reducer_thor.py @@ -119,7 +119,7 @@ class DistributedGradReducerThor(Cell): >>> self.network.add_flags(defer_inline=True) >>> self.weights = ParameterTuple(network.trainable_params()) >>> self.optimizer = optimizer - >>> self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) + >>> self.grad = C.GradOperation(get_by_list=True, sens_param=True) >>> self.sens = sens >>> self.reducer_flag = False >>> self.grad_reducer = None diff --git a/model_zoo/official/nlp/mass/src/transformer/transformer_for_train.py b/model_zoo/official/nlp/mass/src/transformer/transformer_for_train.py index 7aa674d2533..3cb1b3739a1 100644 --- a/model_zoo/official/nlp/mass/src/transformer/transformer_for_train.py +++ b/model_zoo/official/nlp/mass/src/transformer/transformer_for_train.py @@ -239,7 +239,7 @@ class TransformerTrainOneStepWithLossScaleCell(nn.Cell): self.network.add_flags(defer_inline=True) self.weights = optimizer.parameters self.optimizer = optimizer - self.grad = C.GradOperation('grad', get_by_list=True, + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.reducer_flag = False self.all_reduce = P.AllReduce() diff --git a/model_zoo/official/nlp/tinybert/src/tinybert_for_gd_td.py b/model_zoo/official/nlp/tinybert/src/tinybert_for_gd_td.py index f244c5591d5..f003ec26e7e 100644 --- a/model_zoo/official/nlp/tinybert/src/tinybert_for_gd_td.py +++ b/model_zoo/official/nlp/tinybert/src/tinybert_for_gd_td.py @@ -218,8 +218,7 @@ class BertTrainWithLossScaleCell(nn.Cell): self.network = network self.weights = optimizer.parameters self.optimizer = optimizer - self.grad = C.GradOperation('grad', - get_by_list=True, + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.reducer_flag = False self.allreduce = P.AllReduce() @@ -310,8 +309,7 @@ class BertTrainCell(nn.Cell): self.weights = optimizer.parameters self.optimizer = optimizer self.sens = sens - self.grad = C.GradOperation('grad', - get_by_list=True, + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.reducer_flag = False self.parallel_mode = context.get_auto_parallel_context("parallel_mode") @@ -474,8 +472,7 @@ class BertEvaluationWithLossScaleCell(nn.Cell): self.network = network self.weights = optimizer.parameters self.optimizer = optimizer - self.grad = C.GradOperation('grad', - get_by_list=True, + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.reducer_flag = False self.allreduce = P.AllReduce() @@ -562,8 +559,7 @@ class BertEvaluationCell(nn.Cell): self.weights = optimizer.parameters self.optimizer = optimizer self.sens = sens - self.grad = C.GradOperation('grad', - get_by_list=True, + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.reducer_flag = False self.parallel_mode = context.get_auto_parallel_context("parallel_mode") diff --git 
a/model_zoo/official/nlp/transformer/src/transformer_for_train.py b/model_zoo/official/nlp/transformer/src/transformer_for_train.py index 164c9391e91..32d5ad7e207 100644 --- a/model_zoo/official/nlp/transformer/src/transformer_for_train.py +++ b/model_zoo/official/nlp/transformer/src/transformer_for_train.py @@ -158,7 +158,7 @@ class TransformerTrainOneStepCell(nn.Cell): self.network = network self.weights = ParameterTuple(network.trainable_params()) self.optimizer = optimizer - self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.sens = sens self.reducer_flag = False self.parallel_mode = context.get_auto_parallel_context("parallel_mode") @@ -244,8 +244,7 @@ class TransformerTrainOneStepWithLossScaleCell(nn.Cell): self.network.add_flags(defer_inline=True) self.weights = ParameterTuple(network.trainable_params()) self.optimizer = optimizer - self.grad = C.GradOperation('grad', - get_by_list=True, + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.reducer_flag = False self.allreduce = P.AllReduce() diff --git a/model_zoo/official/recommend/deepfm/src/deepfm.py b/model_zoo/official/recommend/deepfm/src/deepfm.py index 61dd3b5f85c..9b82b15525c 100644 --- a/model_zoo/official/recommend/deepfm/src/deepfm.py +++ b/model_zoo/official/recommend/deepfm/src/deepfm.py @@ -286,7 +286,7 @@ class TrainStepWrap(nn.Cell): self.weights = ParameterTuple(network.trainable_params()) self.optimizer = Adam(self.weights, learning_rate=lr, eps=eps, loss_scale=loss_scale) self.hyper_map = C.HyperMap() - self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.sens = loss_scale def construct(self, batch_ids, batch_wts, label): diff --git a/model_zoo/official/recommend/wide_and_deep/src/wide_and_deep.py b/model_zoo/official/recommend/wide_and_deep/src/wide_and_deep.py index 8bef1821b3e..8b6566b32c8 100644 --- a/model_zoo/official/recommend/wide_and_deep/src/wide_and_deep.py +++ b/model_zoo/official/recommend/wide_and_deep/src/wide_and_deep.py @@ -337,9 +337,9 @@ class TrainStepWrap(nn.Cell): self.optimizer_w = FTRL(learning_rate=5e-2, params=self.weights_w, l1=1e-8, l2=1e-8, initial_accum=1.0, loss_scale=sens) self.hyper_map = C.HyperMap() - self.grad_w = C.GradOperation('grad_w', get_by_list=True, + self.grad_w = C.GradOperation(get_by_list=True, sens_param=True) - self.grad_d = C.GradOperation('grad_d', get_by_list=True, + self.grad_d = C.GradOperation(get_by_list=True, sens_param=True) self.sens = sens self.loss_net_w = IthOutputCell(network, output_index=0) diff --git a/model_zoo/official/recommend/wide_and_deep_multitable/src/wide_and_deep.py b/model_zoo/official/recommend/wide_and_deep_multitable/src/wide_and_deep.py index ba358dd7238..c36a7ea66f5 100644 --- a/model_zoo/official/recommend/wide_and_deep_multitable/src/wide_and_deep.py +++ b/model_zoo/official/recommend/wide_and_deep_multitable/src/wide_and_deep.py @@ -537,11 +537,9 @@ class TrainStepWrap(nn.Cell): self.hyper_map = C.HyperMap() - self.grad_w = C.GradOperation('grad_w', - get_by_list=True, + self.grad_w = C.GradOperation(get_by_list=True, sens_param=True) - self.grad_d = C.GradOperation('grad_d', - get_by_list=True, + self.grad_d = C.GradOperation(get_by_list=True, sens_param=True) self.sens = sens diff --git a/tests/mindspore_test_framework/components/function/compile_gradient_wrt_inputs.py 
b/tests/mindspore_test_framework/components/function/compile_gradient_wrt_inputs.py index 83c76c4cdb2..66cd84a7849 100644 --- a/tests/mindspore_test_framework/components/function/compile_gradient_wrt_inputs.py +++ b/tests/mindspore_test_framework/components/function/compile_gradient_wrt_inputs.py @@ -46,5 +46,5 @@ class CompileBackwardBlockWrtInputsBC(IBuilderComponent): """ def __call__(self): - grad_op = GradOperation('grad', get_all=True, sens_param=True) + grad_op = GradOperation(get_all=True, sens_param=True) return create_funcs(self.verification_set, gen_grad_net, compile_block, grad_op) diff --git a/tests/mindspore_test_framework/components/function/compile_gradient_wrt_params.py b/tests/mindspore_test_framework/components/function/compile_gradient_wrt_params.py index 15cc02b3f42..bb6a9c68dff 100644 --- a/tests/mindspore_test_framework/components/function/compile_gradient_wrt_params.py +++ b/tests/mindspore_test_framework/components/function/compile_gradient_wrt_params.py @@ -46,5 +46,5 @@ class CompileBackwardBlockWrtParamsBC(IBuilderComponent): """ def __call__(self, verification_set): - grad_op = GradOperation('grad', get_by_list=True, sens_param=True) + grad_op = GradOperation(get_by_list=True, sens_param=True) return create_funcs(self.verification_set, gen_grad_net, compile_block, grad_op) diff --git a/tests/mindspore_test_framework/components/function/init_params_with_rand_and_run_gradient_wrt_inputs.py b/tests/mindspore_test_framework/components/function/init_params_with_rand_and_run_gradient_wrt_inputs.py index 2dcf8073288..c7a0ca78c70 100644 --- a/tests/mindspore_test_framework/components/function/init_params_with_rand_and_run_gradient_wrt_inputs.py +++ b/tests/mindspore_test_framework/components/function/init_params_with_rand_and_run_gradient_wrt_inputs.py @@ -22,5 +22,5 @@ from ...utils.block_util import run_block, gen_grad_net, create_funcs, get_unifo class RunBackwardBlockWrtInputsWithRandParamBC(IBuilderComponent): def __call__(self): - grad_op = GradOperation('grad', get_all=True, sens_param=True) + grad_op = GradOperation(get_all=True, sens_param=True) return create_funcs(self.verification_set, gen_grad_net, run_block, grad_op, get_uniform_with_shape) diff --git a/tests/mindspore_test_framework/components/function/init_params_with_rand_and_run_gradient_wrt_params.py b/tests/mindspore_test_framework/components/function/init_params_with_rand_and_run_gradient_wrt_params.py index 22f03194c48..4f046f60ad1 100644 --- a/tests/mindspore_test_framework/components/function/init_params_with_rand_and_run_gradient_wrt_params.py +++ b/tests/mindspore_test_framework/components/function/init_params_with_rand_and_run_gradient_wrt_params.py @@ -22,5 +22,5 @@ from ...utils.block_util import run_block, gen_grad_net, create_funcs, get_unifo class RunBackwardBlockWrtParamsWithRandParamBC(IBuilderComponent): def __call__(self): - grad_op = GradOperation('grad', get_by_list=True, sens_param=True) + grad_op = GradOperation(get_by_list=True, sens_param=True) return create_funcs(self.verification_set, gen_grad_net, run_block, grad_op, get_uniform_with_shape) diff --git a/tests/mindspore_test_framework/components/function/run_gradient_wrt_inputs.py b/tests/mindspore_test_framework/components/function/run_gradient_wrt_inputs.py index aa7ffad79c3..124fe70c9b9 100644 --- a/tests/mindspore_test_framework/components/function/run_gradient_wrt_inputs.py +++ b/tests/mindspore_test_framework/components/function/run_gradient_wrt_inputs.py @@ -22,5 +22,5 @@ from ...utils.block_util import run_block, 
gen_grad_net, create_funcs class RunBackwardBlockWrtInputsBC(IBuilderComponent): def __call__(self): - grad_op = GradOperation('grad', get_all=True, sens_param=True) + grad_op = GradOperation(get_all=True, sens_param=True) return create_funcs(self.verification_set, gen_grad_net, run_block, grad_op) diff --git a/tests/mindspore_test_framework/components/function/run_gradient_wrt_params.py b/tests/mindspore_test_framework/components/function/run_gradient_wrt_params.py index d365a002300..ae2086b2fff 100644 --- a/tests/mindspore_test_framework/components/function/run_gradient_wrt_params.py +++ b/tests/mindspore_test_framework/components/function/run_gradient_wrt_params.py @@ -22,5 +22,5 @@ from ...utils.block_util import run_block, gen_grad_net, create_funcs class RunBackwardBlockWrtParamsBC(IBuilderComponent): def __call__(self): - grad_op = GradOperation('grad', get_by_list=True, sens_param=True) + grad_op = GradOperation(get_by_list=True, sens_param=True) return create_funcs(self.verification_set, gen_grad_net, run_block, grad_op) diff --git a/tests/mindspore_test_framework/utils/block_util.py b/tests/mindspore_test_framework/utils/block_util.py index faf7143f4b7..0ecfd066dc4 100644 --- a/tests/mindspore_test_framework/utils/block_util.py +++ b/tests/mindspore_test_framework/utils/block_util.py @@ -331,7 +331,7 @@ def create_funcs(verification_set, block_generator, block_runner, grad_op=None, # gradient if grad_op: if num_outputs == 0: - grad_op_ = GradOperation('grad', get_all=grad_op.get_all, + grad_op_ = GradOperation(get_all=grad_op.get_all, get_by_list=grad_op.get_by_list, sens_param=False) b = block_generator(block, grad_op_, len(inputs), desc_const=desc_const, const_first=const_first, add_fake_input=add_fake_input) diff --git a/tests/mindspore_test_framework/utils/bprop_util.py b/tests/mindspore_test_framework/utils/bprop_util.py index 1990c1d0df7..11e9c0f90ff 100644 --- a/tests/mindspore_test_framework/utils/bprop_util.py +++ b/tests/mindspore_test_framework/utils/bprop_util.py @@ -85,7 +85,7 @@ def bprop(func, *inputs, grads_wrt_outputs=None, wrt: list = None, params: list if not params: params = func.trainable_params() - grad_op = GradOperation(name='grad', get_all=wrt_inputs, get_by_list=wrt_params, sens_param=with_sens_param) + grad_op = GradOperation(get_all=wrt_inputs, get_by_list=wrt_params, sens_param=with_sens_param) grad = Bprop(func, wrt_params, params, grad_op, grads_wrt_outputs) if context.get_context("mode") == context.PYNATIVE_MODE: diff --git a/tests/mindspore_test_framework/utils/check_gradient.py b/tests/mindspore_test_framework/utils/check_gradient.py index 81490e7ee17..c11a7db5de5 100644 --- a/tests/mindspore_test_framework/utils/check_gradient.py +++ b/tests/mindspore_test_framework/utils/check_gradient.py @@ -315,7 +315,7 @@ class ScalarGradChecker(_GradChecker): output_selector=None, sampling_times=-1, reduce_output=False) -> None: - grad_op = GradOperation('grad', get_all=True, sens_param=True) + grad_op = GradOperation(get_all=True, sens_param=True) super(ScalarGradChecker, self).__init__(fn, grad_op, args, delta, max_error, input_selector, \ output_selector, sampling_times, reduce_output) @@ -358,7 +358,7 @@ class OperationGradChecker(_GradChecker): output_selector=None, sampling_times=-1, reduce_output=False) -> None: - grad_op = GradOperation('grad', get_all=True, sens_param=True) + grad_op = GradOperation(get_all=True, sens_param=True) super(OperationGradChecker, self).__init__(fn, grad_op, args, delta, max_error, input_selector, \ output_selector, 
sampling_times, reduce_output) @@ -390,7 +390,7 @@ class NNGradChecker(_GradChecker): output_selector=None, sampling_times=-1, reduce_output=False) -> None: - grad_op = GradOperation('grad', get_by_list=True, sens_param=True) + grad_op = GradOperation(get_by_list=True, sens_param=True) self.params = ParameterTuple(fn.trainable_params()) super(NNGradChecker, self).__init__(fn, grad_op, args, delta, max_error, input_selector, \ output_selector, sampling_times, reduce_output) diff --git a/tests/ops_common.py b/tests/ops_common.py index fc41ea575d0..7e042f57d46 100644 --- a/tests/ops_common.py +++ b/tests/ops_common.py @@ -23,7 +23,7 @@ from mindspore import Tensor from mindspore.common.api import _executor -grad_all_with_sens = C.GradOperation('grad_all_with_sens', get_all=True, sens_param=True) +grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True) class InputBackward(nn.Cell): diff --git a/tests/perf_test/test_lenet.py b/tests/perf_test/test_lenet.py index 41ff41acf48..8b61e9be5ea 100644 --- a/tests/perf_test/test_lenet.py +++ b/tests/perf_test/test_lenet.py @@ -27,7 +27,7 @@ from mindspore.common.api import _executor context.set_context(mode=context.GRAPH_MODE) -grad_all_with_sens = C.GradOperation('grad_all_with_sens', get_all=True, sens_param=True) +grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True) batch_size = 1 channel = 1 diff --git a/tests/st/control/test_cont_grad.py b/tests/st/control/test_cont_grad.py index 4eb2257dcb6..68f6b1f30dc 100644 --- a/tests/st/control/test_cont_grad.py +++ b/tests/st/control/test_cont_grad.py @@ -28,8 +28,8 @@ from mindspore.ops import operations as P # context.set_context(save_graphs=True) -grad_by_list = C.GradOperation('get_by_list', get_by_list=True) -grad_all = C.GradOperation('get_all', get_all=True) +grad_by_list = C.GradOperation(get_by_list=True) +grad_all = C.GradOperation(get_all=True) def test_while_forward(): diff --git a/tests/st/gnn/test_gnn_aggregator.py b/tests/st/gnn/test_gnn_aggregator.py index a17187a2d47..fc1f682a782 100644 --- a/tests/st/gnn/test_gnn_aggregator.py +++ b/tests/st/gnn/test_gnn_aggregator.py @@ -25,7 +25,7 @@ from mindspore.common.api import _executor context.set_context(mode=context.GRAPH_MODE) -grad_all_with_sens = C.GradOperation('grad_all_with_sens', get_all=True, sens_param=True) +grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True) class MeanAggregatorGrad(nn.Cell): diff --git a/tests/st/model_zoo_tests/wide_and_deep/python_file_for_ci/wide_and_deep.py b/tests/st/model_zoo_tests/wide_and_deep/python_file_for_ci/wide_and_deep.py index e860e0afef6..c31b3b5b0e7 100644 --- a/tests/st/model_zoo_tests/wide_and_deep/python_file_for_ci/wide_and_deep.py +++ b/tests/st/model_zoo_tests/wide_and_deep/python_file_for_ci/wide_and_deep.py @@ -284,9 +284,9 @@ class TrainStepWrap(nn.Cell): self.optimizer_d = Adam( self.weights_d, learning_rate=3.5e-4, eps=1e-8, loss_scale=sens) self.hyper_map = C.HyperMap() - self.grad_w = C.GradOperation('grad_w', get_by_list=True, + self.grad_w = C.GradOperation(get_by_list=True, sens_param=True) - self.grad_d = C.GradOperation('grad_d', get_by_list=True, + self.grad_d = C.GradOperation(get_by_list=True, sens_param=True) self.sens = sens self.loss_net_w = IthOutputCell(network, output_index=0) diff --git a/tests/st/model_zoo_tests/yolov3/src/yolov3.py b/tests/st/model_zoo_tests/yolov3/src/yolov3.py index 0ac6b21070e..c33ed1a0d3b 100644 --- a/tests/st/model_zoo_tests/yolov3/src/yolov3.py +++ b/tests/st/model_zoo_tests/yolov3/src/yolov3.py @@ -647,7 
+647,7 @@ class TrainingWrapper(nn.Cell): self.network = network self.weights = ms.ParameterTuple(network.trainable_params()) self.optimizer = optimizer - self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.sens = sens self.reducer_flag = False self.grad_reducer = None diff --git a/tests/st/networks/models/bert/src/bert_for_pre_training.py b/tests/st/networks/models/bert/src/bert_for_pre_training.py index 7c557a49c9e..2577cf617a6 100644 --- a/tests/st/networks/models/bert/src/bert_for_pre_training.py +++ b/tests/st/networks/models/bert/src/bert_for_pre_training.py @@ -271,7 +271,7 @@ class BertTrainOneStepCell(nn.Cell): self.network = network self.weights = ParameterTuple(network.trainable_params()) self.optimizer = optimizer - self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.sens = sens self.reducer_flag = False self.parallel_mode = context.get_auto_parallel_context("parallel_mode") @@ -351,8 +351,7 @@ class BertTrainOneStepWithLossScaleCell(nn.Cell): self.network = network self.weights = ParameterTuple(network.trainable_params()) self.optimizer = optimizer - self.grad = C.GradOperation('grad', - get_by_list=True, + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.reducer_flag = False self.allreduce = P.AllReduce() diff --git a/tests/st/networks/models/bert/src/utils.py b/tests/st/networks/models/bert/src/utils.py index e4dd3e7b472..2b19d3d2915 100644 --- a/tests/st/networks/models/bert/src/utils.py +++ b/tests/st/networks/models/bert/src/utils.py @@ -52,8 +52,7 @@ class BertFinetuneCell(nn.Cell): self.network = network self.weights = ParameterTuple(network.trainable_params()) self.optimizer = optimizer - self.grad = C.GradOperation('grad', - get_by_list=True, + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.reducer_flag = False self.allreduce = P.AllReduce() diff --git a/tests/st/networks/models/resnet50/src_thor/grad_reducer_thor.py b/tests/st/networks/models/resnet50/src_thor/grad_reducer_thor.py index 0b160c02f23..e84c9412497 100644 --- a/tests/st/networks/models/resnet50/src_thor/grad_reducer_thor.py +++ b/tests/st/networks/models/resnet50/src_thor/grad_reducer_thor.py @@ -120,7 +120,7 @@ class DistributedGradReducerThor(Cell): >>> self.network.add_flags(defer_inline=True) >>> self.weights = ParameterTuple(network.trainable_params()) >>> self.optimizer = optimizer - >>> self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) + >>> self.grad = C.GradOperation(get_by_list=True, sens_param=True) >>> self.sens = sens >>> self.reducer_flag = False >>> self.grad_reducer = None diff --git a/tests/st/networks/test_cell_bprop.py b/tests/st/networks/test_cell_bprop.py index 9fd699682ee..92cda581a00 100644 --- a/tests/st/networks/test_cell_bprop.py +++ b/tests/st/networks/test_cell_bprop.py @@ -29,7 +29,7 @@ from mindspore.ops import operations as P context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") -grad_all = C.GradOperation('get_all', get_all=True) +grad_all = C.GradOperation(get_all=True) class MulAdd(nn.Cell): @@ -351,7 +351,7 @@ class MulAddWithParam(nn.Cell): @pytest.mark.platform_x86_ascend_training @pytest.mark.env_onecard def test_refkey_bprop(): - grad_by_list = C.GradOperation('get_by_list', get_all=True, get_by_list=True) + grad_by_list = C.GradOperation(get_all=True, get_by_list=True) class GradWrap(nn.Cell): def __init__(self, 
network): super(GradWrap, self).__init__() diff --git a/tests/st/ops/ascend/test_addn.py b/tests/st/ops/ascend/test_addn.py index fa97fcc973b..7644dea3974 100644 --- a/tests/st/ops/ascend/test_addn.py +++ b/tests/st/ops/ascend/test_addn.py @@ -49,7 +49,7 @@ def test_net(): def test_grad_addn_with_list(): - grad_op = C.GradOperation('get_all', get_all=True) + grad_op = C.GradOperation(get_all=True) class AddN(nn.Cell): def __init__(self): super().__init__() diff --git a/tests/st/ops/ascend/test_conv_grad.py b/tests/st/ops/ascend/test_conv_grad.py index e24f218087d..85aed21cbc1 100644 --- a/tests/st/ops/ascend/test_conv_grad.py +++ b/tests/st/ops/ascend/test_conv_grad.py @@ -29,7 +29,7 @@ context.set_context(device_target="Ascend") class Grad(nn.Cell): def __init__(self, network): super(Grad, self).__init__() - self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) + self.grad = GradOperation(get_all=True, sens_param=True) self.network = network @ms_function diff --git a/tests/st/ops/ascend/test_dense_grad.py b/tests/st/ops/ascend/test_dense_grad.py index 7a529144ee4..6cd6516da13 100644 --- a/tests/st/ops/ascend/test_dense_grad.py +++ b/tests/st/ops/ascend/test_dense_grad.py @@ -26,7 +26,7 @@ context.set_context(device_target="Ascend") class Grad(nn.Cell): def __init__(self, network): super(Grad, self).__init__() - self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) + self.grad = GradOperation(get_all=True, sens_param=True) self.network = network @ms_function diff --git a/tests/st/ops/ascend/test_fused_batchnorm_grad.py b/tests/st/ops/ascend/test_fused_batchnorm_grad.py index a8d4190e097..7210b1f3c1e 100644 --- a/tests/st/ops/ascend/test_fused_batchnorm_grad.py +++ b/tests/st/ops/ascend/test_fused_batchnorm_grad.py @@ -30,7 +30,7 @@ context.set_context(device_target="Ascend") class Grad(nn.Cell): def __init__(self, network): super(Grad, self).__init__() - self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) + self.grad = GradOperation(get_all=True, sens_param=True) self.network = network @ms_function diff --git a/tests/st/ops/ascend/test_maxpool_grad.py b/tests/st/ops/ascend/test_maxpool_grad.py index 9af45111203..2fb8fb60285 100644 --- a/tests/st/ops/ascend/test_maxpool_grad.py +++ b/tests/st/ops/ascend/test_maxpool_grad.py @@ -27,7 +27,7 @@ context.set_context(device_target="Ascend") class Grad(nn.Cell): def __init__(self, network): super(Grad, self).__init__() - self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) + self.grad = GradOperation(get_all=True, sens_param=True) self.network = network @ms_function diff --git a/tests/st/ops/ascend/test_maxpool_with_argmax_grad.py b/tests/st/ops/ascend/test_maxpool_with_argmax_grad.py index 8d0d5155803..a6c29335133 100644 --- a/tests/st/ops/ascend/test_maxpool_with_argmax_grad.py +++ b/tests/st/ops/ascend/test_maxpool_with_argmax_grad.py @@ -27,7 +27,7 @@ context.set_context(device_target="Ascend") class Grad(nn.Cell): def __init__(self, network): super(Grad, self).__init__() - self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) + self.grad = GradOperation(get_all=True, sens_param=True) self.network = network @ms_function diff --git a/tests/st/ops/ascend/test_relu_grad.py b/tests/st/ops/ascend/test_relu_grad.py index 4ebc17d507f..e969bf6b886 100644 --- a/tests/st/ops/ascend/test_relu_grad.py +++ b/tests/st/ops/ascend/test_relu_grad.py @@ -27,7 +27,7 @@ context.set_context(device_target="Ascend") class Grad(nn.Cell): def __init__(self, network): 
super(Grad, self).__init__() - self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) + self.grad = GradOperation(get_all=True, sens_param=True) self.network = network @ms_function diff --git a/tests/st/ops/ascend/test_simplemean_grad.py b/tests/st/ops/ascend/test_simplemean_grad.py index 2704c1434fb..00605e1ef6d 100644 --- a/tests/st/ops/ascend/test_simplemean_grad.py +++ b/tests/st/ops/ascend/test_simplemean_grad.py @@ -27,7 +27,7 @@ context.set_context(device_target="Ascend") class Grad(nn.Cell): def __init__(self, network): super(Grad, self).__init__() - self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) + self.grad = GradOperation(get_all=True, sens_param=True) self.network = network @ms_function diff --git a/tests/st/ops/ascend/test_tbe_ops/test_batchnorm_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_batchnorm_grad.py index 0340f9e6be0..7c9110ba74e 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_batchnorm_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_batchnorm_grad.py @@ -30,7 +30,7 @@ context.set_context(device_target="Ascend") class Grad(nn.Cell): def __init__(self, network): super(Grad, self).__init__() - self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) + self.grad = GradOperation(get_all=True, sens_param=True) self.network = network @ms_function diff --git a/tests/st/ops/ascend/test_tbe_ops/test_gelu_grad_sens.py b/tests/st/ops/ascend/test_tbe_ops/test_gelu_grad_sens.py index 0f890ea9987..70bce4794dd 100755 --- a/tests/st/ops/ascend/test_tbe_ops/test_gelu_grad_sens.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_gelu_grad_sens.py @@ -27,7 +27,7 @@ context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") class Grad(Cell): def __init__(self, network): super(Grad, self).__init__() - self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) + self.grad = GradOperation(get_all=True, sens_param=True) self.network = network def construct(self, input_, output_grad): @@ -71,7 +71,7 @@ class MEGeluLargeIn(Cell): class GradLargeIn(Cell): def __init__(self, network): super(GradLargeIn, self).__init__() - self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) + self.grad = GradOperation(get_all=True, sens_param=True) self.network = network def construct(self, x1, x2, output_grad): diff --git a/tests/st/ops/ascend/test_tbe_ops/test_layernorm_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_layernorm_grad.py index c068cbfe8a5..dfcae1b895c 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_layernorm_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_layernorm_grad.py @@ -27,7 +27,7 @@ context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") class Grad(Cell): def __init__(self, network): super(Grad, self).__init__() - self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) + self.grad = GradOperation(get_all=True, sens_param=True) self.network = network def construct(self, input_, output_grad,): diff --git a/tests/st/ops/ascend/test_tbe_ops/test_maximum_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_maximum_grad.py index 529343812eb..cd55676b0b5 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_maximum_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_maximum_grad.py @@ -21,7 +21,7 @@ from mindspore.ops import composite as C from mindspore.ops import operations as P context.set_context(device_target="Ascend") -grad = C.GradOperation('get_all', get_all=True, sens_param=True) +grad = C.GradOperation(get_all=True, sens_param=True) class 
MaxNetMe(Cell): diff --git a/tests/st/ops/ascend/test_tbe_ops/test_maxpool_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_maxpool_grad.py index 7beb22f005e..4d17b046bab 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_maxpool_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_maxpool_grad.py @@ -27,7 +27,7 @@ context.set_context(device_target="Ascend") class Grad(nn.Cell): def __init__(self, network): super(Grad, self).__init__() - self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) + self.grad = GradOperation(get_all=True, sens_param=True) self.network = network @ms_function diff --git a/tests/st/ops/ascend/test_tbe_ops/test_minimum_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_minimum_grad.py index 3a19aaa1d16..7559ebe957f 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_minimum_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_minimum_grad.py @@ -21,7 +21,7 @@ from mindspore.ops import composite as C from mindspore.ops.operations import Minimum context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") -grad = C.GradOperation('get_all', get_all=True, sens_param=True) +grad = C.GradOperation(get_all=True, sens_param=True) class MinNetMe(Cell): diff --git a/tests/st/ops/ascend/test_tbe_ops/test_relu_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_relu_grad.py index 40dc5ebadae..705e8bac7fd 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_relu_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_relu_grad.py @@ -27,7 +27,7 @@ context.set_context(device_target="Ascend") class Grad(nn.Cell): def __init__(self, network): super(Grad, self).__init__() - self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) + self.grad = GradOperation(get_all=True, sens_param=True) self.network = network @ms_function diff --git a/tests/st/ops/ascend/test_tbe_ops/test_relu_v2_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_relu_v2_grad.py index 645765792a7..999a7af76e1 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_relu_v2_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_relu_v2_grad.py @@ -27,7 +27,7 @@ context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") class Grad(nn.Cell): def __init__(self, network): super(Grad, self).__init__() - self.grad = GradOperation(name="get_all", get_all=True) + self.grad = GradOperation(get_all=True) self.network = network @ms_function diff --git a/tests/st/ops/ascend/test_tbe_ops/test_resize_nearest_neighbor_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_resize_nearest_neighbor_grad.py index 4603fc59d86..8d53fac1462 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_resize_nearest_neighbor_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_resize_nearest_neighbor_grad.py @@ -37,7 +37,7 @@ class Net(nn.Cell): class Grad(nn.Cell): def __init__(self, network): super(Grad, self).__init__() - self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) + self.grad = GradOperation(get_all=True, sens_param=True) self.network = network @ms_function diff --git a/tests/st/ops/ascend/test_tbe_ops/test_sigmoid_cross_entropy_with_logits_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_sigmoid_cross_entropy_with_logits_grad.py index f3eaef5b869..d9203f8892c 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_sigmoid_cross_entropy_with_logits_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_sigmoid_cross_entropy_with_logits_grad.py @@ -37,7 +37,7 @@ class Net(nn.Cell): class Grad(nn.Cell): def __init__(self, network): super(Grad, self).__init__() - self.grad = GradOperation(name="get_all", 
get_all=True, sens_param=True) + self.grad = GradOperation(get_all=True, sens_param=True) self.network = network @ms_function diff --git a/tests/st/ops/ascend/test_tbe_ops/test_sigmoid_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_sigmoid_grad.py index 8e68ac3235b..e6bcb97e7e1 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_sigmoid_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_sigmoid_grad.py @@ -37,7 +37,7 @@ class Net(nn.Cell): class Grad(nn.Cell): def __init__(self, network): super(Grad, self).__init__() - self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) + self.grad = GradOperation(get_all=True, sens_param=True) self.network = network @ms_function diff --git a/tests/st/ops/ascend/test_tbe_ops/test_smooth_l1_loss_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_smooth_l1_loss_grad.py index 7d30ae4bed5..a69329a4067 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_smooth_l1_loss_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_smooth_l1_loss_grad.py @@ -36,7 +36,7 @@ class Net(nn.Cell): class Grad(nn.Cell): def __init__(self, network): super(Grad, self).__init__() - self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) + self.grad = GradOperation(get_all=True, sens_param=True) self.network = network def construct(self, pred, gt, dout): diff --git a/tests/st/ops/ascend/test_tbe_ops/test_stridedslice_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_stridedslice_grad.py index 1938aaeca38..d3a488a31da 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_stridedslice_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_stridedslice_grad.py @@ -26,7 +26,7 @@ context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") class Grad(Cell): def __init__(self, network): super(Grad, self).__init__() - self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) + self.grad = GradOperation(get_all=True, sens_param=True) self.network = network def construct(self, input_, output_grad): diff --git a/tests/st/ops/cpu/test_batchnorm_op.py b/tests/st/ops/cpu/test_batchnorm_op.py index 0dc090d63e6..e020354f8b3 100644 --- a/tests/st/ops/cpu/test_batchnorm_op.py +++ b/tests/st/ops/cpu/test_batchnorm_op.py @@ -37,7 +37,7 @@ class Batchnorm_Net(Cell): class Grad(Cell): def __init__(self, network): super(Grad, self).__init__() - self.grad = C.GradOperation(name="get_all", get_all=True, sens_param=True) + self.grad = C.GradOperation(get_all=True, sens_param=True) self.network = network def construct(self, input_data, sens): diff --git a/tests/st/ops/cpu/test_lstm_op.py b/tests/st/ops/cpu/test_lstm_op.py index c8174a5f905..3b159c83db9 100644 --- a/tests/st/ops/cpu/test_lstm_op.py +++ b/tests/st/ops/cpu/test_lstm_op.py @@ -207,8 +207,7 @@ class Grad(nn.Cell): super(Grad, self).__init__() self.network = network self.weights = ParameterTuple(network.trainable_params()) - self.grad = C.GradOperation('grad', - get_by_list=True, + self.grad = C.GradOperation(get_by_list=True, sens_param=True) @ms_function diff --git a/tests/st/ops/custom_ops_tbe/test_square.py b/tests/st/ops/custom_ops_tbe/test_square.py index b8d847f4a72..5e9c4d8535b 100644 --- a/tests/st/ops/custom_ops_tbe/test_square.py +++ b/tests/st/ops/custom_ops_tbe/test_square.py @@ -23,7 +23,7 @@ from mindspore.ops import composite as C context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") -grad_with_sens = C.GradOperation('grad_with_sens', sens_param=True) +grad_with_sens = C.GradOperation(sens_param=True) class Net(nn.Cell): diff --git a/tests/st/ops/gpu/test_batchnorm_op.py 
b/tests/st/ops/gpu/test_batchnorm_op.py index 0aeac6dfcc1..58aa8416951 100644 --- a/tests/st/ops/gpu/test_batchnorm_op.py +++ b/tests/st/ops/gpu/test_batchnorm_op.py @@ -37,7 +37,7 @@ class Batchnorm_Net(Cell): class Grad(Cell): def __init__(self, network): super(Grad, self).__init__() - self.grad = C.GradOperation(name="get_all", get_all=True, sens_param=True) + self.grad = C.GradOperation(get_all=True, sens_param=True) self.network = network def construct(self, input_data, sens): diff --git a/tests/st/ops/gpu/test_binary_cross_entropy_op.py b/tests/st/ops/gpu/test_binary_cross_entropy_op.py index 724188314db..1b770b78d9b 100644 --- a/tests/st/ops/gpu/test_binary_cross_entropy_op.py +++ b/tests/st/ops/gpu/test_binary_cross_entropy_op.py @@ -54,7 +54,7 @@ def test_binary_cross_entropy_loss(): class Grad(nn.Cell): def __init__(self, network): super(Grad, self).__init__() - self.grad = C.GradOperation(name="get_all", get_all=True, sens_param=True) + self.grad = C.GradOperation(get_all=True, sens_param=True) self.network = network def construct(self, x1, x2, sens, weight): diff --git a/tests/st/ops/gpu/test_ctcloss_op.py b/tests/st/ops/gpu/test_ctcloss_op.py index b9a88e7e70a..964677740f7 100644 --- a/tests/st/ops/gpu/test_ctcloss_op.py +++ b/tests/st/ops/gpu/test_ctcloss_op.py @@ -40,7 +40,7 @@ class Net(nn.Cell): class GradData(nn.Cell): def __init__(self, network): super(GradData, self).__init__() - self.grad = GradOperation(name="get_all", get_all=True, sens_param=False) + self.grad = GradOperation(get_all=True, sens_param=False) self.network = network def construct(self, probs, labels, input_lengths, label_lengths): diff --git a/tests/st/ops/gpu/test_dense_op.py b/tests/st/ops/gpu/test_dense_op.py index e9c010ea77d..b07baa658b3 100644 --- a/tests/st/ops/gpu/test_dense_op.py +++ b/tests/st/ops/gpu/test_dense_op.py @@ -65,7 +65,7 @@ def test_biasadd(): class GradData(nn.Cell): def __init__(self, network): super(GradData, self).__init__() - self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) + self.grad = GradOperation(get_all=True, sens_param=True) self.network = network def construct(self, inputs, output_grad): @@ -77,8 +77,7 @@ class GradWeight(nn.Cell): super(GradWeight, self).__init__() self.network = network self.weights = ParameterTuple(network.trainable_params()) - self.grad = C.GradOperation('grad', - get_by_list=True, + self.grad = C.GradOperation(get_by_list=True, sens_param=True) def construct(self, x, output_grad): @@ -169,7 +168,7 @@ def test_dw(): class Grad(nn.Cell): def __init__(self, network): super(Grad, self).__init__() - self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) + self.grad = GradOperation(get_all=True, sens_param=True) self.network = network def construct(self, input_, bias, dy): diff --git a/tests/st/ops/gpu/test_gelu_grad_op.py b/tests/st/ops/gpu/test_gelu_grad_op.py index 82145b9d3ff..975355114e7 100644 --- a/tests/st/ops/gpu/test_gelu_grad_op.py +++ b/tests/st/ops/gpu/test_gelu_grad_op.py @@ -37,7 +37,7 @@ class GeluNet(nn.Cell): class Grad(nn.Cell): def __init__(self, network): super(Grad, self).__init__() - self.grad = C.GradOperation(name="get_all", get_all=True, sens_param=True) + self.grad = C.GradOperation(get_all=True, sens_param=True) self.network = network def construct(self, input_data, sens): diff --git a/tests/st/ops/gpu/test_kl_div_op.py b/tests/st/ops/gpu/test_kl_div_op.py index e5b8fcd0799..64c9845f25d 100644 --- a/tests/st/ops/gpu/test_kl_div_op.py +++ b/tests/st/ops/gpu/test_kl_div_op.py @@ -53,7 
+53,7 @@ def test_binary_cross_entropy_loss():
 class Grad(nn.Cell):
     def __init__(self, network):
         super(Grad, self).__init__()
-        self.grad = C.GradOperation(name="get_all", get_all=True, sens_param=True)
+        self.grad = C.GradOperation(get_all=True, sens_param=True)
         self.network = network

     def construct(self, x1, x2, sens):
diff --git a/tests/st/ops/gpu/test_logsoftmax_op.py b/tests/st/ops/gpu/test_logsoftmax_op.py
index 5834f90a109..271be6367b4 100644
--- a/tests/st/ops/gpu/test_logsoftmax_op.py
+++ b/tests/st/ops/gpu/test_logsoftmax_op.py
@@ -52,7 +52,7 @@ class LogSoftmax(nn.Cell):
 class Grad(nn.Cell):
     def __init__(self, network):
         super(Grad, self).__init__()
-        self.grad = C.GradOperation(name="get_all", get_all=True, sens_param=True)
+        self.grad = C.GradOperation(get_all=True, sens_param=True)
         self.network = network

     def construct(self, input_data, sens):
diff --git a/tests/st/ops/gpu/test_lstm_op.py b/tests/st/ops/gpu/test_lstm_op.py
index f0a58c2d36c..de1197eeb20 100644
--- a/tests/st/ops/gpu/test_lstm_op.py
+++ b/tests/st/ops/gpu/test_lstm_op.py
@@ -581,8 +581,7 @@ class Grad(nn.Cell):
         super(Grad, self).__init__()
         self.network = network
         self.weights = ParameterTuple(network.trainable_params())
-        self.grad = C.GradOperation('grad',
-                                    get_by_list=True,
+        self.grad = C.GradOperation(get_by_list=True,
                                     sens_param=True)

     @ms_function
diff --git a/tests/st/ops/gpu/test_maximum_op.py b/tests/st/ops/gpu/test_maximum_op.py
index 9566554231f..4e009dae430 100644
--- a/tests/st/ops/gpu/test_maximum_op.py
+++ b/tests/st/ops/gpu/test_maximum_op.py
@@ -35,7 +35,7 @@ class Net(Cell):
 class Grad(Cell):
     def __init__(self, network):
         super(Grad, self).__init__()
-        self.grad = C.GradOperation(name="get_all", get_all=True, sens_param=True)
+        self.grad = C.GradOperation(get_all=True, sens_param=True)
         self.network = network

     def construct(self, x1, x2, sens):
diff --git a/tests/st/ops/gpu/test_minimum_op.py b/tests/st/ops/gpu/test_minimum_op.py
index 2a14a5bb042..78198db45a9 100644
--- a/tests/st/ops/gpu/test_minimum_op.py
+++ b/tests/st/ops/gpu/test_minimum_op.py
@@ -36,7 +36,7 @@ class MinimumNet(Cell):
 class Grad(Cell):
     def __init__(self, network):
         super(Grad, self).__init__()
-        self.grad = C.GradOperation(name="get_all", get_all=True, sens_param=True)
+        self.grad = C.GradOperation(get_all=True, sens_param=True)
         self.network = network

     def construct(self, x1, x2, sens):
diff --git a/tests/st/ops/gpu/test_mirror_pad.py b/tests/st/ops/gpu/test_mirror_pad.py
index 9e6613d7440..d28eaeecc88 100644
--- a/tests/st/ops/gpu/test_mirror_pad.py
+++ b/tests/st/ops/gpu/test_mirror_pad.py
@@ -58,7 +58,7 @@ def test_mirror_pad():
 class Grad(nn.Cell):
     def __init__(self, network):
         super(Grad, self).__init__()
-        self.grad = GradOperation(name="get_all", get_all=True, sens_param=True)
+        self.grad = GradOperation(get_all=True, sens_param=True)
         self.network = network
     def construct(self, input_, output_grad):
         return self.grad(self.network)(input_, output_grad)
diff --git a/tests/st/ops/gpu/test_smoothl1loss_op.py b/tests/st/ops/gpu/test_smoothl1loss_op.py
index 10d8411d20a..4145f5e971d 100644
--- a/tests/st/ops/gpu/test_smoothl1loss_op.py
+++ b/tests/st/ops/gpu/test_smoothl1loss_op.py
@@ -59,7 +59,7 @@ def test_smoothl1loss():
 class Grad(nn.Cell):
     def __init__(self, network):
         super(Grad, self).__init__()
-        self.grad = C.GradOperation(name="get_all", get_all=True, sens_param=True)
+        self.grad = C.GradOperation(get_all=True, sens_param=True)
         self.network = network

     def construct(self, x1, x2, sens):
diff --git a/tests/st/ops/gpu/test_softmax_op.py b/tests/st/ops/gpu/test_softmax_op.py
index 73925d8c3b1..0f654e10bd9 100644
--- a/tests/st/ops/gpu/test_softmax_op.py
+++ b/tests/st/ops/gpu/test_softmax_op.py
@@ -79,7 +79,7 @@ class Net(nn.Cell):
 class Grad(nn.Cell):
     def __init__(self, network):
         super(Grad, self).__init__()
-        self.grad = C.GradOperation(name="get_all", get_all=True, sens_param=True)
+        self.grad = C.GradOperation(get_all=True, sens_param=True)
         self.network = network

     def construct(self, input_data, sens):
diff --git a/tests/st/ops/gpu/test_stridedslice_grad_op.py b/tests/st/ops/gpu/test_stridedslice_grad_op.py
index 17ad80d00aa..77cb7e6009c 100644
--- a/tests/st/ops/gpu/test_stridedslice_grad_op.py
+++ b/tests/st/ops/gpu/test_stridedslice_grad_op.py
@@ -36,7 +36,7 @@ class StridedSliceNet(nn.Cell):
 class GradData(nn.Cell):
     def __init__(self, network):
         super(GradData, self).__init__()
-        self.grad = C.GradOperation(name="get_all", get_all=True, sens_param=False)
+        self.grad = C.GradOperation(get_all=True, sens_param=False)
         self.network = network

     def construct(self, x):
diff --git a/tests/st/ops/gpu/test_tanh_op.py b/tests/st/ops/gpu/test_tanh_op.py
index 065bf50f087..b44c59570c7 100644
--- a/tests/st/ops/gpu/test_tanh_op.py
+++ b/tests/st/ops/gpu/test_tanh_op.py
@@ -37,7 +37,7 @@ class TanhNet(nn.Cell):
 class Grad(nn.Cell):
     def __init__(self, network):
         super(Grad, self).__init__()
-        self.grad = C.GradOperation(name="get_all", get_all=True, sens_param=True)
+        self.grad = C.GradOperation(get_all=True, sens_param=True)
         self.network = network

     def construct(self, input_data, sens):
diff --git a/tests/st/pynative/test_pynative_hook.py b/tests/st/pynative/test_pynative_hook.py
index b9431f9d34b..99688697aef 100644
--- a/tests/st/pynative/test_pynative_hook.py
+++ b/tests/st/pynative/test_pynative_hook.py
@@ -30,7 +30,7 @@
 from mindspore.common.initializer import TruncatedNormal

 context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 def weight_variable():
@@ -112,7 +112,7 @@ class GradWrap(nn.Cell):

     def construct(self, x, label):
         weights = self.weights
-        return C.GradOperation('get_by_list', get_by_list=True)(self.network, weights)(x, label)
+        return C.GradOperation(get_by_list=True)(self.network, weights)(x, label)


 class test_custom_cell_base():
diff --git a/tests/st/pynative/test_pynative_lenet.py b/tests/st/pynative/test_pynative_lenet.py
index eb669cbb768..75b5d0cfe5f 100644
--- a/tests/st/pynative/test_pynative_lenet.py
+++ b/tests/st/pynative/test_pynative_lenet.py
@@ -29,7 +29,7 @@
 from mindspore.ops import operations as P

 np.random.seed(1)

-grad_by_list = C.GradOperation('get_by_list', get_by_list=True)
+grad_by_list = C.GradOperation(get_by_list=True)

 def weight_variable():
diff --git a/tests/st/pynative/test_pynative_mindarmour.py b/tests/st/pynative/test_pynative_mindarmour.py
index 469964c8717..23e7b2d042d 100644
--- a/tests/st/pynative/test_pynative_mindarmour.py
+++ b/tests/st/pynative/test_pynative_mindarmour.py
@@ -87,7 +87,7 @@ class LeNet(nn.Cell):
 class GradWithSens(Cell):
     def __init__(self, network):
         super(GradWithSens, self).__init__()
-        self.grad = GradOperation(name="grad", get_all=False,
+        self.grad = GradOperation(get_all=False,
                                   sens_param=True)
         self.network = network

@@ -99,8 +99,7 @@ class GradWithSens(Cell):
 class GradWrapWithLoss(Cell):
     def __init__(self, network):
         super(GradWrapWithLoss, self).__init__()
-        self._grad_all = GradOperation(name="get_all",
-                                       get_all=True,
+        self._grad_all = GradOperation(get_all=True,
                                        sens_param=False)
         self._network = network

diff --git a/tests/st/pynative/test_pynative_resnet50.py b/tests/st/pynative/test_pynative_resnet50.py
index 1a6df5db465..2f4c1972399 100644
--- a/tests/st/pynative/test_pynative_resnet50.py
+++ b/tests/st/pynative/test_pynative_resnet50.py
@@ -40,7 +40,7 @@
 np.random.seed(1)
 ds.config.set_seed(1)

-grad_by_list = CP.GradOperation('get_by_list', get_by_list=True)
+grad_by_list = CP.GradOperation(get_by_list=True)

 def weight_variable(shape):
diff --git a/tests/st/pynative/test_tensor_index.py b/tests/st/pynative/test_tensor_index.py
index 4f62204e7bc..d1d496e034b 100644
--- a/tests/st/pynative/test_tensor_index.py
+++ b/tests/st/pynative/test_tensor_index.py
@@ -24,7 +24,7 @@
 from mindspore.common.parameter import ParameterTuple
 from mindspore.ops import composite as C

-grad_by_list_with_sens = C.GradOperation('grad_by_list_with_sens', get_by_list=True, sens_param=True)
+grad_by_list_with_sens = C.GradOperation(get_by_list=True, sens_param=True)

 def setup_module():
diff --git a/tests/train_step_wrap.py b/tests/train_step_wrap.py
index 842b924198a..03451674193 100644
--- a/tests/train_step_wrap.py
+++ b/tests/train_step_wrap.py
@@ -32,7 +32,7 @@ class TrainStepWrap(nn.Cell):
         self.weights = ParameterTuple(network.trainable_params())
         self.optimizer = nn.Momentum(self.weights, 0.1, 0.9)
         self.hyper_map = C.HyperMap()
-        self.grad = C.GradOperation('grad', get_by_list=True)
+        self.grad = C.GradOperation(get_by_list=True)

     def construct(self, x, label):
         weights = self.weights
@@ -71,7 +71,7 @@ class TrainStepWrap2(nn.Cell):
         self.weights = ParameterTuple(network.get_parameters())
         self.optimizer = nn.Momentum(self.weights, 0.1, 0.9)
         self.hyper_map = C.HyperMap()
-        self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True)
+        self.grad = C.GradOperation(get_by_list=True, sens_param=True)
         self.sens = sens

     def construct(self, x):
@@ -93,7 +93,7 @@ class TrainStepWrapWithoutOpt(nn.Cell):
         super(TrainStepWrapWithoutOpt, self).__init__()
         self.network = network
         self.weights = ParameterTuple(network.trainable_params())
-        self.grad = C.GradOperation('grad', get_by_list=True)
+        self.grad = C.GradOperation(get_by_list=True)

     def construct(self, x, label):
         grads = self.grad(self.network, self.weights)(x, label)
diff --git a/tests/ut/python/dtype/test_list.py b/tests/ut/python/dtype/test_list.py
index e8b651c55bf..13460e03ba8 100644
--- a/tests/ut/python/dtype/test_list.py
+++ b/tests/ut/python/dtype/test_list.py
@@ -31,7 +31,7 @@ from tests.mindspore_test_framework.pipeline.forward.compile_forward \

 context.set_context(mode=context.GRAPH_MODE)

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 def test_list_equal():
diff --git a/tests/ut/python/exec/test_train_with_lars.py b/tests/ut/python/exec/test_train_with_lars.py
index 4d3621b3b28..b09584f2989 100644
--- a/tests/ut/python/exec/test_train_with_lars.py
+++ b/tests/ut/python/exec/test_train_with_lars.py
@@ -52,8 +52,7 @@ class TrainOneStepWithLarsCell(nn.Cell):
         self.slice_index, self.params_len, weights = get_net_trainable_reordered_params(self.network)
         self.weights = ParameterTuple(weights)
         self.optimizer = optimizer
-        self.grad = C.GradOperation('grad',
-                                    get_by_list=True,
+        self.grad = C.GradOperation(get_by_list=True,
                                     sens_param=True)
         self.sens = Parameter(Tensor([sens], mstype.float32), name='sens', requires_grad=False)
         self.weight_decay = 1.0
diff --git a/tests/ut/python/ir/test_row_tensor.py b/tests/ut/python/ir/test_row_tensor.py
index cdfcf55bdc7..62d7d761a1c 100644
--- a/tests/ut/python/ir/test_row_tensor.py
+++ b/tests/ut/python/ir/test_row_tensor.py
@@ -248,7 +248,7 @@ def test_row_tensor_attr():


 def test_row_tensor_sparse_gatherv2_grad_all():
-    grad_all = C.GradOperation('get_all', get_all=True)
+    grad_all = C.GradOperation(get_all=True)
     class GradWrap(nn.Cell):
         def __init__(self, network):
             super(GradWrap, self).__init__()
@@ -269,7 +269,7 @@ def test_row_tensor_sparse_gatherv2_grad_all():


 def test_row_tensor_sparse_gatherv2_grad_with_pram():
-    grad_by_list = C.GradOperation('get_by_list', get_by_list=True)
+    grad_by_list = C.GradOperation(get_by_list=True)
     class GradWrap(nn.Cell):
         def __init__(self, network):
             super(GradWrap, self).__init__()
diff --git a/tests/ut/python/ir/test_sparse_tensor.py b/tests/ut/python/ir/test_sparse_tensor.py
index 76f53f2e13c..184bc26d93c 100644
--- a/tests/ut/python/ir/test_sparse_tensor.py
+++ b/tests/ut/python/ir/test_sparse_tensor.py
@@ -28,7 +28,7 @@
 from mindspore import Tensor, SparseTensor, context

 context.set_context(mode=context.GRAPH_MODE, enable_sparse=True)

-grad_op = C.GradOperation('get_all', get_all=True)
+grad_op = C.GradOperation(get_all=True)

 class MakeSparseTensor(nn.Cell):
     def __init__(self, dense_shape):
diff --git a/tests/ut/python/keep_order/test_keep_order.py b/tests/ut/python/keep_order/test_keep_order.py
index fa0df6dd5d4..0113a362786 100644
--- a/tests/ut/python/keep_order/test_keep_order.py
+++ b/tests/ut/python/keep_order/test_keep_order.py
@@ -50,7 +50,7 @@ class Func(nn.Cell):
         return out


-grad_s = C.GradOperation('grad_with_sens', get_all=True, sens_param=True)
+grad_s = C.GradOperation(get_all=True, sens_param=True)


 class Net(nn.Cell):
diff --git a/tests/ut/python/model/test_mix_precision.py b/tests/ut/python/model/test_mix_precision.py
index 89a71bd37ce..4570bd243a3 100644
--- a/tests/ut/python/model/test_mix_precision.py
+++ b/tests/ut/python/model/test_mix_precision.py
@@ -166,8 +166,7 @@ class GetParamGrad(nn.Cell):
         super(GetParamGrad, self).__init__(auto_prefix=False)
         self.network = network
         self.weights = ParameterTuple(network.trainable_params())
-        self.grad = C.GradOperation('grad',
-                                    get_by_list=True,
+        self.grad = C.GradOperation(get_by_list=True,
                                     sens_param=True)

     def construct(self, data, sens):
diff --git a/tests/ut/python/nn/optim/test_lr_schedule.py b/tests/ut/python/nn/optim/test_lr_schedule.py
index 69fa8a356f7..d3c9ab2aab8 100644
--- a/tests/ut/python/nn/optim/test_lr_schedule.py
+++ b/tests/ut/python/nn/optim/test_lr_schedule.py
@@ -22,7 +22,7 @@
 from mindspore.ops.operations import BiasAdd, MatMul
 import mindspore.ops.composite as C

-grad_by_list = C.GradOperation('get_by_list', get_by_list=True)
+grad_by_list = C.GradOperation(get_by_list=True)


 class Net(Cell):
diff --git a/tests/ut/python/nn/test_nn_pad.py b/tests/ut/python/nn/test_nn_pad.py
index 5e0f7108d60..3ea28aefc0c 100644
--- a/tests/ut/python/nn/test_nn_pad.py
+++ b/tests/ut/python/nn/test_nn_pad.py
@@ -34,7 +34,7 @@ class Net(nn.Cell):
 class Grad(nn.Cell):
     def __init__(self, network):
         super(Grad, self).__init__()
-        self.grad = GradOperation(name="get_all", get_all=True, sens_param=True)
+        self.grad = GradOperation(get_all=True, sens_param=True)
         self.network = network

     @ms_function
diff --git a/tests/ut/python/ops/test_bprop_disorder.py b/tests/ut/python/ops/test_bprop_disorder.py
index c228c768d78..7f1829d5e77 100644
--- a/tests/ut/python/ops/test_bprop_disorder.py
+++ b/tests/ut/python/ops/test_bprop_disorder.py
@@ -28,7 +28,7 @@
 from ....mindspore_test_framework.pipeline.forward.compile_forward \
     import pipeline_for_compile_forward_ge_graph_for_case_by_case_config

-grad_by_list_with_sens = C.GradOperation('grad_by_list_with_sens', get_by_list=True, sens_param=True)
+grad_by_list_with_sens = C.GradOperation(get_by_list=True, sens_param=True)


 class DisOrderTest1(nn.Cell):
diff --git a/tests/ut/python/ops/test_control_ops.py b/tests/ut/python/ops/test_control_ops.py
index 7784ab1e0d4..00c653ce880 100644
--- a/tests/ut/python/ops/test_control_ops.py
+++ b/tests/ut/python/ops/test_control_ops.py
@@ -30,9 +30,9 @@
 from mindspore.common import ms_function

 context.set_context(mode=context.GRAPH_MODE)

-grad_by_list = C.GradOperation('get_by_list', get_by_list=True)
-grad_all = C.GradOperation('get_all', get_all=True)
-grad_all_with_sens = C.GradOperation('grad_all_with_sens', get_all=True, sens_param=True)
+grad_by_list = C.GradOperation(get_by_list=True)
+grad_all = C.GradOperation(get_all=True)
+grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True)

 def cond_data_test(x_init, y_init):
@@ -564,7 +564,7 @@ def test_switch_layer_env_eliminate():
     class NetGrad(nn.Cell):
         def __init__(self, net):
             super(NetGrad, self).__init__()
-            self.grad_op = C.GradOperation('grad', get_by_list=True, sens_param=False)
+            self.grad_op = C.GradOperation(get_by_list=True, sens_param=False)
             self.net = net
             self.weights = ParameterTuple(self.net.trainable_params())
@@ -593,7 +593,7 @@ def test_switch_layer_single_layer():
     class NetGrad(nn.Cell):
         def __init__(self, net):
             super(NetGrad, self).__init__()
-            self.grad_op = C.GradOperation('grad', get_by_list=True, sens_param=False)
+            self.grad_op = C.GradOperation(get_by_list=True, sens_param=False)
             self.net = net
             self.weights = ParameterTuple(self.net.trainable_params())
diff --git a/tests/ut/python/ops/test_math_ops.py b/tests/ut/python/ops/test_math_ops.py
index 2eeed81eaf5..1113d9eeb60 100755
--- a/tests/ut/python/ops/test_math_ops.py
+++ b/tests/ut/python/ops/test_math_ops.py
@@ -38,7 +38,7 @@ context.set_context(mode=context.GRAPH_MODE)
 # W0613: unused-argument
 # W0231: super-init-not-called

-grad = C.GradOperation('grad')
+grad = C.GradOperation()

 def test_multiply():
     """ test_multiply """
diff --git a/tests/ut/python/ops/test_momentum.py b/tests/ut/python/ops/test_momentum.py
index ddc22b65d06..983ea9fe7dc 100644
--- a/tests/ut/python/ops/test_momentum.py
+++ b/tests/ut/python/ops/test_momentum.py
@@ -34,7 +34,7 @@ from ....mindspore_test_framework.pipeline.forward.compile_forward \

 run_opt = C.MultitypeFuncGraph("run_opt")

-grad_by_list = C.GradOperation('get_by_list', get_by_list=True)
+grad_by_list = C.GradOperation(get_by_list=True)


 @run_opt.register("Function", "Tensor", "Tensor", "Tensor",
diff --git a/tests/ut/python/ops/test_nn_ops.py b/tests/ut/python/ops/test_nn_ops.py
index a6541466c17..abf0b034d30 100644
--- a/tests/ut/python/ops/test_nn_ops.py
+++ b/tests/ut/python/ops/test_nn_ops.py
@@ -45,8 +45,8 @@ def conv1x1(in_channels, out_channels, stride=1, padding=0):
                      kernel_size=1, stride=stride, padding=padding)


-grad = C.GradOperation('grad')
-grad_all_with_sens = C.GradOperation('grad_all_with_sens', get_all=True, sens_param=True)
+grad = C.GradOperation()
+grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True)


 class ResidualBlock(nn.Cell):
@@ -230,7 +230,7 @@
 class FusedBatchNormGrad(nn.Cell):
     def __init__(self, network):
         super(FusedBatchNormGrad, self).__init__()
-        self.grad = C.GradOperation(name="get_all", get_all=True, sens_param=True)
+        self.grad = C.GradOperation(get_all=True, sens_param=True)
         self.network = network

     def construct(self, inp, output_grad):
diff --git a/tests/ut/python/ops/test_ops.py b/tests/ut/python/ops/test_ops.py
index 17af407b1ff..dbb23db1679 100755
--- a/tests/ut/python/ops/test_ops.py
+++ b/tests/ut/python/ops/test_ops.py
@@ -36,7 +36,7 @@ from ....mindspore_test_framework.pipeline.gradient.compile_gradient \
 from ....ops_common import convert

-grad_all_with_sens = C.GradOperation('grad_all_with_sens', get_all=True, sens_param=True)
+grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True)


 class InputBackward(nn.Cell):
diff --git a/tests/ut/python/parallel/test_add_relu_redistribution.py b/tests/ut/python/parallel/test_add_relu_redistribution.py
index f8e211ae1ad..894c29a3404 100644
--- a/tests/ut/python/parallel/test_add_relu_redistribution.py
+++ b/tests/ut/python/parallel/test_add_relu_redistribution.py
@@ -23,7 +23,7 @@
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class AddRelu(nn.Cell):
diff --git a/tests/ut/python/parallel/test_arithmetic.py b/tests/ut/python/parallel/test_arithmetic.py
index 1b307d5733d..4d2b623dd5e 100644
--- a/tests/ut/python/parallel/test_arithmetic.py
+++ b/tests/ut/python/parallel/test_arithmetic.py
@@ -23,7 +23,7 @@
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_attention.py b/tests/ut/python/parallel/test_attention.py
index 25c8be5e9cb..7af99af2bd6 100644
--- a/tests/ut/python/parallel/test_attention.py
+++ b/tests/ut/python/parallel/test_attention.py
@@ -27,7 +27,7 @@
 from mindspore.common.parameter import Parameter
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_BN_PReLU.py b/tests/ut/python/parallel/test_auto_parallel_BN_PReLU.py
index 32f64200618..4f9cd92c3c8 100644
--- a/tests/ut/python/parallel/test_auto_parallel_BN_PReLU.py
+++ b/tests/ut/python/parallel/test_auto_parallel_BN_PReLU.py
@@ -23,7 +23,7 @@
 from mindspore.ops import composite as C
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_arithmetic.py b/tests/ut/python/parallel/test_auto_parallel_arithmetic.py
index 0a8afcb6fb7..2d25f18081f 100644
--- a/tests/ut/python/parallel/test_auto_parallel_arithmetic.py
+++ b/tests/ut/python/parallel/test_auto_parallel_arithmetic.py
@@ -27,7 +27,7 @@
 from tests.ut.python.ops.test_math_ops import VirtualLoss

 context.set_context(mode=context.GRAPH_MODE)

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_assign_sub_with_ref_key.py b/tests/ut/python/parallel/test_auto_parallel_assign_sub_with_ref_key.py
index 4a73ce6d7ce..3c3cd40abb8 100644
--- a/tests/ut/python/parallel/test_auto_parallel_assign_sub_with_ref_key.py
+++ b/tests/ut/python/parallel/test_auto_parallel_assign_sub_with_ref_key.py
@@ -25,7 +25,7 @@
 from mindspore.parallel._utils import _reset_op_id as reset_op_id
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_cast.py b/tests/ut/python/parallel/test_auto_parallel_cast.py
index 14ec846c9d4..0e498878ebd 100644
--- a/tests/ut/python/parallel/test_auto_parallel_cast.py
+++ b/tests/ut/python/parallel/test_auto_parallel_cast.py
@@ -26,7 +26,7 @@
 from mindspore.parallel._utils import _reset_op_id as reset_op_id
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_common_parameter.py b/tests/ut/python/parallel/test_auto_parallel_common_parameter.py
index c330d6259ed..9ab8b274065 100644
--- a/tests/ut/python/parallel/test_auto_parallel_common_parameter.py
+++ b/tests/ut/python/parallel/test_auto_parallel_common_parameter.py
@@ -24,7 +24,7 @@
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_double_sources.py b/tests/ut/python/parallel/test_auto_parallel_double_sources.py
index 2f89606974c..6ad78585051 100644
--- a/tests/ut/python/parallel/test_auto_parallel_double_sources.py
+++ b/tests/ut/python/parallel/test_auto_parallel_double_sources.py
@@ -24,7 +24,7 @@
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_double_star.py b/tests/ut/python/parallel/test_auto_parallel_double_star.py
index 08b13a6bbee..5a431599935 100644
--- a/tests/ut/python/parallel/test_auto_parallel_double_star.py
+++ b/tests/ut/python/parallel/test_auto_parallel_double_star.py
@@ -24,7 +24,7 @@
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_double_subgraphs.py b/tests/ut/python/parallel/test_auto_parallel_double_subgraphs.py
index eb9c397abc6..70443858aa5 100644
--- a/tests/ut/python/parallel/test_auto_parallel_double_subgraphs.py
+++ b/tests/ut/python/parallel/test_auto_parallel_double_subgraphs.py
@@ -85,8 +85,8 @@ class TrainStepWarp(nn.Cell):
         self.optimizer_d = Adam(self.weights_d, learning_rate=3.5e-4,
                                 eps=1e-8, loss_scale=sens)
         self.hyper_map = C.HyperMap()
-        self.grad_w = C.GradOperation('grad_w', get_by_list=True, sens_param=True)
-        self.grad_d = C.GradOperation('grad_d', get_by_list=True, sens_param=True)
+        self.grad_w = C.GradOperation(get_by_list=True, sens_param=True)
+        self.grad_d = C.GradOperation(get_by_list=True, sens_param=True)
         self.sens = sens
         self.loss_net_w = IthOutputCell(network, output_index=0)
         self.loss_net_d = IthOutputCell(network, output_index=1)
diff --git a/tests/ut/python/parallel/test_auto_parallel_fc_nobias.py b/tests/ut/python/parallel/test_auto_parallel_fc_nobias.py
index c6339330137..05e57801c03 100644
--- a/tests/ut/python/parallel/test_auto_parallel_fc_nobias.py
+++ b/tests/ut/python/parallel/test_auto_parallel_fc_nobias.py
@@ -24,7 +24,7 @@
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_four_matmul.py b/tests/ut/python/parallel/test_auto_parallel_four_matmul.py
index 2eb8243a022..c005fcffde3 100644
--- a/tests/ut/python/parallel/test_auto_parallel_four_matmul.py
+++ b/tests/ut/python/parallel/test_auto_parallel_four_matmul.py
@@ -24,7 +24,7 @@
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_l2normalize.py b/tests/ut/python/parallel/test_auto_parallel_l2normalize.py
index 1601a99c36b..1a1c1502f32 100644
--- a/tests/ut/python/parallel/test_auto_parallel_l2normalize.py
+++ b/tests/ut/python/parallel/test_auto_parallel_l2normalize.py
@@ -25,7 +25,7 @@
 from mindspore.parallel._utils import _reset_op_id as reset_op_id
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_matmul_drop.py b/tests/ut/python/parallel/test_auto_parallel_matmul_drop.py
index d8a89be72fd..738614ab5ed 100644
--- a/tests/ut/python/parallel/test_auto_parallel_matmul_drop.py
+++ b/tests/ut/python/parallel/test_auto_parallel_matmul_drop.py
@@ -24,7 +24,7 @@
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_matmul_prelu.py b/tests/ut/python/parallel/test_auto_parallel_matmul_prelu.py
index 9d7635dd02c..bc086c5907f 100644
--- a/tests/ut/python/parallel/test_auto_parallel_matmul_prelu.py
+++ b/tests/ut/python/parallel/test_auto_parallel_matmul_prelu.py
@@ -26,7 +26,7 @@
 from mindspore.parallel._utils import _reset_op_id as reset_op_id
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_onehot.py b/tests/ut/python/parallel/test_auto_parallel_onehot.py
index f36eb5e1096..302de23a50f 100644
--- a/tests/ut/python/parallel/test_auto_parallel_onehot.py
+++ b/tests/ut/python/parallel/test_auto_parallel_onehot.py
@@ -30,7 +30,7 @@
 from tests.ut.python.ops.test_math_ops import VirtualLoss

 context.set_context(mode=context.GRAPH_MODE)

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class Dataset(MindData):
diff --git a/tests/ut/python/parallel/test_auto_parallel_partial_strategy.py b/tests/ut/python/parallel/test_auto_parallel_partial_strategy.py
index 0c08d6a482e..4aa2fe6b8d4 100644
--- a/tests/ut/python/parallel/test_auto_parallel_partial_strategy.py
+++ b/tests/ut/python/parallel/test_auto_parallel_partial_strategy.py
@@ -24,7 +24,7 @@
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_reduce_method.py b/tests/ut/python/parallel/test_auto_parallel_reduce_method.py
index 337eeff49b7..415ddf94d0b 100644
--- a/tests/ut/python/parallel/test_auto_parallel_reduce_method.py
+++ b/tests/ut/python/parallel/test_auto_parallel_reduce_method.py
@@ -24,7 +24,7 @@
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_reshape.py b/tests/ut/python/parallel/test_auto_parallel_reshape.py
index 2d9ef96b420..2f4c4efb6e2 100644
--- a/tests/ut/python/parallel/test_auto_parallel_reshape.py
+++ b/tests/ut/python/parallel/test_auto_parallel_reshape.py
@@ -25,7 +25,7 @@
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_rhombus.py b/tests/ut/python/parallel/test_auto_parallel_rhombus.py
index e83eebf6488..fb7b6caf6ec 100644
--- a/tests/ut/python/parallel/test_auto_parallel_rhombus.py
+++ b/tests/ut/python/parallel/test_auto_parallel_rhombus.py
@@ -24,7 +24,7 @@
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_softmax_loss.py b/tests/ut/python/parallel/test_auto_parallel_softmax_loss.py
index cf20ca0ef51..448e322c2a0 100644
--- a/tests/ut/python/parallel/test_auto_parallel_softmax_loss.py
+++ b/tests/ut/python/parallel/test_auto_parallel_softmax_loss.py
@@ -23,7 +23,7 @@
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_transformer.py b/tests/ut/python/parallel/test_auto_parallel_transformer.py
index c3ea02eaa9c..4a3d8daa44b 100644
--- a/tests/ut/python/parallel/test_auto_parallel_transformer.py
+++ b/tests/ut/python/parallel/test_auto_parallel_transformer.py
@@ -23,7 +23,7 @@
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_transpose.py b/tests/ut/python/parallel/test_auto_parallel_transpose.py
index 246ab2d5881..b542004ea72 100644
--- a/tests/ut/python/parallel/test_auto_parallel_transpose.py
+++ b/tests/ut/python/parallel/test_auto_parallel_transpose.py
@@ -25,7 +25,7 @@
 from mindspore.parallel._utils import _reset_op_id as reset_op_id
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_tuple_depend.py b/tests/ut/python/parallel/test_auto_parallel_tuple_depend.py
index ba1ffd6cd8d..8ed66b958ef 100644
--- a/tests/ut/python/parallel/test_auto_parallel_tuple_depend.py
+++ b/tests/ut/python/parallel/test_auto_parallel_tuple_depend.py
@@ -25,7 +25,7 @@
 from mindspore.ops.operations.comm_ops import _VirtualDataset
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_two_matmul.py b/tests/ut/python/parallel/test_auto_parallel_two_matmul.py
index 2a4d4f40f6d..f4863cce1c1 100644
--- a/tests/ut/python/parallel/test_auto_parallel_two_matmul.py
+++ b/tests/ut/python/parallel/test_auto_parallel_two_matmul.py
@@ -27,7 +27,7 @@
 from mindspore.parallel._utils import _reset_op_id as reset_op_id
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_two_partial_matmul.py b/tests/ut/python/parallel/test_auto_parallel_two_partial_matmul.py
index d5f6086c802..a05730aa5e6 100644
--- a/tests/ut/python/parallel/test_auto_parallel_two_partial_matmul.py
+++ b/tests/ut/python/parallel/test_auto_parallel_two_partial_matmul.py
@@ -24,7 +24,7 @@
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_zig_zag.py b/tests/ut/python/parallel/test_auto_parallel_zig_zag.py
index 77c85cb271f..14affccf50b 100644
--- a/tests/ut/python/parallel/test_auto_parallel_zig_zag.py
+++ b/tests/ut/python/parallel/test_auto_parallel_zig_zag.py
@@ -24,7 +24,7 @@
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_star_elimination.py b/tests/ut/python/parallel/test_auto_star_elimination.py
index 8ab81ccc2c3..7b1945304ed 100644
--- a/tests/ut/python/parallel/test_auto_star_elimination.py
+++ b/tests/ut/python/parallel/test_auto_star_elimination.py
@@ -26,7 +26,7 @@
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_batch_parallel.py b/tests/ut/python/parallel/test_batch_parallel.py
index bd823bd416e..db0c93dbf9f 100644
--- a/tests/ut/python/parallel/test_batch_parallel.py
+++ b/tests/ut/python/parallel/test_batch_parallel.py
@@ -24,7 +24,7 @@
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_batch_parallel_dropout.py b/tests/ut/python/parallel/test_batch_parallel_dropout.py
index ce761944606..ba9c1a69337 100644
--- a/tests/ut/python/parallel/test_batch_parallel_dropout.py
+++ b/tests/ut/python/parallel/test_batch_parallel_dropout.py
@@ -24,7 +24,7 @@
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_batch_parallel_tensoradd.py b/tests/ut/python/parallel/test_batch_parallel_tensoradd.py
index 54cf437e441..a81079e8eab 100644
--- a/tests/ut/python/parallel/test_batch_parallel_tensoradd.py
+++ b/tests/ut/python/parallel/test_batch_parallel_tensoradd.py
@@ -24,7 +24,7 @@
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_comparison_function_info.py b/tests/ut/python/parallel/test_comparison_function_info.py
index 56bc8888bb5..b56a08ec51b 100644
--- a/tests/ut/python/parallel/test_comparison_function_info.py
+++ b/tests/ut/python/parallel/test_comparison_function_info.py
@@ -24,7 +24,7 @@
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_dataset_interface.py b/tests/ut/python/parallel/test_dataset_interface.py
index 46114cc0bdf..0e70b2513c2 100644
--- a/tests/ut/python/parallel/test_dataset_interface.py
+++ b/tests/ut/python/parallel/test_dataset_interface.py
@@ -107,7 +107,7 @@ class TrainOneStepCell(nn.Cell):
         self.network.add_flags(defer_inline=True)
         self.weights = ParameterTuple(network.trainable_params())
         self.optimizer = optimizer
-        self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True)
+        self.grad = C.GradOperation(get_by_list=True, sens_param=True)

     def construct(self, data, sens):
         weights = self.weights
diff --git a/tests/ut/python/parallel/test_different_type_for_div_op.py b/tests/ut/python/parallel/test_different_type_for_div_op.py
index 1dcdcac8602..92480d06b03 100644
--- a/tests/ut/python/parallel/test_different_type_for_div_op.py
+++ b/tests/ut/python/parallel/test_different_type_for_div_op.py
@@ -23,7 +23,7 @@
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class GradWrap(nn.Cell):
diff --git a/tests/ut/python/parallel/test_element_wise_function.py b/tests/ut/python/parallel/test_element_wise_function.py
index 450914eb692..668618fcabd 100644
--- a/tests/ut/python/parallel/test_element_wise_function.py
+++ b/tests/ut/python/parallel/test_element_wise_function.py
@@ -24,7 +24,7 @@
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_embeddinglookup.py b/tests/ut/python/parallel/test_embeddinglookup.py
index f131a854578..576a6b3bc96 100644
--- a/tests/ut/python/parallel/test_embeddinglookup.py
+++ b/tests/ut/python/parallel/test_embeddinglookup.py
@@ -23,7 +23,7 @@
 from mindspore import Tensor, context
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class GradWrap(nn.Cell):
diff --git a/tests/ut/python/parallel/test_gather_v2.py b/tests/ut/python/parallel/test_gather_v2.py
index e914af3102b..2d657a61016 100644
--- a/tests/ut/python/parallel/test_gather_v2.py
+++ b/tests/ut/python/parallel/test_gather_v2.py
@@ -23,7 +23,7 @@
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_gather_v2_primitive.py b/tests/ut/python/parallel/test_gather_v2_primitive.py
index e6f269e2dbb..4c0534aad07 100644
--- a/tests/ut/python/parallel/test_gather_v2_primitive.py
+++ b/tests/ut/python/parallel/test_gather_v2_primitive.py
@@ -109,8 +109,7 @@ class TrainOneStepCell(Cell):
         self.network.add_flags(defer_inline=True)
         self.weights = ParameterTuple(network.trainable_params())
         self.optimizer = optimizer
-        self.grad = C.GradOperation('grad',
-                                    get_by_list=True,
+        self.grad = C.GradOperation(get_by_list=True,
                                     sens_param=True)
         self.sens = sens
diff --git a/tests/ut/python/parallel/test_get_next.py b/tests/ut/python/parallel/test_get_next.py
index 956723a3f31..c1db710ad5a 100644
--- a/tests/ut/python/parallel/test_get_next.py
+++ b/tests/ut/python/parallel/test_get_next.py
@@ -25,7 +25,7 @@
 from mindspore.ops import operations as P

 context.set_context(mode=context.GRAPH_MODE)

-grad_by_list = C.GradOperation('get_by_list', get_by_list=True)
+grad_by_list = C.GradOperation(get_by_list=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_gpu_dropout.py b/tests/ut/python/parallel/test_gpu_dropout.py
index 1a1ac553017..0eade2b9628 100644
--- a/tests/ut/python/parallel/test_gpu_dropout.py
+++ b/tests/ut/python/parallel/test_gpu_dropout.py
@@ -23,7 +23,7 @@
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_hybird_parallel_activation.py b/tests/ut/python/parallel/test_hybird_parallel_activation.py
index 32596ab44ac..cf2dd849cfa 100644
--- a/tests/ut/python/parallel/test_hybird_parallel_activation.py
+++ b/tests/ut/python/parallel/test_hybird_parallel_activation.py
@@ -24,7 +24,7 @@
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_l2normalize.py b/tests/ut/python/parallel/test_l2normalize.py
index 9a842c603af..8a26bf39432 100644
--- a/tests/ut/python/parallel/test_l2normalize.py
+++ b/tests/ut/python/parallel/test_l2normalize.py
@@ -24,7 +24,7 @@
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_linear.py b/tests/ut/python/parallel/test_linear.py
index 4c0df3c665f..b0fd4105669 100644
--- a/tests/ut/python/parallel/test_linear.py
+++ b/tests/ut/python/parallel/test_linear.py
@@ -23,7 +23,7 @@
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_loop_two_matmul.py b/tests/ut/python/parallel/test_loop_two_matmul.py
index e05c84fca42..5b066d53a84 100644
--- a/tests/ut/python/parallel/test_loop_two_matmul.py
+++ b/tests/ut/python/parallel/test_loop_two_matmul.py
@@ -25,7 +25,7 @@
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_loss_scale.py b/tests/ut/python/parallel/test_loss_scale.py
index 7737fe12715..498a83d928a 100644
--- a/tests/ut/python/parallel/test_loss_scale.py
+++ b/tests/ut/python/parallel/test_loss_scale.py
@@ -61,8 +61,7 @@ class TrainOneStepWithLossScaleCell(nn.Cell):
         self.network = network
         self.weights = optimizer.parameters
         self.optimizer = optimizer
-        self.grad = C.GradOperation('grad',
-                                    get_by_list=True,
+        self.grad = C.GradOperation(get_by_list=True,
                                     sens_param=True)
         self.reducer_flag = False
         self.grad_reducer = F.identity
diff --git a/tests/ut/python/parallel/test_matmul_dropout.py b/tests/ut/python/parallel/test_matmul_dropout.py
index 41892ade40b..5dfa4cabb3b 100644
--- a/tests/ut/python/parallel/test_matmul_dropout.py
+++ b/tests/ut/python/parallel/test_matmul_dropout.py
@@ -24,7 +24,7 @@
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_matmul_tensor.py b/tests/ut/python/parallel/test_matmul_tensor.py
index 757242bf097..aff6cfca737 100644
--- a/tests/ut/python/parallel/test_matmul_tensor.py
+++ b/tests/ut/python/parallel/test_matmul_tensor.py
@@ -26,7 +26,7 @@
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_mix_precision_hybrid_parallel.py b/tests/ut/python/parallel/test_mix_precision_hybrid_parallel.py
index 4790f60c997..81bb9cae7b8 100644
--- a/tests/ut/python/parallel/test_mix_precision_hybrid_parallel.py
+++ b/tests/ut/python/parallel/test_mix_precision_hybrid_parallel.py
@@ -24,7 +24,7 @@
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_one_hot_net.py b/tests/ut/python/parallel/test_one_hot_net.py
index a67010143d6..33c8fcbc823 100644
--- a/tests/ut/python/parallel/test_one_hot_net.py
+++ b/tests/ut/python/parallel/test_one_hot_net.py
@@ -30,7 +30,7 @@
 from tests.dataset_mock import MindData
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 device_num = 16
diff --git a/tests/ut/python/parallel/test_one_weight_parameter.py b/tests/ut/python/parallel/test_one_weight_parameter.py
index 558d2ec322e..8cf6b6aa8ee 100644
--- a/tests/ut/python/parallel/test_one_weight_parameter.py
+++ b/tests/ut/python/parallel/test_one_weight_parameter.py
@@ -23,7 +23,7 @@
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P

-grad_by_list = C.GradOperation('get_by_list', get_by_list=True)
+grad_by_list = C.GradOperation(get_by_list=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_onehot.py b/tests/ut/python/parallel/test_onehot.py
index e65871e55b4..725e9e33ec5 100644
--- a/tests/ut/python/parallel/test_onehot.py
+++ b/tests/ut/python/parallel/test_onehot.py
@@ -26,7 +26,7 @@
 from mindspore.ops.operations.comm_ops import _VirtualDataset

 context.set_context(mode=context.GRAPH_MODE)

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_prelu.py b/tests/ut/python/parallel/test_prelu.py
index 6afef01f5c5..e60aafeba0c 100644
--- a/tests/ut/python/parallel/test_prelu.py
+++ b/tests/ut/python/parallel/test_prelu.py
@@ -24,7 +24,7 @@
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_reduce_method_info.py b/tests/ut/python/parallel/test_reduce_method_info.py
index ecf3dc30943..07712a2d9df 100644
--- a/tests/ut/python/parallel/test_reduce_method_info.py
+++ b/tests/ut/python/parallel/test_reduce_method_info.py
@@ -24,7 +24,7 @@
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLossNoBias(nn.Cell):
diff --git a/tests/ut/python/parallel/test_reshape.py b/tests/ut/python/parallel/test_reshape.py
index 070e874119f..9cfb376e1b6 100644
--- a/tests/ut/python/parallel/test_reshape.py
+++ b/tests/ut/python/parallel/test_reshape.py
@@ -36,7 +36,7 @@
 context.set_context(mode=context.GRAPH_MODE)
 context.reset_auto_parallel_context()

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class Dataset(MindData):
@@ -419,8 +419,7 @@ class TrainOneStepCell(nn.Cell):
         self.network.add_flags(defer_inline=True)
         self.weights = ParameterTuple(network.trainable_params())
         self.optimizer = optimizer
-        self.grad = C.GradOperation('grad',
-                                    get_by_list=True,
+        self.grad = C.GradOperation(get_by_list=True,
                                     sens_param=True)
         self.sens = sens
diff --git a/tests/ut/python/parallel/test_reshape_parameter.py b/tests/ut/python/parallel/test_reshape_parameter.py
index 4dfaa89ba2d..f074f566f1b 100644
--- a/tests/ut/python/parallel/test_reshape_parameter.py
+++ b/tests/ut/python/parallel/test_reshape_parameter.py
@@ -24,7 +24,7 @@
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_scalar_loss.py b/tests/ut/python/parallel/test_scalar_loss.py
index 3d07cd035d3..0f8dcc03f87 100644
--- a/tests/ut/python/parallel/test_scalar_loss.py
+++ b/tests/ut/python/parallel/test_scalar_loss.py
@@ -24,7 +24,7 @@
 from mindspore.ops import functional as F
 from mindspore.ops import operations as P

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class GradWrap(nn.Cell):
diff --git a/tests/ut/python/parallel/test_semi_auto_two_subgraphs.py b/tests/ut/python/parallel/test_semi_auto_two_subgraphs.py
index b25d86a6e47..95e642cf3d2 100644
--- a/tests/ut/python/parallel/test_semi_auto_two_subgraphs.py
+++ b/tests/ut/python/parallel/test_semi_auto_two_subgraphs.py
@@ -83,9 +83,9 @@ class TrainStepWrap(nn.Cell):
         self.optimizer_d = Adam(self.weights_d, learning_rate=3.5e-4,
                                 eps=1e-8, loss_scale=sens)
         self.hyper_map = C.HyperMap()
-        self.grad_w = C.GradOperation('grad_w', get_by_list=True,
+        self.grad_w = C.GradOperation(get_by_list=True,
                                       sens_param=True)
-        self.grad_d = C.GradOperation('grad_d', get_by_list=True,
+        self.grad_d = C.GradOperation(get_by_list=True,
                                       sens_param=True)
         self.sens = sens
         self.loss_net_w = IthOutputCell(network, output_index=0)
diff --git a/tests/ut/python/parallel/test_softmax_cross_entropy_loss.py b/tests/ut/python/parallel/test_softmax_cross_entropy_loss.py
index cf4ee1710c2..24b45600ba0 100644
--- a/tests/ut/python/parallel/test_softmax_cross_entropy_loss.py
+++ b/tests/ut/python/parallel/test_softmax_cross_entropy_loss.py
@@ -23,7 +23,7 @@
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_sparse_feature_bprop.py b/tests/ut/python/parallel/test_sparse_feature_bprop.py
index 73d8097605c..f7de90d9cd2 100644
--- a/tests/ut/python/parallel/test_sparse_feature_bprop.py
+++ b/tests/ut/python/parallel/test_sparse_feature_bprop.py
@@ -26,7 +26,7 @@
 from mindspore.common.api import _executor
 from mindspore.nn import TrainOneStepCell, Adam

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class GradWrap(nn.Cell):
diff --git a/tests/ut/python/parallel/test_sparse_gather_v2.py b/tests/ut/python/parallel/test_sparse_gather_v2.py
index 1f3d70134c3..30957dcf9dc 100644
--- a/tests/ut/python/parallel/test_sparse_gather_v2.py
+++ b/tests/ut/python/parallel/test_sparse_gather_v2.py
@@ -25,7 +25,7 @@
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_split_grad_sens.py b/tests/ut/python/parallel/test_split_grad_sens.py
index 3c947283565..a181a1858af 100644
--- a/tests/ut/python/parallel/test_split_grad_sens.py
+++ b/tests/ut/python/parallel/test_split_grad_sens.py
@@ -24,8 +24,8 @@
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P

-grad_all = C.GradOperation('get_all', get_all=True)
-grad_all_with_sens = C.GradOperation('grad_all_with_sens', get_all=True, sens_param=True)
+grad_all = C.GradOperation(get_all=True)
+grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True)


 class GradWrap(nn.Cell):
diff --git a/tests/ut/python/parallel/test_step_parallel.py b/tests/ut/python/parallel/test_step_parallel.py
index fce960ed950..a03a151e139 100644
--- a/tests/ut/python/parallel/test_step_parallel.py
+++ b/tests/ut/python/parallel/test_step_parallel.py
@@ -24,7 +24,7 @@
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_strategy_checkpoint.py b/tests/ut/python/parallel/test_strategy_checkpoint.py
index ada32463072..2d39b7aae65 100644
--- a/tests/ut/python/parallel/test_strategy_checkpoint.py
+++ b/tests/ut/python/parallel/test_strategy_checkpoint.py
@@ -25,7 +25,7 @@
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 # model_parallel test
diff --git a/tests/ut/python/parallel/test_sum_as_loss.py b/tests/ut/python/parallel/test_sum_as_loss.py
index c8cd5e63ae9..bca26d0b2cf 100644
--- a/tests/ut/python/parallel/test_sum_as_loss.py
+++ b/tests/ut/python/parallel/test_sum_as_loss.py
@@ -23,7 +23,7 @@
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class GradWrap(nn.Cell):
diff --git a/tests/ut/python/parallel/test_two_matmul.py b/tests/ut/python/parallel/test_two_matmul.py
index 0e6b47286c4..854df0ca71f 100644
--- a/tests/ut/python/parallel/test_two_matmul.py
+++ b/tests/ut/python/parallel/test_two_matmul.py
@@ -24,7 +24,7 @@
 from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_two_weights_parameter.py b/tests/ut/python/parallel/test_two_weights_parameter.py
index 160ae9f40d7..c05fa63c2d8 100644
--- a/tests/ut/python/parallel/test_two_weights_parameter.py
+++ b/tests/ut/python/parallel/test_two_weights_parameter.py
@@ -23,7 +23,7 @@
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P

-grad_by_list = C.GradOperation('get_by_list', get_by_list=True)
+grad_by_list = C.GradOperation(get_by_list=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_virtual_dataset_3_input.py b/tests/ut/python/parallel/test_virtual_dataset_3_input.py
index 63979e59f87..a3b2f8d96bb 100644
--- a/tests/ut/python/parallel/test_virtual_dataset_3_input.py
+++ b/tests/ut/python/parallel/test_virtual_dataset_3_input.py
@@ -26,7 +26,7 @@
 from mindspore.ops.operations.comm_ops import _VirtualDataset
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parameter_feature/test_parameter.py b/tests/ut/python/parameter_feature/test_parameter.py
index 0cdb7f2469b..551f175dbe8 100644
--- a/tests/ut/python/parameter_feature/test_parameter.py
+++ b/tests/ut/python/parameter_feature/test_parameter.py
@@ -23,8 +23,8 @@ from mindspore.ops import operations as P

 context.set_context(mode=context.GRAPH_MODE, save_graphs=True)

-grad_all = C.GradOperation('get_all', get_all=True)
-grad_all_with_sens = C.GradOperation('grad_all_with_sens', get_all=True, sens_param=True)
+grad_all = C.GradOperation(get_all=True)
+grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True)


 def test_parser_three_default_mixed_args_subnet():
diff --git a/tests/ut/python/parameter_feature/test_var_grad.py b/tests/ut/python/parameter_feature/test_var_grad.py
index 760dd8531a0..6b3d05a9781 100644
--- a/tests/ut/python/parameter_feature/test_var_grad.py
+++ b/tests/ut/python/parameter_feature/test_var_grad.py
@@ -25,11 +25,11 @@ from mindspore.ops import operations as P

 context.set_context(mode=context.GRAPH_MODE, save_graphs=True)

-grad_by_list = C.GradOperation('get_by_list', get_by_list=True)
-grad_all_with_sens = C.GradOperation('grad_all_with_sens', get_all=True, sens_param=True)
-grad_by_list_with_sens = C.GradOperation('grad_by_list_with_sens', get_by_list=True, sens_param=True)
-grad_all = C.GradOperation('get_all', get_all=True)
-grad_with_sens = C.GradOperation('grad_with_sens', sens_param=True)
+grad_by_list = C.GradOperation(get_by_list=True)
+grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True)
+grad_by_list_with_sens = C.GradOperation(get_by_list=True, sens_param=True)
+grad_all = C.GradOperation(get_all=True)
+grad_with_sens = C.GradOperation(sens_param=True)


 def test_net_vargs_expand():
@@ -200,7 +200,7 @@ def test_grad_with_param_sens():
             self.weights = ParameterTuple(net.trainable_params())
             self.net = net
             self.sens = Parameter(Tensor(np.ones([3, 4, 5]), dtype=mstype.float32), name='sens', requires_grad=False)
-            self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True)
+            self.grad = C.GradOperation(get_by_list=True, sens_param=True)

         def construct(self, x, y):
             return self.grad(self.net, self.weights)(x, y, self.sens)
@@ -290,8 +290,7 @@ def test_grad_within_if_else():
             super(GradNet, self).__init__()
             self.weights = ParameterTuple(net.trainable_params())
             self.net = net
-            grad_op = C.GradOperation(
-                name='grad', get_all=False, get_by_list=True, sens_param=True)
+            grad_op = C.GradOperation(get_all=False, get_by_list=True, sens_param=True)
             sens = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)
             self.grad = Bprop(self.net, True, self.weights, grad_op, sens)
@@ -312,8 +311,7 @@ def test_grad_for_concat():
             super(GradNet, self).__init__()
             self.weights = ParameterTuple(net.trainable_params())
             self.net = net
-            grad_op = C.GradOperation(
-                name='grad', get_all=True, get_by_list=False, sens_param=True)
+            grad_op = C.GradOperation(get_all=True, get_by_list=False, sens_param=True)
             self.grad = Bprop(self.net, False, self.weights, grad_op)

         def construct(self, *inputs):
diff --git a/tests/ut/python/pipeline/infer/test_scalar_add_grad.py b/tests/ut/python/pipeline/infer/test_scalar_add_grad.py
index 027cfdb779e..a775b822363 100644
--- a/tests/ut/python/pipeline/infer/test_scalar_add_grad.py
+++ b/tests/ut/python/pipeline/infer/test_scalar_add_grad.py
@@ -23,7 +23,7 @@
 from mindspore.ops import composite as C
 from mindspore.ops.operations import TensorAdd

 context.set_context(mode=context.GRAPH_MODE)
-grad = C.GradOperation('get_all', get_all=True, sens_param=True)
+grad = C.GradOperation(get_all=True, sens_param=True)


 class TensorAddNetMe(Cell):
diff --git a/tests/ut/python/pipeline/parse/test_parse.py b/tests/ut/python/pipeline/parse/test_parse.py
index 8bafdf26c74..b5d0fb0ae47 100644
--- a/tests/ut/python/pipeline/parse/test_parse.py
+++ b/tests/ut/python/pipeline/parse/test_parse.py
@@ -37,7 +37,7 @@
 from ...ut_filter import non_graph_engine

 # W0613: unused-argument

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 log = logging.getLogger("test")
diff --git a/tests/ut/python/pynative_mode/ops/test_grad.py b/tests/ut/python/pynative_mode/ops/test_grad.py
index d0bf9c893f9..1e8849cc970 100644
--- a/tests/ut/python/pynative_mode/ops/test_grad.py
+++ b/tests/ut/python/pynative_mode/ops/test_grad.py
@@ -29,8 +29,8 @@ def setup_module(module):
     context.set_context(mode=context.PYNATIVE_MODE)


-grad = C.GradOperation('grad')
-grad_all_with_sens = C.GradOperation('grad_all_with_sens', get_all=True, sens_param=True)
+grad = C.GradOperation()
+grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True)


 def mul(x, y):
diff --git a/tests/ut/python/pynative_mode/test_cont_cases.py b/tests/ut/python/pynative_mode/test_cont_cases.py
index 518678244a0..5c7350ed567 100644
--- a/tests/ut/python/pynative_mode/test_cont_cases.py
+++ b/tests/ut/python/pynative_mode/test_cont_cases.py
@@ -26,8 +26,8 @@
 from mindspore.ops import operations as P
 # from tests.vm_impl.vm_interface import *
 # from tests.vm_impl import *

-grad_by_list = C.GradOperation('get_by_list', get_by_list=True)
-grad_all = C.GradOperation('get_all', get_all=True)
+grad_by_list = C.GradOperation(get_by_list=True)
+grad_all = C.GradOperation(get_all=True)


 def setup_module():
diff --git a/tests/ut/python/pynative_mode/test_framstruct.py b/tests/ut/python/pynative_mode/test_framstruct.py
index 7e19b7452a3..ab5a80bbb70 100644
--- a/tests/ut/python/pynative_mode/test_framstruct.py
+++ b/tests/ut/python/pynative_mode/test_framstruct.py
@@ -35,10 +35,10 @@ def setup_module(module):
     context.set_context(mode=context.PYNATIVE_MODE)


-grad = C.GradOperation('grad')
-grad_all = C.GradOperation('get_all', get_all=True)
-grad_by_list = C.GradOperation('get_by_list', get_by_list=True)
-grad_all_with_sens = C.GradOperation('grad_all_with_sens', get_all=True, sens_param=True)
+grad = C.GradOperation()
+grad_all = C.GradOperation(get_all=True)
+grad_by_list = C.GradOperation(get_by_list=True)
+grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True)


 @ms_function
diff --git a/tests/ut/python/pynative_mode/test_high_order_grad.py b/tests/ut/python/pynative_mode/test_high_order_grad.py
index 133f65c7280..e41df500c37 100644
--- a/tests/ut/python/pynative_mode/test_high_order_grad.py
+++ b/tests/ut/python/pynative_mode/test_high_order_grad.py
@@ -18,9 +18,9 @@
 from mindspore.common.api import ms_function
 import mindspore.ops.composite as C

-grad = C.GradOperation('grad')
-grad_all = C.GradOperation('get_all', get_all=True)
-grad_all_with_sens = C.GradOperation('grad_all_with_sens', get_all=True, sens_param=True)
+grad = C.GradOperation()
+grad_all = C.GradOperation(get_all=True)
+grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True)


 def setup_module(module):
     context.set_context(mode=context.PYNATIVE_MODE, check_bprop=False)
diff --git a/tests/ut/python/pynative_mode/test_hook.py b/tests/ut/python/pynative_mode/test_hook.py
index 532fcbebeca..6c2204f3810 100644
--- a/tests/ut/python/pynative_mode/test_hook.py
+++ b/tests/ut/python/pynative_mode/test_hook.py
@@ -28,7 +28,7 @@
 var_hook_done = False
 cell_bprop_done = False

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 def conv(in_channels, out_channels, kernel_size, stride=1, padding=0):
@@ -135,7 +135,7 @@ class GradWrap(nn.Cell):

     def construct(self, x, label):
         weights = self.weights
-        return C.GradOperation('get_by_list', get_by_list=True)(self.network, weights)(x, label)
+        return C.GradOperation(get_by_list=True)(self.network, weights)(x, label)


 def test_hook():
diff --git a/tests/ut/python/pynative_mode/test_implicit_conversion.py b/tests/ut/python/pynative_mode/test_implicit_conversion.py
index b7c6144d222..39c885bd66a 100644
--- a/tests/ut/python/pynative_mode/test_implicit_conversion.py
+++ b/tests/ut/python/pynative_mode/test_implicit_conversion.py
@@ -20,7 +20,7 @@
 from mindspore import Tensor, nn
 from mindspore.ops import composite as C

-grad_all_with_sens = C.GradOperation('grad_all_with_sens', get_all=True, sens_param=True)
+grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True)


 def test_float_tensor_and_int_add():
diff --git a/tests/ut/python/pynative_mode/test_insert_grad_of.py b/tests/ut/python/pynative_mode/test_insert_grad_of.py
index ee84677472c..5573517b1fb 100644
--- a/tests/ut/python/pynative_mode/test_insert_grad_of.py
+++ b/tests/ut/python/pynative_mode/test_insert_grad_of.py
@@ -26,8 +26,8 @@
 from ....mindspore_test_framework.utils.bprop_util import bprop
 from ....mindspore_test_framework.utils.debug_util import PrintShapeTypeCell, PrintGradShapeTypeCell

-grad_by_list = C.GradOperation('get_by_list', get_by_list=True)
-grad_all = C.GradOperation('get_all', get_all=True)
+grad_by_list = C.GradOperation(get_by_list=True)
+grad_all = C.GradOperation(get_all=True)


 def setup_module(module):
diff --git a/tests/ut/python/pynative_mode/test_kw_and_kwarg.py b/tests/ut/python/pynative_mode/test_kw_and_kwarg.py
index 0100e0d0fc6..3fdbcefc413 100644
--- a/tests/ut/python/pynative_mode/test_kw_and_kwarg.py
+++ b/tests/ut/python/pynative_mode/test_kw_and_kwarg.py
@@ -64,7 +64,7 @@ def test_kw_grad():
         def __init__(self, net):
             super(GradKwNet, self).__init__()
             self.net = net
-            self.grad_all_wit_sense = C.GradOperation('grad_all_with_sens', get_all=True, sens_param=True)
+            self.grad_all_wit_sense = C.GradOperation(get_all=True, sens_param=True)

         def construct(self, x, y, *arg, **kwargs):
             return self.grad_all_wit_sense(self.net)(x, y, *arg, **kwargs)
@@ -112,7 +112,7 @@ def test_grad():
         def __init__(self, net):
             super(GradNet, self).__init__()
             self.net = net
-            self.grad_all_wit_sense = C.GradOperation('grad_all_with_sens', get_all=True, sens_param=True)
+            self.grad_all_wit_sense = C.GradOperation(get_all=True, sens_param=True)

         def construct(self, x, y, z, sens):
             return self.grad_all_wit_sense(self.net)(x, y, z, sens)
diff --git a/tests/ut/python/pynative_mode/test_pynative_model.py b/tests/ut/python/pynative_mode/test_pynative_model.py
index 521f25a301a..a0469cdaf4b 100644
--- a/tests/ut/python/pynative_mode/test_pynative_model.py
+++ b/tests/ut/python/pynative_mode/test_pynative_model.py
@@ -24,7 +24,7 @@
 from mindspore.ops import operations as P
 from ..ut_filter import non_graph_engine

-grad_by_list = C.GradOperation('get_by_list', get_by_list=True)
+grad_by_list = C.GradOperation(get_by_list=True)


 def setup_module(module):
diff --git a/tests/ut/python/pynative_mode/test_sparse_pynative.py b/tests/ut/python/pynative_mode/test_sparse_pynative.py
index 4d9db16cb7a..3568491b23a 100644
--- a/tests/ut/python/pynative_mode/test_sparse_pynative.py
+++ b/tests/ut/python/pynative_mode/test_sparse_pynative.py
@@ -26,7 +26,7 @@
 from mindspore.ops import composite as C

 context.set_context(mode=context.PYNATIVE_MODE, enable_sparse=True)

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class GradWrap(nn.Cell):
     def __init__(self, network):
         super(GradWrap, self).__init__()
diff --git a/tests/ut/python/pynative_mode/test_stop_gradient.py b/tests/ut/python/pynative_mode/test_stop_gradient.py
index d880aa7b178..59ae8d64297 100644
--- a/tests/ut/python/pynative_mode/test_stop_gradient.py
+++ b/tests/ut/python/pynative_mode/test_stop_gradient.py
@@ -31,8 +31,8 @@
 from ..ut_filter import non_graph_engine
 from ....mindspore_test_framework.utils.bprop_util import bprop

-grad_by_list = C.GradOperation('get_by_list', get_by_list=True)
-grad_all = C.GradOperation('get_all', get_all=True)
+grad_by_list = C.GradOperation(get_by_list=True)
+grad_all = C.GradOperation(get_all=True)


 def setup_module(module):
diff --git a/tests/ut/python/pynative_mode/test_user_define_bprop_check.py b/tests/ut/python/pynative_mode/test_user_define_bprop_check.py
index 6ebe94acebf..fe76611cfe5 100644
--- a/tests/ut/python/pynative_mode/test_user_define_bprop_check.py
+++ b/tests/ut/python/pynative_mode/test_user_define_bprop_check.py
@@ -21,7 +21,7 @@
 from mindspore import dtype as mstype
 from mindspore.ops import composite as C

-grad_all_with_sens = C.GradOperation('grad_all_with_sens', get_all=True, sens_param=True)
+grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True)


 def test_user_define_bprop_check_ok():
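Every hunk above applies the same mechanical rewrite: GradOperation no longer takes a leading name string, and callers keep only the keyword arguments get_all, get_by_list, and sens_param (per base.py in this change, the C++ base is now always registered under the fixed name 'grad'). A minimal before/after sketch of the migration follows; it is illustrative only and not part of the patch, and the net, weights, x, and sens names in the usage comments are hypothetical placeholders.

    import mindspore.ops.composite as C

    # Before this change: a label string was passed as the first positional argument.
    #   grad_all = C.GradOperation('get_all', get_all=True)

    # After this change: keyword-only configuration.
    grad_all = C.GradOperation(get_all=True)            # gradients w.r.t. all inputs
    grad_by_list = C.GradOperation(get_by_list=True)    # gradients w.r.t. a ParameterTuple
    grad_with_sens = C.GradOperation(get_all=True,
                                     sens_param=True)   # caller supplies the output sensitivity

    # Typical call pattern, unchanged by this patch (net/weights/x/sens assumed):
    #   grads = grad_by_list(net, weights)(x)
    #   grads = grad_with_sens(net)(x, sens)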