diff --git a/mindspore/nn/optim/adam.py b/mindspore/nn/optim/adam.py
index d5d2d2d4e00..b9cfa75af78 100755
--- a/mindspore/nn/optim/adam.py
+++ b/mindspore/nn/optim/adam.py
@@ -553,8 +553,8 @@ class AdamOffload(Optimizer):
         >>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
         >>> no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
         >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
-        >>>                 {'params': no_conv_params, 'lr': 0.01},
-        >>>                 {'order_params': net.trainable_params()}]
+        ...                 {'params': no_conv_params, 'lr': 0.01},
+        ...                 {'order_params': net.trainable_params()}]
         >>> optim = nn.AdamOffload(group_params, learning_rate=0.1, weight_decay=0.0)
         >>> # The conv_params's parameters will use default learning rate of 0.1 and weight decay of 0.01.
         >>> # The no_conv_params's parameters will use learning rate of 0.01 and defaule weight decay of 0.0.
diff --git a/mindspore/nn/wrap/grad_reducer.py b/mindspore/nn/wrap/grad_reducer.py
index b3cd4c1e4c4..9f1e8a16a79 100644
--- a/mindspore/nn/wrap/grad_reducer.py
+++ b/mindspore/nn/wrap/grad_reducer.py
@@ -265,7 +265,7 @@ class DistributedGradReducer(Cell):
         >>>
         >>> device_id = int(os.environ["DEVICE_ID"])
         >>> context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=True,
-        >>>                     device_id=int(device_id))
+        ...                     device_id=int(device_id))
         >>> init()
         >>> context.reset_auto_parallel_context()
         >>> context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL)
diff --git a/mindspore/ops/composite/base.py b/mindspore/ops/composite/base.py
index e5610b66ce6..1ecbff297ce 100644
--- a/mindspore/ops/composite/base.py
+++ b/mindspore/ops/composite/base.py
@@ -384,9 +384,11 @@ class MultitypeFuncGraph(MultitypeFuncGraph_):
         >>> @add.register("Tensor", "Tensor")
         ... def add_tensor(x, y):
         ...     return tensor_add(x, y)
-        >>> add(1, 2)
+        >>> output = add(1, 2)
+        >>> print(output)
         3
-        >>> add(Tensor(1, mstype.float32), Tensor(2, mstype.float32))
+        >>> output = add(Tensor(1, mstype.float32), Tensor(2, mstype.float32))
+        >>> print(output)
         Tensor(shape=[], dtype=Float32, 3)
     """
@@ -470,11 +472,13 @@ class HyperMap(HyperMap_):
         ...     return F.square(x)
         >>>
         >>> common_map = HyperMap()
-        >>> common_map(square, nest_tensor_list)
+        >>> output = common_map(square, nest_tensor_list)
+        >>> print(output)
         ((Tensor(shape=[], dtype=Float32, 1), Tensor(shape=[], dtype=Float32, 4)),
         (Tensor(shape=[], dtype=Float32, 9), Tensor(shape=[], dtype=Float32, 16))
         >>> square_map = HyperMap(square)
-        >>> square_map(nest_tensor_list)
+        >>> output = square_map(nest_tensor_list)
+        >>> print(output)
         ((Tensor(shape=[], dtype=Float32, 1), Tensor(shape=[], dtype=Float32, 4)),
         (Tensor(shape=[], dtype=Float32, 9), Tensor(shape=[], dtype=Float32, 16))
     """
@@ -531,10 +535,12 @@ class Map(Map_):
         ...     return F.square(x)
         >>>
         >>> common_map = Map()
-        >>> common_map(square, tensor_list)
+        >>> output = common_map(square, tensor_list)
+        >>> print(output)
         (Tensor(shape=[], dtype=Float32, 1), Tensor(shape=[], dtype=Float32, 4), Tensor(shape=[], dtype=Float32, 9))
         >>> square_map = Map(square)
-        >>> square_map(tensor_list)
+        >>> output = square_map(tensor_list)
+        >>> print(output)
         (Tensor(shape=[], dtype=Float32, 1), Tensor(shape=[], dtype=Float32, 4), Tensor(shape=[], dtype=Float32, 9))
     """
diff --git a/mindspore/ops/primitive.py b/mindspore/ops/primitive.py
index b06299f498a..4a8ef37b5a0 100644
--- a/mindspore/ops/primitive.py
+++ b/mindspore/ops/primitive.py
@@ -35,9 +35,9 @@ class Primitive(Primitive_):
         >>> # or work with prim_attr_register:
         >>> # init a Primitive class with attr1 and attr2
         >>> class Add(Primitive):
-        >>>     @prim_attr_register
-        >>>     def __init__(self, attr1, attr2):
-        >>>         # check attr1 and attr2 or do some initializations
+        ...     @prim_attr_register
+        ...     def __init__(self, attr1, attr2):
+        ...         # check attr1 and attr2 or do some initializations
         >>> # init a Primitive obj with attr1=1 and attr2=2
         >>> add = Add(attr1=1, attr2=2)
     """
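
All of these hunks apply the same doctest conventions: continuation lines of a multi-line statement use the `...` prompt instead of a repeated `>>>`, and values whose output appears in the example are assigned to a variable and shown through an explicit print. The sketch below only illustrates that pattern; the scale function and its docstring are hypothetical and are not part of MindSpore.

import doctest


def scale(values, factor=2):
    """Multiply every element of ``values`` by ``factor``.

    Examples:
        >>> data = [1, 2,
        ...         3]                    # continuation prompt is '...', not '>>>'
        >>> output = scale(data, factor=10)
        >>> print(output)                 # expected output follows the print call
        [10, 20, 30]
    """
    return [v * factor for v in values]


if __name__ == "__main__":
    # Executes every docstring example in this module and reports mismatches.
    doctest.testmod(verbose=True)

If such examples are run under a doctest runner, a `>>>` on a continuation line is treated as the start of a new statement, so the original multi-line calls in these docstrings would not parse; switching those lines to `...` and printing the results makes the examples checkable.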