Forked from mindspore-Ecosystem/mindspore
Commit f6f445a30a — "Fix the docs for its format problems" (parent commit: 54cd78d25c)
@ -62,7 +62,7 @@ class Jvp(Cell):
|
|||
>>> from mindspore.nn.grad import Jvp
|
||||
>>> class Net(nn.Cell):
|
||||
... def construct(self, x, y):
|
||||
... return x**3 + y
|
||||
... return x**3 + y
|
||||
>>> x = Tensor(np.array([[1, 2], [3, 4]]).astype(np.float32))
|
||||
>>> y = Tensor(np.array([[1, 2], [3, 4]]).astype(np.float32))
|
||||
>>> v = Tensor(np.array([[1, 1], [1, 1]]).astype(np.float32))
|
||||
|
@ -126,7 +126,7 @@ class Vjp(Cell):
|
|||
>>> from mindspore.nn.grad import Vjp
|
||||
>>> class Net(nn.Cell):
|
||||
... def construct(self, x, y):
|
||||
... return x**3 + y
|
||||
... return x**3 + y
|
||||
>>> x = Tensor(np.array([[1, 2], [3, 4]]).astype(np.float32))
|
||||
>>> y = Tensor(np.array([[1, 2], [3, 4]]).astype(np.float32))
|
||||
>>> v = Tensor(np.array([[1, 1], [1, 1]]).astype(np.float32))
|
||||
|
|
|
@ -175,8 +175,7 @@ class FixedLossScaleUpdateCell(Cell):
|
|||
|
||||
Examples:
|
||||
>>> import numpy as np
|
||||
>>> from mindspore import Tensor, Parameter, nn
|
||||
>>> from mindspore.ops as ops
|
||||
>>> from mindspore import Tensor, Parameter, nn, ops
|
||||
>>>
|
||||
>>> class Net(nn.Cell):
|
||||
... def __init__(self, in_features, out_features):
|
||||
|
@ -248,8 +247,7 @@ class TrainOneStepWithLossScaleCell(TrainOneStepCell):
|
|||
|
||||
Examples:
|
||||
>>> import numpy as np
|
||||
>>> from mindspore import Tensor, Parameter, nn
|
||||
>>> from mindspore.ops as ops
|
||||
>>> from mindspore import Tensor, Parameter, nn, ops
|
||||
>>> from mindspore import dtype as mstype
|
||||
>>>
|
||||
>>> class Net(nn.Cell):
|
||||
|
|
|
@ -143,7 +143,7 @@ class Primitive(Primitive_):
|
|||
Args:
|
||||
stage (int): The stage id for the current operation.
|
||||
Examples:
|
||||
>>> from mindspore.ops as ops
|
||||
>>> from mindspore import ops
|
||||
>>> add = ops.Add()
|
||||
>>> print(add.set_stage(0))
|
||||
Prim[Add]<stage=0>
|
||||
|
@ -162,7 +162,7 @@ class Primitive(Primitive_):
|
|||
Args:
|
||||
strategy (tuple): Strategy describes the distributed parallel mode of the current primitive.
|
||||
Examples:
|
||||
>>> from mindspore.ops as ops
|
||||
>>> from mindspore import ops
|
||||
>>> add = ops.Add()
|
||||
>>> print(add.shard(((1, 1), (1, 1))))
|
||||
Prim[Add]<strategy=((1, 1), (1, 1))>
|
||||
|
|
Loading…
Reference in New Issue