diff --git a/mindspore/nn/optim/adam.py b/mindspore/nn/optim/adam.py
index 7fd1b0270ae..cb3425a046c 100755
--- a/mindspore/nn/optim/adam.py
+++ b/mindspore/nn/optim/adam.py
@@ -220,7 +220,7 @@ class Adam(Optimizer):
         >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
         >>>                 {'params': no_conv_params, 'lr': 0.01},
         >>>                 {'order_params': net.trainable_params()}]
-        >>> optm = nn.Adam(group_params, learning_rate=0.1, weight_decay=0.0)
+        >>> optim = nn.Adam(group_params, learning_rate=0.1, weight_decay=0.0)
         >>> # The conv_params's parameters will use default learning rate of 0.1 and weight decay of 0.01.
         >>> # The no_conv_params's parameters will use learning rate of 0.01 and defaule weight decay of 0.0.
         >>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'.
diff --git a/mindspore/nn/optim/lazyadam.py b/mindspore/nn/optim/lazyadam.py
index a39160d2e9d..30d2992a8f4 100644
--- a/mindspore/nn/optim/lazyadam.py
+++ b/mindspore/nn/optim/lazyadam.py
@@ -168,7 +168,7 @@ class LazyAdam(Optimizer):
         >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
         >>>                 {'params': no_conv_params, 'lr': 0.01},
         >>>                 {'order_params': net.trainable_params()}]
-        >>> opt = nn.LazyAdam(group_params, learning_rate=0.1, weight_decay=0.0)
+        >>> optim = nn.LazyAdam(group_params, learning_rate=0.1, weight_decay=0.0)
         >>> # The conv_params's parameters will use default learning rate of 0.1 and weight decay of 0.01.
         >>> # The no_conv_params's parameters will use learning rate of 0.01 and default weight decay of 0.0.
         >>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'.
diff --git a/mindspore/ops/operations/array_ops.py b/mindspore/ops/operations/array_ops.py
index fff51ef309c..ca3da86128b 100644
--- a/mindspore/ops/operations/array_ops.py
+++ b/mindspore/ops/operations/array_ops.py
@@ -3013,12 +3013,12 @@ class DepthToSpace(PrimitiveWithInfer):
 
     This is the reverse operation of SpaceToDepth.
 
+    The depth of output tensor is :math:`input\_depth / (block\_size * block\_size)`.
+
     The output tensor's `height` dimension is :math:`height * block\_size`.
 
     The output tensor's `weight` dimension is :math:`weight * block\_size`.
 
-    The depth of output tensor is :math:`input\_depth / (block\_size * block\_size)`.
-
     The input tensor's depth must be divisible by `block_size * block_size`.
     The data format is "NCHW".
 
@@ -3029,7 +3029,7 @@ class DepthToSpace(PrimitiveWithInfer):
         - **x** (Tensor) - The target tensor. It must be a 4-D tensor.
 
     Outputs:
-        Tensor, the same type as `x`.
+        Tensor, has the same dtype as `x`.
 
     Examples:
         >>> x = Tensor(np.random.rand(1,12,1,1), mindspore.float32)
diff --git a/mindspore/ops/operations/math_ops.py b/mindspore/ops/operations/math_ops.py
index babb511385c..b76e6292a7e 100644
--- a/mindspore/ops/operations/math_ops.py
+++ b/mindspore/ops/operations/math_ops.py
@@ -741,6 +741,7 @@ class CumSum(PrimitiveWithInfer):
     Inputs:
         - **input** (Tensor) - The input tensor to accumulate.
         - **axis** (int) - The axis to accumulate the tensor's value. Only constant value is allowed.
+          Must be in the range [-rank(input), rank(input)).
 
     Outputs:
         Tensor, the shape of the output tensor is consistent with the input tensor's.
@@ -1764,6 +1765,7 @@ class Div(_MathBinaryOp):
         >>> input_y = Tensor(np.array([3.0, 2.0, 3.0]), mindspore.float32)
         >>> div = P.Div()
         >>> div(input_x, input_y)
+        [-1.3, 2.5, 2.0]
     """
 
     def infer_value(self, x, y):
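
Reviewer note (not part of the patch): the DepthToSpace docstring reordered above describes the output shape as depth / (block_size * block_size), height * block_size, width * block_size. The following is a minimal NumPy sketch to sanity-check that arithmetic only; the depth_to_space helper name and the within-block element ordering are illustrative assumptions, not MindSpore's actual kernel.

import numpy as np

def depth_to_space(x, block_size):
    """Rearrange depth blocks into spatial blocks for an NCHW array (illustrative only)."""
    n, c, h, w = x.shape
    b = block_size
    assert c % (b * b) == 0, "input depth must be divisible by block_size * block_size"
    # (N, C, H, W) -> (N, b, b, C/(b*b), H, W): peel the two block factors off the depth axis
    x = x.reshape(n, b, b, c // (b * b), h, w)
    # interleave the block factors into the spatial axes: (N, C/(b*b), H, b, W, b)
    x = x.transpose(0, 3, 4, 1, 5, 2)
    return x.reshape(n, c // (b * b), h * b, w * b)

x = np.random.rand(1, 12, 1, 1).astype(np.float32)
y = depth_to_space(x, block_size=2)
print(y.shape)  # (1, 3, 2, 2): depth 12 / (2*2) = 3, height 1*2 = 2, width 1*2 = 2
print(y.dtype)  # float32, matching the input dtype as the updated Outputs line states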