From 0bb05b0e82fec324bf4a2068ccf9ea208afa6df7 Mon Sep 17 00:00:00 2001
From: tacyi139
Date: Fri, 3 Dec 2021 11:13:06 +0800
Subject: [PATCH] optimize code docs

---
 mindspore/nn/cell.py                  |  2 +-
 mindspore/ops/composite/base.py       |  2 ++
 mindspore/ops/functional.py           | 12 +++++++-----
 mindspore/ops/operations/_grad_ops.py |  6 +++---
 mindspore/ops/operations/nn_ops.py    |  4 ++--
 mindspore/ops/operations/other_ops.py |  6 ++++--
 6 files changed, 19 insertions(+), 13 deletions(-)

diff --git a/mindspore/nn/cell.py b/mindspore/nn/cell.py
index f639770c3b8..2dfc0f9d98b 100755
--- a/mindspore/nn/cell.py
+++ b/mindspore/nn/cell.py
@@ -420,7 +420,7 @@ class Cell(Cell_):
 
     def shard(self, strategy):
         """
-        for all primitive ops in this cell(including ops of cells that wrapped by this cell),
+        For all primitive ops in this cell (including ops of cells wrapped by this cell),
         if parallel strategy is not specified, then instead of auto-searching, data parallel
         strategy will be generated for those primitive ops.
 
diff --git a/mindspore/ops/composite/base.py b/mindspore/ops/composite/base.py
index 8b843aae659..a3485d115b5 100644
--- a/mindspore/ops/composite/base.py
+++ b/mindspore/ops/composite/base.py
@@ -695,6 +695,8 @@ class Map(Map_):
 
     Examples:
         >>> from mindspore import dtype as mstype
+        >>> from mindspore import Tensor, ops
+        >>> from mindspore.ops import MultitypeFuncGraph, Map
         >>> tensor_list = (Tensor(1, mstype.float32), Tensor(2, mstype.float32), Tensor(3, mstype.float32))
         >>> # square all the tensor in the list
         >>>
diff --git a/mindspore/ops/functional.py b/mindspore/ops/functional.py
index 164f38954e3..238ac654297 100644
--- a/mindspore/ops/functional.py
+++ b/mindspore/ops/functional.py
@@ -238,8 +238,9 @@ def jvp(fn, inputs, v):
 
     Returns:
         Tuple, tuple of output and jvp.
-        - netout(Tensors or Tuple of Tensors), the output of "fn(inputs)".
-        - jvp(Tensors or Tuple of Tensors), the result of the dot product.
+
+        - **netout** (Tensors or Tuple of Tensors) - The output of "fn(inputs)".
+        - **jvp** (Tensors or Tuple of Tensors) - The result of the dot product.
 
     Raises:
         TypeError: If the input is not a tensor or tuple or list of tensors.
@@ -287,9 +288,10 @@ def vjp(fn, inputs, v):
         v (Tensor or tuple or list): The shape and type of v should be the same as outputs.
 
     Returns:
-        Tuple, tuple of output and jvp.
-        - netout(Tensors or Tuple of Tensors), the output of "fn(inputs)".
-        - vjp(Tensors or Tuple of Tensors), the result of the dot product.
+        Tuple, tuple of output and vjp.
+
+        - **netout** (Tensors or Tuple of Tensors) - The output of "fn(inputs)".
+        - **vjp** (Tensors or Tuple of Tensors) - The result of the dot product.
 
     Raises:
         TypeError: If the input is not a tensor or tuple or list of tensors.
diff --git a/mindspore/ops/operations/_grad_ops.py b/mindspore/ops/operations/_grad_ops.py
index 9e9a1ace578..7a788776f0a 100644
--- a/mindspore/ops/operations/_grad_ops.py
+++ b/mindspore/ops/operations/_grad_ops.py
@@ -426,7 +426,7 @@ class Conv2DBackpropFilter(Primitive):
             top, bottom, left and right are the same, equal to pad. If `pad` is a tuple of four integers, the padding of
             top, bottom, left and right equal to pad[0], pad[1], pad[2], and pad[3] correspondingly.
         pad_list (tuple): The pad list like (top, bottom, left, right). Default: (0, 0, 0, 0).
-        mode (int): Modes for different convolutions. 0 Math convolutiuon, 1 cross-correlation convolution ,
+        mode (int): Modes for different convolutions. 0 Math convolution, 1 cross-correlation convolution,
             2 deconvolution, 3 depthwise convolution. Default: 1.
         stride (tuple): The stride to be applied to the convolution filter. Default: (1, 1).
         dilation (tuple): Specifies the dilation rate to be used for the dilated convolution. Default: (1, 1, 1, 1).
@@ -485,7 +485,7 @@ class DepthwiseConv2dNativeBackpropFilter(PrimitiveWithInfer):
     Args:
         channel_multiplier (int): The multiplier for the original output conv.
         kernel_size (int or tuple): The size of the conv kernel.
-        mode (int): Modes for different convolutions. 0 Math convolutiuon, 1 cross-correlation convolution,
+        mode (int): Modes for different convolutions. 0 Math convolution, 1 cross-correlation convolution,
             2 deconvolution,3 depthwise convolution. Default: 3.
         pad_mode (str): The mode to fill padding which can be: "valid", "same" or "pad". Default: "valid".
         pad (Union(int, tuple[int])): The pad value to be filled. Default: 0. If `pad` is an integer, the paddings of
@@ -552,7 +552,7 @@ class DepthwiseConv2dNativeBackpropInput(PrimitiveWithInfer):
     Args:
        channel_multiplier (int): The multiplier for the original output conv.
        kernel_size (int or tuple): The size of the conv kernel.
-        mode (int): Modes for different convolutions. 0 Math convolutiuon, 1 cross-correlation convolution ,
+        mode (int): Modes for different convolutions. 0 Math convolution, 1 cross-correlation convolution,
            2 deconvolution,3 depthwise convolution. Default: 3.
        pad_mode (str): Modes to fill padding. It could be "valid", "same", or "pad". Default: "valid".
        pad (Union(int, tuple[int])): The pad value to be filled. Default: 0. If `pad` is an integer, the paddings of
diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py
index a6c69c29ded..95d1e2481c0 100755
--- a/mindspore/ops/operations/nn_ops.py
+++ b/mindspore/ops/operations/nn_ops.py
@@ -1317,7 +1317,7 @@ class Conv2D(Primitive):
             and width of the 2D convolution window. Single int means the value is for both the height and the width of
             the kernel. A tuple of 2 ints means the first value is for the height and the other is for the width of
             the kernel.
-        mode (int): Modes for different convolutions. 0 Math convolutiuon, 1 cross-correlation convolution ,
+        mode (int): Modes for different convolutions. 0 Math convolution, 1 cross-correlation convolution,
             2 deconvolution, 3 depthwise convolution. Default: 1.
         pad_mode (str): Specifies padding mode. The optional values are
             "same", "valid", "pad". Default: "valid".
@@ -2147,7 +2147,7 @@ class Conv2DTranspose(Conv2DBackpropInput):
             top, bottom, left and right are the same, equal to pad. If `pad` is a tuple of four integers, the padding of
             top, bottom, left and right equal to pad[0], pad[1], pad[2], and pad[3] correspondingly.
         pad_list (Union[str, None]): The pad list like (top, bottom, left, right). Default: None.
-        mode (int): Modes for different convolutions. 0 Math convolutiuon, 1 cross-correlation convolution ,
+        mode (int): Modes for different convolutions. 0 Math convolution, 1 cross-correlation convolution,
             2 deconvolution, 3 depthwise convolution. Default: 1.
         stride (Union[int. tuple[int]]): The stride to be applied to the convolution filter. Default: 1.
         dilation (Union[int. tuple[int]]): Specifies the dilation rate to be used for the dilated convolution.
diff --git a/mindspore/ops/operations/other_ops.py b/mindspore/ops/operations/other_ops.py
index b956ccef65c..8b9f97e65f8 100644
--- a/mindspore/ops/operations/other_ops.py
+++ b/mindspore/ops/operations/other_ops.py
@@ -415,10 +415,12 @@ class Partial(Primitive):
         >>> partial_show_input = partial(show_input, Tensor(1))
         >>> output1 = partial_show_input(Tensor(2), Tensor(3))
         >>> print(output1)
-        (1, 2, 3)
+        (Tensor(shape=[], dtype=Int64, value= 1), Tensor(shape=[], dtype=Int64, value= 2), Tensor(shape=[], dtype=Int64,
+        value= 3))
         >>> output2 = partial_show_input(Tensor(3), Tensor(4))
         >>> print(output2)
-        (1, 3, 4)
+        (Tensor(shape=[], dtype=Int64, value= 1), Tensor(shape=[], dtype=Int64, value= 3), Tensor(shape=[], dtype=Int64,
+        value= 4))
         """
 
     # Side effect will propagated from the first argument to return value.
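
The jvp and vjp hunks above only restructure the Returns sections of the two docstrings, so the patch itself never shows a call site. Below is a minimal usage sketch, not part of the patch: it assumes the jvp and vjp helpers defined in mindspore/ops/functional.py are importable as shown and that each returns the (netout, gradient) pair described in those Returns sections; Net, x, y and v are illustrative names.

    import numpy as np
    import mindspore.nn as nn
    from mindspore import Tensor
    from mindspore.ops import functional as F

    class Net(nn.Cell):
        def construct(self, x, y):
            return x ** 3 + y

    x = Tensor(np.array([[1.0, 2.0], [3.0, 4.0]], np.float32))
    y = Tensor(np.array([[1.0, 2.0], [3.0, 4.0]], np.float32))
    v = Tensor(np.ones((2, 2), np.float32))

    # jvp: v must match the *inputs* in shape and type, so one v is passed per input.
    netout, jvp_out = F.jvp(Net(), (x, y), (v, v))

    # vjp: v must match the *outputs* in shape and type; Net has a single output here.
    netout, vjp_out = F.vjp(Net(), (x, y), v)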
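
The composite/base.py hunk adds two imports at the top of the Map doctest, but the rest of that example falls outside the hunk window. The sketch below fills in the pattern those imports support; the square overload and the final Map() call are assumptions based on the surrounding docstring, not lines taken from this patch.

    from mindspore import Tensor, dtype as mstype
    from mindspore.ops import MultitypeFuncGraph, Map

    square = MultitypeFuncGraph('square')

    @square.register("Tensor")
    def square_tensor(x):
        # Overload chosen when Map is applied to Tensor elements.
        return x * x

    tensor_list = (Tensor(1, mstype.float32), Tensor(2, mstype.float32), Tensor(3, mstype.float32))
    common_map = Map()
    output = common_map(square, tensor_list)
    # output is a tuple of Tensors holding 1.0, 4.0 and 9.0.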
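
The remaining hunks all fix the same "convolutiuon" typo in the description of the mode argument (0 math convolution, 1 cross-correlation, 2 deconvolution, 3 depthwise). As a hedged illustration of where that argument appears, this sketch constructs the Conv2D primitive with its documented default mode=1; the shapes and values are made up for the example.

    import numpy as np
    from mindspore import Tensor
    import mindspore.ops as ops

    # mode=1 (cross-correlation) is the documented default for Conv2D.
    conv2d = ops.Conv2D(out_channel=32, kernel_size=3, mode=1, pad_mode="valid")
    x = Tensor(np.ones((1, 3, 32, 32), np.float32))       # NCHW input
    weight = Tensor(np.ones((32, 3, 3, 3), np.float32))   # (out_channel, in_channel, kH, kW)
    output = conv2d(x, weight)                            # valid 3x3 conv -> (1, 32, 30, 30)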