diff --git a/mindspore/nn/graph_kernels/graph_kernels.py b/mindspore/nn/graph_kernels/graph_kernels.py
index 9cb06d7e498..b022c18d46d 100644
--- a/mindspore/nn/graph_kernels/graph_kernels.py
+++ b/mindspore/nn/graph_kernels/graph_kernels.py
@@ -383,7 +383,7 @@ class SoftmaxCrossEntropyWithLogits(GraphKernel):
     Sets input logits as `X`, input label as `Y`, output as `loss`. Then,
 
     .. math::
-        p_{ij} = softmax(X_{ij}) = \frac{exp(x_i)}{\sum_{j = 0}^{N-1}\exp(x_j)}
+        p_{ij} = softmax(X_{ij}) = \frac{\exp(x_i)}{\sum_{j = 0}^{N-1}\exp(x_j)}
 
     .. math::
         loss_{ij} = -\sum_j{Y_{ij} * ln(p_{ij})}
@@ -666,7 +666,7 @@ class LogSoftmax(GraphKernel):
     the Log Softmax function is shown as follows:
 
     .. math::
-        \text{output}(x_i) = \log \left(\frac{exp(x_i)} {\sum_{j = 0}^{N-1}\exp(x_j)}\right),
+        \text{output}(x_i) = \log \left(\frac{\exp(x_i)} {\sum_{j = 0}^{N-1}\exp(x_j)}\right),
 
     where :math:`N` is the length of the Tensor.
 
@@ -674,7 +674,7 @@ class LogSoftmax(GraphKernel):
        axis (int): The axis to do the Log softmax operation. Default: -1.
 
     Inputs:
-        logits (Tensor): The input of Log Softmax.
+        - **logits** (Tensor) - The input of Log Softmax.
 
     Outputs:
         Tensor, with the same type and shape as the logits.
diff --git a/mindspore/nn/layer/embedding.py b/mindspore/nn/layer/embedding.py
index 8c31a9a5aca..6b0556eb17f 100755
--- a/mindspore/nn/layer/embedding.py
+++ b/mindspore/nn/layer/embedding.py
@@ -127,7 +127,7 @@ def _make_axis_range(start, end):
 
 class EmbeddingLookup(Cell):
     r"""
-    Returns a slice of input tensor based on the specified indices.
+    Returns a slice of the input tensor based on the specified indices.
 
     Note:
         When 'target' is set to 'CPU', this module will use
diff --git a/mindspore/nn/probability/bijector/exp.py b/mindspore/nn/probability/bijector/exp.py
index 62b782911bc..8e9a7a17e0e 100644
--- a/mindspore/nn/probability/bijector/exp.py
+++ b/mindspore/nn/probability/bijector/exp.py
@@ -22,7 +22,7 @@ class Exp(PowerTransform):
     This Bijector performs the operation:
 
     .. math::
-        Y = exp(x).
+        Y = \exp(x).
 
     Args:
         name (str): The name of the Bijector. Default: 'Exp'.
diff --git a/mindspore/ops/__init__.py b/mindspore/ops/__init__.py
index 1f27b0eca3e..d66a44239a4 100644
--- a/mindspore/ops/__init__.py
+++ b/mindspore/ops/__init__.py
@@ -24,7 +24,7 @@ Examples:
     >>> import mindspore.ops as ops
 
 Note:
-    - The Primitive operators in operations need to be used after instantiation.
+    - The Primitive operators in operations need to be instantiated before being used.
     - The composite operators are the pre-defined combination of operators.
     - The functional operators are the pre-instantiated Primitive operators, which can be used directly as a function.
     - For functional operators usage, please refer to
diff --git a/mindspore/ops/composite/base.py b/mindspore/ops/composite/base.py
index 8ce301dc3ed..e5610b66ce6 100644
--- a/mindspore/ops/composite/base.py
+++ b/mindspore/ops/composite/base.py
@@ -352,7 +352,7 @@ class GradOperation(GradOperation_):
 
 class MultitypeFuncGraph(MultitypeFuncGraph_):
     """
-    Generate overloaded functions.
+    Generates overloaded functions.
 
     MultitypeFuncGraph is a class used to generate overloaded functions, considering different types as inputs.
     Initialize an `MultitypeFuncGraph` object with name, and use `register` with input types as the decorator
diff --git a/mindspore/ops/composite/math_ops.py b/mindspore/ops/composite/math_ops.py
index 0ff299e1f2c..edf1819f79e 100644
--- a/mindspore/ops/composite/math_ops.py
+++ b/mindspore/ops/composite/math_ops.py
@@ -171,11 +171,11 @@ def TensorDot(x1, x2, axes):
         axes = 2 is the same as axes = ((0,1),(1,2)) where length of input shape is 3 for both `a` and `b`
 
     Inputs:
-        - **x1** (Tensor): First tensor in TensorDot op with datatype float16 or float32
-        - **x2** (Tensor): Second tensor in TensorDot op with datatype float16 or float32
-        - **axes** (Union[int, tuple(int), tuple(tuple(int)), list(list(int))]): Single value or
-          tuple/list of length 2 with dimensions specified for `a` and `b` each. If single value `N` passed,
-          automatically picks up first N dims from `a` input shape and last N dims from `b` input shape.
+        - **x1** (Tensor) - First tensor in TensorDot op with datatype float16 or float32
+        - **x2** (Tensor) - Second tensor in TensorDot op with datatype float16 or float32
+        - **axes** (Union[int, tuple(int), tuple(tuple(int)), list(list(int))]) - Single value or
+          tuple/list of length 2 with dimensions specified for `a` and `b` each. If single value `N` passed,
+          automatically picks up first N dims from `a` input shape and last N dims from `b` input shape.
 
     Outputs:
         Tensor, the shape of the output tensor is :math:`(N + M)`. Where :math:`N` and :math:`M` are the free axes not
diff --git a/mindspore/ops/op_info_register.py b/mindspore/ops/op_info_register.py
index 13ee23deeb8..003d883cf27 100644
--- a/mindspore/ops/op_info_register.py
+++ b/mindspore/ops/op_info_register.py
@@ -342,7 +342,7 @@ class AiCPURegOp(RegOp):
 
 class TBERegOp(RegOp):
-    """Class for TBE op info register."""
+    """Class for TBE operator information register."""
 
     def __init__(self, op_name):
         super(TBERegOp, self).__init__(op_name)
diff --git a/mindspore/ops/operations/array_ops.py b/mindspore/ops/operations/array_ops.py
index 9a992d4222b..a7c6b4ef506 100644
--- a/mindspore/ops/operations/array_ops.py
+++ b/mindspore/ops/operations/array_ops.py
@@ -187,7 +187,7 @@ class ExpandDims(PrimitiveWithInfer):
 
 class DType(PrimitiveWithInfer):
     """
-    Returns the data type of input tensor as mindspore.dtype.
+    Returns the data type of the input tensor as mindspore.dtype.
 
     Inputs:
         - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
@@ -219,7 +219,7 @@ class DType(PrimitiveWithInfer):
 
 class SameTypeShape(PrimitiveWithInfer):
     """
-    Checks whether data type and shape of two tensors are the same.
+    Checks whether the data type and shape of two tensors are the same.
 
     Raises:
         TypeError: If the data types of two tensors are not the same.
@@ -344,7 +344,7 @@ class Cast(PrimitiveWithInfer):
 
 class IsSubClass(PrimitiveWithInfer):
     """
-    Checks whether one type is subtraction class of another type.
+    Checks whether this type is a sub-class of another type.
 
     Inputs:
         - **sub_type** (mindspore.dtype) - The type to be checked. Only constant value is allowed.
@@ -427,7 +427,7 @@ class IsInstance(PrimitiveWithInfer):
 
 class Reshape(PrimitiveWithInfer):
     """
-    Reshapes input tensor with the same values based on a given shape tuple.
+    Reshapes the input tensor with the same values based on a given shape tuple.
 
     Raises:
         ValueError: Given a shape tuple, if it has several -1; or if the product
@@ -525,7 +525,7 @@ class Reshape(PrimitiveWithInfer):
 
 class Shape(PrimitiveWithInfer):
     """
-    Returns the shape of input tensor.
+    Returns the shape of the input tensor.
 
     Inputs:
         - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
@@ -559,7 +559,7 @@ class Shape(PrimitiveWithInfer):
 
 class DynamicShape(Primitive):
     """
-    Returns the shape of input tensor.
+    Returns the shape of the input tensor.
 
     Inputs:
         - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
@@ -651,7 +651,7 @@ class Squeeze(PrimitiveWithInfer):
 
 class Transpose(PrimitiveWithCheck):
     """
-    Permutes the dimensions of input tensor according to input permutation.
+    Permutes the dimensions of the input tensor according to input permutation.
 
     Inputs:
         - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
@@ -723,7 +723,7 @@ class Unique(Primitive):
 
 class GatherV2(PrimitiveWithCheck):
     """
-    Returns a slice of input tensor based on the specified indices and axis.
+    Returns a slice of the input tensor based on the specified indices and axis.
 
     Inputs:
         - **input_params** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
@@ -802,7 +802,7 @@ class SparseGatherV2(GatherV2):
 
 class Padding(PrimitiveWithInfer):
     """
-    Extends the last dimension of input tensor from 1 to pad_dim_size, by filling with 0.
+    Extends the last dimension of the input tensor from 1 to pad_dim_size, by filling with 0.
 
     Args:
         pad_dim_size (int): The value of the last dimension of x to be extended, which must be positive.
@@ -848,7 +848,7 @@ class Padding(PrimitiveWithInfer):
 
 class UniqueWithPad(PrimitiveWithInfer):
     """
-    Return unique elements and relative indexes in 1-D tensor, fill with pad num.
+    Returns unique elements and relative indexes in 1-D tensor, filled with padding num.
 
     Inputs:
         - **x** (Tensor) - The tensor need to be unique. Must be 1-D vector with types: int32, int64.
@@ -889,7 +889,7 @@ class UniqueWithPad(PrimitiveWithInfer):
 
 class Split(PrimitiveWithInfer):
     """
-    Splits input tensor into output_num of tensors along the given axis and output numbers.
+    Splits the input tensor into output_num of tensors along the given axis and output numbers.
 
     Args:
         axis (int): Index of the split position. Default: 0.
@@ -1032,7 +1032,7 @@ class TruncatedNormal(PrimitiveWithInfer):
 
 class Size(PrimitiveWithInfer):
     r"""
-    Returns the elements count size of a tensor.
+    Returns the size of a tensor.
 
     Returns an int scalar representing the elements size of input, the total number of elements in the tensor.
 
@@ -1363,7 +1363,7 @@ class ScalarToArray(PrimitiveWithInfer):
 
 class ScalarToTensor(PrimitiveWithInfer):
     """
-    Converts a scalar to a `Tensor`, and convert data type to specified type.
+    Converts a scalar to a `Tensor`, and converts the data type to the specified type.
 
     Inputs:
         - **input_x** (Union[int, float]) - The input is a scalar. Only constant value is allowed.
@@ -1471,7 +1471,7 @@ class InvertPermutation(PrimitiveWithInfer):
 
 class Argmax(PrimitiveWithInfer):
     """
-    Returns the indices of the max value of a tensor across the axis.
+    Returns the indices of the maximum value of a tensor across the axis.
 
     If the shape of input tensor is :math:`(x_1, ..., x_N)`, the shape of the output tensor will be
     :math:`(x_1, ..., x_{axis-1}, x_{axis+1}, ..., x_N)`.
@@ -1523,7 +1523,7 @@ class Argmax(PrimitiveWithInfer):
 
 class Argmin(PrimitiveWithInfer):
     """
-    Returns the indices of the min value of a tensor across the axis.
+    Returns the indices of the minimum value of a tensor across the axis.
 
     If the shape of input tensor is :math:`(x_1, ..., x_N)`, the shape of the output tensor is
     :math:`(x_1, ..., x_{axis-1}, x_{axis+1}, ..., x_N)`.
@@ -1630,7 +1630,7 @@ class ArgMaxWithValue(PrimitiveWithInfer):
 
 class ArgMinWithValue(PrimitiveWithInfer):
     """
-    Calculates the minimum value with corresponding index, return indices and values.
+    Calculates the minimum value with corresponding index, and returns indices and values.
 
     Calculates the minimum value along with the given axis for the input tensor. It returns the minimum values and
     indices.
@@ -1770,7 +1770,7 @@ class Tile(PrimitiveWithInfer):
 
 class UnsortedSegmentSum(PrimitiveWithInfer):
     r"""
-    Computes the sum along segments of a tensor.
+    Computes the sum of a tensor along segments.
 
     Calculates a tensor such that :math:`\text{output}[i] = \sum_{segment\_ids[j] == i} \text{data}[j, \ldots]`, where
     :math:`j` is a tuple describing the index of element in data. `segment_ids` selects which elements in data to sum
@@ -1853,7 +1853,7 @@ class UnsortedSegmentSum(PrimitiveWithInfer):
 
 class UnsortedSegmentMin(PrimitiveWithInfer):
     """
-    Computes the minimum along segments of a tensor.
+    Computes the minimum of a tensor along segments.
 
     Inputs:
         - **input_x** (Tensor) - The shape is :math:`(x_1, x_2, ..., x_R)`.
@@ -1971,7 +1971,7 @@ class UnsortedSegmentMax(PrimitiveWithInfer):
 
 class UnsortedSegmentProd(PrimitiveWithInfer):
     """
-    Computes the product along segments of a tensor.
+    Computes the product of a tensor along segments.
 
     Inputs:
         - **input_x** (Tensor) - The shape is :math:`(x_1, x_2, ..., x_R)`.
@@ -2029,9 +2029,9 @@ class UnsortedSegmentProd(PrimitiveWithInfer):
 
 class Concat(PrimitiveWithInfer):
     r"""
-    Concats tensor in specified axis.
+    Connects tensors along the specified axis.
 
-    Concats input tensors along with the given axis.
+    Connects input tensors along the given axis.
 
     Note:
         The input data is a tuple of tensors. These tensors have the same rank `R`. Set the given axis as `m`, and
@@ -2392,7 +2392,7 @@ class ReverseV2(PrimitiveWithInfer):
 
 class Rint(PrimitiveWithInfer):
     """
-    Returns element-wise integer closest to x.
+    Returns an integer that is closest to x element-wise.
 
     Inputs:
         - **input_x** (Tensor) - The target tensor, which must be one of the following types:
@@ -2932,7 +2932,7 @@ class ScatterNd(PrimitiveWithInfer):
 
 class ResizeNearestNeighbor(PrimitiveWithInfer):
     r"""
-    Resizes the input tensor by using nearest neighbor algorithm.
+    Resizes the input tensor by using the nearest neighbor algorithm.
 
     Resizes the input tensor to a given size by using the nearest neighbor algorithm. The nearest
     neighbor algorithm selects the value of the nearest point and does not consider the
@@ -3022,7 +3022,7 @@ class GatherNd(PrimitiveWithInfer):
 
 class TensorScatterUpdate(PrimitiveWithInfer):
     """
-    Updates tensor value using given values, along with the input indices.
+    Updates tensor values using given values, along with the input indices.
 
     Inputs:
         - **input_x** (Tensor) - The target tensor. The dimension of input_x must be equal to indices.shape[-1].
@@ -3068,7 +3068,7 @@ class TensorScatterUpdate(PrimitiveWithInfer):
 
 class ScatterUpdate(_ScatterOp_Dynamic):
     """
-    Updates tensor value by using input indices and value.
+    Updates tensor values by using input indices and value.
 
     Using given values to update tensor value, along with the input indices.
 
@@ -3115,7 +3115,7 @@ class ScatterUpdate(_ScatterOp_Dynamic):
 
 class ScatterNdUpdate(_ScatterNdOp):
     """
-    Updates tensor value by using input indices and value.
+    Updates tensor values by using input indices and value.
 
     Using given values to update tensor value, along with the input indices.
 
@@ -3165,7 +3165,7 @@ class ScatterNdUpdate(_ScatterNdOp):
 
 class ScatterMax(_ScatterOp):
     """
-    Updates the value of the input tensor through the max operation.
+    Updates the value of the input tensor through the maximum operation.
 
     Using given values to update tensor value through the max operation, along with the input indices.
     This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.
@@ -3210,7 +3210,7 @@ class ScatterMax(_ScatterOp):
 
 class ScatterMin(_ScatterOp):
     """
-    Updates the value of the input tensor through the min operation.
+    Updates the value of the input tensor through the minimum operation.
 
     Using given values to update tensor value through the min operation, along with the input indices.
     This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.
@@ -3249,7 +3249,7 @@ class ScatterMin(_ScatterOp):
 
 class ScatterAdd(_ScatterOp_Dynamic):
     """
-    Updates the value of the input tensor through the add operation.
+    Updates the value of the input tensor through the addition operation.
 
     Using given values to update tensor value through the add operation, along with the input indices.
     This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.
@@ -3333,7 +3333,7 @@ class ScatterSub(_ScatterOp):
 
 class ScatterMul(_ScatterOp):
     """
-    Updates the value of the input tensor through the mul operation.
+    Updates the value of the input tensor through the multiply operation.
 
     Using given values to update tensor value through the mul operation, along with the input indices.
     This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.
@@ -3372,7 +3372,7 @@ class ScatterMul(_ScatterOp):
 
 class ScatterDiv(_ScatterOp):
     """
-    Updates the value of the input tensor through the div operation.
+    Updates the value of the input tensor through the divide operation.
 
     Using given values to update tensor value through the div operation, along with the input indices.
     This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.
@@ -3411,7 +3411,7 @@ class ScatterDiv(_ScatterOp):
 
 class ScatterNdAdd(_ScatterNdOp):
     """
-    Applies sparse addition to individual values or slices in a Tensor.
+    Applies sparse addition to individual values or slices in a tensor.
 
     Using given values to update tensor value through the add operation, along with the input indices.
     This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.
@@ -3449,7 +3449,7 @@ class ScatterNdAdd(_ScatterNdOp):
 
 class ScatterNdSub(_ScatterNdOp):
     """
-    Applies sparse subtraction to individual values or slices in a Tensor.
+    Applies sparse subtraction to individual values or slices in a tensor.
 
     Using given values to update tensor value through the subtraction operation, along with the input indices.
     This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.
@@ -3487,7 +3487,7 @@ class ScatterNdSub(_ScatterNdOp):
 
 class ScatterNonAliasingAdd(_ScatterNdOp):
     """
-    Applies sparse addition to input using individual values or slices.
+    Applies sparse addition to the input using individual values or slices.
 
     Using given values to update tensor value through the add operation, along with the input indices.
     This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.
@@ -3652,7 +3652,7 @@ class DepthToSpace(PrimitiveWithInfer):
 
 class SpaceToBatch(PrimitiveWithInfer):
     r"""
-    Divides spatial dimensions into blocks and combine the block size with the original batch.
+    Divides spatial dimensions into blocks and combines the block size with the original batch.
 
     This operation will divide spatial dimensions (H, W) into blocks with `block_size`, the output tensor's H and
     W dimension is the corresponding number of blocks after division. The output tensor's batch dimension is the
@@ -3810,7 +3810,7 @@ class BatchToSpace(PrimitiveWithInfer):
 
 class SpaceToBatchND(PrimitiveWithInfer):
     r"""
-    Divides spatial dimensions into blocks and combine the block size with the original batch.
+    Divides spatial dimensions into blocks and combines the block size with the original batch.
 
     This operation will divide spatial dimensions (H, W) into blocks with block_shape, the output tensor's H and
     W dimension is the corresponding number of blocks after division. The output tensor's batch dimension is the
@@ -3910,7 +3910,7 @@ class SpaceToBatchND(PrimitiveWithInfer):
 
 class BatchToSpaceND(PrimitiveWithInfer):
     r"""
-    Divides batch dimension with blocks and interleave these blocks back into spatial dimensions.
+    Divides batch dimension with blocks and interleaves these blocks back into spatial dimensions.
 
     This operation will divide batch dimension N into blocks with block_shape, the output tensor's N dimension is the
     corresponding number of blocks after division. The output tensor's H, W dimension is product of original H, W
diff --git a/mindspore/ops/operations/comm_ops.py b/mindspore/ops/operations/comm_ops.py
index 55e59b1612a..ed8e9856014 100644
--- a/mindspore/ops/operations/comm_ops.py
+++ b/mindspore/ops/operations/comm_ops.py
@@ -25,7 +25,7 @@ from ..primitive import PrimitiveWithInfer, PrimitiveWithCheck, prim_attr_regist
 
 class ReduceOp:
     """
-    Operation options for reduce tensors.
+    Operation options for reducing tensors.
 
     There are four kinds of operation options, "SUM", "MAX", "MIN", and "PROD".
 
diff --git a/mindspore/ops/operations/control_ops.py b/mindspore/ops/operations/control_ops.py
index fae57f2a782..77cd418ffcd 100644
--- a/mindspore/ops/operations/control_ops.py
+++ b/mindspore/ops/operations/control_ops.py
@@ -23,7 +23,7 @@ from ..primitive import Primitive, PrimitiveWithInfer, prim_attr_register
 
 class ControlDepend(Primitive):
     """
-    Adds control dependency relation between source and destination operation.
+    Adds control dependency relation between source and destination operations.
 
     In many cases, we need to control the execution order of operations. ControlDepend is designed for this.
     ControlDepend will instruct the execution engine to run the operations in a specific order. ControlDepend
diff --git a/mindspore/ops/operations/debug_ops.py b/mindspore/ops/operations/debug_ops.py
index 2abd75ba94f..30ce75358ee 100644
--- a/mindspore/ops/operations/debug_ops.py
+++ b/mindspore/ops/operations/debug_ops.py
@@ -84,7 +84,7 @@ class ScalarSummary(PrimitiveWithInfer):
 
 class ImageSummary(PrimitiveWithInfer):
     """
-    Outputs image tensor to protocol buffer through image summary operator.
+    Outputs the image tensor to protocol buffer through image summary operator.
 
     Inputs:
         - **name** (str) - The name of the input variable, it must not be an empty string.
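Several of the scatter docstrings above lean on the phrase "sparse addition"; a small NumPy analogue makes the accumulation behavior concrete. np.add.at sums duplicate indices instead of overwriting them, which matches the semantics described for ScatterNdAdd (an illustration only, not the MindSpore kernel):

import numpy as np

input_x = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
indices = np.array([2, 4, 1, 7])
updates = np.array([6.0, 7.0, 8.0, 9.0])
# Accumulate each update into input_x at its index, duplicates included.
np.add.at(input_x, indices, updates)
print(input_x)  # [ 1. 10.  9.  4. 12.  6.  7. 17.]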
@@ -167,7 +167,7 @@ class TensorSummary(PrimitiveWithInfer):
 
 class HistogramSummary(PrimitiveWithInfer):
     """
-    Outputs tensor to protocol buffer through histogram summary operator.
+    Outputs the tensor to protocol buffer through histogram summary operator.
 
     Inputs:
         - **name** (str) - The name of the input variable.
@@ -209,7 +209,7 @@ class HistogramSummary(PrimitiveWithInfer):
 
 class InsertGradientOf(PrimitiveWithInfer):
     """
-    Attaches callback to graph node that will be invoked on the node's gradient.
+    Attaches callback to the graph node that will be invoked on the node's gradient.
 
     Args:
         f (Function): MindSpore's Function. Callback function.
@@ -325,7 +325,7 @@ class HookBackward(PrimitiveWithInfer):
 
 class Print(PrimitiveWithInfer):
     """
-    Outputs tensor or string to stdout.
+    Outputs the tensor or string to stdout.
 
     Note:
         In pynative mode, please use python print function.
@@ -368,7 +368,7 @@ class Print(PrimitiveWithInfer):
 
 class Assert(PrimitiveWithInfer):
     """
-    Asserts that the given condition is true.
+    Asserts that the given condition is True.
 
     If input condition evaluates to false, print the list of tensor in data.
 
     Args:
diff --git a/mindspore/ops/operations/inner_ops.py b/mindspore/ops/operations/inner_ops.py
index 0ff90741a35..b0552a984f5 100644
--- a/mindspore/ops/operations/inner_ops.py
+++ b/mindspore/ops/operations/inner_ops.py
@@ -23,7 +23,7 @@ from ..primitive import prim_attr_register, PrimitiveWithInfer
 
 class ScalarCast(PrimitiveWithInfer):
     """
-    Cast the input scalar to another type.
+    Casts the input scalar to another type.
 
     Inputs:
         - **input_x** (scalar) - The input scalar. Only constant value is allowed.
diff --git a/mindspore/ops/operations/math_ops.py b/mindspore/ops/operations/math_ops.py
index 3d9f390b4ad..87f898af2f8 100644
--- a/mindspore/ops/operations/math_ops.py
+++ b/mindspore/ops/operations/math_ops.py
@@ -330,7 +330,7 @@ class _Reduce(PrimitiveWithInfer):
 
 class ReduceMean(_Reduce):
     """
-    Reduce a dimension of a tensor by averaging all elements in the dimension.
+    Reduces a dimension of a tensor by averaging all elements in the dimension.
 
     The dtype of the tensor to be reduced is number.
 
@@ -368,7 +368,7 @@ class ReduceMean(_Reduce):
 
 class ReduceSum(_Reduce):
     """
-    Reduce a dimension of a tensor by summing all elements in the dimension.
+    Reduces a dimension of a tensor by summing all elements in the dimension.
 
     The dtype of the tensor to be reduced is number.
 
@@ -411,7 +411,7 @@ class ReduceSum(_Reduce):
 
 class ReduceAll(_Reduce):
     """
-    Reduce a dimension of a tensor by the "logical and" of all elements in the dimension.
+    Reduces a dimension of a tensor by the "logical AND" of all elements in the dimension.
 
     The dtype of the tensor to be reduced is bool.
 
@@ -453,7 +453,7 @@ class ReduceAll(_Reduce):
 
 class ReduceAny(_Reduce):
     """
-    Reduce a dimension of a tensor by the "logical OR" of all elements in the dimension.
+    Reduces a dimension of a tensor by the "logical OR" of all elements in the dimension.
 
     The dtype of the tensor to be reduced is bool.
 
@@ -495,7 +495,7 @@ class ReduceAny(_Reduce):
 
 class ReduceMax(_Reduce):
     """
-    Reduce a dimension of a tensor by the maximum value in this dimension.
+    Reduces a dimension of a tensor by the maximum value in this dimension.
 
     The dtype of the tensor to be reduced is number.
 
@@ -543,7 +543,7 @@ class ReduceMax(_Reduce):
 
 class ReduceMin(_Reduce):
     """
-    Reduce a dimension of a tensor by the minimum value in the dimension.
+    Reduces a dimension of a tensor by the minimum value in the dimension.
 
     The dtype of the tensor to be reduced is number.
 
@@ -582,7 +582,7 @@ class ReduceMin(_Reduce):
 
 class ReduceProd(_Reduce):
     """
-    Reduce a dimension of a tensor by multiplying all elements in the dimension.
+    Reduces a dimension of a tensor by multiplying all elements in the dimension.
 
     The dtype of the tensor to be reduced is number.
 
@@ -621,7 +621,7 @@ class ReduceProd(_Reduce):
 
 class CumProd(PrimitiveWithInfer):
     """
-    Compute the cumulative product of the tensor x along axis.
+    Computes the cumulative product of the tensor x along axis.
 
     Args:
         exclusive (bool): If true, perform exclusive cumulative product. Default: False.
@@ -1893,7 +1893,7 @@ class Maximum(_MathBinaryOp):
 
 class RealDiv(_MathBinaryOp):
     """
-    Divide the first input tensor by the second input tensor in floating-point type element-wise.
+    Divides the first input tensor by the second input tensor in floating-point type element-wise.
 
     Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
     The inputs must be two tensors or one tensor and one scalar.
@@ -1979,7 +1979,7 @@ class Div(_MathBinaryOp):
 
 class DivNoNan(_MathBinaryOp):
     """
-    Computes a safe divide which returns 0 if the y is zero.
+    Computes a safe divide and returns 0 if `y` is zero.
 
     Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
     The inputs must be two tensors or one tensor and one scalar.
@@ -2028,7 +2028,7 @@ class FloorDiv(_MathBinaryOp):
     """
-    Divide the first input tensor by the second input tensor element-wise and round down to the closest integer.
+    Divides the first input tensor by the second input tensor element-wise and rounds down to the closest integer.
 
     Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
     The inputs must be two tensors or one tensor and one scalar.
@@ -2062,7 +2062,7 @@ class TruncateDiv(_MathBinaryOp):
     """
-    Divide the first input tensor by the second input tensor element-wise for integer types, negative numbers will
+    Divides the first input tensor by the second input tensor element-wise for integer types, negative numbers will
     round fractional quantities towards zero.
 
     Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
@@ -2097,7 +2097,7 @@ class TruncateMod(_MathBinaryOp):
     """
-    Returns element-wise remainder of division.
+    Returns the remainder of division element-wise.
 
     Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
     The inputs must be two tensors or one tensor and one scalar.
@@ -2173,7 +2173,7 @@ class Mod(_MathBinaryOp):
 
 class Floor(PrimitiveWithInfer):
     """
-    Round a tensor down to the closest integer element-wise.
+    Rounds a tensor down to the closest integer element-wise.
 
     Inputs:
         - **input_x** (Tensor) - The input tensor. Its element data type must be float.
@@ -2206,7 +2206,7 @@ class Floor(PrimitiveWithInfer):
 
 class FloorMod(_MathBinaryOp):
     """
-    Compute the remainder of division element-wise.
+    Computes the remainder of division element-wise.
 
     Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
     The inputs must be two tensors or one tensor and one scalar.
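The TruncateMod and FloorMod summaries fixed above name two genuinely different conventions: a truncated remainder takes the sign of the dividend, while a floored remainder takes the sign of the divisor. NumPy exposes both, so a two-line check illustrates the split (shown for orientation, not as the MindSpore implementation):

import numpy as np

x = np.array([7.0, -7.0])
y = np.array([3.0, 3.0])
print(np.fmod(x, y))  # [ 1. -1.]  truncated remainder, sign follows x
print(np.mod(x, y))   # [ 1.  2.]  floored remainder, sign follows y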
@@ -2240,7 +2240,7 @@ class FloorMod(_MathBinaryOp):
 
 class Ceil(PrimitiveWithInfer):
     """
-    Round a tensor up to the closest integer element-wise.
+    Rounds a tensor up to the closest integer element-wise.
 
     Inputs:
         - **input_x** (Tensor) - The input tensor. It's element data type must be float16 or float32.
@@ -2273,7 +2273,7 @@ class Ceil(PrimitiveWithInfer):
 
 class Xdivy(_MathBinaryOp):
     """
-    Divide the first input tensor by the second input tensor element-wise. Returns zero when `x` is zero.
+    Divides the first input tensor by the second input tensor element-wise. Returns zero when `x` is zero.
 
     Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
     The inputs must be two tensors or one tensor and one scalar.
@@ -2310,7 +2310,7 @@ class Xdivy(_MathBinaryOp):
 
 class Xlogy(_MathBinaryOp):
     """
-    Computes first input tensor multiplied by the logarithm of second input tensor element-wise.
+    Computes the first input tensor multiplied by the logarithm of second input tensor element-wise.
     Returns zero when `x` is zero.
 
     Inputs of `input_x` and `input_y` comply with the implicit type conversion rules to make the data types consistent.
@@ -2349,7 +2349,7 @@ class Acosh(PrimitiveWithInfer):
     """
-    Compute inverse hyperbolic cosine of the input element-wise.
+    Computes inverse hyperbolic cosine of the input element-wise.
 
     Inputs:
         - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
@@ -2413,7 +2413,7 @@ class Cosh(PrimitiveWithInfer):
 
 class Asinh(PrimitiveWithInfer):
     """
-    Compute inverse hyperbolic sine of the input element-wise.
+    Computes inverse hyperbolic sine of the input element-wise.
 
     Inputs:
         - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
@@ -2446,7 +2446,7 @@ class Asinh(PrimitiveWithInfer):
 
 class Sinh(PrimitiveWithInfer):
     """
-    Computes hyperbolic sine of input element-wise.
+    Computes hyperbolic sine of the input element-wise.
 
     Inputs:
         - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
@@ -2542,7 +2542,7 @@ class Equal(_LogicBinaryOp):
 
 class ApproximateEqual(_LogicBinaryOp):
     """
-    Returns true if abs(x1-x2) is smaller than tolerance element-wise, otherwise false.
+    Returns True if abs(x1-x2) is smaller than tolerance element-wise, otherwise False.
 
     Inputs of `x1` and `x2` comply with the implicit type conversion rules to make the data types consistent.
     If they have different data types, lower priority data type will be converted to
@@ -2938,7 +2938,7 @@ class LogicalOr(_LogicBinaryOp):
 
 class IsNan(PrimitiveWithInfer):
     """
-    Judge which elements are nan for each position.
+    Determines which elements are NaN for each position.
 
     Inputs:
         - **input_x** (Tensor) - The input tensor.
@@ -2969,7 +2969,7 @@ class IsNan(PrimitiveWithInfer):
 
 class IsInf(PrimitiveWithInfer):
     """
-    Judging which elements are inf or -inf for each position
+    Determines which elements are inf or -inf for each position.
 
     Inputs:
         - **input_x** (Tensor) - The input tensor.
@@ -3000,7 +3000,7 @@ class IsInf(PrimitiveWithInfer):
 
 class IsFinite(PrimitiveWithInfer):
     """
-    Judge which elements are finite for each position.
+    Determines which elements are finite for each position.
 
     Inputs:
         - **input_x** (Tensor) - The input tensor.
@@ -3034,7 +3034,7 @@ class IsFinite(PrimitiveWithInfer):
 
 class FloatStatus(PrimitiveWithInfer):
     """
-    Determine if the elements contain Not a Number(NaN), infinite or negative infinite. 0 for normal, 1 for overflow.
+    Determines if the elements contain Not a Number(NaN), infinite or negative infinite. 0 for normal, 1 for overflow.
 
     Inputs:
         - **input_x** (Tensor) - The input tensor. The data type must be float16 or float32.
@@ -3103,7 +3103,7 @@ class NPUAllocFloatStatus(PrimitiveWithInfer):
 
 class NPUGetFloatStatus(PrimitiveWithInfer):
     """
-    Updates the flag which is the output tensor of `NPUAllocFloatStatus` with latest overflow status.
+    Updates the flag which is the output tensor of `NPUAllocFloatStatus` with the latest overflow status.
 
     The flag is a tensor whose shape is `(8,)` and data type is `mindspore.dtype.float32`.
     If the sum of the flag equals to 0, there is no overflow happened. If the sum of the flag is bigger than 0, there
@@ -3146,7 +3146,7 @@ class NPUGetFloatStatus(PrimitiveWithInfer):
 
 class NPUClearFloatStatus(PrimitiveWithInfer):
     """
-    Clear the flag which stores the overflow status.
+    Clears the flag which stores the overflow status.
 
     Note:
         The flag is in the register on the `Ascend` device. It will be reset and can not be reused again after the
@@ -3226,7 +3226,7 @@ class Cos(PrimitiveWithInfer):
 
 class ACos(PrimitiveWithInfer):
     """
-    Computes arccosine of input element-wise.
+    Computes arccosine of input tensors element-wise.
 
     Inputs:
         - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
@@ -3257,7 +3257,7 @@ class ACos(PrimitiveWithInfer):
 
 class Sin(PrimitiveWithInfer):
     """
-    Computes sine of input element-wise.
+    Computes sine of the input element-wise.
 
     Inputs:
         - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
@@ -3290,7 +3290,7 @@ class Sin(PrimitiveWithInfer):
 
 class Asin(PrimitiveWithInfer):
     """
-    Computes arcsine of input element-wise.
+    Computes arcsine of input tensors element-wise.
 
     Inputs:
         - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
@@ -3323,7 +3323,7 @@ class Asin(PrimitiveWithInfer):
 
 class NMSWithMask(PrimitiveWithInfer):
     """
-    Select some bounding boxes in descending order of score.
+    Selects some bounding boxes in descending order of score.
 
     Args:
         iou_threshold (float): Specifies the threshold of overlap boxes with respect to
@@ -3426,7 +3426,7 @@ class Abs(PrimitiveWithInfer):
 
 class Sign(PrimitiveWithInfer):
     r"""
-    Perform :math:`sign` on tensor element-wise.
+    Performs sign on the tensor element-wise.
 
     Note:
         .. math::
@@ -3633,7 +3633,7 @@ class Atan2(_MathBinaryOp):
 
 class SquareSumAll(PrimitiveWithInfer):
     """
-    Returns square sum all of a tensor element-wise
+    Returns the square sum of a tensor element-wise
 
     Inputs:
         - **input_x1** (Tensor) - The input tensor. The data type must be float16 or float32.
@@ -3902,7 +3902,7 @@ class Invert(PrimitiveWithInfer):
 
 class Eps(PrimitiveWithInfer):
     """
-    Creates a tensor filled with `input_x` dtype minimum val.
+    Creates a tensor filled with `input_x` dtype minimum value.
 
     Inputs:
         - **input_x** (Tensor) - Input tensor. The data type must be float16 or float32.
diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py
index df44dbdb570..82a409196d2 100644
--- a/mindspore/ops/operations/nn_ops.py
+++ b/mindspore/ops/operations/nn_ops.py
@@ -174,7 +174,7 @@ class LogSoftmax(PrimitiveWithInfer):
     the Log Softmax function is shown as follows:
 
     .. math::
-        \text{output}(x_i) = \log \left(\frac{exp(x_i)} {\sum_{j = 0}^{N-1}\exp(x_j)}\right),
+        \text{output}(x_i) = \log \left(\frac{\exp(x_i)} {\sum_{j = 0}^{N-1}\exp(x_j)}\right),
 
     where :math:`N` is the length of the Tensor.
 
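The LogSoftmax formula corrected above is just the log of the softmax; a direct NumPy transcription (with the usual max subtraction, which cancels inside the expression and only improves numerical stability; a sketch, not the operator's kernel):

import numpy as np

def log_softmax(x, axis=-1):
    # Shifting by the max leaves log(exp(x_i) / sum exp(x_j)) unchanged.
    x = x - x.max(axis=axis, keepdims=True)
    return x - np.log(np.exp(x).sum(axis=axis, keepdims=True))

logits = np.array([1.0, 2.0, 3.0])
print(log_softmax(logits))          # [-2.40760596 -1.40760596 -0.40760596]
print(np.exp(log_softmax(logits)))  # the softmax: [0.09003057 0.24472847 0.66524096]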
@@ -293,7 +293,7 @@ class Softsign(PrimitiveWithInfer):
 
 class ReLU(PrimitiveWithInfer):
     r"""
-    Computes ReLU(Rectified Linear Unit) of input tensor element-wise.
+    Computes ReLU (Rectified Linear Unit) of input tensors element-wise.
 
     It returns :math:`\max(x,\ 0)` element-wise.
 
@@ -330,7 +330,7 @@ class ReLU(PrimitiveWithInfer):
 
 class ReLU6(PrimitiveWithInfer):
     r"""
-    Computes ReLU(Rectified Linear Unit) upper bounded by 6 of input tensor element-wise.
+    Computes ReLU (Rectified Linear Unit) upper bounded by 6 of input tensors element-wise.
 
     It returns :math:`\min(\max(0,x), 6)` element-wise.
 
@@ -367,7 +367,7 @@ class ReLU6(PrimitiveWithInfer):
 
 class ReLUV2(PrimitiveWithInfer):
     r"""
-    Computes ReLU(Rectified Linear Unit) of input tensor element-wise.
+    Computes ReLU (Rectified Linear Unit) of input tensors element-wise.
 
     It returns :math:`\max(x,\ 0)` element-wise.
 
@@ -435,7 +435,18 @@ class ReLUV2(PrimitiveWithInfer):
 
 class Elu(PrimitiveWithInfer):
     r"""
-    Computes exponential linear: `alpha * (exp(x) - 1)` if x < 0, `x` otherwise.
+    Computes exponential linear:
+
+    if x < 0:
+
+    .. math::
+        \text{x} = \alpha * (\exp(\text{x}) - 1)
+
+    if x >= 0:
+
+    .. math::
+        \text{x} = \text{x}
+
     The data type of input tensor must be float.
 
     Args:
@@ -523,7 +534,7 @@ class Sigmoid(PrimitiveWithInfer):
     Computes Sigmoid of input element-wise. The Sigmoid function is defined as:
 
     .. math::
-        \text{sigmoid}(x_i) = \frac{1}{1 + exp(-x_i)},
+        \text{sigmoid}(x_i) = \frac{1}{1 + \exp(-x_i)},
 
     where :math:`x_i` is the element of the input.
 
@@ -640,7 +651,7 @@ class Tanh(PrimitiveWithInfer):
 
 class FusedBatchNorm(Primitive):
     r"""
-    FusedBatchNorm is a BatchNorm that moving mean and moving variance will be computed instead of being loaded.
+    FusedBatchNorm is a BatchNorm. Moving mean and moving variance will be computed instead of being loaded.
 
     Batch Normalization is widely used in convolutional networks. This operation applies
     Batch Normalization over input to avoid internal covariate shift as described in the
@@ -848,7 +859,7 @@ class FusedBatchNormEx(PrimitiveWithInfer):
 
 class BNTrainingReduce(PrimitiveWithInfer):
     """
-    For BatchNorm operator, this operator update the moving averages for training and is used in conjunction with
+    For the BatchNorm operation, this operator updates the moving averages for training and is used in conjunction with
     BNTrainingUpdate.
 
     Inputs:
@@ -885,7 +896,7 @@ class BNTrainingUpdate(PrimitiveWithInfer):
     """
-    For BatchNorm operator, this operator update the moving averages for training and is used in conjunction with
+    For the BatchNorm operation, this operator updates the moving averages for training and is used in conjunction with
     BNTrainingReduce.
 
     Args:
@@ -1508,7 +1519,7 @@ class MaxPool(_Pool):
 
 class MaxPoolWithArgmax(_Pool):
     r"""
-    Perform max pooling on the input Tensor and return both max values and indices.
+    Performs max pooling on the input Tensor and returns both max values and indices.
 
     Typically the input is of shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})`, MaxPool outputs
     regional maximum in the :math:`(H_{in}, W_{in})`-dimension. Given kernel size
@@ -1915,7 +1926,7 @@ class SoftmaxCrossEntropyWithLogits(PrimitiveWithInfer):
     Sets input logits as `X`, input label as `Y`, output as `loss`. Then,
 
     .. math::
-        p_{ij} = softmax(X_{ij}) = \frac{exp(x_i)}{\sum_{j = 0}^{N-1}\exp(x_j)}
+        p_{ij} = softmax(X_{ij}) = \frac{\exp(x_i)}{\sum_{j = 0}^{N-1}\exp(x_j)}
 
     .. math::
         loss_{ij} = -\sum_j{Y_{ij} * ln(p_{ij})}
@@ -1966,7 +1977,7 @@ class SparseSoftmaxCrossEntropyWithLogits(PrimitiveWithInfer):
     Sets input logits as `X`, input label as `Y`, output as `loss`. Then,
 
     .. math::
-        p_{ij} = softmax(X_{ij}) = \frac{exp(x_i)}{\sum_{j = 0}^{N-1}\exp(x_j)}
+        p_{ij} = softmax(X_{ij}) = \frac{\exp(x_i)}{\sum_{j = 0}^{N-1}\exp(x_j)}
 
     .. math::
         loss_{ij} = \begin{cases} -ln(p_{ij}), &j = y_i \cr -ln(1 - p_{ij}), & j \neq y_i \end{cases}
@@ -2283,7 +2294,7 @@ class RNNTLoss(PrimitiveWithInfer):
 
 class SGD(PrimitiveWithCheck):
     """
-    Computes stochastic gradient descent (optionally with momentum).
+    Computes the stochastic gradient descent. Momentum is optional.
 
     Nesterov momentum is based on the formula from On the importance of
     initialization and momentum in deep learning.
@@ -2775,7 +2786,7 @@ class DropoutDoMask(PrimitiveWithInfer):
 
 class ResizeBilinear(PrimitiveWithInfer):
     r"""
-    Resizes the image to certain size using bilinear interpolation.
+    Resizes an image to a certain size using the bilinear interpolation.
 
     The resizing only affects the lower two dimensions which represent the height and width. The input images
     can be represented by different data types, but the data types of output images are always float32.
@@ -3067,7 +3078,7 @@ class PReLU(PrimitiveWithInfer):
 
 class LSTM(PrimitiveWithInfer):
     """
-    Performs the long short term memory(LSTM) on the input.
+    Performs the Long Short-Term Memory (LSTM) on the input.
 
     For detailed information, please refer to `nn.LSTM`.
 
@@ -3227,7 +3238,7 @@ class SigmoidCrossEntropyWithLogits(PrimitiveWithInfer):
 
 class Pad(PrimitiveWithInfer):
     """
-    Pads input tensor according to the paddings.
+    Pads the input tensor according to the paddings.
 
     Args:
         paddings (tuple): The shape of parameter `paddings` is (N, 2). N is the rank of input data. All elements of
@@ -3367,7 +3378,7 @@ class MirrorPad(PrimitiveWithInfer):
 
 class ROIAlign(PrimitiveWithInfer):
     """
-    Computes Region of Interest (RoI) Align operator.
+    Computes the Region of Interest (RoI) Align operator.
 
     The operator computes the value of each sampling point by bilinear interpolation from the nearby grid points on the
     feature map. No quantization is performed on any coordinates involved in the RoI, its bins, or the sampling
@@ -3435,7 +3446,7 @@ class ROIAlign(PrimitiveWithInfer):
 
 class Adam(PrimitiveWithInfer):
     r"""
-    Updates gradients by Adaptive Moment Estimation (Adam) algorithm.
+    Updates gradients by the Adaptive Moment Estimation (Adam) algorithm.
 
     The Adam algorithm is proposed in `Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_.
 
@@ -3643,7 +3654,7 @@ class AdamNoUpdateParam(PrimitiveWithInfer):
 
 class FusedSparseAdam(PrimitiveWithInfer):
     r"""
-    Merges the duplicate value of the gradient and then updates parameters by Adaptive Moment Estimation (Adam)
+    Merges the duplicate value of the gradient and then updates parameters by the Adaptive Moment Estimation (Adam)
     algorithm. This operator is used when the gradient is sparse.
 
     The Adam algorithm is proposed in `Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_.
 
@@ -3780,7 +3791,7 @@ class FusedSparseAdam(PrimitiveWithInfer):
 
 class FusedSparseLazyAdam(PrimitiveWithInfer):
     r"""
-    Merges the duplicate value of the gradient and then updates parameters by Adaptive Moment Estimation (Adam)
+    Merges the duplicate value of the gradient and then updates parameters by the Adaptive Moment Estimation (LazyAdam)
     algorithm. This operator is used when the gradient is sparse.
 
     The behavior is not equivalent to the original Adam algorithm, as only the current indices parameters will be
     updated.
@@ -4815,7 +4826,7 @@ class SparseApplyAdagrad(PrimitiveWithInfer):
 
 class SparseApplyAdagradV2(PrimitiveWithInfer):
     r"""
-    Updates relevant entries according to the adagrad scheme.
+    Updates relevant entries according to the adagrad scheme, with one more `epsilon` attribute than SparseApplyAdagrad.
 
     .. math::
         accum += grad * grad
@@ -5357,7 +5368,7 @@ class ApplyPowerSign(PrimitiveWithInfer):
 
 class ApplyGradientDescent(PrimitiveWithInfer):
     r"""
-    Updates relevant entries according to the following formula.
+    Updates relevant entries according to the following.
 
     .. math::
         var = var - \alpha * \delta
@@ -5521,7 +5532,7 @@ class ApplyProximalGradientDescent(PrimitiveWithInfer):
 
 class LARSUpdate(PrimitiveWithInfer):
     """
-    Conducts lars (layer-wise adaptive rate scaling) update on the sum of squares of gradient.
+    Conducts LARS (layer-wise adaptive rate scaling) update on the sum of squares of gradient.
 
     Args:
         epsilon (float): Term added to the denominator to improve numerical stability. Default: 1e-05.
@@ -5800,7 +5811,8 @@ class SparseApplyFtrl(PrimitiveWithCheck):
 
 class SparseApplyFtrlV2(PrimitiveWithInfer):
     """
-    Updates relevant entries according to the FTRL-proximal scheme.
+    Updates relevant entries according to the FTRL-proximal scheme. This class has one more attribute, named
+    l2_shrinkage, than class SparseApplyFtrl.
 
     All of inputs except `indices` comply with the implicit type conversion rules to make the data types consistent.
     If they have different data types, lower priority data type will be converted to
@@ -6362,7 +6374,7 @@ class DynamicRNN(PrimitiveWithInfer):
 
 class InTopK(PrimitiveWithInfer):
     r"""
-    Whether the targets are in the top `k` predictions.
+    Determines whether the targets are in the top `k` predictions.
 
     Args:
         k (int): Specifies the number of top elements to be used for computing precision.
diff --git a/mindspore/ops/primitive.py b/mindspore/ops/primitive.py
index a30600f53b5..b06299f498a 100644
--- a/mindspore/ops/primitive.py
+++ b/mindspore/ops/primitive.py
@@ -287,7 +287,7 @@ class PrimitiveWithCheck(Primitive):
 
 class PrimitiveWithInfer(Primitive):
     """
-    PrimitiveWithInfer is the base class of primitives in python defines functions for tracking inference in python.
+    PrimitiveWithInfer is the base class of primitives in python and defines functions for tracking inference in python.
 
     There are four method can be overide to define the infer logic of the primitive: __infer__(), infer_shape(),
     infer_dtype(), and infer_value(). If __infer__() is defined in primitive, the __infer__() has highest priority
@@ -464,8 +464,8 @@ def prim_attr_register(fn):
 
 def constexpr(fn=None, get_instance=True, name=None):
     """
-    Make a PrimitiveWithInfer operator that can infer the value at compile time. We can use it to define a function to
-    compute constant value using the constants in the constructor.
+    Creates a PrimitiveWithInfer operator that can infer the value at compile time. We can use it to define a function
+    to compute constant value using the constants in the constructor.
 
     Args:
         fn (function): A `fn` use as the infer_value of the output operator.
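The constexpr change above reads best next to a use site. A minimal sketch of the documented pattern (the helper name is hypothetical; the import path is assumed to be mindspore.ops.primitive):

from mindspore.ops.primitive import constexpr

@constexpr
def half_of_last_dim(shape):
    # Runs while the graph is being compiled, because `shape` arrives
    # here as a Python constant rather than as a graph node.
    return shape[-1] // 2

Inside a Cell's construct, a call such as half_of_last_dim(x.shape) then folds to a compile-time constant for statically shaped inputs.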
diff --git a/mindspore/ops/vm_impl_registry.py b/mindspore/ops/vm_impl_registry.py
index 3265d539b21..b8b00be1616 100644
--- a/mindspore/ops/vm_impl_registry.py
+++ b/mindspore/ops/vm_impl_registry.py
@@ -37,7 +37,7 @@ Examples:
 
 def get_vm_impl_fn(prim):
     """
-    Get the virtual implementation function by a primitive object or primitive name.
+    Gets the virtual implementation function by a primitive object or primitive name.
 
     Args:
         prim (Union[Primitive, str]): primitive object or name for operator register.
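To close the loop on this last hunk, a hedged sketch of a lookup against the registry (assuming a vm implementation for ReLU was registered elsewhere; whether a missing entry returns None or raises is an assumption here):

from mindspore.ops import operations as P
from mindspore.ops.vm_impl_registry import get_vm_impl_fn

# The docstring says the argument may be a Primitive instance or a name string.
fn = get_vm_impl_fn(P.ReLU())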