From d497ad8d849cc85243f4a43bb5cce7df83d5b275 Mon Sep 17 00:00:00 2001 From: luojianing Date: Thu, 12 Jan 2023 14:52:56 +0800 Subject: [PATCH] fix docs issues --- docs/api/api_python/nn/mindspore.nn.GELU.rst | 4 ++-- docs/api/api_python/ops/mindspore.ops.FFTWithSize.rst | 6 +++--- docs/api/api_python/ops/mindspore.ops.func_coo_sqrt.rst | 2 +- docs/api/api_python/ops/mindspore.ops.func_csr_sqrt.rst | 2 +- docs/api/api_python/ops/mindspore.ops.func_div.rst | 2 +- docs/api/api_python/ops/mindspore.ops.func_gelu.rst | 2 +- mindspore/python/mindspore/nn/layer/activation.py | 4 ++-- mindspore/python/mindspore/numpy/math_ops.py | 2 +- mindspore/python/mindspore/ops/function/nn_func.py | 2 +- mindspore/python/mindspore/ops/operations/math_ops.py | 2 +- 10 files changed, 14 insertions(+), 14 deletions(-) diff --git a/docs/api/api_python/nn/mindspore.nn.GELU.rst b/docs/api/api_python/nn/mindspore.nn.GELU.rst index 08a77f0c60f..1f882cd95ee 100644 --- a/docs/api/api_python/nn/mindspore.nn.GELU.rst +++ b/docs/api/api_python/nn/mindspore.nn.GELU.rst @@ -20,9 +20,9 @@ mindspore.nn.GELU 参数: - **approximate** (bool) - 是否启用approximation,默认值:True。如果approximate的值为True,则高斯误差线性激活函数为: - :math:`0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x^3)))` , + :math:`0.5 * x * (1 + tanh(\sqrt{2 / \pi} * (x + 0.044715 * x^3)))` , - 否则为: :math:`x * P(X <= x) = 0.5 * x * (1 + erf(x / sqrt(2)))`,其中P(X) ~ N(0, 1) 。 + 否则为: :math:`x * P(X <= x) = 0.5 * x * (1 + erf(x / \sqrt{2}))`,其中P(X) ~ N(0, 1) 。 输入: - **x** (Tensor) - 用于计算GELU的Tensor。数据类型为float16或float32。shape是 :math:`(N,*)` , :math:`*` 表示任意的附加维度数。 diff --git a/docs/api/api_python/ops/mindspore.ops.FFTWithSize.rst b/docs/api/api_python/ops/mindspore.ops.FFTWithSize.rst index 81ee763e591..a72b914c3f3 100644 --- a/docs/api/api_python/ops/mindspore.ops.FFTWithSize.rst +++ b/docs/api/api_python/ops/mindspore.ops.FFTWithSize.rst @@ -42,9 +42,9 @@ mindspore.ops.FFTWithSize - **norm** (str,可选) - 表示该操作的规范化方式,可选值:["backward", "forward", 
"ortho"]。默认值:"backward"。 - - "backward",正向变换不缩放,逆变换按 :math:`1/sqrt(n)` 缩放,其中 `n` 表示输入 `x` 的元素数量。。 - - "ortho",正向变换与逆变换均按 :math:`1/sqrt(n)` 缩放。 - - "forward",正向变换按 :math:`1/sqrt(n)` 缩放,逆变换不缩放。 + - "backward",正向变换不缩放,逆变换按 :math:`1/n` 缩放,其中 `n` 表示输入 `x` 的元素数量。 + - "ortho",正向变换与逆变换均按 :math:`1/\sqrt{n}` 缩放。 + - "forward",正向变换按 :math:`1/n` 缩放,逆变换不缩放。 - **onesided** (bool,可选) - 控制输入是否减半以避免冗余。默认值:True。 - **signal_sizes** (list,可选) - 原始信号的大小(RFFT变换之前的信号,不包含batch这一维),只有在IRFFT模式下和设置 `onesided=True` 时需要该参数。默认值: :math:`[]` 。 diff --git a/docs/api/api_python/ops/mindspore.ops.func_coo_sqrt.rst b/docs/api/api_python/ops/mindspore.ops.func_coo_sqrt.rst index 752e9236561..65b76a0d4e4 100755 --- a/docs/api/api_python/ops/mindspore.ops.func_coo_sqrt.rst +++ b/docs/api/api_python/ops/mindspore.ops.func_coo_sqrt.rst @@ -6,7 +6,7 @@ mindspore.ops.coo_sqrt 逐元素返回当前COOTensor的平方根。 .. math:: - out_{i} = \\sqrt{x_{i}} + out_{i} = \sqrt{x_{i}} 参数: - **x** (COOTensor) - 输入COOTensor,数据类型为number.Number,其rank需要在[0, 7]范围内. diff --git a/docs/api/api_python/ops/mindspore.ops.func_csr_sqrt.rst b/docs/api/api_python/ops/mindspore.ops.func_csr_sqrt.rst index c304ea3832c..ad16858a35e 100755 --- a/docs/api/api_python/ops/mindspore.ops.func_csr_sqrt.rst +++ b/docs/api/api_python/ops/mindspore.ops.func_csr_sqrt.rst @@ -6,7 +6,7 @@ mindspore.ops.csr_sqrt 逐元素返回当前CSRTensor的平方根。 .. math:: - out_{i} = \\sqrt{x_{i}} + out_{i} = \sqrt{x_{i}} 参数: - **x** (CSRTensor) - 输入CSRTensor,数据类型为number.Number,其rank需要在[0, 7]范围内. 
diff --git a/docs/api/api_python/ops/mindspore.ops.func_div.rst b/docs/api/api_python/ops/mindspore.ops.func_div.rst index 4cd4a8b39ea..3fee8ff9b31 100644 --- a/docs/api/api_python/ops/mindspore.ops.func_div.rst +++ b/docs/api/api_python/ops/mindspore.ops.func_div.rst @@ -20,7 +20,7 @@ mindspore.ops.div - **None**: 默认行为。相当于Python中的 `true division` 或NumPy中的 `true_divide` 。 - **"floor"**: 将除法的结果向下舍入。相当于Python中的 `floor division` 或NumPy中的 `floor_divide` 。 - - **"trunc"**: 将除法的结果舍入到零。相当于C型整数除法。 + - **"trunc"**: 将除法的结果舍入到零。相当于C语言风格的整数除法。 返回: Tensor,输出的shape与广播后的shape相同,数据类型取两个输入中精度较高或数字较高的。 diff --git a/docs/api/api_python/ops/mindspore.ops.func_gelu.rst b/docs/api/api_python/ops/mindspore.ops.func_gelu.rst index e40c77a3828..521d621f5d4 100644 --- a/docs/api/api_python/ops/mindspore.ops.func_gelu.rst +++ b/docs/api/api_python/ops/mindspore.ops.func_gelu.rst @@ -18,7 +18,7 @@ mindspore.ops.gelu 当 `approximate` 为 `tanh` ,GELU的定义如下: .. math:: - GELU(x_i) = 0.5 * x_i * (1 + tanh[\sqrt{\\frac{2}{pi}}(x + 0.044715 * x_{i}^{3})] ) + GELU(x_i) = 0.5 * x_i * (1 + tanh(\sqrt{2 / \pi} * (x_i + 0.044715 * x_i^3))) GELU相关图参见 `GELU `_ 。 diff --git a/mindspore/python/mindspore/nn/layer/activation.py b/mindspore/python/mindspore/nn/layer/activation.py index 6081e02cca4..290b412cdee 100644 --- a/mindspore/python/mindspore/nn/layer/activation.py +++ b/mindspore/python/mindspore/nn/layer/activation.py @@ -857,11 +857,11 @@ class GELU(Cell): If approximate is True, The gaussian error linear activation is: - :math:`0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x^3)))` + :math:`0.5 * x * (1 + tanh(\sqrt{2 / \pi} * (x + 0.044715 * x^3)))` else, it is: - :math:`x * P(X <= x) = 0.5 * x * (1 + erf(x / sqrt(2)))`, where P(X) ~ N(0, 1). + :math:`x * P(X <= x) = 0.5 * x * (1 + erf(x / \sqrt{2}))`, where P(X) ~ N(0, 1). Inputs: - **x** (Tensor) - The input of GELU with data type of float16 or float32. 
diff --git a/mindspore/python/mindspore/numpy/math_ops.py b/mindspore/python/mindspore/numpy/math_ops.py index 83738a8999e..d695cd21ec0 100644 --- a/mindspore/python/mindspore/numpy/math_ops.py +++ b/mindspore/python/mindspore/numpy/math_ops.py @@ -1915,7 +1915,7 @@ def diff(a, n=1, axis=-1, prepend=None, append=None): prepend/append (Tensor, optional): Values to prepend or append to a along `axis` prior to performing the difference. Scalar values are expanded to arrays with length 1 in the direction of `axis` and the shape of the input - array in along all other axes. Otherwise the dimension and shape must + array along all other axes. Otherwise the dimension and shape must match `a` except along axis. Default: `None`. Returns: diff --git a/mindspore/python/mindspore/ops/function/nn_func.py b/mindspore/python/mindspore/ops/function/nn_func.py index b4881748a5a..ad4a6a53fad 100644 --- a/mindspore/python/mindspore/ops/function/nn_func.py +++ b/mindspore/python/mindspore/ops/function/nn_func.py @@ -4671,7 +4671,7 @@ def gelu(input_x, approximate='none'): When `approximate` argument is `tanh`, GeLU is estimated with: .. math:: - GELU(x_i) = 0.5 * x_i * (1 + tanh[\sqrt{\\frac{2}{pi}}(x + 0.044715 * x_{i}^{3})] ) + GELU(x_i) = 0.5 * x_i * (1 + tanh(\sqrt{2 / \pi} * (x_i + 0.044715 * x_i^3))) Args: input_x (Tensor): The input of the activation function GeLU, the data type is float16, float32 or float64. diff --git a/mindspore/python/mindspore/ops/operations/math_ops.py b/mindspore/python/mindspore/ops/operations/math_ops.py index 1a5a580f25b..50ca578660a 100644 --- a/mindspore/python/mindspore/ops/operations/math_ops.py +++ b/mindspore/python/mindspore/ops/operations/math_ops.py @@ -7105,7 +7105,7 @@ class FFTWithSize(Primitive): - "backward" has the direct (forward) transforms unscaled and the inverse (backward) transforms scaled by 1/n, where n is the input x's element numbers. - - "ortho" has both direct and inverse transforms are scaled by 1/sqrt(n). 
+ - "ortho" has both direct and inverse transforms scaled by 1/\sqrt{n}. - "forward" has the direct transforms scaled by 1/n and the inverse transforms unscaled. onesided (bool, optional): Controls whether the input is halved to avoid redundancy. Default: True.