update BroadcastTo doc
This commit is contained in:
parent
52ddf7b4a5
commit
fd0f955be4
|
@ -3,9 +3,25 @@ mindspore.ops.broadcast_to
|
|||
|
||||
.. py:function:: mindspore.ops.broadcast_to(x, shape)
|
||||
|
||||
将输入shape广播到目标shape。如果目标shape中有-1的维度,它将被该维度中的输入shape的值替换。
|
||||
将输入shape广播到目标shape。输入shape维度必须小于等于目标shape维度,设输入shape为 :math:`(x_1, x_2, ..., x_m)`,目标shape为 :math:`(*, y_1, y_2, ..., y_m)`,其中 :math:`*` 为任意额外的维度。广播规则如下:
|
||||
|
||||
当输入shape广播到目标shape时,它从最后一个维度开始。如果目标shape中有-1维度,则-1维度不能位于一个不存在的维度中。
|
||||
依次比较 `x_m` 与 `y_m` 、 `x_{m-1}` 与 `y_{m-1}` 、...、 `x_1` 与 `y_1` 的值确定是否可以广播以及广播后输出shape对应维的值。
|
||||
|
||||
- 如果相等,则这个值即为目标shape该维的值。比如说输入shape为 :math:`(2, 3)` ,目标shape为 :math:`(2, 3)` ,则输出shape为 :math:`(2, 3)`。
|
||||
|
||||
- 如果不相等,分以下三种情况:
|
||||
|
||||
- 情况一:如果目标shape该维的值为-1, 则输出shape该维的值为对应输入shape该维的值。比如说输入shape为 :math:`(3, 3)` ,目标shape为 :math:`(-1, 3)` ,则输出shape为 :math:`(3, 3)` ;
|
||||
|
||||
- 情况二:如果目标shape该维的值不为-1,但是输入shape该维的值为1,则输出shape该维的值为目标shape该维的值。比如说输入shape为 :math:`(1, 3)` ,目标shape为 :math:`(8, 3)` ,则输出shape为 :math:`(8, 3)` ;
|
||||
|
||||
- 情况三:如果两个shape对应值不满足以上情况则说明不支持由输入shape广播到目标shape。
|
||||
|
||||
至此输出shape后面m维就确定好了,现在看一下前面 :math:`*` 维,有以下两种情况:
|
||||
|
||||
- 如果额外的 :math:`*` 维中不含有-1,则输入shape从低维度补充维度使之与目标shape维度一致,比如说目标shape为 :math:`(3, 1, 4, 1, 5, 9)` ,输入shape为 :math:`(1, 5, 9)` ,则输入shape增维变成 :math:`(1, 1, 1, 1, 5, 9)`,根据上面提到的情况二可以得出输出shape为 :math:`(3, 1, 4, 1, 5, 9)`;
|
||||
|
||||
- 如果额外的 :math:`*` 维中含有-1,说明此时该-1对应一个不存在的维度,不支持广播。比如说目标shape为 :math:`(3, -1, 4, 1, 5, 9)` ,输入shape为 :math:`(1, 5, 9)` ,此时不进行增维处理,而是直接报错。
|
||||
|
||||
参数:
|
||||
- **x** (Tensor) - 第一个输入,任意维度的Tensor,数据类型为float16、float32、int32、int8、uint8、bool。
|
||||
|
|
|
@ -1008,7 +1008,7 @@ class Tensor(Tensor_):
|
|||
Tensor, has the same type as the `x`.
|
||||
|
||||
Supported Platforms:
|
||||
``Ascend`` ``CPU``
|
||||
``Ascend`` ``GPU`` ``CPU``
|
||||
|
||||
Examples:
|
||||
>>> from mindspore import Tensor
|
||||
|
@ -1035,7 +1035,7 @@ class Tensor(Tensor_):
|
|||
Tensor, has the same type as the `x`.
|
||||
|
||||
Supported Platforms:
|
||||
``Ascend`` ``CPU``
|
||||
``Ascend`` ``GPU`` ``CPU``
|
||||
|
||||
Examples:
|
||||
>>> from mindspore import Tensor
|
||||
|
@ -1062,7 +1062,7 @@ class Tensor(Tensor_):
|
|||
Tensor, has the same type as the `x`.
|
||||
|
||||
Supported Platforms:
|
||||
``Ascend`` ``CPU``
|
||||
``Ascend`` ``GPU`` ``CPU``
|
||||
|
||||
Examples:
|
||||
>>> from mindspore import Tensor
|
||||
|
@ -1160,7 +1160,7 @@ class Tensor(Tensor_):
|
|||
ValueError: If length of shape of self tensor is less than the last dimension of shape of `indices`.
|
||||
|
||||
Supported Platforms:
|
||||
``GPU`` ``CPU``
|
||||
``GPU`` ``CPU``
|
||||
|
||||
Examples:
|
||||
>>> import numpy as np
|
||||
|
@ -1709,7 +1709,7 @@ class Tensor(Tensor_):
|
|||
ValueError: If `weight` could not be broadcast to tensor with shapes of `end` when it is a tensor.
|
||||
|
||||
Supported Platforms:
|
||||
``Ascend`` ``CPU``
|
||||
``Ascend`` ``GPU`` ``CPU``
|
||||
|
||||
Examples:
|
||||
>>> start = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
|
||||
|
@ -1824,7 +1824,7 @@ class Tensor(Tensor_):
|
|||
but data type conversion of Parameter is not supported.
|
||||
|
||||
Supported Platforms:
|
||||
``Ascend`` ``CPU``
|
||||
``Ascend`` ``GPU`` ``CPU``
|
||||
|
||||
Examples:
|
||||
>>> from mindspore.common import dtype as mstype
|
||||
|
@ -3195,7 +3195,7 @@ class Tensor(Tensor_):
|
|||
ValueError: the Tensor shape is different from that of v.
|
||||
|
||||
Supported Platforms:
|
||||
``Ascend`` ``CPU``
|
||||
``Ascend`` ``GPU`` ``CPU``
|
||||
|
||||
Examples:
|
||||
>>> import numpy as np
|
||||
|
|
|
@ -141,7 +141,7 @@ class Adadelta(Optimizer):
|
|||
ValueError: If `learning_rate`, `epsilon` or `weight_decay` is less than 0.
|
||||
|
||||
Supported Platforms:
|
||||
``Ascend`` ``CPU``
|
||||
``Ascend`` ``GPU`` ``CPU``
|
||||
|
||||
Examples:
|
||||
>>> from mindspore import nn, Model
|
||||
|
|
|
@ -155,7 +155,7 @@ class AdaMax(Optimizer):
|
|||
ValueError: If `weight_decay` is less than 0.
|
||||
|
||||
Supported Platforms:
|
||||
``Ascend`` ``CPU``
|
||||
``Ascend`` ``GPU`` ``CPU``
|
||||
|
||||
Examples:
|
||||
>>> import mindspore as ms
|
||||
|
|
|
@ -158,7 +158,7 @@ class ProximalAdagrad(Optimizer):
|
|||
ValueError: If `accum`, `l1`, `l2` or `weight_decay` is less than 0.
|
||||
|
||||
Supported Platforms:
|
||||
``Ascend``
|
||||
``Ascend`` ``GPU``
|
||||
|
||||
Examples:
|
||||
>>> import mindspore as ms
|
||||
|
|
|
@ -3436,13 +3436,39 @@ def affine_grid(theta, output_size, align_corners=False):
|
|||
|
||||
def broadcast_to(x, shape):
|
||||
"""
|
||||
Broadcasts input tensor to a given shape.
|
||||
Input shape can be broadcast to target shape if for each dimension pair they are either equal or input is one or
|
||||
the target dimension is -1. In case of -1 in target shape, it will be replaced by the input shape's value
|
||||
in that dimension.
|
||||
When input shape is broadcast to target shape, it starts with the trailing
|
||||
dimensions. If there is a -1 in the target shape, the -1 cannot be in a leading,
|
||||
non-existing dimension.
|
||||
Broadcasts input tensor to a given shape. The dim of input shape must be smaller
|
||||
than or equal to that of target shape, suppose input shape :math:`(x_1, x_2, ..., x_m)`,
|
||||
target shape :math:`(*, y_1, y_2, ..., y_m)`. The broadcast rules are as follows:
|
||||
|
||||
Compare the value of `x_m` and `y_m`, `x_{m-1}` and `y_{m-1}`, ..., `x_1` and `y_1` consecutively and
|
||||
decide whether these shapes are broadcastable and what the broadcast result is.
|
||||
|
||||
If the value pairs at a specific dim are equal, then that value goes right into that dim of output shape.
|
||||
With an input shape :math:`(2, 3)`, target shape :math:`(2, 3)`, the inferred output shape is :math:`(2, 3)`.
|
||||
|
||||
If the value pairs are unequal, there are three cases:
|
||||
|
||||
Case 1: Value of target shape is -1, then the value of the output shape is that of the input shape.
|
||||
With an input shape :math:`(3, 3)`, target shape :math:`(-1, 3)`, the output shape is :math:`(3, 3)`.
|
||||
|
||||
Case 2: Value of target shape is not -1 but the value of the input shape is 1, then the value of the output shape
|
||||
is that of the target shape. With an input shape :math:`(1, 3)`, target
|
||||
shape :math:`(8, 3)`, the output shape is :math:`(8, 3)`.
|
||||
|
||||
Case 3: All other cases mean that the two shapes are not broadcastable.
|
||||
|
||||
So far we got the last m dims of the output shape, now focus on the first :math:`*` dims, there are
|
||||
two cases:
|
||||
|
||||
If the first :math:`*` dims of output shape does not have -1 in it, then fill the input
|
||||
shape with ones until their length are the same, and then refer to
|
||||
Case 2 mentioned above to calculate the output shape. With target shape :math:`(3, 1, 4, 1, 5, 9)`,
|
||||
input shape :math:`(1, 5, 9)`, the filled input shape will be :math:`(1, 1, 1, 1, 5, 9)` and thus the
|
||||
output shape is :math:`(3, 1, 4, 1, 5, 9)`.
|
||||
|
||||
If the first :math:`*` dims of output shape have -1 in it, it implies this -1 corresponds to
|
||||
a non-existing dim so they're not broadcastable. With target shape :math:`(3, -1, 4, 1, 5, 9)`,
|
||||
input shape :math:`(1, 5, 9)`, instead of operating the dim-filling process first, it raises errors directly.
|
||||
|
||||
Args:
|
||||
x (Tensor): The input tensor.
|
||||
|
|
|
@ -5656,7 +5656,7 @@ class ApplyAdaMax(Primitive):
|
|||
RuntimeError: If the data type of `var`, `m`, `v` and `grad` conversion of Parameter is not supported.
|
||||
|
||||
Supported Platforms:
|
||||
``Ascend`` ``GPU``
|
||||
``Ascend`` ``GPU`` ``CPU``
|
||||
|
||||
Examples:
|
||||
>>> class Net(nn.Cell):
|
||||
|
|
Loading…
Reference in New Issue