!39030 [MS][DOC] fix doc bugs
Merge pull request !39030 from mengyuanli/code_docs_bug_fix
commit 12c19dd0c0
@@ -2529,9 +2529,10 @@ def tensor_scatter_elements(input_x, indices, updates, axis=0, reduction="none")
     output[i][j][indices[i][j][k]] = updates[i][j][k] # if axis == 2, reduction == "none"
 
     .. warning::
-        The order in which updates are applied is nondeterministic, meaning that if there
-        are multiple index vectors in `indices` that correspond to the same position, the
-        value of that position in the output will be nondeterministic.
+        - The order in which updates are applied is nondeterministic, meaning that if there are multiple index vectors
+          in `indices` that correspond to the same position, the value of that position in the output will be
+          nondeterministic.
+        - On Ascend, `reduction` can only be set to "none" for now.
 
     .. note::
         If some values of the `indices` are out of bound, instead of raising an index error,
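To make the warning above concrete, here is a minimal NumPy sketch (hypothetical illustration code, not the MindSpore kernel) of why duplicate index vectors make reduction == "none" ambiguous while "add" stays well defined:

    import numpy as np

    x = np.zeros(3, dtype=np.float32)
    idx = np.array([0, 0])                # two updates target the same position
    upd = np.array([1.0, 2.0], dtype=np.float32)

    # reduction == "none": one write must win, but the write order is
    # unspecified, so x[0] could legitimately end up as 1.0 or 2.0.
    x[idx] = upd                          # NumPy happens to keep the last value
    print(x)                              # [2. 0. 0.]

    # reduction == "add": duplicates accumulate, so order does not matter.
    y = np.zeros(3, dtype=np.float32)
    np.add.at(y, idx, upd)                # unbuffered in-place add
    print(y)                              # [3. 0. 0.]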
@@ -2562,12 +2563,13 @@ def tensor_scatter_elements(input_x, indices, updates, axis=0, reduction="none")
     ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
+        >>> from mindspore.ops import functional as F
         >>> input_x = Parameter(Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32), name="input_x")
         >>> indices = Tensor(np.array([[1, 0, 2], [0, 2, 1]]), mindspore.int32)
         >>> updates = Tensor(np.array([[1, 1, 1], [1, 1, 1]]), mindspore.float32)
         >>> axis = 0
         >>> reduction = "add"
-        >>> output = tensor_scatter_elements(input_x, indices, updates, axis, reduction)
+        >>> output = F.tensor_scatter_elements(input_x, indices, updates, axis, reduction)
         >>> print(output)
         [[ 2.0 3.0 3.0]
          [ 5.0 5.0 7.0]
@@ -2577,7 +2579,7 @@ def tensor_scatter_elements(input_x, indices, updates, axis=0, reduction="none")
         >>> updates = Tensor(np.array([[8, 8]]), mindspore.int32)
         >>> axis = 1
         >>> reduction = "none"
-        >>> output = tensor_scatter_elements(input_x, indices, updates, axis, reduction)
+        >>> output = F.tensor_scatter_elements(input_x, indices, updates, axis, reduction)
         >>> print(output)
         [[ 1 2 8 4 8]]
     """
@@ -1465,10 +1465,10 @@ def smooth_l1_loss(logits, labels, beta=1.0, reduction='none'):
     ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
-        >>> loss = ops.SmoothL1Loss()
+        >>> from mindspore.ops import functional as F
         >>> logits = Tensor(np.array([1, 2, 3]), mindspore.float32)
         >>> labels = Tensor(np.array([1, 2, 2]), mindspore.float32)
-        >>> output = loss(logits, labels)
+        >>> output = F.smooth_l1_loss(logits, labels)
         >>> print(output)
         [0. 0. 0.5]
     """