forked from mindspore-Ecosystem/mindspore
!38078 updates op supported platform
Merge pull request !38078 from 李林杰/code_docs_0714_update_op_supported_platfoorms_master
This commit is contained in:
commit
56e763be39
|
@@ -2028,7 +2028,7 @@ def scatter_nd_min(input_x, indices, updates, use_locking=False):
         is required when data type conversion of Parameter is not supported.

    Supported Platforms:
-        ``GPU``
+        ``GPU`` ``CPU``

    Examples:
        >>> input_x = Parameter(Tensor(np.ones(8) * 10, mindspore.float32), name="x")
@@ -5194,7 +5194,7 @@ class ScatterNdMin(_ScatterNdOp):
    Refer to :func:`mindspore.ops.scatter_nd_min` for more details.

    Supported Platforms:
-        ``GPU``
+        ``GPU`` ``CPU``

    Examples:
        >>> input_x = Parameter(Tensor(np.ones(8) * 10, mindspore.float32), name="x")
@@ -5513,7 +5513,7 @@ class Real(Primitive):
        TypeError: If the input is not a Tensor.

    Supported Platforms:
-        ``Ascend`` ``CPU`` ``GPU``
+        ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.asarray(np.complex(1.3+0.4j)), mindspore.complex64)
@@ -5577,7 +5577,7 @@ class Imag(Primitive):
        TypeError: If the input is not a Tensor.

    Supported Platforms:
-        ``Ascend`` ``CPU`` ``GPU``
+        ``GPU`` ``CPU``

    Examples:
        >>> x = Tensor(np.asarray(np.complex(1.3+0.4j)), mindspore.complex64)
@@ -4926,7 +4926,7 @@ class AdamWeightDecay(PrimitiveWithInfer):
        - **v** (Tensor) - The same shape and data type as `v`.

    Supported Platforms:
-        ``GPU`` ``CPU``
+        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import numpy as np
@@ -1019,7 +1019,7 @@ class Model:
            >>> loss_scale_manager = ms.FixedLossScaleManager()
            >>> optim = nn.Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
            >>> model = ms.Model(net, loss_fn=loss, optimizer=optim, metrics=None,
-                                loss_scale_manager=loss_scale_manager)
+            ...                  loss_scale_manager=loss_scale_manager)
            >>> model.train(2, dataset)
        """
        Validator.check_bool(dataset_sink_mode)
@@ -1565,7 +1565,7 @@ class Model:
            >>> loss_scale_manager = ms.FixedLossScaleManager()
            >>> optim = nn.Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
            >>> model = ms.Model(net, loss_fn=loss, optimizer=optim, metrics=None,
-                                loss_scale_manager=loss_scale_manager)
+            ...                  loss_scale_manager=loss_scale_manager)
            >>> layout_dict = model.infer_train_layout(dataset)
        """
        self._infer_train_check(train_dataset, dataset_sink_mode, sink_size)
Loading…
Reference in New Issue