forked from mindspore-Ecosystem/mindspore
modify format
This commit is contained in:
parent ce02868f99
commit 9b8949b565
@@ -1,7 +1,7 @@
 mindspore.nn.TripletMarginLoss
 ===============================

-.. py:class:: class TripletMarginLoss(p=2, swap=False, eps=1e-06, reduction='mean')
+.. py:class:: mindspore.nn.TripletMarginLoss(p=2, swap=False, eps=1e-06, reduction='mean')

 Performs the triplet margin loss operation.

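For orientation, the following NumPy sketch (not part of the diff) illustrates the quantity this class computes, max(d(a, p) - d(a, n) + margin, 0) with a p-norm distance and 'mean' reduction. It is only an illustration of the formula, not the MindSpore call path; the names anchor/positive/negative and margin=1.0 are assumptions.

import numpy as np

def triplet_margin_loss(anchor, positive, negative, margin=1.0, p=2, swap=False, eps=1e-06):
    # Pairwise p-norm distances between the embeddings (eps keeps the norm well-behaved near 0).
    d_pos = np.linalg.norm(anchor - positive + eps, ord=p, axis=-1)
    d_neg = np.linalg.norm(anchor - negative + eps, ord=p, axis=-1)
    if swap:
        # Distance swap: also consider the positive-to-negative distance and keep the smaller one.
        d_neg = np.minimum(d_neg, np.linalg.norm(positive - negative + eps, ord=p, axis=-1))
    # 'mean' reduction over the batch, matching reduction='mean'.
    return np.maximum(d_pos - d_neg + margin, 0.0).mean()

rng = np.random.default_rng(0)
anchor = rng.standard_normal((4, 8))
positive = anchor + 0.1 * rng.standard_normal((4, 8))
negative = rng.standard_normal((4, 8))
print(triplet_margin_loss(anchor, positive, negative))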
@@ -22,5 +22,5 @@ mindspore.ops.dropout1d
 Raises:
     - **TypeError** - `x` is not a Tensor.
     - **TypeError** - The data type of `p` is not float.
-    - **ValueError** - The value of `p` is not in the range `[0.0,1.0]`.
+    - **ValueError** - The value of `p` is not in the range `[0.0, 1.0]`.
     - **ValueError** - The dimension of `x` is not `2D` or `3D`.

@@ -590,7 +590,7 @@ class Dataset:
 >>> # and drops the last incomplete batch if there is one.
 >>> dataset = dataset.batch(100, True)
 >>>
->>> # 2)resize image according to its batch number, if it's 5-th batch, resize to (5^2, 5^2) = (25, 25)
+>>> # 2) resize image according to its batch number, if it's 5-th batch, resize to (5^2, 5^2) = (25, 25)
 >>> def np_resize(col, BatchInfo):
 ...     output = col.copy()
 ...     s = (BatchInfo.get_batch_num() + 1) ** 2

@@ -603,7 +603,7 @@ class Dataset:
 ...     return (output,)
 >>> dataset = dataset.batch(batch_size=8, input_columns=["image"], per_batch_map=np_resize)
 >>>
->>> # 3)Create a dataset where its batch size is dynamic
+>>> # 3) Create a dataset where its batch size is dynamic
 >>> # Define a callable batch size function and let batch size increase 1 each time.
 >>> def add_one(BatchInfo):
 ...     return BatchInfo.get_batch_num() + 1

@@ -654,7 +654,7 @@ class Dataset:
 >>> # and drops the last incomplete batch if there is one.
 >>> dataset = dataset.padded_batch(100, True)
 >>>
->>> # 3)Create a dataset where its batch size is dynamic
+>>> # 3) Create a dataset where its batch size is dynamic
 >>> # Define a callable batch size function and let batch size increase 1 each time.
 >>> def add_one(BatchInfo):
 ...     return BatchInfo.get_batch_num() + 1

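Putting the fragments from the batch()/padded_batch() hunks above together, a minimal sketch of a pipeline with a callable batch size and a per_batch_map might look as follows (not part of the diff). The synthetic generator, the column name "image", and the scale_by_batch transform are illustrative assumptions.

import numpy as np
import mindspore.dataset as ds

# Synthetic source: 20 rows, each a 4x4 float32 array in a single "image" column.
def gen():
    for i in range(20):
        yield (np.full((4, 4), i, dtype=np.float32),)

dataset = ds.GeneratorDataset(gen, column_names=["image"])

# Callable batch size, mirroring add_one above: batch numbers start at 0, so sizes grow 1, 2, 3, ...
def add_one(BatchInfo):
    return BatchInfo.get_batch_num() + 1

# per_batch_map receives one list of NumPy arrays per input column plus BatchInfo
# and must return a tuple with the transformed column(s).
def scale_by_batch(col, BatchInfo):
    factor = BatchInfo.get_batch_num() + 1
    return ([c * factor for c in col],)

dataset = dataset.batch(batch_size=add_one, input_columns=["image"],
                        per_batch_map=scale_by_batch)
for row in dataset.create_dict_iterator(output_numpy=True):
    print(row["image"].shape)   # (1, 4, 4), (2, 4, 4), (3, 4, 4), ...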
@@ -471,7 +471,7 @@ def ravel(x):
     Args:
         x (Tensor): A tensor to be flattened.

-    Outputs:
+    Returns:
         Tensor, a 1-D tensor, containing the same elements of the input.

     Raises:

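A short usage sketch of the documented behaviour, assuming this is the mindspore.numpy.ravel interface (the diff does not name the file, so that attribution is an assumption):

import mindspore.numpy as mnp

x = mnp.arange(6).reshape(2, 3)   # 2-D input tensor
y = mnp.ravel(x)                  # 1-D tensor containing the same elements
print(y.shape)                    # (6,)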
@@ -1172,7 +1172,6 @@ def dropout1d(x, p=0.5, training=True):

     Raises:
         TypeError: If `x` is not a Tensor.
-        TypeError: If dtype of `x` is not int8, int16, int32, int64, float16, float32 or float64.
         TypeError: If the data type of `p` is not float.
         ValueError: If `p` is out of the range `[0.0, 1.0]`.
         ValueError: If `x` shape is not `2D` or `3D`.
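A minimal usage sketch (not part of the diff) of the signature shown in the hunk header, dropout1d(x, p=0.5, training=True); the (N, C, L) input shape here is an assumption consistent with the 2D/3D constraint above.

import numpy as np
import mindspore
from mindspore import Tensor, ops

x = Tensor(np.ones((4, 3, 8)), mindspore.float32)   # 3-D input (N, C, L)
y = ops.dropout1d(x, p=0.5, training=True)          # randomly zeroes whole channels with probability p
print(y.shape)                                       # (4, 3, 8)
# Per the Raises section, p outside [0.0, 1.0] or a non-2D/3D input would raise ValueError.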