forked from mindspore-Ecosystem/mindspore
fix doc issue2
parent dc1304afee
commit 2172934177
@@ -40,8 +40,6 @@
-- For offline training, it is recommended to set the `BATCH_JOB_ID` environment variable for each training run.
-- In distributed training, if multiple nodes (servers) are used, all 8 cards on every node must be used. If there is only one node (server), there is no such restriction.
 
 .. include:: mindspore.dataset.Dataset.add_sampler.rst
 
 .. include:: mindspore.dataset.Dataset.rst
 
 .. include:: mindspore.dataset.Dataset.b.rst
@@ -50,6 +48,4 @@
 
 .. include:: mindspore.dataset.Dataset.d.rst
 
-.. include:: mindspore.dataset.Dataset.use_sampler.rst
-
 .. include:: mindspore.dataset.Dataset.zip.rst
@@ -9,7 +9,7 @@
 **Parameters:**
 
 - **file_name** (str) - Path of the MindRecord file to be generated.
-- **shard_num** (int, optional) - Number of MindRecord files to generate. Default: 1. The value range is [1, 1000].
+- **shard_num** (int, optional) - Number of MindRecord files to generate. The value range is [1, 1000]. Default: 1.
 - **overwrite** (bool, optional) - Whether to overwrite when a file with the same name exists in the specified directory. Default: False.
 
 **Raises:**
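These three parameters map directly onto the `FileWriter` constructor. A minimal sketch, assuming the public `mindspore.mindrecord.FileWriter` API (the file name and two-shard split below are illustrative, not from this commit):

```python
from mindspore.mindrecord import FileWriter

# Spread the records across 2 shard files (shard_num must lie in [1, 1000]);
# overwrite=True replaces any same-named files already in the directory.
writer = FileWriter(file_name="data.mindrecord", shard_num=2, overwrite=True)
```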
@@ -52,7 +52,7 @@
 **Parameters:**
 
 - **content** (dict) - Dictionary of the schema content.
-- **desc** (str, optional) - Description string of the schema. Default: None.
+- **desc** (str, optional) - Description of the schema. Default: None.
 
 **Returns:**
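The `content`/`desc` pair belongs to `FileWriter.add_schema`. A hedged sketch continuing the writer above (the field layout is hypothetical, not taken from this commit):

```python
# Register a schema before writing; the keys and types below are illustrative.
schema = {"file_name": {"type": "string"},
          "label": {"type": "int32"},
          "data": {"type": "bytes"}}
schema_id = writer.add_schema(schema, "example image/label schema")
```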
@@ -87,7 +87,7 @@
 
 .. py:method:: open_and_set_header()
 
-    Open the MindRecord file for writing and set the header. This function is only used for parallel writing, and is called before the `write_raw_data` function.
+    Open the MindRecord file for writing and set the header that describes its meta information. This function is only used for parallel writing, and is called before the `write_raw_data` function.
 
 **Returns:**
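A sketch of the parallel-writing call order this note prescribes; the sample record and the `parallel_writer` flag reflect my reading of the current `FileWriter` API rather than anything in this diff:

```python
writer.open_and_set_header()  # must come before write_raw_data in parallel mode
writer.write_raw_data([{"file_name": "001.jpg", "label": 0, "data": b"\xff\xd8"}],
                      parallel_writer=True)
writer.commit()
```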
@@ -5,7 +5,7 @@
 
 **Parameters:**
 
-- **source** (str) - Directory path that contains the dataset files t10k-images-idx3-ubyte.gz, train-images-idx3-ubyte.gz, t10k-labels-idx1-ubyte.gz and train-labels-idx1-ubyte.gz.
+- **source** (str) - Path of the dataset directory, which contains the dataset files t10k-images-idx3-ubyte.gz, train-images-idx3-ubyte.gz, t10k-labels-idx1-ubyte.gz and train-labels-idx1-ubyte.gz.
 - **destination** (str) - Path of the MindRecord file to be generated. The directory must be created in advance and must not contain a file with the same name.
 - **partition_number** (int, optional) - Number of MindRecord files to generate. Default: 1.
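The four `*.gz` files are the raw MNIST archives, so this page presumably documents the MNIST-to-MindRecord converter. A sketch assuming `mindspore.mindrecord.MnistToMR` (the class name is my inference; paths are illustrative):

```python
from mindspore.mindrecord import MnistToMR

# ./mnist_data must already hold the four .gz files listed above;
# ./output must exist and contain no file named mnist.mindrecord.
transformer = MnistToMR(source="./mnist_data",
                        destination="./output/mnist.mindrecord",
                        partition_number=1)
transformer.transform()
```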
@@ -421,20 +421,8 @@ class OBSMindDataset(GeneratorDataset):
 
     def add_sampler(self, new_sampler):
         """
         Add a sampler for current dataset, it's not supported for OBSMindDataset.
 
         Args:
             new_sampler (Sampler): The sampler to be added as the parent sampler for current dataset.
         """
         raise NotImplementedError("add_sampler is not supported for OBSMindDataset.")
 
-    def use_sampler(self, new_sampler):
-        """
-        Make the current dataset use the new_sampler provided by other API, it's not supported for OBSMindDataset.
-
-        Args:
-            new_sampler (Sampler): The sampler to use for the current dataset.
-        """
-        raise NotImplementedError("use_sampler is not supported for OBSMindDataset.")
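The override fails fast rather than silently ignoring the sampler. A hedged sketch of the observable behavior (`dataset` stands for an already-constructed `OBSMindDataset`, which in practice needs a reachable OBS endpoint; `my_sampler` is any `mindspore.dataset` sampler):

```python
try:
    dataset.add_sampler(my_sampler)  # unsupported override raises immediately
except NotImplementedError as err:
    print(err)  # add_sampler is not supported for OBSMindDataset.
```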
@@ -135,7 +135,7 @@ def _convert_row(row):
     """
     if isinstance(row, dict):
         raise TypeError("Input data is expected to be " \
-                        "int, float, str, bytes, Numpy array, Tensor or list-or-tuple of them, but got dict.")
+                        "int, float, str, bytes, numpy.ndarray, Tensor or list/tuple of them, but got dict.")
 
     # convert single item to np.array
     prim_type = (int, float, str, bytes, np.ndarray, Tensor)
@@ -147,7 +147,7 @@ def _convert_row(row):
     else:
         item = np.array(row, copy=False)
         if item.dtype == 'object':
-            raise TypeError("Data type of the input or converted Numpy array is expected to be " \
+            raise TypeError("Data type of the input or its converted Numpy array is expected to be " \
                             "int or float or str, but got {}.".format(item.dtype))
     return tuple([item])
@@ -161,12 +161,12 @@ def _convert_row(row):
         elif isinstance(x, Tensor):  # mindspore.Tensor
             value.append(x.asnumpy())
         elif isinstance(x, dict):
-            raise TypeError("The {}st item of input data is expected to be " \
-                            "int, float, str, bytes, Numpy array, Tensor, but got dict.".format(idx))
+            raise TypeError("The {}th item of input data is expected to be " \
+                            "int, float, str, bytes, numpy.ndarray, Tensor, but got dict.".format(idx))
         else:
             item = np.array(x, copy=False)
             if item.dtype == 'object':
-                raise TypeError("Data type of {}st item of the input or converted Numpy array is expected to be " \
+                raise TypeError("Data type of {}th item of the input or its converted Numpy array is expected to be " \
                                 "int or float or str, but got {}.".format(idx, item.dtype))
             value.append(item)
     return tuple(value)
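Put together, `_convert_row` accepts int, float, str, bytes, `numpy.ndarray` and `Tensor` items (or lists/tuples of them) and rejects dicts and object-dtype arrays. A hedged sketch of both sides of that rule (generator names are illustrative):

```python
import numpy as np
import mindspore.dataset as ds

def gen_ok():
    for i in range(3):
        yield (np.array([i], dtype=np.int32),)  # ndarray items pass _convert_row

def gen_bad():
    yield ({"label": 1},)  # a dict item triggers the TypeError above

data = ds.GeneratorDataset(gen_ok, ["data"])
for row in data.create_dict_iterator(num_epochs=1, output_numpy=True):
    print(row["data"])
```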
@@ -47,7 +47,7 @@ class _Config:
             return self._convert_type(key)
         if key in self.config:
             return self.config[key]
-        raise RuntimeError("Variable {} can not found in configuration file or environment variables.".format(key))
+        raise RuntimeError("Variable {} can not be found in configuration file or environment variables.".format(key))
 
     def __setattr__(self, key, value):
         if key == 'config':
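The lookup order in `_Config.__getattr__` is: environment (via `_convert_type`), then the parsed configuration file, then a hard failure. A self-contained sketch of the same pattern (this `DemoConfig` is mine for illustration, not MindSpore's internal `_Config`):

```python
import os

class DemoConfig:
    """Attribute lookup: environment variable first, then config dict, else error."""
    def __init__(self, config):
        object.__setattr__(self, "config", config)

    def __getattr__(self, key):
        if key.upper() in os.environ:
            return os.environ[key.upper()]  # environment wins
        if key in self.config:
            return self.config[key]         # then the configuration file
        raise RuntimeError("Variable {} can not be found in configuration file "
                           "or environment variables.".format(key))

cfg = DemoConfig({"batch_size": 32})
print(cfg.batch_size)  # 32, unless BATCH_SIZE is set in the environment
```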
@@ -42,8 +42,8 @@ class FileWriter:
 
     Args:
         file_name (str): File name of MindRecord file.
-        shard_num (int, optional): The Number of MindRecord files. Default: 1.
-            It should be between [1, 1000].
+        shard_num (int, optional): The Number of MindRecord files.
+            It should be between [1, 1000]. Default: 1.
         overwrite (bool, optional): Whether to overwrite if the file already exists. Default: False.
 
     Raises:
@@ -273,7 +273,7 @@ class FileWriter:
 
     def open_and_set_header(self):
         """
-        Open writer and set header. The function is only used for parallel \
+        Open writer and set header which stores meta information. The function is only used for parallel \
         writing and is called before the `write_raw_data`.
 
         Returns:
@@ -641,7 +641,7 @@ def test_generator_error_1():
         data1 = ds.GeneratorDataset(generator_np, ["data"])
         for _ in data1:
             pass
-    assert "Data type of 1st item of the input or converted Numpy array is expected" in str(info.value)
+    assert "Data type of 1th item of the input or its converted Numpy array is expected" in str(info.value)
 
 
 def test_generator_error_2():
@@ -653,7 +653,7 @@ def test_generator_error_2():
         data1 = ds.GeneratorDataset(generator_np, ["data"])
         for _ in data1:
             pass
-    assert "Data type of 1st item of the input or converted Numpy array is expected" in str(info.value)
+    assert "Data type of 1th item of the input or its converted Numpy array is expected" in str(info.value)
 
 
 def test_generator_error_3():
@@ -1360,7 +1360,7 @@ def test_generator_single_input_6():
         data1 = ds.GeneratorDataset(data, ["data"], shuffle=False)
         for _ in data1.create_dict_iterator(num_epochs=1, output_numpy=True):  # each data is a dictionary
             pass
-        assert " Data type of the input or converted Numpy array is expected" in str(info.value)
+        assert " Data type of the input or its converted Numpy array is expected" in str(info.value)
 
     assert_generator_single_input_6(generator_nested_np)
     assert_generator_single_input_6(RandomAccessDatasetInner())
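In all three tests the `info` handle comes from a `pytest.raises` context manager just above the excerpted lines. A hedged, self-contained reconstruction of the pattern (the exception type is my assumption; the dataset pipeline may re-surface the `TypeError` wrapped in a `RuntimeError`):

```python
import pytest
import mindspore.dataset as ds

def test_generator_dict_item_rejected():
    def generator_np():
        yield ({"label": 1},)  # dict item: rejected by _convert_row

    with pytest.raises((TypeError, RuntimeError)) as info:
        data1 = ds.GeneratorDataset(generator_np, ["data"])
        for _ in data1:
            pass
    assert "but got dict" in str(info.value)
```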