!44050 add pynative mode check for context mempool_block_size

Merge pull request !44050 from laiyongqiang/revert_mem_blk
i-robot 2022-10-18 03:05:23 +00:00 committed by Gitee
commit 9877a867ef
No known key found for this signature in database
GPG Key ID: 173E9B9CA92EEF8F
4 changed files with 9 additions and 2 deletions

View File

@@ -79,7 +79,7 @@ mindspore.set_context
- **device_target** (str) - The target device to run on, supporting 'Ascend', 'GPU' and 'CPU'. If this parameter is not set, the backend device corresponding to the installed MindSpore package is used.
- **max_device_memory** (str) - The maximum memory available to the device. The format is "xxGB". Default: "1024GB". The actually used memory size is the minimum of the device's available memory and the value of `max_device_memory`.
- **variable_memory_max_size** (str) - This parameter is deprecated and will be removed. Please use `max_device_memory` instead.
- **mempool_block_size** (str) - The block size of the device memory pool. The format is "xxGB". Default: "1GB". The minimum size is "1GB". The actually used memory pool block size is the minimum of the device's available memory and the value of `mempool_block_size`.
- **mempool_block_size** (str) - The block size of the device memory pool in PyNative mode. The format is "xxGB". Default: "1GB". The minimum size is "1GB". The actually used memory pool block size is the minimum of the device's available memory and the value of `mempool_block_size`.
- **op_timeout** (int) - The maximum execution time of an operator, in seconds. If the execution time exceeds this value, the system terminates the task. Default: 28.
- **save_graphs** (bool) - Whether to save the computational graph. Default: False. When `save_graphs` is set to True, `save_graphs_path` is used to set the storage path of the intermediate compilation graphs. By default, the graphs are saved in the current directory.
- **save_graphs_path** (str) - The path for saving the computational graph. Default: ".". If the specified directory does not exist, the system creates it automatically. In distributed training, the graphs are saved under the `save_graphs_path/rank_${rank_id}/` directory, where `rank_id` is the ID of the current device in the cluster.
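
For reference, a minimal usage sketch of the memory options documented above (the mode, device target and sizes below are illustrative choices, not values taken from this change):

from mindspore import context

# mempool_block_size is honoured in PyNative mode, so set the execution mode
# together with the memory options (sizes and device here are illustrative).
context.set_context(mode=context.PYNATIVE_MODE,
                    device_target="Ascend",
                    max_device_memory="8GB",
                    mempool_block_size="2GB")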

View File

@@ -465,6 +465,7 @@ constexpr auto kAttrKeepDims = "keep_dims";
constexpr auto kAttrShapeGamma = "shape_gamma";
constexpr auto kAttrPerm = "perm";
constexpr auto kAttrTransposeFirst = "transpose_first";
constexpr auto kAttrTbeFusionType = "tbe_fusion_type";
constexpr auto kAttrAtomicAddMemSize = "automic_add_mem_size";
constexpr auto kAttrAtomicOutputIndexs = "atomic_output_clean_indexs";
constexpr auto kAttrNeedAtomic = "need_atomic";

View File

@@ -565,10 +565,12 @@ void TbeKernelCompileManager::UpdateFusionTypeAndOutputDataDesc(const std::vecto
    auto kernel_name = pre_build_full_name_to_json_name_[full_name];
    auto pre_res = prebuild_res_map_[kernel_name];
    auto fusion_type = pre_res.fusion_type;
    auto fusion_name = GetFusionNameByType(fusion_type);
    auto output_data_desc = pre_res.output_data_desc;
    auto core_type = pre_res.core_type;
    AnfAlgo::SetCoreType(node, core_type);
    AnfAlgo::SetFusionType(node, fusion_type);
    // Record the fusion type name on the node as the new kAttrTbeFusionType attribute.
    common::AnfAlgo::SetNodeAttr(kAttrTbeFusionType, MakeValue(fusion_name), node);
    AnfAlgo::SetOutputDataDesc(node, {output_data_desc});
  }
  MS_LOG(INFO) << "End update fusion type after pre build";

View File

@@ -308,6 +308,10 @@ class _Context:
    def set_mempool_block_size(self, mempool_block_size):
        """Set the block size of memory pool."""
        if _get_mode() == GRAPH_MODE:
            logger.warning("Graph mode doesn't support setting the context parameter 'mempool_block_size' "
                           "currently; you can use context.set_context to switch to PyNative mode.")
            return
        if not Validator.check_str_by_regular(mempool_block_size, _re_pattern):
            raise ValueError("For 'context.set_context', the argument 'mempool_block_size' should be in "
                             "correct format! Such as \"10GB\", "
@@ -777,7 +781,7 @@ def set_context(**kwargs):
The actual used memory size is the minimum of the available memory of the device and max_device_memory.
variable_memory_max_size (str): This parameter is deprecated, and will be removed in a future version.
Please use parameter 'max_device_memory' instead.
mempool_block_size (str): Set the size of the memory pool block for devices.
mempool_block_size (str): Set the size of the memory pool block in PyNative mode for devices.
The format is "xxGB". Default: "1GB". Minimum size is "1GB". The actual used memory block size is the minimum
of the available memory of the device and mempool_block_size.
op_timeout (int): Set the maximum duration of executing an operator in seconds.
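
As a usage sketch of the PyNative-only guard added to _Context.set_mempool_block_size above (behavior as described by the new warning; the calls below are illustrative, not part of the patch):

from mindspore import context

# In graph mode the new check only logs a warning and returns,
# leaving the memory pool block size unchanged.
context.set_context(mode=context.GRAPH_MODE)
context.set_context(mempool_block_size="2GB")   # warned and ignored

# In PyNative mode the value is validated against the "xxGB" pattern and applied.
context.set_context(mode=context.PYNATIVE_MODE)
context.set_context(mempool_block_size="2GB")   # takes effect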