diff --git a/model_zoo/official/cv/FCN8s/README.md b/model_zoo/official/cv/FCN8s/README.md index 8e52cc942fd..7168a98247e 100755 --- a/model_zoo/official/cv/FCN8s/README.md +++ b/model_zoo/official/cv/FCN8s/README.md @@ -340,6 +340,27 @@ Dataset used: python export.py ``` +- 在modelarts上导出MindIR + +```Modelarts +在ModelArts上导出MindIR示例 +数据集存放方式同Modelart训练 +# (1) 选择a(修改yaml文件参数)或者b(ModelArts创建训练作业修改参数)其中一种方式。 +# a. 设置 "enable_modelarts=True" +# 设置 "file_name=fcn8s" +# 设置 "file_format=MINDIR" +# 设置 "ckpt_file=/cache/data/checkpoint file name" + +# b. 增加 "enable_modelarts=True" 参数在modearts的界面上。 +# 在modelarts的界面上设置方法a所需要的参数 +# 注意:路径参数不需要加引号 +# (2)设置网络配置文件的路径 "_config_path=/The path of config in default_config.yaml/" +# (3) 在modelarts的界面上设置代码的路径 "/path/fcn8s"。 +# (4) 在modelarts的界面上设置模型的启动文件 "export.py" 。 +# (5) 在modelarts的界面上设置模型的数据路径 ".../VOC2012/checkpoint"(选择VOC2012/checkpoint文件夹路径) , +# MindIR的输出路径"Output file path" 和模型的日志路径 "Job log path" 。 +``` + ## 推理过程 ### 推理 diff --git a/model_zoo/official/cv/FCN8s/default_config.yaml b/model_zoo/official/cv/FCN8s/default_config.yaml index 645dc8fb2b1..592c76d6a6b 100644 --- a/model_zoo/official/cv/FCN8s/default_config.yaml +++ b/model_zoo/official/cv/FCN8s/default_config.yaml @@ -10,8 +10,7 @@ output_path: "/cache/train" load_path: "/cache/checkpoint_path" device_target: "Ascend" enable_profiling: False -checkpoint_path: "./checkpoint/" -checkpoint_file: "./checkpoint/.ckpt" + # ====================================================================================== # common options diff --git a/model_zoo/official/cv/FCN8s/export.py b/model_zoo/official/cv/FCN8s/export.py index 0cdac6a6dee..3eb4dd03f62 100644 --- a/model_zoo/official/cv/FCN8s/export.py +++ b/model_zoo/official/cv/FCN8s/export.py @@ -13,9 +13,8 @@ # limitations under the License. 
# ============================================================================ """export FCN8s.""" - +import os import numpy as np - import mindspore as ms from mindspore import Tensor from mindspore import context @@ -23,10 +22,18 @@ from mindspore.train.serialization import load_checkpoint, load_param_into_net, from src.nets.FCN8s import FCN8s from src.model_utils.config import config from src.model_utils.device_adapter import get_device_id +from src.model_utils.moxing_adapter import moxing_wrapper + context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target, device_id=get_device_id()) -if __name__ == '__main__': + +def modelarts_pre_process(): + config.file_name = os.path.join(config.output_path, config.file_name) + + +@moxing_wrapper(pre_process=modelarts_pre_process) +def model_export(): net = FCN8s(n_class=config.num_classes) # load model @@ -35,3 +42,7 @@ if __name__ == '__main__': input_arr = Tensor(np.zeros([1, 3, config.crop_size, config.crop_size]), ms.float32) export(net, input_arr, file_name=config.file_name, file_format=config.file_format) + + +if __name__ == '__main__': + model_export() diff --git a/model_zoo/official/cv/cnn_direction_model/README.md b/model_zoo/official/cv/cnn_direction_model/README.md index bbb43fb3b23..980b1333ff6 100644 --- a/model_zoo/official/cv/cnn_direction_model/README.md +++ b/model_zoo/official/cv/cnn_direction_model/README.md @@ -240,6 +240,27 @@ python export.py --ckpt_file [CKPT_PATH] --device_target [DEVICE_TARGET] --file_ `EXPORT_FORMAT` should be in ["AIR", "MINDIR"] +- Export MindIR on Modelarts + +```Modelarts +Export MindIR example on ModelArts +Data storage method is the same as training +# (1) Choose either a (modify yaml file parameters) or b (modelArts create training job to modify parameters)。 +# a. set "enable_modelarts=True" +# set "file_name=cnn_direction_model" +# set "file_format=MINDIR" +# set "ckpt_file=/cache/data/checkpoint file name" + +# b. Add "enable_modelarts=True" parameter on the interface of modearts。 +# Set the parameters required by method a on the modelarts interface +# Note: The path parameter does not need to be quoted +# (2)Set the path of the network configuration file "_config_path=/The path of config in default_config.yaml/" +# (3) Set the code path on the modelarts interface "/path/crnn"。 +# (4) Set the model's startup file on the modelarts interface "export.py" 。 +# (5) Set the data path of the model on the modelarts interface ".../crnn_dataset/eval/checkpoint"(choices crnn_dataset/eval/checkpoint Folder path) , +# The output path of the model "Output file path" and the log path of the model "Job log path" 。 +``` + ## [Inference Process](#contents) ### Usage diff --git a/model_zoo/official/cv/cnn_direction_model/default_config.yaml b/model_zoo/official/cv/cnn_direction_model/default_config.yaml index adfad461983..b8ec741e7a2 100644 --- a/model_zoo/official/cv/cnn_direction_model/default_config.yaml +++ b/model_zoo/official/cv/cnn_direction_model/default_config.yaml @@ -17,7 +17,6 @@ modelarts_dataset_unzip_name: "FSNS" # common options run_distribute: False - # ====================================================================================== # Training options @@ -77,5 +76,3 @@ data_path: "The location of input data" output_pah: "The location of the output file" device_target: "device id of GPU or Ascend. 
(Default: None)" enable_profiling: "Whether enable profiling while training default: False" - - diff --git a/model_zoo/official/cv/cnn_direction_model/export.py b/model_zoo/official/cv/cnn_direction_model/export.py index 51bf1a60676..d98490e22cd 100644 --- a/model_zoo/official/cv/cnn_direction_model/export.py +++ b/model_zoo/official/cv/cnn_direction_model/export.py @@ -13,24 +13,38 @@ # limitations under the License. # ============================================================================ """export script""" - +import os import numpy as np import mindspore as ms from mindspore import Tensor, context, load_checkpoint, export from src.cnn_direction_model import CNNDirectionModel from src.model_utils.config import config from src.model_utils.device_adapter import get_device_id +from src.model_utils.moxing_adapter import moxing_wrapper + context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target) + + +def modelarts_pre_process(): + config.file_name = os.path.join(config.output_path, config.file_name) + + device_id = get_device_id() context.set_context(device_id=device_id) -if __name__ == '__main__': + +@moxing_wrapper(pre_process=modelarts_pre_process) +def model_export(): net = CNNDirectionModel([3, 64, 48, 48, 64], [64, 48, 48, 64, 64], [256, 64], [64, 512]) - param_dict = load_checkpoint(config.ckpt_file, net=net) + load_checkpoint(config.ckpt_file, net=net) net.set_train(False) input_data = Tensor(np.zeros([1, 3, config.im_size_h, config.im_size_w]), ms.float32) export(net, input_data, file_name=config.file_name, file_format=config.file_format) + + +if __name__ == '__main__': + model_export() diff --git a/model_zoo/official/cv/cnnctc/README.md b/model_zoo/official/cv/cnnctc/README.md index 1c2d45f03da..485aebf392c 100644 --- a/model_zoo/official/cv/cnnctc/README.md +++ b/model_zoo/official/cv/cnnctc/README.md @@ -164,7 +164,6 @@ The entire code structure is as following: |---src |---__init__.py // init file |---cnn_ctc.py // cnn_ctc network - |---config.py // total config |---callback.py // loss callback file |---dataset.py // process dataset |---util.py // routine operation @@ -291,7 +290,7 @@ epoch: 1 step: 8698 , loss is 9.708542263610315, average time per step is 0.2184 # (2) Set the path of the network configuration file "_config_path=/The path of config in default_config.yaml/" # (3) Set the code path on the modelarts interface "/path/cnnctc"。 -# (4) Set the model's startup file on the modelarts interface "export.py" 。 +# (4) Set the model's startup file on the modelarts interface "train.py" 。 # (5) Set the data path of the model on the modelarts interface ".../CNNCTC_Data/train"(choices CNNCTC_Data/train Folder path) , # The output path of the model "Output file path" and the log path of the model "Job log path" 。 # (6) start trainning the model。 @@ -309,7 +308,7 @@ epoch: 1 step: 8698 , loss is 9.708542263610315, average time per step is 0.2184 # (3) Set the path of the network configuration file "_config_path=/The path of config in default_config.yaml/" # (4) Set the code path on the modelarts interface "/path/cnnctc"。 -# (5) Set the model's startup file on the modelarts interface "export.py" 。 +# (5) Set the model's startup file on the modelarts interface "train.py" 。 # (6) Set the data path of the model on the modelarts interface ".../CNNCTC_Data/train"(choices CNNCTC_Data/train Folder path) , # The output path of the model "Output file path" and the log path of the model "Job log path" 。 # (7) Start model inference。 @@ -346,7 +345,7 @@ Export MindIR 
example on ModelArts Data storage method is the same as training # (1) Choose either a (modify yaml file parameters) or b (modelArts create training job to modify parameters)。 # a. set "enable_modelarts=True" -# set "file_name=/cache/train/cnnctc" +# set "file_name=cnnctc" # set "file_format=MINDIR" # set "ckpt_file=/cache/data/checkpoint file name" diff --git a/model_zoo/official/cv/cnnctc/README_CN.md b/model_zoo/official/cv/cnnctc/README_CN.md index 6aa4ee06e14..892b2c38a55 100644 --- a/model_zoo/official/cv/cnnctc/README_CN.md +++ b/model_zoo/official/cv/cnnctc/README_CN.md @@ -347,7 +347,7 @@ python export.py --ckpt_file [CKPT_PATH] --file_format [EXPORT_FORMAT] --TEST_BA 数据集存放方式同Modelart训练 # (1) 选择a(修改yaml文件参数)或者b(ModelArts创建训练作业修改参数)其中一种方式。 # a. 设置 "enable_modelarts=True" -# 设置 "file_name=/cache/train/cnnctc" +# 设置 "file_name=cnnctc" # 设置 "file_format=MINDIR" # 设置 "ckpt_file=/cache/data/checkpoint file name" diff --git a/model_zoo/official/cv/cnnctc/default_config.yaml b/model_zoo/official/cv/cnnctc/default_config.yaml index f751434d6ce..40854269711 100644 --- a/model_zoo/official/cv/cnnctc/default_config.yaml +++ b/model_zoo/official/cv/cnnctc/default_config.yaml @@ -11,10 +11,6 @@ load_path: "/cache/checkpoint_path" device_target: "Ascend" enable_profiling: False -# ====================================================================================== -# common options - - # ====================================================================================== # Training options CHARACTER: "0123456789abcdefghijklmnopqrstuvwxyz" @@ -73,4 +69,3 @@ enable_profiling: "Whether enable profiling while training default: False" file_name: "CNN&CTC output air name" file_format: "choices [AIR, MINDIR]" ckpt_file: "CNN&CTC ckpt file" - diff --git a/model_zoo/official/cv/cnnctc/export.py b/model_zoo/official/cv/cnnctc/export.py index cf451f8c58d..949d0b8b12b 100644 --- a/model_zoo/official/cv/cnnctc/export.py +++ b/model_zoo/official/cv/cnnctc/export.py @@ -15,6 +15,7 @@ """export checkpoint file into air, onnx, mindir models suggest run as python export.py --filename cnnctc --file_format MINDIR --ckpt_file [ckpt file path] """ +import os import numpy as np from mindspore import Tensor, context, load_checkpoint, export import mindspore.common.dtype as mstype @@ -29,7 +30,7 @@ if config.device_target == "Ascend": def modelarts_pre_process(): - pass + config.file_name = os.path.join(config.output_path, config.file_name) @moxing_wrapper(pre_process=modelarts_pre_process) diff --git a/model_zoo/official/cv/crnn/README.md b/model_zoo/official/cv/crnn/README.md index b721a9cd4a8..1497d01074a 100644 --- a/model_zoo/official/cv/crnn/README.md +++ b/model_zoo/official/cv/crnn/README.md @@ -307,7 +307,7 @@ Export MindIR example on ModelArts Data storage method is the same as training # (1) Choose either a (modify yaml file parameters) or b (modelArts create training job to modify parameters)。 # a. 
set "enable_modelarts=True" -# set "file_name=/cache/train/crnn" +# set "file_name=crnn" # set "file_format=MINDIR" # set "ckpt_file=/cache/data/checkpoint file name" diff --git a/model_zoo/official/cv/crnn/export.py b/model_zoo/official/cv/crnn/export.py index a60dd2e36a9..324ec090dcb 100644 --- a/model_zoo/official/cv/crnn/export.py +++ b/model_zoo/official/cv/crnn/export.py @@ -14,6 +14,7 @@ # ============================================================================ """ export model for CRNN """ +import os import numpy as np import mindspore as ms from mindspore import Tensor, context, load_checkpoint, export @@ -24,7 +25,7 @@ from src.model_utils.device_adapter import get_device_id def modelarts_pre_process(): - pass + config.file_name = os.path.join(config.output_path, config.file_name) @moxing_wrapper(pre_process=modelarts_pre_process) diff --git a/model_zoo/official/cv/ctpn/README.md b/model_zoo/official/cv/ctpn/README.md index f77668253d5..59cebe0f9ce 100644 --- a/model_zoo/official/cv/ctpn/README.md +++ b/model_zoo/official/cv/ctpn/README.md @@ -338,7 +338,7 @@ Export MindIR example on ModelArts Data storage method is the same as training # (1) Choose either a (modify yaml file parameters) or b (modelArts create training job to modify parameters)。 # a. set "enable_modelarts=True" -# set "file_name=/cache/train/cnnctc" +# set "file_name=ctpn" # set "file_format=MINDIR" # set "ckpt_file=/cache/data/checkpoint file name" diff --git a/model_zoo/official/cv/ctpn/export.py b/model_zoo/official/cv/ctpn/export.py index 0aa61641b0c..ebd87ce0a1a 100644 --- a/model_zoo/official/cv/ctpn/export.py +++ b/model_zoo/official/cv/ctpn/export.py @@ -13,6 +13,7 @@ # limitations under the License. # ============================================================================ """export checkpoint file into air, onnx, mindir models""" +import os import numpy as np import mindspore as ms from mindspore import Tensor, load_checkpoint, load_param_into_net, export, context @@ -29,7 +30,7 @@ if config.device_target == "Ascend": def modelarts_pre_process(): - pass + config.file_name = os.path.join(config.output_path, config.file_name) @moxing_wrapper(pre_process=modelarts_pre_process) diff --git a/model_zoo/official/cv/densenet/README.md b/model_zoo/official/cv/densenet/README.md index 336ec8d1030..130ce53c722 100644 --- a/model_zoo/official/cv/densenet/README.md +++ b/model_zoo/official/cv/densenet/README.md @@ -399,6 +399,27 @@ python export.py --net [NET_NAME] --ckpt_file [CKPT_PATH] --device_target [DEVIC `EXPORT_FORMAT` should be in ["AIR", "MINDIR"] +- Export MindIR on Modelarts + +```Modelarts +Export MindIR example on ModelArts +Data storage method is the same as training +# (1) Choose either a (modify yaml file parameters) or b (modelArts create training job to modify parameters)。 +# a. set "enable_modelarts=True" +# set "file_name=densenet121" +# set "file_format=MINDIR" +# set "ckpt_file=/cache/data/checkpoint file name" + +# b. 
Add "enable_modelarts=True" parameter on the interface of modearts。 +# Set the parameters required by method a on the modelarts interface +# Note: The path parameter does not need to be quoted +# (2)Set the path of the network configuration file "_config_path=/The path of config in densenet121_config.yaml/" +# (3) Set the code path on the modelarts interface "/path/densenet121"。 +# (4) Set the model's startup file on the modelarts interface "export.py" 。 +# (5) Set the data path of the model on the modelarts interface ".../ImageNet_Original/checkpoint"(choices ImageNet_Original/checkpoint Folder path) , +# The output path of the model "Output file path" and the log path of the model "Job log path" 。 +``` + ## [Inference Process](#contents) ### Inference diff --git a/model_zoo/official/cv/densenet/README_CN.md b/model_zoo/official/cv/densenet/README_CN.md index 01801aed772..2b35f51847c 100644 --- a/model_zoo/official/cv/densenet/README_CN.md +++ b/model_zoo/official/cv/densenet/README_CN.md @@ -398,6 +398,27 @@ python export.py --net [NET_NAME] --ckpt_file [CKPT_PATH] --device_target [DEVIC `EXPORT_FORMAT` 可选 ["AIR", "MINDIR"] +- 在modelarts上导出MindIR + +```Modelarts +在ModelArts上导出MindIR示例 +数据集存放方式同Modelart训练 +# (1) 选择a(修改yaml文件参数)或者b(ModelArts创建训练作业修改参数)其中一种方式。 +# a. 设置 "enable_modelarts=True" +# 设置 "file_name=densenet121" +# 设置 "file_format=MINDIR" +# 设置 "ckpt_file=/cache/data/checkpoint file name" + +# b. 增加 "enable_modelarts=True" 参数在modearts的界面上。 +# 在modelarts的界面上设置方法a所需要的参数 +# 注意:路径参数不需要加引号 +# (2)设置网络配置文件的路径 "_config_path=/The path of config in densenet121_config.yaml/" +# (3) 在modelarts的界面上设置代码的路径 "/path/densenet"。 +# (4) 在modelarts的界面上设置模型的启动文件 "export.py" 。 +# (5) 在modelarts的界面上设置模型的数据路径 ".../ImageNet_Original/eval/checkpoint"(选择ImageNet_Original/eval/checkpoint文件夹路径) , +# MindIR的输出路径"Output file path" 和模型的日志路径 "Job log path" 。 +``` + ## 推理过程 ### 推理 diff --git a/model_zoo/official/cv/densenet/densenet100_config.yaml b/model_zoo/official/cv/densenet/densenet100_config.yaml index 506246aa13e..717943ab9a5 100644 --- a/model_zoo/official/cv/densenet/densenet100_config.yaml +++ b/model_zoo/official/cv/densenet/densenet100_config.yaml @@ -10,9 +10,6 @@ output_path: "/cache/train" load_path: "/cache/checkpoint_path/" device_target: 'Ascend' enable_profiling: False -checkpoint_path: "./checkpoint/" -checkpoint_file: "./checkpoint/.ckpt" - # ============================================================================== # Common options @@ -22,7 +19,6 @@ is_distributed: 0 rank: 0 group_size: 1 - # ============================================================================== # Training options train_data_dir: "" @@ -50,7 +46,6 @@ ckpt_interval: 3124 save_ckpt_path: "./" is_save_on_master: 1 - # Eval options eval_data_dir: "" backbone: "resnet50" @@ -58,7 +53,6 @@ ckpt_files: "" log_path: "" eval_url: "" - # export options device_id: 0 batch_size: 32 diff --git a/model_zoo/official/cv/densenet/densenet121_config.yaml b/model_zoo/official/cv/densenet/densenet121_config.yaml index 4b95dd07b4b..bdd7fea35b3 100644 --- a/model_zoo/official/cv/densenet/densenet121_config.yaml +++ b/model_zoo/official/cv/densenet/densenet121_config.yaml @@ -10,8 +10,6 @@ output_path: "/cache/train" load_path: "/cache/checkpoint_path/" device_target: 'Ascend' enable_profiling: False -checkpoint_path: "./checkpoint/" -checkpoint_file: "./checkpoint/.ckpt" # ============================================================================== # Common options @@ -21,7 +19,6 @@ is_distributed: 0 rank: 0 group_size: 1 - # 
============================================================================== # Training options train_data_dir: "" @@ -49,7 +46,6 @@ ckpt_interval: 50000 save_ckpt_path: "./" is_save_on_master: 1 - # Eval options eval_data_dir: "" backbone: "resnet50" @@ -57,7 +53,6 @@ ckpt_files: "" log_path: "" eval_url: "" - # export options device_id: 0 batch_size: 32 @@ -95,4 +90,3 @@ backbone: "backbone" device_id: "Device id" file_name: "output file name" file_format: "file format choices [AIR MINDIR ONNX]" - diff --git a/model_zoo/official/cv/densenet/export.py b/model_zoo/official/cv/densenet/export.py index 1ce5cded5f6..34584b30b48 100644 --- a/model_zoo/official/cv/densenet/export.py +++ b/model_zoo/official/cv/densenet/export.py @@ -16,27 +16,32 @@ """export checkpoint file into air, onnx, mindir models Suggest run as python export.py --file_name [file_name] --ckpt_files [ckpt path] --file_format [file format] """ - +import os import numpy as np from mindspore.common import dtype as mstype from mindspore import context, Tensor from mindspore.train.serialization import export, load_checkpoint, load_param_into_net from src.model_utils.config import config +from src.model_utils.moxing_adapter import moxing_wrapper context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target) -if config.device_target == "Ascend": - context.set_context(device_id=config.device_id) - -if config.net == "densenet100": - from src.network.densenet import DenseNet100 as DenseNet -else: - from src.network.densenet import DenseNet121 as DenseNet +def modelarts_pre_process(): + config.file_name = os.path.join(config.output_path, config.file_name) -if __name__ == "__main__": +@moxing_wrapper(pre_process=modelarts_pre_process) +def model_export(): + if config.device_target == "Ascend": + context.set_context(device_id=config.device_id) + + if config.net == "densenet100": + from src.network.densenet import DenseNet100 as DenseNet + else: + from src.network.densenet import DenseNet121 as DenseNet + network = DenseNet(config.num_classes) param_dict = load_checkpoint(config.ckpt_files) @@ -59,3 +64,7 @@ if __name__ == "__main__": input_data = Tensor(np.zeros(shape), mstype.float32) export(network, input_data, file_name=config.file_name, file_format=config.file_format) + + +if __name__ == '__main__': + model_export() diff --git a/model_zoo/official/cv/dpn/README.md b/model_zoo/official/cv/dpn/README.md index 520ac5d1cc9..8c05bdfa211 100644 --- a/model_zoo/official/cv/dpn/README.md +++ b/model_zoo/official/cv/dpn/README.md @@ -337,6 +337,27 @@ python export.py --config_path [CONFIG_PATH] --ckpt_file [CKPT_PATH] --file_name The ckpt_file parameter is required, `FILE_FORMAT` should be in ["AIR", "MINDIR"] +- Export MindIR on Modelarts + +```Modelarts +Export MindIR example on ModelArts +Data storage method is the same as training +# (1) Choose either a (modify yaml file parameters) or b (modelArts create training job to modify parameters)。 +# a. set "enable_modelarts=True" +# set "file_name=dpn" +# set "file_format=MINDIR" +# set "ckpt_file=/cache/data/checkpoint file name" + +# b. 
Add "enable_modelarts=True" parameter on the interface of modearts。 +# Set the parameters required by method a on the modelarts interface +# Note: The path parameter does not need to be quoted +# (2)Set the path of the network configuration file "_config_path=/The path of config in default_config.yaml/" +# (3) Set the code path on the modelarts interface "/path/dpn"。 +# (4) Set the model's startup file on the modelarts interface "export.py" 。 +# (5) Set the data path of the model on the modelarts interface ".../ImageNet_Original/eval/checkpoint"(choices ImageNet_Original/eval/checkpoint Folder path) , +# The output path of the model "Output file path" and the log path of the model "Job log path" 。 +``` + ### [Infer on Ascend310](#contents) Before performing inference, the mindir file must be exported by `export.py` script. We only provide an example of inference using MINDIR model. diff --git a/model_zoo/official/cv/dpn/export.py b/model_zoo/official/cv/dpn/export.py index 6d3a9f2bf27..5ed7f164488 100644 --- a/model_zoo/official/cv/dpn/export.py +++ b/model_zoo/official/cv/dpn/export.py @@ -15,18 +15,27 @@ """Export DPN suggest run as python export.py --file_name [filename] --file_format [file format] --checkpoint_path [ckpt path] """ - +import os import numpy as np from mindspore import Tensor, context, load_checkpoint, load_param_into_net, export from src.dpn import dpns from src.model_utils.config import config +from src.model_utils.moxing_adapter import moxing_wrapper context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target) + + +def modelarts_pre_process(): + config.file_name = os.path.join(config.output_path, config.file_name) + + if config.device_target == "Ascend": context.set_context(device_id=config.device_id) -if __name__ == "__main__": + +@moxing_wrapper(pre_process=modelarts_pre_process) +def model_export(): # define net backbone = config.backbone num_classes = config.num_classes @@ -39,3 +48,7 @@ if __name__ == "__main__": image = Tensor(np.zeros([config.batch_size, 3, config.height, config.width], np.float32)) export(net, image, file_name=config.file_name, file_format=config.file_format) + + +if __name__ == '__main__': + model_export() diff --git a/model_zoo/official/cv/openpose/README.md b/model_zoo/official/cv/openpose/README.md index 41573ce274b..d5307ad3e95 100644 --- a/model_zoo/official/cv/openpose/README.md +++ b/model_zoo/official/cv/openpose/README.md @@ -269,7 +269,7 @@ Export MindIR example on ModelArts Data storage method is the same as training # (1) Choose either a (modify yaml file parameters) or b (modelArts create training job to modify parameters)。 # a. 
set "enable_modelarts=True" -# set "file_name=/cache/train/openpose" +# set "file_name=openpose" # set "file_format=MINDIR" # set "ckpt_file=/cache/data/checkpoint file name" diff --git a/model_zoo/official/cv/openpose/default_config.yaml b/model_zoo/official/cv/openpose/default_config.yaml index 0d9601056bd..95685a655d5 100644 --- a/model_zoo/official/cv/openpose/default_config.yaml +++ b/model_zoo/official/cv/openpose/default_config.yaml @@ -55,7 +55,6 @@ max_epoch_train: 60 max_epoch_train_NP: 80 loss_scale: 16384 - # default param batch_size: 10 min_keypoints: 5 @@ -74,7 +73,6 @@ max_scale: 2.0 max_rotate_degree: 40 center_perterb_max: 40 - # ====================================================================================== # Eval options is_distributed: 0 @@ -84,7 +82,6 @@ imgpath_val: "" ann: "" output_img_path: "./output_imgs/" - # inference params inference_img_size: 368 inference_scales: [0.5, 1, 1.5, 2] diff --git a/model_zoo/official/cv/openpose/export.py b/model_zoo/official/cv/openpose/export.py index 8792f7e83d2..5e83cbe8acd 100644 --- a/model_zoo/official/cv/openpose/export.py +++ b/model_zoo/official/cv/openpose/export.py @@ -13,7 +13,7 @@ # limitations under the License. # ============================================================================ """export""" - +import os import numpy as np from mindspore import Tensor from mindspore import context @@ -27,7 +27,7 @@ context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target, def modelarts_pre_process(): - pass + config.file_name = os.path.join(config.output_path, config.file_name) @moxing_wrapper(pre_process=None) diff --git a/model_zoo/official/cv/psenet/README.md b/model_zoo/official/cv/psenet/README.md index 2aa05520fd2..f7d78c40bf1 100644 --- a/model_zoo/official/cv/psenet/README.md +++ b/model_zoo/official/cv/psenet/README.md @@ -270,7 +270,7 @@ Export MindIR example on ModelArts Data storage method is the same as training # (1) Choose either a (modify yaml file parameters) or b (modelArts create training job to modify parameters)。 # a. set "enable_modelarts=True" -# set "file_name=/cache/train/psenet" +# set "file_name=psenet" # set "file_format=MINDIR" # set "ckpt_file=/cache/data/checkpoint file name" diff --git a/model_zoo/official/cv/psenet/README_CN.md b/model_zoo/official/cv/psenet/README_CN.md index 37ce389d612..edcef4b0f9d 100644 --- a/model_zoo/official/cv/psenet/README_CN.md +++ b/model_zoo/official/cv/psenet/README_CN.md @@ -269,7 +269,7 @@ python export.py --ckpt [CKPT_PATH] --file_name [FILE_NAME] --file_format [FILE_ 数据集存放方式同Modelart训练 # (1) 选择a(修改yaml文件参数)或者b(ModelArts创建训练作业修改参数)其中一种方式。 # a. 
设置 "enable_modelarts=True" -# 设置 "file_name=/cache/train/psenet" +# 设置 "file_name=psenet" # 设置 "file_format=MINDIR" # 设置 "ckpt_file=/cache/data/checkpoint file name" diff --git a/model_zoo/official/cv/psenet/default_config.yaml b/model_zoo/official/cv/psenet/default_config.yaml index 2da09e4e576..0301b9b56af 100644 --- a/model_zoo/official/cv/psenet/default_config.yaml +++ b/model_zoo/official/cv/psenet/default_config.yaml @@ -15,7 +15,6 @@ checkpoint_file: "./checkpoint/.ckpt" modelarts_home: "/home/work/user-job-dir" object_name: "psenet" - # ====================================================================================== # Training options pre_trained: "" @@ -46,7 +45,6 @@ TRAIN_REPEAT_NUM: 1800 TRAIN_DROP_REMAINDER: True TRAIN_MODEL_SAVE_PATH: "./" - # ====================================================================================== # Eval options ckpt: "" diff --git a/model_zoo/official/cv/psenet/export.py b/model_zoo/official/cv/psenet/export.py index f4410f2c5e6..b425b944a8f 100755 --- a/model_zoo/official/cv/psenet/export.py +++ b/model_zoo/official/cv/psenet/export.py @@ -15,7 +15,7 @@ """ ##############export checkpoint file into air models################# """ - +import os import numpy as np import mindspore as ms from mindspore import Tensor, load_checkpoint, load_param_into_net, export, context @@ -25,11 +25,17 @@ from src.model_utils.moxing_adapter import moxing_wrapper context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target) + + +def modelarts_pre_process(): + config.file_name = os.path.join(config.output_path, config.file_name) + + if config.device_target == "Ascend": context.set_context(device_id=config.device_id) -@moxing_wrapper(pre_process=None) +@moxing_wrapper(pre_process=modelarts_pre_process) def model_export(): net = ETSNet(config) param_dict = load_checkpoint(config.ckpt) diff --git a/model_zoo/official/cv/retinanet/README_CN.md b/model_zoo/official/cv/retinanet/README_CN.md index d682c4c17f4..b727c5c054e 100644 --- a/model_zoo/official/cv/retinanet/README_CN.md +++ b/model_zoo/official/cv/retinanet/README_CN.md @@ -346,7 +346,7 @@ python export.py --file_name retinanet --file_format MINDIR --checkpoint_path / 在ModelArts上导出MindIR示例 # (1) 选择a(修改yaml文件参数)或者b(ModelArts创建训练作业修改参数)其中一种方式。 # a. 设置 "enable_modelarts=True" -# 设置 "file_name=/cache/train/cnnctc" +# 设置 "file_name=retinanet" # 设置 "file_format=MINDIR" # 设置 "checkpoint_path=/cache/data/checkpoint/checkpoint file name" diff --git a/model_zoo/official/cv/retinanet/export.py b/model_zoo/official/cv/retinanet/export.py index 3fb7cf2ae54..9acd45892cc 100644 --- a/model_zoo/official/cv/retinanet/export.py +++ b/model_zoo/official/cv/retinanet/export.py @@ -13,6 +13,7 @@ # limitations under the License. 
# ============================================================================ """export for retinanet""" +import os import numpy as np import mindspore.common.dtype as mstype from mindspore import context, Tensor @@ -24,7 +25,7 @@ from src.model_utils.moxing_adapter import moxing_wrapper def modelarts_pre_process(): - pass + config.file_name = os.path.join(config.output_path, config.file_name) @moxing_wrapper(pre_process=modelarts_pre_process) diff --git a/model_zoo/official/cv/shufflenetv1/README_CN.md b/model_zoo/official/cv/shufflenetv1/README_CN.md index ccd251a2116..6f5cf9e9355 100644 --- a/model_zoo/official/cv/shufflenetv1/README_CN.md +++ b/model_zoo/official/cv/shufflenetv1/README_CN.md @@ -227,6 +227,27 @@ python export.py --ckpt_path [CKPT_PATH] --device_target [DEVICE_TARGET] --file_ `EXPORT_FORMAT` 可选 ["AIR", "MINDIR"] +- Export MindIR on Modelarts + +```Modelarts +Export MindIR example on ModelArts +Data storage method is the same as training +# (1) Choose either a (modify yaml file parameters) or b (modelArts create training job to modify parameters)。 +# a. set "enable_modelarts=True" +# set "file_name=shufflenetv1" +# set "file_format=MINDIR" +# set "ckpt_file=/cache/data/checkpoint file name" + +# b. Add "enable_modelarts=True" parameter on the interface of modearts。 +# Set the parameters required by method a on the modelarts interface +# Note: The path parameter does not need to be quoted +# (2)Set the path of the network configuration file "_config_path=/The path of config in default_config.yaml/" +# (3) Set the code path on the modelarts interface "/path/shufflenetv1"。 +# (4) Set the model's startup file on the modelarts interface "export.py" 。 +# (5) Set the data path of the model on the modelarts interface ".../ImageNet_Original/eval/checkpoint"(choices ImageNet_Original/eval/checkpoint Folder path) , +# The output path of the model "Output file path" and the log path of the model "Job log path" 。 +``` + ## 推理过程 ### 推理 diff --git a/model_zoo/official/cv/shufflenetv1/default_config.yaml b/model_zoo/official/cv/shufflenetv1/default_config.yaml index 7703beda1d5..271a71d91d8 100644 --- a/model_zoo/official/cv/shufflenetv1/default_config.yaml +++ b/model_zoo/official/cv/shufflenetv1/default_config.yaml @@ -10,8 +10,7 @@ output_path: "/cache/train" load_path: "/cache/checkpoint_path" device_target: "Ascend" enable_profiling: False -checkpoint_path: "./checkpoint/" -checkpoint_file: "./checkpoint/.ckpt" + # ====================================================================================== # common options num_classes: 1000 @@ -46,13 +45,11 @@ loss_scale: 1024 weight_decay: 0.00004 momentum: 0.9 - # ====================================================================================== # Eval options ckpt_path: "" eval_dataset_path: "" - # ====================================================================================== # export options file_name: "shufflenetv1" diff --git a/model_zoo/official/cv/shufflenetv1/export.py b/model_zoo/official/cv/shufflenetv1/export.py index 8e62f3c114d..dec005028b6 100644 --- a/model_zoo/official/cv/shufflenetv1/export.py +++ b/model_zoo/official/cv/shufflenetv1/export.py @@ -16,21 +16,28 @@ ##############export checkpoint file into air, onnx, mindir models################# suggest run as python export.py --file_name [file name] --ckpt_path [ckpt path] --file_format [file format] """ - +import os import numpy as np import mindspore as ms from mindspore import Tensor, load_checkpoint, load_param_into_net, export, context from 
src.model_utils.config import config from src.shufflenetv1 import ShuffleNetV1 +from src.model_utils.moxing_adapter import moxing_wrapper context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target) + + +def modelarts_pre_process(): + config.file_name = os.path.join(config.output_path, config.file_name) + + if config.device_target == "Ascend": context.set_context(device_id=config.device_id) -if __name__ == '__main__': - +@moxing_wrapper(pre_process=modelarts_pre_process) +def model_export(): net = ShuffleNetV1(model_size=config.model_size) param_dict = load_checkpoint(config.ckpt_path) @@ -39,3 +46,7 @@ if __name__ == '__main__': image_height, image_width = (224, 224) input_arr = Tensor(np.ones([config.batch_size, 3, image_height, image_width]), ms.float32) export(net, input_arr, file_name=config.file_name, file_format=config.file_format) + + +if __name__ == '__main__': + model_export() diff --git a/model_zoo/research/nlp/dscnn/README.md b/model_zoo/research/nlp/dscnn/README.md index 805af2178b2..78b9488af02 100644 --- a/model_zoo/research/nlp/dscnn/README.md +++ b/model_zoo/research/nlp/dscnn/README.md @@ -397,7 +397,7 @@ Export MindIR example on ModelArts Data storage method is the same as training # (1) Choose either a (modify yaml file parameters) or b (modelArts create training job to modify parameters)。 # a. set "enable_modelarts=True" -# set "file_name=/cache/train/dscnn" +# set "file_name=dscnn" # set "file_format=MINDIR" # set "ckpt_file=/cache/data/checkpoint file name" diff --git a/model_zoo/research/nlp/dscnn/default_config.yaml b/model_zoo/research/nlp/dscnn/default_config.yaml index 44480c26ae3..212cfc4a55f 100644 --- a/model_zoo/research/nlp/dscnn/default_config.yaml +++ b/model_zoo/research/nlp/dscnn/default_config.yaml @@ -28,9 +28,6 @@ per_batch_size: 100 model_size_info: [6, 276, 10, 4, 2, 1, 276, 3, 3, 2, 2, 276, 3, 3, 1, 1, 276, 3, 3, 1, 1, 276, 3, 3, 1, 1, 276, 3, 3, 1, 1 ] - - - # ====================================================================================== # Training options amp_level: "O0" diff --git a/model_zoo/research/nlp/dscnn/export.py b/model_zoo/research/nlp/dscnn/export.py index f732bb9fb38..08a759ef4f4 100644 --- a/model_zoo/research/nlp/dscnn/export.py +++ b/model_zoo/research/nlp/dscnn/export.py @@ -13,6 +13,7 @@ # limitations under the License. # =========================================================================== """DSCNN export.""" +import os import numpy as np from mindspore import Tensor from mindspore.train.serialization import export @@ -23,7 +24,7 @@ from src.model_utils.moxing_adapter import moxing_wrapper def modelarts_pre_process(): - pass + config.file_name = os.path.join(config.output_path, config.file_name) @moxing_wrapper(pre_process=None)
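
For reference, below is a consolidated sketch of the post-patch `export.py` shape that this change applies to each of the models above, using FCN8s as the example. Everything shown mirrors the hunks in this diff (the `moxing_wrapper` import, the `modelarts_pre_process` hook, the `model_export` entry point, and the dummy-input `export` call); the checkpoint-loading lines with `config.ckpt_file` are assumed from the unchanged context of the FCN8s file, and each other model substitutes its own network class and dummy-input shape. As its usage here suggests, the `moxing_wrapper` decorator from each model's `src/model_utils/moxing_adapter.py` invokes the `pre_process` hook before the wrapped function runs, which is why `modelarts_pre_process` re-roots `file_name` under `config.output_path`.

```python
# Sketch of the export.py pattern applied throughout this diff (FCN8s variant).
# Assumes it runs inside the model directory, where src.nets and
# src.model_utils are importable, as in the original scripts.
import os

import numpy as np
import mindspore as ms
from mindspore import Tensor, context
from mindspore.train.serialization import load_checkpoint, load_param_into_net, export

from src.nets.FCN8s import FCN8s
from src.model_utils.config import config
from src.model_utils.device_adapter import get_device_id
from src.model_utils.moxing_adapter import moxing_wrapper

context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target,
                    device_id=get_device_id())


def modelarts_pre_process():
    # Re-root the output file under config.output_path (/cache/train in the
    # yaml) so the exported MindIR ends up in the ModelArts output directory.
    config.file_name = os.path.join(config.output_path, config.file_name)


@moxing_wrapper(pre_process=modelarts_pre_process)
def model_export():
    net = FCN8s(n_class=config.num_classes)

    # Load the checkpoint named by ckpt_file (assumed from the unchanged
    # context of the original script) into the network before export.
    param_dict = load_checkpoint(config.ckpt_file)
    load_param_into_net(net, param_dict)

    input_arr = Tensor(np.zeros([1, 3, config.crop_size, config.crop_size]), ms.float32)
    export(net, input_arr, file_name=config.file_name, file_format=config.file_format)


if __name__ == '__main__':
    model_export()
```

The design point behind `modelarts_pre_process` is that on ModelArts only `config.output_path` (e.g. `/cache/train`) is copied back to the "Output file path" set on the web interface, so writing `file_name` relative to the working directory would leave the MindIR behind; a bare name such as `file_name=fcn8s` is enough because the hook joins it onto `output_path` at run time.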