!19012 modify model_zoo net export
Merge pull request !19012 from lilei/modify_model_zoo_bug
commit cc4583d892
@@ -244,13 +244,39 @@ acc=93.88%(TOP5)

## [Model Export](#contents)

Export MindIR on local

```shell
python export.py --device_target [PLATFORM] --checkpoint_file_path [CKPT_PATH] --file_format [EXPORT_FORMAT]
```

The `ckpt_file` parameter is required.
The `checkpoint_file_path` parameter is required.
`EXPORT_FORMAT` should be in ["AIR", "MINDIR"].
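
After a local export, it can be worth checking that the file loads and runs before moving on to inference. The snippet below is a minimal sketch rather than part of this repository: the file name and the 224x224 input shape are assumptions, and `mindspore.load`/`nn.GraphCell` require MindSpore 1.1 or later.

```python
# Sanity-check an exported MindIR by running one random batch through it.
import numpy as np
import mindspore as ms
from mindspore import nn, Tensor

graph = ms.load("resnext50.mindir")   # file produced by export.py (assumed name)
net = nn.GraphCell(graph)             # wrap the loaded graph as a callable cell
dummy = Tensor(np.random.uniform(-1.0, 1.0, (1, 3, 224, 224)).astype(np.float32))
print(net(dummy).shape)               # expect (batch_size, num_classes)
```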

Export on ModelArts (to run the export in ModelArts, please check the official documentation of [modelarts](https://support.huaweicloud.com/modelarts/), and then follow the steps below)

```python
# Export on ModelArts
# (1) Perform a or b.
# a. Set "enable_modelarts=True" on default_config.yaml file.
# Set "checkpoint_file_path='/cache/checkpoint_path/model.ckpt'" on default_config.yaml file.
# Set "checkpoint_url='s3://dir_to_trained_ckpt/'" on default_config.yaml file.
# Set "file_name='./resnext50'" on default_config.yaml file.
# Set "file_format='AIR'" on default_config.yaml file.
# Set other parameters you need in the default_config.yaml file.
# b. Add "enable_modelarts=True" on the website UI interface.
# Add "checkpoint_file_path='/cache/checkpoint_path/model.ckpt'" on the website UI interface.
# Add "checkpoint_url='s3://dir_to_trained_ckpt/'" on the website UI interface.
# Add "file_name='./resnext50'" on the website UI interface.
# Add "file_format='AIR'" on the website UI interface.
# Add other parameters on the website UI interface.
# (2) Set the config_path="/path/yaml file" on the website UI interface.
# (3) Set the code directory to "/path/resnext50" on the website UI interface.
# (4) Set the startup file to "export.py" on the website UI interface.
# (5) Set the "Output file path" and "Job log path" to your path on the website UI interface.
# (6) Create your job.
```

## [Inference Process](#contents)

### Usage

@@ -257,13 +257,37 @@ acc=94.72%(TOP5)

## Model Export

Export MindIR on local

```shell
python export.py --device_target [PLATFORM] --checkpoint_file_path [CKPT_PATH] --file_format [EXPORT_FORMAT]
```

The `ckpt_file` parameter is required.
The `checkpoint_file_path` parameter is required.
`EXPORT_FORMAT` should be in ["AIR", "MINDIR"].

Export MindIR on ModelArts

```python
# (1) Upload the trained model to the corresponding location in the S3 bucket.
# (2) Choose either a or b.
# a. Set "enable_modelarts=True" in the yaml file.
# Set "checkpoint_file_path='/cache/checkpoint_path/model.ckpt'" in the yaml file.
# Set "checkpoint_url=/The path of checkpoint in S3/" in the yaml file.
# Set "file_name='./resnext50'" in the yaml file.
# Set "file_format='AIR'" in the yaml file.
# b. Add "enable_modelarts=True" on the ModelArts UI.
# Add "checkpoint_file_path='/cache/checkpoint_path/model.ckpt'" on the ModelArts UI.
# Add "checkpoint_url=/The path of checkpoint in S3/" on the ModelArts UI.
# Set "file_name='./resnext50'" on the ModelArts UI.
# Set "file_format='AIR'" on the ModelArts UI.
# (3) Set the path of the network configuration file "config_path=/The path of config in S3/".
# (4) Set the code directory to "/path/resnext50" on the ModelArts UI.
# (5) Set the startup file to "export.py" on the ModelArts UI.
# Set the model output path "Output file path" and the job log path "Job log path".
# (6) Create the job to start exporting the MindIR.
```

## [Inference Process](#contents)

### Usage

@@ -15,10 +15,12 @@
"""
resnext export mindir.
"""
import os
import numpy as np
from mindspore.common import dtype as mstype
from mindspore import context, Tensor, load_checkpoint, load_param_into_net, export
from src.model_utils.config import config
from src.model_utils.moxing_adapter import moxing_wrapper
from src.image_classification import get_network
from src.utils.auto_mixed_precision import auto_mixed_precision

@@ -27,7 +29,13 @@ context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target)
if config.device_target == "Ascend":
    context.set_context(device_id=config.device_id)

if __name__ == '__main__':
def modelarts_pre_process():
    '''modelarts pre process function.'''
    config.file_name = os.path.join(config.output_path, config.file_name)
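    # Prefixing output_path keeps the exported file in the directory that
    # ModelArts syncs back to OBS when the job ends (assumed behavior of the
    # moxing adapter, which this diff does not show).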

@moxing_wrapper(pre_process=modelarts_pre_process)
def run_export():
    """run export."""
    network = get_network(network=config.network, num_classes=config.num_classes, platform=config.device_target)

    param_dict = load_checkpoint(config.checkpoint_file_path)

@@ -40,3 +48,6 @@ if __name__ == '__main__':
    input_shp = [config.batch_size, 3, config.height, config.width]
    input_array = Tensor(np.random.uniform(-1.0, 1.0, size=input_shp).astype(np.float32))
    export(network, input_array, file_name=config.file_name, file_format=config.file_format)

if __name__ == '__main__':
    run_export()
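
The `@moxing_wrapper` decorator comes from src/model_utils/moxing_adapter.py, which this diff does not show. The sketch below is a hypothetical simplification of the pattern, not the real adapter: it only illustrates that the ModelArts data sync brackets the wrapped function.

```python
# Hypothetical simplification of the moxing_wrapper pattern (illustration only).
import functools

def moxing_wrapper(pre_process=None, post_process=None):
    def decorator(run_func):
        @functools.wraps(run_func)
        def wrapped(*args, **kwargs):
            # real adapter: download checkpoint_url from OBS to /cache/checkpoint_path
            if pre_process:
                pre_process()    # e.g. prefix file_name with output_path
            result = run_func(*args, **kwargs)
            if post_process:
                post_process()
            # real adapter: upload output_path back to the OBS bucket
            return result
        return wrapped
    return decorator
```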
@@ -384,16 +384,42 @@ result: {'top_1_accuracy': 0.6094950384122919, 'top_5_accuracy': 0.8263244238156

### Export MindIR

Export MindIR on local

```shell
python export.py --checkpoint_file_path [CKPT_PATH] --batch_size [BATCH_SIZE] --net_name [NET] --dataset [DATASET] --file_format [EXPORT_FORMAT]
python export.py --checkpoint_file_path [CKPT_PATH] --batch_size [BATCH_SIZE] --net_name [NET] --dataset [DATASET] --file_format [EXPORT_FORMAT] --config_path [CONFIG_PATH]
```

The ckpt_file parameter is required,
The checkpoint_file_path parameter is required,
`BATCH_SIZE` can only be set to 1
`NET` should be in ["squeezenet", "squeezenet_residual"]
`DATASET` should be in ["cifar10", "imagenet"]
`EXPORT_FORMAT` should be in ["AIR", "MINDIR"]
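
The batch size restriction exists because the exported graph is static: the dummy input passed to `export` in export.py fixes the input shape once and for all. A minimal illustration (the 227x227 resolution is an assumption for squeezenet; take the real height and width from the yaml config):

```python
# The dummy input's shape is frozen into the exported file, so a graph
# exported with batch_size=1 only accepts batches of one on Ascend 310.
import numpy as np
from mindspore import Tensor

batch_size = 1  # must be 1 for the Ascend 310 inference flow described here
dummy = Tensor(np.zeros([batch_size, 3, 227, 227], np.float32))  # 227x227 assumed
```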

Export on ModelArts (to run the export in ModelArts, please check the official documentation of [modelarts](https://support.huaweicloud.com/modelarts/), and then follow the steps below)

```python
# Export on ModelArts
# (1) Perform a or b.
# a. Set "enable_modelarts=True" on default_config.yaml file.
# Set "checkpoint_file_path='/cache/checkpoint_path/model.ckpt'" on default_config.yaml file.
# Set "checkpoint_url='s3://dir_to_trained_ckpt/'" on default_config.yaml file.
# Set "file_name='./squeezenet'" on default_config.yaml file.
# Set "file_format='AIR'" on default_config.yaml file.
# Set other parameters you need in the default_config.yaml file.
# b. Add "enable_modelarts=True" on the website UI interface.
# Add "checkpoint_file_path='/cache/checkpoint_path/model.ckpt'" on the website UI interface.
# Add "checkpoint_url='s3://dir_to_trained_ckpt/'" on the website UI interface.
# Add "file_name='./squeezenet'" on the website UI interface.
# Add "file_format='AIR'" on the website UI interface.
# Add other parameters on the website UI interface.
# (2) Set the config_path="/path/yaml file" on the website UI interface.
# (3) Set the code directory to "/path/squeezenet" on the website UI interface.
# (4) Set the startup file to "export.py" on the website UI interface.
# (5) Set the "Output file path" and "Job log path" to your path on the website UI interface.
# (6) Create your job.
```

### Infer on Ascend310

Before performing inference, the MindIR file must be exported by the `export.py` script. We only provide an example of inference using the MINDIR model.

@@ -16,9 +16,10 @@
##############export checkpoint file into air, mindir and onnx models#################
python export.py --net squeezenet --dataset cifar10 --checkpoint_path squeezenet_cifar10-120_1562.ckpt
"""

import os
import numpy as np
from model_utils.config import config
from model_utils.moxing_adapter import moxing_wrapper
from mindspore import context, Tensor, load_checkpoint, load_param_into_net, export

if config.net_name == "squeezenet":
@@ -34,7 +35,13 @@ context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target)
if config.device_target == "Ascend":
    context.set_context(device_id=config.device_id)

if __name__ == '__main__':
def modelarts_pre_process():
    '''modelarts pre process function.'''
    config.file_name = os.path.join(config.output_path, config.file_name)

@moxing_wrapper(pre_process=modelarts_pre_process)
def run_export():
    """run export."""
    net = squeezenet(num_classes=num_classes)

    param_dict = load_checkpoint(config.checkpoint_file_path)

@@ -42,3 +49,6 @@ if __name__ == '__main__':

    input_data = Tensor(np.zeros([config.batch_size, 3, config.height, config.width], np.float32))
    export(net, input_data, file_name=config.file_name, file_format=config.file_format)

if __name__ == '__main__':
    run_export()
@@ -470,6 +470,8 @@ mAP: 0.2244936111705981

### [Export MindIR](#contents)

Export MindIR on local

```shell
python export.py --checkpoint_file_path [CKPT_PATH] --file_name [FILE_NAME] --file_format [FILE_FORMAT] --config_path [CONFIG_PATH]
```

@@ -477,6 +479,30 @@ python export.py --checkpoint_file_path [CKPT_PATH] --file_name [FILE_NAME] --fi
The ckpt_file parameter is required,
`EXPORT_FORMAT` should be in ["AIR", "MINDIR"]

Export on ModelArts (to run the export in ModelArts, please check the official documentation of [modelarts](https://support.huaweicloud.com/modelarts/), and then follow the steps below)

```python
# Export on ModelArts
# (1) Perform a or b.
# a. Set "enable_modelarts=True" on default_config.yaml file.
# Set "checkpoint_file_path='/cache/checkpoint_path/model.ckpt'" on default_config.yaml file.
# Set "checkpoint_url='s3://dir_to_trained_ckpt/'" on default_config.yaml file.
# Set "file_name='./ssd'" on default_config.yaml file.
# Set "file_format='AIR'" on default_config.yaml file.
# Set other parameters you need in the default_config.yaml file.
# b. Add "enable_modelarts=True" on the website UI interface.
# Add "checkpoint_file_path='/cache/checkpoint_path/model.ckpt'" on the website UI interface.
# Add "checkpoint_url='s3://dir_to_trained_ckpt/'" on the website UI interface.
# Add "file_name='./ssd'" on the website UI interface.
# Add "file_format='AIR'" on the website UI interface.
# Add other parameters on the website UI interface.
# (2) Set the config_path="/path/yaml file" on the website UI interface.
# (3) Set the code directory to "/path/ssd" on the website UI interface.
# (4) Set the startup file to "export.py" on the website UI interface.
# (5) Set the "Output file path" and "Job log path" to your path on the website UI interface.
# (6) Create your job.
```

### Infer on Ascend310

Before performing inference, the MindIR file must be exported by the `export.py` script. We only provide an example of inference using the MINDIR model.

@@ -395,6 +395,8 @@ mAP: 0.2244936111705981

### [Export MindIR](#contents)

Export MindIR on local

```shell
python export.py --checkpoint_file_path [CKPT_PATH] --file_name [FILE_NAME] --file_format [FILE_FORMAT] --config_path [CONFIG_PATH]
```

@@ -402,6 +404,28 @@ python export.py --checkpoint_file_path [CKPT_PATH] --file_name [FILE_NAME] --fi
The ckpt_file parameter is required,
`EXPORT_FORMAT` should be in ["AIR", "MINDIR"].

Export MindIR on ModelArts

```python
# (1) Upload the trained model to the corresponding location in the S3 bucket.
# (2) Choose either a or b.
# a. Set "enable_modelarts=True" in the yaml file.
# Set "checkpoint_file_path='/cache/checkpoint_path/model.ckpt'" in the yaml file.
# Set "checkpoint_url=/The path of checkpoint in S3/" in the yaml file.
# Set "file_name='./ssd'" in the yaml file.
# Set "file_format='AIR'" in the yaml file.
# b. Add "enable_modelarts=True" on the ModelArts UI.
# Add "checkpoint_file_path='/cache/checkpoint_path/model.ckpt'" on the ModelArts UI.
# Add "checkpoint_url=/The path of checkpoint in S3/" on the ModelArts UI.
# Set "file_name='./ssd'" on the ModelArts UI.
# Set "file_format='AIR'" on the ModelArts UI.
# (3) Set the path of the network configuration file "config_path=/The path of config in S3/".
# (4) Set the code directory to "/path/ssd" on the ModelArts UI.
# (5) Set the startup file to "export.py" on the ModelArts UI.
# Set the model output path "Output file path" and the job log path "Job log path".
# (6) Create the job to start exporting the MindIR.
```

### Infer on Ascend 310

Before performing inference, the MindIR file must be exported by the `export.py` script. The following shows an example of performing inference with a MindIR model.

@@ -13,6 +13,7 @@
# limitations under the License.
# ============================================================================

import os
import numpy as np

import mindspore
@@ -20,13 +21,20 @@ from mindspore import context, Tensor
from mindspore.train.serialization import load_checkpoint, load_param_into_net, export
from src.ssd import SSD300, SsdInferWithDecoder, ssd_mobilenet_v2, ssd_mobilenet_v1_fpn, ssd_resnet50_fpn, ssd_vgg16
from src.model_utils.config import config
from src.model_utils.moxing_adapter import moxing_wrapper
from src.box_utils import default_boxes

context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target)
if config.device_target == "Ascend":
    context.set_context(device_id=config.device_id)

if __name__ == '__main__':
def modelarts_pre_process():
    '''modelarts pre process function.'''
    config.file_name = os.path.join(config.output_path, config.file_name)

@moxing_wrapper(pre_process=modelarts_pre_process)
def run_export():
    """run export."""
    if hasattr(config, 'num_ssd_boxes') and config.num_ssd_boxes == -1:
        num = 0
        h, w = config.img_shape
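        # The hunk is truncated here. In other model_zoo SSD variants this
        # branch sums default boxes over the feature maps, roughly:
        #     for step, nd in zip(config.steps, config.num_default):
        #         num += (h // step) * (w // step) * nd
        #     config.num_ssd_boxes = num
        # (a hedged sketch; config.steps/num_default are assumptions here)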
@@ -55,3 +63,6 @@ if __name__ == '__main__':
    input_shp = [config.batch_size, 3] + config.img_shape
    input_array = Tensor(np.random.uniform(-1.0, 1.0, size=input_shp), mindspore.float32)
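    # Random values are fine for the dummy input: export records only shapes
    # and dtypes while tracing the graph; the values themselves are discarded.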
    export(net, input_array, file_name=config.file_name, file_format=config.file_format)

if __name__ == '__main__':
    run_export()

@@ -14,8 +14,8 @@
# limitations under the License.
# ============================================================================

if [[ $# -lt 3 || $# -gt 4 ]]; then
echo "Usage: bash run_infer_310.sh [MINDIR_PATH] [DATA_PATH] [DVPP] [DEVICE_ID]
if [[ $# -lt 4 || $# -gt 5 ]]; then
echo "Usage: bash run_infer_310.sh [MINDIR_PATH] [DATA_PATH] [DVPP] [CONFIG_PATH] [DEVICE_ID]
DVPP is mandatory, and must choose from [DVPP|CPU], it's case-insensitive
DEVICE_ID is optional, it can be set by environment variable device_id, otherwise the value is zero"
exit 1
@@ -31,10 +31,11 @@ get_real_path(){
model=$(get_real_path $1)
data_path=$(get_real_path $2)
DVPP=${3^^}
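# ${3^^} upper-cases the third argument (bash 4+), which is what makes the
# DVPP/CPU choice case-insensitive as promised in the usage text.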
cfg_path=$4

device_id=0
if [ $# == 4 ]; then
    device_id=$4
if [ $# == 5 ]; then
    device_id=$5
fi

echo "mindir name: "$model
@@ -85,7 +86,7 @@ function infer()

function cal_acc()
{
    python3.7 ../postprocess.py --result_path=./result_Files --img_path=$data_path --drop=True &> acc.log &
    python3.7 ../postprocess.py --result_path=./result_Files --img_path=$data_path --config_path=${cfg_path} --drop=True &> acc.log &
}

compile_app
@@ -472,7 +472,7 @@ the steps below, this is a simple example:

#### Running on Ascend 310

Export MindIR
Export MindIR on local

Before exporting, you need to modify the checkpoint_file_path and batch_size parameters in the configuration file. checkpoint_file_path is the checkpoint file path, and batch_size is set to 1.

@@ -483,6 +483,30 @@ python export.py --config_path=[CONFIG_PATH]
The checkpoint_file_path parameter is required,
`EXPORT_FORMAT` should be in ["AIR", "MINDIR"]

Export on ModelArts (to run the export in ModelArts, please check the official documentation of [modelarts](https://support.huaweicloud.com/modelarts/), and then follow the steps below)

```python
# Export on ModelArts
# (1) Perform a or b.
# a. Set "enable_modelarts=True" on default_config.yaml file.
# Set "checkpoint_file_path='/cache/checkpoint_path/model.ckpt'" on default_config.yaml file.
# Set "checkpoint_url='s3://dir_to_trained_ckpt/'" on default_config.yaml file.
# Set "file_name='./unet'" on default_config.yaml file.
# Set "file_format='AIR'" on default_config.yaml file.
# Set other parameters you need in the default_config.yaml file.
# b. Add "enable_modelarts=True" on the website UI interface.
# Add "checkpoint_file_path='/cache/checkpoint_path/model.ckpt'" on the website UI interface.
# Add "checkpoint_url='s3://dir_to_trained_ckpt/'" on the website UI interface.
# Add "file_name='./unet'" on the website UI interface.
# Add "file_format='AIR'" on the website UI interface.
# Add other parameters on the website UI interface.
# (2) Set the config_path="/path/yaml file" on the website UI interface.
# (3) Set the code directory to "/path/unet" on the website UI interface.
# (4) Set the startup file to "export.py" on the website UI interface.
# (5) Set the "Output file path" and "Job log path" to your path on the website UI interface.
# (6) Create your job.
```

Before performing inference, the MindIR file must be exported by the export script on the Ascend 910 environment.

```shell
@@ -473,10 +473,34 @@ python eval.py --data_path=/path/to/data/ --checkpoint_file_path=/path/to/checkp

Before exporting, modify the checkpoint_file_path and batch_size parameters in the configuration file: checkpoint_file_path is the checkpoint file path, and batch_size is set to 1.

Export MindIR on local

```shell
python export.py --config_path=[CONFIG_PATH]
```

Export MindIR on ModelArts

```python
# (1) Upload the trained model to the corresponding location in the S3 bucket.
# (2) Choose either a or b.
# a. Set "enable_modelarts=True" in the yaml file.
# Set "checkpoint_file_path='/cache/checkpoint_path/model.ckpt'" in the yaml file.
# Set "checkpoint_url=/The path of checkpoint in S3/" in the yaml file.
# Set "file_name='./unet'" in the yaml file.
# Set "file_format='AIR'" in the yaml file.
# b. Add "enable_modelarts=True" on the ModelArts UI.
# Add "checkpoint_file_path='/cache/checkpoint_path/model.ckpt'" on the ModelArts UI.
# Add "checkpoint_url=/The path of checkpoint in S3/" on the ModelArts UI.
# Set "file_name='./unet'" on the ModelArts UI.
# Set "file_format='AIR'" on the ModelArts UI.
# (3) Set the path of the network configuration file "config_path=/The path of config in S3/".
# (4) Set the code directory to "/path/unet" on the ModelArts UI.
# (5) Set the startup file to "export.py" on the ModelArts UI.
# Set the model output path "Output file path" and the job log path "Job log path".
# (6) Create the job to start exporting the MindIR.
```

Before performing inference, the MindIR file must be exported on the Ascend 910 environment via export.py.

```shell
@@ -13,6 +13,7 @@
# limitations under the License.
# ============================================================================

import os
import numpy as np

from mindspore import Tensor, export, load_checkpoint, load_param_into_net, context
@@ -22,13 +23,20 @@ from mindspore import context, Tensor
from src.utils import UnetEval
from src.model_utils.config import config
from src.model_utils.device_adapter import get_device_id
from src.model_utils.moxing_adapter import moxing_wrapper


context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target)
if config.device_target == "Ascend":
    context.set_context(device_id=get_device_id())

if __name__ == "__main__":
def modelarts_pre_process():
    '''modelarts pre process function.'''
    config.file_name = os.path.join(config.output_path, config.file_name)

@moxing_wrapper(pre_process=modelarts_pre_process)
def run_export():
    """run export."""
    if config.model_name == 'unet_medical':
        net = UNetMedical(n_channels=config.num_channels, n_classes=config.num_classes)
    elif config.model_name == 'unet_nested':

@@ -46,3 +54,6 @@ if __name__ == "__main__":
    input_data = Tensor(np.ones([config.batch_size, config.num_channels, config.height, \
                                 config.width]).astype(np.float32))
    export(net, input_data, file_name=config.file_name, file_format=config.file_format)

if __name__ == '__main__':
    run_export()

@@ -144,10 +144,10 @@ If you want to run in modelarts, please check the official documentation of [mod
# Add "checkpoint_file_path='/cache/checkpoint_path/model.ckpt'" on the website UI interface.
# Add "checkpoint_url=/The path of checkpoint in S3/" on the website UI interface.
# (3) Download nibabel and set pip-requirements.txt to code directory
# (5) Set the code directory to "/path/unet3d" on the website UI interface.
# (6) Set the startup file to "eval.py" on the website UI interface.
# (7) Set the "Dataset path" and "Output file path" and "Job log path" to your path on the website UI interface.
# (8) Create your job.
# (4) Set the code directory to "/path/unet3d" on the website UI interface.
# (5) Set the startup file to "eval.py" on the website UI interface.
# (6) Set the "Dataset path" and "Output file path" and "Job log path" to your path on the website UI interface.
# (7) Create your job.
```

## [Script Description](#contents)

@@ -619,10 +619,35 @@ sh run_gpu.sh -t i -n 1 -i 1 -o {outputfile}

### [Export MindIR](#contents)

Export MindIR on local

```shell
python export.py --checkpoint_file_path [CKPT_PATH] --file_name [FILE_NAME] --file_format [FILE_FORMAT]
```

Export on ModelArts (to run the export in ModelArts, please check the official documentation of [modelarts](https://support.huaweicloud.com/modelarts/), and then follow the steps below)

```python
# Export on ModelArts
# (1) Perform a or b.
# a. Set "enable_modelarts=True" on default_config.yaml file.
# Set "checkpoint_file_path='/cache/checkpoint_path/model.ckpt'" on default_config.yaml file.
# Set "checkpoint_url='s3://dir_to_trained_ckpt/'" on default_config.yaml file.
# Set "file_name='./mass'" on default_config.yaml file.
# Set "file_format='AIR'" on default_config.yaml file.
# Set other parameters you need in the default_config.yaml file.
# b. Add "enable_modelarts=True" on the website UI interface.
# Add "checkpoint_file_path='/cache/checkpoint_path/model.ckpt'" on the website UI interface.
# Add "checkpoint_url='s3://dir_to_trained_ckpt/'" on the website UI interface.
# Add "file_name='./mass'" on the website UI interface.
# Add "file_format='AIR'" on the website UI interface.
# Add other parameters on the website UI interface.
# (2) Set the code directory to "/path/mass" on the website UI interface.
# (3) Set the startup file to "export.py" on the website UI interface.
# (4) Set the "Output file path" and "Job log path" to your path on the website UI interface.
# (5) Create your job.
```

The ckpt_file parameter is required,
`EXPORT_FORMAT` should be in ["AIR", "MINDIR"]

@@ -623,6 +623,8 @@ sh run_gpu.sh -t i -n 1 -i 1 -o {outputfile}

### [Model Export](#contents)

Export MindIR on local

```shell
python export.py --checkpoint_file_path [CKPT_PATH] --file_name [FILE_NAME] --file_format [FILE_FORMAT]
```

@@ -630,6 +632,27 @@ python export.py --checkpoint_file_path [CKPT_PATH] --file_name [FILE_NAME] --fi
The checkpoint_file_path parameter is required,
`EXPORT_FORMAT` should be in ["AIR", "MINDIR"].

Export MindIR on ModelArts

```python
# (1) Upload the trained model to the corresponding location in the S3 bucket.
# (2) Choose either a or b.
# a. Set "enable_modelarts=True" in the yaml file.
# Set "checkpoint_file_path='/cache/checkpoint_path/model.ckpt'" in the yaml file.
# Set "checkpoint_url=/The path of checkpoint in S3/" in the yaml file.
# Set "file_name='./mass'" in the yaml file.
# Set "file_format='AIR'" in the yaml file.
# b. Add "enable_modelarts=True" on the ModelArts UI.
# Add "checkpoint_file_path='/cache/checkpoint_path/model.ckpt'" on the ModelArts UI.
# Add "checkpoint_url=/The path of checkpoint in S3/" on the ModelArts UI.
# Set "file_name='./mass'" on the ModelArts UI.
# Set "file_format='AIR'" on the ModelArts UI.
# (3) Set the code directory to "/path/mass" on the ModelArts UI.
# (4) Set the startup file to "export.py" on the ModelArts UI.
# Set the model output path "Output file path" and the job log path "Job log path".
# (5) Create the job to start exporting the MindIR.
```

### Infer on Ascend 310

Before performing inference, the MindIR file must be exported by the `export.py` script. The following shows an example of performing inference with a MindIR model.

@@ -14,15 +14,16 @@
# ============================================================================
"""export checkpoint file into air models"""

import os
import numpy as np

from mindspore import Tensor, context
from mindspore.common import dtype as mstype
from mindspore.train.serialization import export

from src.utils import Dictionary
from src.utils.load_weights import load_infer_weights
from src.model_utils.config import config
from src.model_utils.moxing_adapter import moxing_wrapper
from src.transformer.transformer_for_infer import TransformerInferModel

@@ -34,10 +35,14 @@ context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target)
if config.device_target == "Ascend":
    context.set_context(device_id=config.device_id)

if __name__ == '__main__':
    vocab = Dictionary.load_from_persisted_dict(config.vocab_file)
def modelarts_pre_process():
    '''modelarts pre process function.'''
    config.file_name = os.path.join(config.output_path, config.file_name)

@moxing_wrapper(pre_process=modelarts_pre_process)
def run_export():
    """run export."""
    get_config()
    dec_len = config.max_decode_length

    tfm_model = TransformerInferModel(config=config, use_one_hot_embeddings=False)
    tfm_model.init_parameters_data()

@@ -72,3 +77,6 @@ if __name__ == '__main__':
    source_mask = Tensor(np.ones((1, config.seq_length)).astype(np.int32))

    export(tfm_model, source_ids, source_mask, file_name=config.file_name, file_format=config.file_format)

if __name__ == '__main__':
    run_export()
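
# Note: mindspore.export accepts several dummy inputs for multi-input graphs,
#     export(net, input_a, input_b, file_name=..., file_format=...)
# which is why both source_ids and source_mask are passed above.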
@@ -78,10 +78,11 @@ If you want to run in modelarts, please check the official documentation of [mod
# Set other parameters you need in the yaml file.
# b. Add "enable_modelarts=True" on the website UI interface.
# Add other parameters on the website UI interface.
# (2) Set the code directory to "/path/textcnn" on the website UI interface.
# (3) Set the startup file to "train.py" on the website UI interface.
# (4) Set the "Dataset path" and "Output file path" and "Job log path" to your path on the website UI interface.
# (5) Create your job.
# (2) Set the config_path="/path/yaml" on the website UI interface.
# (3) Set the code directory to "/path/textcnn" on the website UI interface.
# (4) Set the startup file to "train.py" on the website UI interface.
# (5) Set the "Dataset path" and "Output file path" and "Job log path" to your path on the website UI interface.
# (6) Create your job.

# run evaluation on modelarts example
# (1) Copy or upload your trained model to S3 bucket.
@@ -92,10 +93,11 @@ If you want to run in modelarts, please check the official documentation of [mod
# b. Add "enable_modelarts=True" on the website UI interface.
# Add "checkpoint_file_path='/cache/checkpoint_path/model.ckpt'" on the website UI interface.
# Add "checkpoint_url=/The path of checkpoint in S3/" on the website UI interface.
# (3) Set the code directory to "/path/textcnn" on the website UI interface.
# (4) Set the startup file to "eval.py" on the website UI interface.
# (5) Set the "Dataset path" and "Output file path" and "Job log path" to your path on the website UI interface.
# (6) Create your job.
# (3) Set the config_path="/path/yaml" on the website UI interface.
# (4) Set the code directory to "/path/textcnn" on the website UI interface.
# (5) Set the startup file to "eval.py" on the website UI interface.
# (6) Set the "Dataset path" and "Output file path" and "Job log path" to your path on the website UI interface.
# (7) Create your job.
```

# [Script Description](#contents)

@@ -184,7 +186,7 @@ For more configuration details, please refer the script `*.yaml`.
Before running the command below, please check the checkpoint path used for evaluation. Please set the checkpoint path to be the absolute full path, e.g., "username/textcnn/ckpt/train_textcnn.ckpt".

```python
# need set config_path in config.py file and set data_path, checkpoint_file_path in yaml file
# need to set config_path, data_path and checkpoint_file_path in the yaml file
python eval.py > eval.log 2>&1 &
OR
sh scripts/run_eval.sh checkpoint_file_path dataset
@@ -199,13 +201,41 @@ For more configuration details, please refer the script `*.yaml`.

## [Export MindIR](#contents)

Export on local

```shell
python export.py --checkpoint_file_path [CKPT_PATH] --file_name [FILE_NAME] --file_format [FILE_FORMAT]
python export.py --checkpoint_file_path [CKPT_PATH] --file_name [FILE_NAME] --file_format [FILE_FORMAT] --config_path [CONFIG_FILE]
```

The ckpt_file parameter is required,
The checkpoint_file_path parameter is required,
`EXPORT_FORMAT` should be in ["AIR", "MINDIR"]

Export on ModelArts (to run the export in ModelArts, please check the official documentation of [modelarts](https://support.huaweicloud.com/modelarts/), and then follow the steps below)

```python
# Export on ModelArts
# (1) Perform a or b.
# a. Set "enable_modelarts=True" on default_config.yaml file.
# Set "data_path='/cache/data/'" on default_config.yaml file.
# Set "checkpoint_file_path='/cache/checkpoint_path/model.ckpt'" on default_config.yaml file.
# Set "checkpoint_url='s3://dir_to_trained_ckpt/'" on default_config.yaml file.
# Set "file_name='./textcnn'" on default_config.yaml file.
# Set "file_format='AIR'" on default_config.yaml file.
# Set other parameters you need in the default_config.yaml file.
# b. Add "enable_modelarts=True" on the website UI interface.
# Add "data_path='/cache/data/'" on the website UI interface.
# Add "checkpoint_file_path='/cache/checkpoint_path/model.ckpt'" on the website UI interface.
# Add "checkpoint_url='s3://dir_to_trained_ckpt/'" on the website UI interface.
# Add "file_name='./textcnn'" on the website UI interface.
# Add "file_format='AIR'" on the website UI interface.
# Add other parameters on the website UI interface.
# (2) Set the config_path="/path/yaml file" on the website UI interface.
# (3) Set the code directory to "/path/textcnn" on the website UI interface.
# (4) Set the startup file to "export.py" on the website UI interface.
# (5) Set the "Output file path" and "Job log path" to your path on the website UI interface.
# (6) Create your job.
```

## [Inference Process](#contents)

### Usage

@@ -16,11 +16,12 @@
##############export checkpoint file into air, onnx, mindir models#################
python export.py
"""
import os
import numpy as np

from mindspore import Tensor, load_checkpoint, load_param_into_net, export, context

from model_utils.config import config
from model_utils.moxing_adapter import moxing_wrapper
from src.textcnn import TextCNN
from src.dataset import MovieReview, SST2, Subjectivity

@@ -28,8 +29,13 @@ context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target)
if config.device_target == "Ascend":
    context.set_context(device_id=config.device_id)

if __name__ == '__main__':
def modelarts_pre_process():
    '''modelarts pre process function.'''
    config.file_name = os.path.join(config.output_path, config.file_name)

@moxing_wrapper(pre_process=modelarts_pre_process)
def run_export():
    """run export."""
    if config.dataset == 'MR':
        instance = MovieReview(root_dir=config.data_path, maxlen=config.word_len, split=0.9)
    elif config.dataset == 'SUBJ':

@@ -47,3 +53,6 @@ if __name__ == '__main__':

    input_arr = Tensor(np.ones([config.batch_size, config.word_len], np.int32))
    export(net, input_arr, file_name=config.file_name, file_format=config.file_format)

if __name__ == '__main__':
    run_export()

@@ -70,7 +70,7 @@ Dataset used: [COCO2017](<http://images.cocodataset.org/>)

```

And change the COCO_ROOT and other settings you need in `config.py`. The directory structure is as follows:
And change the COCO_ROOT and other settings you need in `default_config.yaml`. The directory structure is as follows:

```python
.
@@ -238,13 +238,38 @@ python eval.py --device_id 0 --dataset coco --checkpoint_path LOG4/ssd-500_458.c

### Export MindIR

Export MindIR on local

```shell
python export.py --ckpt_file [CKPT_PATH] --file_name [FILE_NAME] --file_format [FILE_FORMAT]
python export.py --checkpoint_file_path [CKPT_PATH] --file_name [FILE_NAME] --file_format [FILE_FORMAT]
```

The ckpt_file parameter is required,
The checkpoint_file_path parameter is required,
`FILE_FORMAT` should be in ["AIR", "MINDIR"]

Export on ModelArts (to run the export in ModelArts, please check the official documentation of [modelarts](https://support.huaweicloud.com/modelarts/), and then follow the steps below)

```python
# Export on ModelArts
# (1) Perform a or b.
# a. Set "enable_modelarts=True" on default_config.yaml file.
# Set "checkpoint_file_path='/cache/checkpoint_path/model.ckpt'" on default_config.yaml file.
# Set "checkpoint_url='s3://dir_to_trained_ckpt/'" on default_config.yaml file.
# Set "file_name='./ssd_ghostnet'" on default_config.yaml file.
# Set "file_format='AIR'" on default_config.yaml file.
# Set other parameters you need in the default_config.yaml file.
# b. Add "enable_modelarts=True" on the website UI interface.
# Add "checkpoint_file_path='/cache/checkpoint_path/model.ckpt'" on the website UI interface.
# Add "checkpoint_url='s3://dir_to_trained_ckpt/'" on the website UI interface.
# Add "file_name='./ssd_ghostnet'" on the website UI interface.
# Add "file_format='AIR'" on the website UI interface.
# Add other parameters on the website UI interface.
# (2) Set the code directory to "/path/ssd_ghostnet" on the website UI interface.
# (3) Set the startup file to "export.py" on the website UI interface.
# (4) Set the "Output file path" and "Job log path" to your path on the website UI interface.
# (5) Create your job.
```

### Infer on Ascend310

Before performing inference, the MindIR file must be exported by the `export.py` script. We only provide an example of inference using the MINDIR model.

@@ -14,6 +14,7 @@
# ============================================================================
"""export"""

import os
import numpy as np
from mindspore import Tensor
from mindspore import context

@@ -21,10 +22,17 @@ from mindspore.train.serialization import load_checkpoint, load_param_into_net,

from src.ssd_ghostnet import SSD300, ssd_ghostnet
from src.model_utils.config import config
from src.model_utils.moxing_adapter import moxing_wrapper

context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target, device_id=config.device_id)

if __name__ == "__main__":
def modelarts_pre_process():
    '''modelarts pre process function.'''
    config.file_name = os.path.join(config.output_path, config.file_name)

@moxing_wrapper(pre_process=modelarts_pre_process)
def run_export():
    """run export."""
    context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
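    # Graph mode was already selected at module level above; the extra
    # save_graphs=False here just disables dumping of intermediate graphs.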
    # define net
    net = SSD300(ssd_ghostnet(), is_training=False)

@@ -35,3 +43,6 @@ if __name__ == "__main__":
    input_shape = config.img_shape
    inputs = np.ones([config.batch_size, 3, input_shape[0], input_shape[1]]).astype(np.float32)
    export(net, Tensor(inputs), file_name=config.file_name, file_format=config.file_format)

if __name__ == '__main__':
    run_export()