forked from mindspore-Ecosystem/mindspore
!19283 modify export for autodis\simple_pose\tinydarknet\textrcnn
Merge pull request !19283 from 郑彬/textrcnn
commit 577a2a1be6
@@ -373,10 +373,32 @@ Total boxes: 104125

### [Export MindIR](#contents)

- Export locally

```shell
python export.py
```

- Export on ModelArts (if you want to run the export on ModelArts, please check the official documentation of [ModelArts](https://support.huaweicloud.com/modelarts/) and start as follows)

```python
# (1) Upload the code folder to an S3 bucket.
# (2) Click "create training task" on the website UI.
# (3) Set the code directory to "/{path}/simple_pose" on the website UI.
# (4) Set the startup file to "/{path}/simple_pose/export.py" on the website UI.
# (5) Set the parameters in /{path}/simple_pose/default_config.yaml:
#     1. Set "enable_modelarts: True"
#     2. Set "TEST.MODEL_FILE: ./{path}/*.ckpt" ('TEST.MODEL_FILE' is the path of the weight file to be exported, relative to `export.py`; the weight file must be included in the code directory.)
#     3. Set "EXPORT.FILE_NAME: simple_pose"
#     4. Set "EXPORT.FILE_FORMAT: MINDIR"
# (6) Check the "data storage location" on the website UI and set the "Dataset path" (this step has no effect on the export, but the UI requires it).
# (7) Set the "Output file path" and "Job log path" to your own paths on the website UI.
# (8) Under "resource pool selection", select a single-card specification.
# (9) Create your job.
# You will see simple_pose.mindir under {Output file path}.
```

The `TEST.MODEL_FILE` parameter is required.
`FILE_FORMAT` should be in ["AIR", "MINDIR"].
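The export described above ultimately reduces to a single `mindspore.export` call on a network plus a dummy input. The sketch below uses a stand-in `nn.Conv2d` cell instead of the real pose network so that it runs on its own; it only illustrates the bare API that the `export.py` scripts in this pull request wrap, not the scripts themselves.

```python
# Minimal, self-contained sketch of the export call used by the export.py scripts in this PR.
# The Conv2d network is a stand-in; the real scripts build get_pose_net / TinyDarkNet / textrcnn
# and load the checkpoint named in the config before exporting.
import numpy as np
import mindspore as ms
from mindspore import context, nn, Tensor
from mindspore.train.serialization import export

context.set_context(mode=context.GRAPH_MODE, device_target="CPU")

net = nn.Conv2d(3, 8, 3)                                    # stand-in network (any nn.Cell works)
dummy = Tensor(np.zeros([1, 3, 224, 224]), ms.float32)      # dummy NCHW input that fixes the graph shape
export(net, dummy, file_name="demo", file_format="MINDIR")  # writes demo.mindir to the working directory
```

The `file_name` and `file_format` arguments are what `EXPORT.FILE_NAME`/`EXPORT.FILE_FORMAT` (or `file_name`/`file_format` in the other models' configs) feed into.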
@@ -20,8 +20,17 @@ from mindspore.train.serialization import load_checkpoint, load_param_into_net,
from src.model import get_pose_net
from src.model_utils.config import config
from src.model_utils.device_adapter import get_device_id
from src.model_utils.moxing_adapter import moxing_wrapper

if __name__ == '__main__':
def modelarts_pre_process():
    '''modelarts pre process function.'''
    config.EXPORT.FILE_NAME = os.path.join(config.output_path, config.EXPORT.FILE_NAME)
    config.TEST.MODEL_FILE = os.path.join(os.path.abspath(os.path.dirname(__file__)), config.TEST.MODEL_FILE)


@moxing_wrapper(pre_process=modelarts_pre_process)
def run_export():
    """export function"""
    # set context
    device_id = get_device_id()
    context.set_context(mode=context.GRAPH_MODE,

@@ -39,3 +48,6 @@ if __name__ == '__main__':
    input_shape = [config.TEST.BATCH_SIZE, 3, config.MODEL.IMAGE_SIZE[1], config.MODEL.IMAGE_SIZE[0]]
    input_ids = Tensor(np.zeros(input_shape), float32)
    export(model, input_ids, file_name=config.EXPORT.FILE_NAME, file_format=config.EXPORT.FILE_FORMAT)


if __name__ == '__main__':
    run_export()
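To sanity-check an exported MindIR afterwards (optional, and assuming your MindSpore version provides `mindspore.load` and `nn.GraphCell` for loading MindIR graphs), something along these lines can run one dummy batch through it:

```python
# Hedged sketch: load the exported MindIR back and run a dummy batch through it.
# The input shape (batch size included) must match the dummy input used at export
# time, i.e. [TEST.BATCH_SIZE, 3, MODEL.IMAGE_SIZE[1], MODEL.IMAGE_SIZE[0]]; the
# numbers below are placeholders, not the real config values.
import numpy as np
import mindspore as ms
from mindspore import context, nn, Tensor

context.set_context(mode=context.GRAPH_MODE, device_target="CPU")

graph = ms.load("simple_pose.mindir")                     # parse the exported graph
net = nn.GraphCell(graph)                                 # wrap it as a callable cell
dummy = Tensor(np.zeros([32, 3, 256, 192]), ms.float32)   # placeholder shape; use your config's values
print(net(dummy).shape)
```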
@@ -326,11 +326,38 @@ For more configuration details, please refer to the script `imagenet_config.yaml`.

### Export MindIR

- Export locally

```shell
# Ascend310 inference
python export.py --dataset [DATASET] --file_name [FILE_NAME] --file_format [EXPORT_FORMAT]
```

- Export on ModelArts (if you want to run the export on ModelArts, please check the official documentation of [ModelArts](https://support.huaweicloud.com/modelarts/) and start as follows)

```python
# (1) Upload the code folder to an S3 bucket.
# (2) Click "create training task" on the website UI.
# (3) Set the code directory to "/{path}/tinydarknet" on the website UI.
# (4) Set the startup file to "/{path}/tinydarknet/export.py" on the website UI.
# (5) Perform a or b.
#     a. Set the parameters in /{path}/tinydarknet/default_config.yaml:
#         1. Set "enable_modelarts: True"
#         2. Set "checkpoint_path: ./{path}/*.ckpt" ('checkpoint_path' is the path of the weight file to be exported, relative to `export.py`; the weight file must be included in the code directory.)
#         3. Set "file_name: tinydarknet"
#         4. Set "file_format: MINDIR"
#     b. Add the parameters on the website UI:
#         1. Add "enable_modelarts=True"
#         2. Add "checkpoint_path=./{path}/*.ckpt" ('checkpoint_path' is the path of the weight file to be exported, relative to `export.py`; the weight file must be included in the code directory.)
#         3. Add "file_name=tinydarknet"
#         4. Add "file_format=MINDIR"
# (6) Check the "data storage location" on the website UI and set the "Dataset path" (this step has no effect on the export, but the UI requires it).
# (7) Set the "Output file path" and "Job log path" to your own paths on the website UI.
# (8) Under "resource pool selection", select a single-card specification.
# (9) Create your job.
# You will see tinydarknet.mindir under {Output file path}.
```

There is no `ckpt_file` option; place the checkpoint file at the path given by the `checkpoint_path` parameter in `imagenet_config.yaml`.
`EXPORT_FORMAT` should be in ["AIR", "MINDIR"].
@@ -333,10 +333,37 @@ Tiny-DarkNet is a 16-layer network proposed by Joseph Chet Redmon et al. for the classic

### Export MindIR

- Export locally

```shell
python export.py --dataset [DATASET] --file_name [FILE_NAME] --file_format [EXPORT_FORMAT]
```

- Export on ModelArts

```python
# (1) Upload your code to an S3 bucket.
# (2) Create a training task on ModelArts.
# (3) Select the code directory /{path}/tinydarknet.
# (4) Select the startup file /{path}/tinydarknet/export.py.
# (5) Perform a or b.
#     a. Set the parameters in /{path}/tinydarknet/default_config.yaml:
#         1. Set "enable_modelarts: True"
#         2. Set "checkpoint_path: ./{path}/*.ckpt" ('checkpoint_path' is the path of the '*.ckpt' weight file to be exported, relative to `export.py`; the weight file must be included in the code directory.)
#         3. Set "file_name: tinydarknet"
#         4. Set "file_format: MINDIR"
#     b. Add the parameters on the website UI:
#         1. Add "enable_modelarts=True"
#         2. Add "checkpoint_path=./{path}/*.ckpt" ('checkpoint_path' is the path of the '*.ckpt' weight file to be exported, relative to `export.py`; the weight file must be included in the code directory.)
#         3. Add "file_name=tinydarknet"
#         4. Add "file_format=MINDIR"
# (6) Check the data storage location on the website UI and set the "training dataset" path (this step has no effect on the export, but it is required).
# (7) Set the "training output file path" and the "job log path" on the website UI.
# (8) Under "resource pool selection" on the website UI, select a single-card resource specification.
# (9) Create the training job.
# You will see the 'tinydarknet.mindir' file under {Output file path}.
```

There is no `ckpt_file` option; place the checkpoint file at the path given by the `checkpoint_path` parameter in `imagenet_config.yaml`.
`EXPORT_FORMAT` should be in ["AIR", "MINDIR"].
@@ -24,8 +24,18 @@ from mindspore.train.serialization import load_checkpoint, load_param_into_net,

from src.model_utils.config import config
from src.tinydarknet import TinyDarkNet
from src.model_utils.moxing_adapter import moxing_wrapper

if __name__ == '__main__':

def modelarts_pre_process():
    '''modelarts pre process function.'''
    config.file_name = os.path.join(config.output_path, config.file_name)
    config.checkpoint_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), config.checkpoint_path)


@moxing_wrapper(pre_process=modelarts_pre_process)
def run_export():
    """export function"""
    if config.dataset_name != 'imagenet':
        raise ValueError("Dataset is not support.")

@@ -37,3 +47,6 @@ if __name__ == '__main__':

    input_arr = Tensor(np.random.uniform(0.0, 1.0, size=[config.batch_size, 3, 224, 224]), ms.float32)
    export(net, input_arr, file_name=config.file_name, file_format=config.file_format)


if __name__ == '__main__':
    run_export()
@@ -39,6 +39,11 @@ ckpt_file: ''
file_name: 'textrcnn'
file_format: "MINDIR"

# postprocess and result_path related
pre_result_path: "./preprocess_result"
label_path: "./preprocess_Result/label_ids.npy"
result_path: "./result_Files"

---

# Help description for each configuration
@@ -20,7 +20,17 @@ from mindspore import Tensor, context, load_checkpoint, load_param_into_net, exp
from src.textrcnn import textrcnn
from src.model_utils.config import config
from src.model_utils.device_adapter import get_device_id
from src.model_utils.moxing_adapter import moxing_wrapper


def modelarts_pre_process():
    '''modelarts pre process function.'''
    config.file_name = os.path.join(config.output_path, config.file_name)
    config.ckpt_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), config.ckpt_file)
    config.preprocess_path = config.data_path


@moxing_wrapper(pre_process=modelarts_pre_process)
def run_export():
    '''export function.'''
    context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target, device_id=get_device_id())
@@ -14,27 +14,22 @@
# ============================================================================
"""postprocess"""
import os
import argparse
import numpy as np

from mindspore.nn.metrics import Accuracy
from src.model_utils.config import config as cfg

parser = argparse.ArgumentParser(description='postprocess')
parser.add_argument('--label_path', type=str, default="./preprocess_Result/label_ids.npy")
parser.add_argument('--result_path', type=str, default="./result_Files")
args = parser.parse_args()

def get_acc():
    '''calculate accuracy'''
    metric = Accuracy()
    metric.clear()
    label_list = np.load(args.label_path, allow_pickle=True)
    file_num = len(os.listdir(args.result_path))
    label_list = np.load(cfg.label_path, allow_pickle=True)
    file_num = len(os.listdir(cfg.result_path))

    for i in range(file_num):
        f_name = "textcrnn_bs" + str(cfg.batch_size) + "_" + str(i) + "_0.bin"
        pred = np.fromfile(os.path.join(args.result_path, f_name), np.float16)
        pred = np.fromfile(os.path.join(cfg.result_path, f_name), np.float16)
        pred = pred.reshape(cfg.batch_size, int(pred.shape[0]/cfg.batch_size))
        metric.update(pred, label_list[i])
    acc = metric.eval()
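For reference, the `Accuracy` metric used above accepts plain NumPy arrays, exactly as the loop passes them; a self-contained toy example of the clear/update/eval cycle (with made-up numbers, not textrcnn outputs) looks like this:

```python
# Toy illustration of the Accuracy metric used in postprocess.py: update() takes
# predictions of shape (batch, num_classes) and integer labels of shape (batch,),
# and eval() returns the fraction of correct argmax predictions.
import numpy as np
from mindspore.nn.metrics import Accuracy

metric = Accuracy()
metric.clear()
pred = np.array([[0.1, 0.9], [0.8, 0.2]], dtype=np.float16)  # same float16 dtype as the .bin files
label = np.array([1, 1], dtype=np.int32)                     # second sample is predicted wrong
metric.update(pred, label)
print(metric.eval())  # 0.5
```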
@@ -14,20 +14,16 @@
# ============================================================================
"""preprocess"""
import os
import argparse
import numpy as np

from src.model_utils.config import config as cfg
from src.dataset import create_dataset

parser = argparse.ArgumentParser(description='preprocess')
parser.add_argument('--result_path', type=str, default="./preprocess_Result")
args = parser.parse_args()

def get_bin():
    '''generate bin files.'''
    ds_eval = create_dataset(cfg.preprocess_path, cfg.batch_size, False)
    img_path = os.path.join(args.result_path, "00_feature")
    img_path = os.path.join(cfg.pre_result_path, "00_feature")
    os.makedirs(img_path)
    label_list = []

@@ -38,7 +34,7 @@ def get_bin():
        data["feature"].tofile(file_path)
        label_list.append(data["label"])

    np.save(os.path.join(args.result_path, "label_ids.npy"), label_list)
    np.save(os.path.join(cfg.pre_result_path, "label_ids.npy"), label_list)
    print("=" * 20, "bin files finished", "=" * 20)

if __name__ == '__main__':
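The preprocess and postprocess scripts above communicate through flat binary files, which store neither dtype nor shape; a toy round trip (with made-up sizes, not the real textrcnn feature layout) shows why postprocess.py has to reshape by the known batch size:

```python
# Round trip of the flat .bin format used between preprocess.py and the 310 inference step.
import numpy as np

batch = np.random.rand(4, 10).astype(np.float16)     # pretend batch_size=4, 10 values per sample
batch.tofile("sample_batch.bin")                     # written as a flat float16 buffer, no metadata

flat = np.fromfile("sample_batch.bin", np.float16)   # the reader must know the dtype ...
restored = flat.reshape(4, int(flat.shape[0] / 4))   # ... and recover the shape from the batch size
assert np.array_equal(batch, restored)
```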
@@ -198,10 +198,37 @@ Parameters for both training and evaluation can be set in `default_config.yaml`

### [Export MindIR](#contents)

- Export locally

```shell
python export.py --ckpt_file [CKPT_PATH] --file_name [FILE_NAME] --file_format [FILE_FORMAT]
```

- Export on ModelArts (if you want to run the export on ModelArts, please check the official documentation of [ModelArts](https://support.huaweicloud.com/modelarts/) and start as follows)

```python
# (1) Upload the code folder to an S3 bucket.
# (2) Click "create training task" on the website UI.
# (3) Set the code directory to "/{path}/textrcnn" on the website UI.
# (4) Set the startup file to "/{path}/textrcnn/export.py" on the website UI.
# (5) Perform a or b.
#     a. Set the parameters in /{path}/textrcnn/default_config.yaml:
#         1. Set "enable_modelarts: True"
#         2. Set "ckpt_file: ./{path}/*.ckpt" ('ckpt_file' is the path of the weight file to be exported, relative to `export.py`; the weight file must be included in the code directory.)
#         3. Set "file_name: textrcnn"
#         4. Set "file_format: MINDIR"
#     b. Add the parameters on the website UI:
#         1. Add "enable_modelarts=True"
#         2. Add "ckpt_file=./{path}/*.ckpt" ('ckpt_file' is the path of the weight file to be exported, relative to `export.py`; the weight file must be included in the code directory.)
#         3. Add "file_name=textrcnn"
#         4. Add "file_format=MINDIR"
# (6) Check the "data storage location" on the website UI and set the "Dataset path".
# (7) Set the "Output file path" and "Job log path" to your own paths on the website UI.
# (8) Under "resource pool selection", select a single-card specification.
# (9) Create your job.
# You will see textrcnn.mindir under {Output file path}.
```

The `ckpt_file` parameter is required.
`EXPORT_FORMAT` should be in ["AIR", "MINDIR"].
@@ -66,7 +66,7 @@ function preprocess_data()
        rm -rf ./preprocess_Result
    fi
    mkdir preprocess_Result
    python3.7 ../preprocess.py --result_path=./preprocess_Result
    python3.7 ../preprocess.py --result_path=./preprocess_Result > preprocess.log 2>&1
}

function compile_app()
@@ -26,6 +26,10 @@ batch_size: 16000
ckpt_file: ''
file_name: "autodis"
file_format: "AIR"
# 310infer related
dataset_path: ""
result_path: "./result_Files"
label_path: ""
# Dataset related
DataConfig:
    data_vocab_size: 184965
@@ -14,26 +14,21 @@
# ============================================================================
"""postprocess."""
import os
import argparse
import numpy as np
from mindspore import Tensor
from src.autodis import AUCMetric
from src.model_utils.config import train_config
from src.model_utils.config import config, train_config

parser = argparse.ArgumentParser(description='postprocess')
parser.add_argument('--result_path', type=str, default="./result_Files", help='result path')
parser.add_argument('--label_path', type=str, default=None, help='label path')
args_opt, _ = parser.parse_known_args()

def get_acc():
    ''' get accuracy '''
    auc_metric = AUCMetric()
    files = os.listdir(args_opt.label_path)
    files = os.listdir(config.label_path)
    batch_size = train_config.batch_size

    for f in files:
        rst_file = os.path.join(args_opt.result_path, f.split('.')[0] + '_0.bin')
        label_file = os.path.join(args_opt.label_path, f)
        rst_file = os.path.join(config.result_path, f.split('.')[0] + '_0.bin')
        label_file = os.path.join(config.label_path, f)

        logit = Tensor(np.fromfile(rst_file, np.float32).reshape(batch_size, 1))
        label = Tensor(np.fromfile(label_file, np.float32).reshape(batch_size, 1))
@@ -14,25 +14,19 @@
# ============================================================================
"""preprocess."""
import os
import argparse

from src.model_utils.config import data_config, train_config
from src.model_utils.config import config, data_config, train_config
from src.dataset import create_dataset, DataType

parser = argparse.ArgumentParser(description='preprocess.')
parser.add_argument('--dataset_path', type=str, default=None, help='Dataset path')
parser.add_argument('--result_path', type=str, default='./preprocess_Result', help='Result path')
args_opt, _ = parser.parse_known_args()


def generate_bin():
    '''generate bin files'''
    ds = create_dataset(args_opt.dataset_path, train_mode=False,
    ds = create_dataset(config.dataset_path, train_mode=False,
                        epochs=1, batch_size=train_config.batch_size,
                        data_type=DataType(data_config.data_format))
    batch_ids_path = os.path.join(args_opt.result_path, "00_batch_ids")
    batch_wts_path = os.path.join(args_opt.result_path, "01_batch_wts")
    labels_path = os.path.join(args_opt.result_path, "02_labels")
    batch_ids_path = os.path.join(config.result_path, "00_batch_ids")
    batch_wts_path = os.path.join(config.result_path, "01_batch_wts")
    labels_path = os.path.join(config.result_path, "02_labels")

    os.makedirs(batch_ids_path)
    os.makedirs(batch_wts_path)