forked from mindspore-Ecosystem/mindspore

fix bug dataset.py

parent 61c96a444f
commit 10714cde2c
@@ -14,10 +14,6 @@
 - [Evaluation Process](#评估过程)
 - [Launch](#启动-1)
 - [Result](#结果-1)
-- [Export Process](#导出过程)
-- [Export](#导出)
-- [Inference Process](#推理过程)
-- [Inference](#推理)
 - [Model Description](#模型说明)
 - [Training Performance](#训练性能)
 - [Description of Random Situation](#随机情况的描述)
@@ -64,7 +60,6 @@ The core of ShuffleNetV1 is divided into three stages, and each stage repeatedly stacks
 ├─run_standalone_train.sh       # single-device training script for the Ascend environment
 ├─run_distribute_train.sh       # 8-device parallel training script for the Ascend environment
 ├─run_eval.sh                   # evaluation script for the Ascend environment
-├─run_infer_310.sh              # Ascend 310 inference shell script
 ├─src
   ├─dataset.py                  # data preprocessing
   ├─shufflenetv1.py             # network model definition
@@ -78,7 +73,6 @@ The core of ShuffleNetV1 is divided into three stages, and each stage repeatedly stacks
 ├─default_config.yaml           # parameter file
 ├─train.py                      # network training script
 ├─export.py                     # model format conversion script
-├─postprogress.py               # post-processing script for Ascend 310 inference
 └─eval.py                       # network evaluation script
 └─mindspore_hub_conf.py         # hub configuration script
 ```
@@ -182,6 +176,7 @@ result:{'Loss': 2.0479587888106323, 'Top_1_Acc': 0.7385817307692307, 'Top_5_Acc'
 
 # (1) Choose either a (modify the yaml file parameters) or b (create a ModelArts training job and modify the parameters there).
 # a. Set "enable_modelarts=True".
+#    Set "is_distributed=True"
 #    Set "save_ckpt_path=/cache/train/outputs_imagenet/"
 #    Set "train_dataset_path=/cache/data/train/train_dataset/"
 #    Set "resume=/cache/data/train/train_predtrained/pred file name"; if there is no pretrained weight, set resume=""
@@ -216,36 +211,6 @@ result:{'Loss': 2.0479587888106323, 'Top_1_Acc': 0.7385817307692307, 'Top_5_Acc'
 # (7) Start model inference.
 ```
 
-## Export Process
-
-### Export
-
-```shell
-python export.py --ckpt_file [CKPT_PATH] --device_target [DEVICE_TARGET] --file_format [EXPORT_FORMAT] --batch_size [BATCH_SIZE]
-```
-
-`EXPORT_FORMAT` can be chosen from ["AIR", "MINDIR"].
-
-## Inference Process
-
-### Inference
-
-The model must be exported before inference. AIR models can only be exported in an Ascend 910 environment; MINDIR models can be exported in any environment.
-
-```shell
-# Ascend 310 inference
-bash run_infer_310.sh [MINDIR_PATH] [DATA_PATH] [LABEL_FILE] [DEVICE_ID]
-```
-
-- Note: the DenseNet121 network uses the ImageNet dataset; the label of each image is the number obtained by sorting the class folders and numbering them from 0.
-
-The inference results are saved in the current directory; results similar to the following can be found in the acc.log file.
-The inference results of the DenseNet121 network on ImageNet are as follows:
-
-```log
-Top_1_Acc=73.85%, Top_5_Acc=91.526%
-```
-
 # Model Description
 
 ## Training Performance
@@ -23,12 +23,12 @@ device_id: 0
 # Training options
 epoch_size: 250
 keep_checkpoint_max: 5
-save_ckpt_path: "/data1/mjq/ckpt/shufflenetv1/test_before_pr/"
+save_ckpt_path: "./"
 save_checkpoint_epochs: 1
 save_checkpoint: True
 amp_level: "O3"
 is_distributed: False
-train_dataset_path: "/data1/mjq/dataset/ImageNet_Original/train/"
+train_dataset_path: ""
 resume: ""
 
 # Dataset config
@@ -49,10 +49,15 @@ momentum: 0.9
 
 # ======================================================================================
 # Eval options
-ckpt_path: "/data1/mjq/ckpt/shufflenetv1/shufflenetv1_1-250_1251.ckpt"
-eval_dataset_path: "/data1/mjq/dataset/ImageNet_Original/validation_preprocess/"
+ckpt_path: ""
+eval_dataset_path: ""
 
 
+# ======================================================================================
+# export options
+file_name: "shufflenetv1"
+file_format: "MINDIR"
+
 ---
 # Help description for each configuration
 enable_modelarts: "Whether training on modelarts default: False"
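The `# export options` block added above feeds the rewritten export.py later in this diff: instead of argparse flags, the script now reads `config.ckpt_path`, `config.file_name`, `config.file_format`, and `config.device_target` from `src.model_utils.config`, whose implementation is not part of this commit. As a rough sketch only (assuming PyYAML and a helper that behaves roughly like the model-zoo one; the real `src/model_utils/config.py` may differ), the yaml defaults can be turned into such an attribute-style `config` object like this:

```python
# Minimal sketch of yaml-driven configuration (assumed, not the repo's exact helper):
# load default_config.yaml into an attribute-style object so that code can use
# `config.file_name`, `config.file_format`, etc., as export.py does after this change.
# Requires PyYAML; the real helper additionally merges command-line overrides
# such as --ckpt_path and --file_format on top of these defaults.
from types import SimpleNamespace

import yaml


def load_config(yaml_path="default_config.yaml"):
    with open(yaml_path, "r", encoding="utf-8") as f:
        # The file holds two YAML documents separated by "---"; the first carries
        # the defaults, the second only the help strings.
        defaults = next(yaml.safe_load_all(f))
    return SimpleNamespace(**defaults)


config = load_config()
print(config.file_name, config.file_format)   # "shufflenetv1", "MINDIR" per the hunk above
```

The `--file_name [file name] --ckpt_path [ckpt path] --file_format [file format]` overrides suggested in the new export.py docstring would then simply replace these defaults at parse time.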
@@ -65,3 +70,6 @@ enable_profiling: "Whether enable profiling while training default: False"
 is_distributed: "distributed training"
 resume: "resume training with existed checkpoint"
 model_size: "shuffleNetV1 model size choices 2.0x, 1.5x, 1.0x, 0.5x"
+device_id: "device id"
+file_name: "output file name"
+file_format: "file format choices [AIR MINDIR ONNX]"
@@ -14,40 +14,28 @@
 # ============================================================================
 """
 ##############export checkpoint file into air, onnx, mindir models#################
-python export.py
+suggest run as python export.py --file_name [file name] --ckpt_path [ckpt path] --file_format [file format]
 """
-import argparse
-import numpy as np
-
+import numpy as np
 import mindspore as ms
 from mindspore import Tensor, load_checkpoint, load_param_into_net, export, context
+from src.model_utils.config import config
 from src.shufflenetv1 import ShuffleNetV1
 
-parser = argparse.ArgumentParser(description='ShuffleNetV1 export')
-parser.add_argument("--device_id", type=int, default=0, help="device id")
-parser.add_argument("--batch_size", type=int, default=1, help="batch size")
-parser.add_argument("--ckpt_file", type=str, required=True, help="checkpoint file path.")
-parser.add_argument("--file_name", type=str, default="shufflenetv1", help="output file name.")
-parser.add_argument('--file_format', type=str, choices=["AIR", "ONNX", "MINDIR"], default='AIR', help='file format')
-parser.add_argument("--device_target", type=str, choices=["Ascend", "GPU", "CPU"], default="Ascend",
-                    help="device target")
-parser.add_argument('--model_size', type=str, default='2.0x', choices=['2.0x', '1.5x', '1.0x', '0.5x'],
-                    help='shufflenetv1 model size')
-
-args = parser.parse_args()
-
-context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)
-if args.device_target == "Ascend":
-    context.set_context(device_id=args.device_id)
+context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target)
+if config.device_target == "Ascend":
+    context.set_context(device_id=config.device_id)
 
 if __name__ == '__main__':
-    net = ShuffleNetV1(model_size=args.model_size)
+    net = ShuffleNetV1(model_size=config.model_size)
 
-    param_dict = load_checkpoint(args.ckpt_file)
+    param_dict = load_checkpoint(config.ckpt_path)
     load_param_into_net(net, param_dict)
 
     image_height, image_width = (224, 224)
-    input_arr = Tensor(np.ones([args.batch_size, 3, image_height, image_width]), ms.float32)
-    export(net, input_arr, file_name=args.file_name, file_format=args.file_format)
+    input_arr = Tensor(np.ones([config.batch_size, 3, image_height, image_width]), ms.float32)
+    export(net, input_arr, file_name=config.file_name, file_format=config.file_format)
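With every export setting now coming from `default_config.yaml` (plus the overrides suggested in the new docstring), a quick way to confirm that the produced MINDIR file actually loads is sketched below. This is not part of the commit: the `mindspore.load` / `nn.GraphCell` pattern and running the snippet from the model directory (so that `src/` and `default_config.yaml` resolve as in export.py) are assumptions.

```python
# Post-export smoke test: a sketch only, assuming a MindSpore build that
# provides mindspore.load / nn.GraphCell and a config matching this diff.
import numpy as np
import mindspore as ms
import mindspore.nn as nn
from mindspore import Tensor, context

from src.model_utils.config import config

context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target)

# export() appends the format suffix, so file_name "shufflenetv1" becomes shufflenetv1.mindir.
graph = ms.load(config.file_name + ".mindir")
net = nn.GraphCell(graph)                      # wrap the loaded graph so it is callable

dummy = Tensor(np.ones([config.batch_size, 3, 224, 224]), ms.float32)  # same dummy input as export.py
logits = net(dummy)
print(logits.shape)                            # expected (batch_size, num_classes)
```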
@@ -13,7 +13,7 @@
 # limitations under the License.
 # ============================================================================
 """Data operations, will be used in train.py and eval.py"""
-from src.config import config
+from src.model_utils.config import config
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
 import mindspore.dataset.transforms.c_transforms as C2
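This last hunk is the fix named in the commit title: dataset.py now imports its settings from the same `src.model_utils.config` module used by export.py. The old `src.config` module presumably disappeared with the move to `default_config.yaml`, so train.py and eval.py failed at import time as soon as they touched the dataset code. A minimal local check is sketched below; it is a hypothetical snippet run from the model root, and the printed keys are just the ones visible in the yaml hunks above.

```python
# Hypothetical import smoke test for the fix (not part of the commit).
# Before this change dataset.py imported src.config, which presumably no longer
# exists after the switch to yaml-based configuration, so the import itself raised.
try:
    from src.config import config                     # pre-fix import path
except ImportError as err:                             # ModuleNotFoundError subclasses ImportError
    print("old config path is gone:", err)

from src.model_utils.config import config             # post-fix import path
print(config.epoch_size, config.train_dataset_path)   # values defined in default_config.yaml
```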