Adapt the resnet50_adv_pruning network to MindSpore 1.0.0

zhanghuiyao 2020-12-23 17:03:09 +08:00
parent e06048bfe0
commit f4060607df
5 changed files with 47 additions and 64 deletions

View File

@ -4,14 +4,14 @@
- [Dataset](#dataset)
- [Environment Requirements](#environment-requirements)
- [Script Description](#script-description)
    - [Script and Sample Code](#script-and-sample-code)
    - [Training Process](#training-process)
    - [Evaluation Process](#evaluation-process)
        - [Evaluation](#evaluation)
- [Model Description](#model-description)
    - [Performance](#performance)
        - [Training Performance](#evaluation-performance)
        - [Inference Performance](#evaluation-performance)
- [Description of Random Situation](#description-of-random-situation)
- [ModelZoo Homepage](#modelzoo-homepage)
@ -26,20 +26,20 @@ The Adversarial Pruning method is a reliable neural network pruning algorithm by
Dataset used: [Oxford-IIIT Pet](https://www.robots.ox.ac.uk/~vgg/data/pets/)
- Dataset size: 7049 color images in 37 classes
    - Train: 3680 images
    - Test: 3369 images
- Data format: RGB images.
    - Note: Data will be processed in src/dataset.py
# [Environment Requirements](#contents)
- Hardware (Ascend/GPU)
    - Prepare hardware environment with Ascend or GPU processor. If you want to try Ascend, please send the [application form](https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/file/other/Ascend%20Model%20Zoo%E4%BD%93%E9%AA%8C%E8%B5%84%E6%BA%90%E7%94%B3%E8%AF%B7%E8%A1%A8.docx) to ascend@huawei.com. Once approved, you can get the resources.
- Framework
    - [MindSpore](https://www.mindspore.cn/install/en)
- For more information, please check the resources below
    - [MindSpore Tutorials](https://www.mindspore.cn/tutorial/training/en/master/index.html)
    - [MindSpore Python API](https://www.mindspore.cn/doc/api_python/en/master/index.html)
# [Script description](#contents)
@ -58,6 +58,7 @@ Dataset used: [Oxford-IIIT Pet](https://www.robots.ox.ac.uk/~vgg/data/pets/)
```
## [Training process](#contents)
To Be Done
## [Eval process](#contents)
@ -66,12 +67,11 @@ To Be Done
After installing MindSpore via the official website, you can start evaluation as follows:
### Launch
-```
-# infer example
+```bash
+# infer example
Ascend: python eval.py --dataset_path ~/Pets/test.mindrecord --platform Ascend --checkpoint_path [CHECKPOINT_PATH]
GPU: python eval.py --dataset_path ~/Pets/test.mindrecord --platform GPU --checkpoint_path [CHECKPOINT_PATH]
```
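
For orientation, here is a minimal sketch of the command-line interface these launch commands assume. The flag names are taken from the commands above; the parser itself lives in eval.py and the defaults/help strings here are illustrative only.

```python
# Hypothetical argument parser matching the flags used in the launch commands above.
import argparse

parser = argparse.ArgumentParser(description='ResNet50-0.65x evaluation')
parser.add_argument('--dataset_path', type=str, required=True,
                    help='path to the test mindrecord, e.g. ~/Pets/test.mindrecord')
parser.add_argument('--platform', type=str, default='Ascend', choices=('Ascend', 'GPU'),
                    help='run on Ascend or GPU')
parser.add_argument('--checkpoint_path', type=str, default=None,
                    help='checkpoint to evaluate, e.g. ./resnet50-imgnet-0.65x-80.24.ckpt')
args_opt = parser.parse_args()
```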
@ -80,18 +80,18 @@ After installing MindSpore via the official website, you can start evaluation as
### Result
-```
+```python
result: {'acc': 0.8023984736985554} ckpt= ./resnet50-imgnet-0.65x-80.24.ckpt
```
# [Model Description](#contents)
## [Performance](#contents)
-#### Evaluation Performance
-###### ResNet50-0.65x on ImageNet2012
+### Evaluation Performance
+#### ResNet50-0.65x on ImageNet2012
| Parameters | |
| -------------------------- | -------------------------------------- |
| Model Version | ResNet50-0.65x |
@ -102,7 +102,8 @@ result: {'acc': 0.8023984736985554} ckpt= ./resnet50-imgnet-0.65x-80.24.ckpt
| FLOPs (G) | 2.1 |
| Accuracy (Top1) | 75.80 |
-###### ResNet50-0.65x on Oxford-IIIT Pet
+#### ResNet50-0.65x on Oxford-IIIT Pet
| Parameters | |
| -------------------------- | -------------------------------------- |
| Model Version | ResNet50-0.65x |

View File

@ -26,7 +26,7 @@ from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.common import dtype as mstype
from src.pet_dataset import create_dataset
-from src.config import config_ascend, config_gpu
+from src.config import cfg
from src.resnet_imgnet import resnet50
@ -40,14 +40,12 @@ args_opt = parser.parse_args()
if __name__ == '__main__':
-    config_platform = None
+    config_platform = cfg
    if args_opt.platform == "Ascend":
-        config_platform = config_ascend
        device_id = int(os.getenv('DEVICE_ID'))
        context.set_context(mode=context.GRAPH_MODE, device_target="Ascend",
                            device_id=device_id, save_graphs=False)
    elif args_opt.platform == "GPU":
-        config_platform = config_gpu
        context.set_context(mode=context.GRAPH_MODE,
                            device_target="GPU", save_graphs=False)
    else:
@ -55,12 +53,6 @@ if __name__ == '__main__':
    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
-    if args_opt.platform == "Ascend":
-        net.to_float(mstype.float16)
-        for _, cell in net.cells_and_names():
-            if isinstance(cell, nn.Dense):
-                cell.to_float(mstype.float32)
    dataset = create_dataset(dataset_path=args_opt.dataset_path,
                             do_train=False,
                             config=config_platform,
@ -76,6 +68,13 @@ if __name__ == '__main__':
    net = resnet50(
        rate=0.65, class_num=config_platform.num_classes, index=index)
+    if args_opt.platform == "Ascend":
+        net.to_float(mstype.float16)
+        for _, cell in net.cells_and_names():
+            if isinstance(cell, nn.Dense):
+                cell.to_float(mstype.float32)
    if args_opt.checkpoint_path:
        param_dict = load_checkpoint(args_opt.checkpoint_path)
        load_param_into_net(net, param_dict)
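
The relocated cast above is the core of this change: under MindSpore 1.0.0 the float16 conversion is applied to the freshly built network, with Dense layers kept in float32, before the checkpoint is loaded. A minimal sketch of the resulting evaluation flow, assuming the same cfg, create_dataset and resnet50 helpers from src; the Model/metrics wrap-up at the end is an assumption about how the script finishes, not shown in this hunk.

```python
# Sketch of the MindSpore 1.0.0 eval flow; cfg, create_dataset, resnet50,
# args_opt and `index` come from the surrounding script, Model(...) is assumed.
import mindspore.nn as nn
from mindspore.common import dtype as mstype
from mindspore.train.model import Model
from mindspore.train.serialization import load_checkpoint, load_param_into_net

net = resnet50(rate=0.65, class_num=cfg.num_classes, index=index)

if args_opt.platform == "Ascend":
    net.to_float(mstype.float16)              # run the backbone in float16 on Ascend
    for _, cell in net.cells_and_names():
        if isinstance(cell, nn.Dense):
            cell.to_float(mstype.float32)     # keep the classifier in float32

if args_opt.checkpoint_path:                  # cast first, then load the checkpoint
    param_dict = load_checkpoint(args_opt.checkpoint_path)
    load_param_into_net(net, param_dict)

loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
dataset = create_dataset(dataset_path=args_opt.dataset_path, do_train=False,
                         config=cfg, platform=args_opt.platform, batch_size=100)
model = Model(net, loss_fn=loss, metrics={'acc'})
print(model.eval(dataset))
```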

View File

@ -17,25 +17,7 @@ network config setting, will be used in train.py and eval.py
"""
from easydict import EasyDict as ed
-config_ascend = ed({
-    "num_classes": 438,
-    "image_height": 224,
-    "image_width": 224,
-    "batch_size": 256,
-    "epoch_size": 200,
-    "warmup_epochs": 1,
-    "lr": 0.02,
-    "momentum": 0.9,
-    "weight_decay": 4e-5,
-    "label_smooth": 0.1,
-    "loss_scale": 1024,
-    "save_checkpoint": True,
-    "save_checkpoint_epochs": 5,
-    "keep_checkpoint_max": 200,
-    "save_checkpoint_path": "./checkpoint",
-})
-config_gpu = ed({
+cfg = ed({
    "num_classes": 37,
    "image_height": 224,
    "image_width": 224,

View File

@ -18,10 +18,11 @@ create train or eval dataset.
import os
import mindspore.common.dtype as mstype
import mindspore.dataset.engine as de
-import mindspore.dataset.transforms.vision.c_transforms as C
-import mindspore.dataset.transforms.vision.py_transforms as P
+import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision.py_transforms as P
import mindspore.dataset.transforms.c_transforms as C2
-from mindspore.dataset.transforms.vision import Inter
+import mindspore.dataset.transforms.py_transforms as P2
+from mindspore.dataset.vision import Inter
def create_dataset(dataset_path, do_train, config, platform, repeat_num=1, batch_size=100):
@ -78,13 +79,13 @@ def create_dataset(dataset_path, do_train, config, platform, repeat_num=1, batch
    center_crop_p = P.CenterCrop(224)
    totensor = P.ToTensor()
    normalize_p = P.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
-    composeop = P.ComposeOp(
+    composeop = P2.Compose(
        [decode_p, resize_p, center_crop_p, totensor, normalize_p])
    if do_train:
        trans = [resize_crop_op, horizontal_flip_op, color_op,
                 rescale_op, normalize_op, change_swap_op]
    else:
-        trans = composeop()
+        trans = composeop
    type_cast_op = C2.TypeCast(mstype.int32)
    ds = ds.map(input_columns="image", operations=trans,
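
In MindSpore 1.0.0 the vision transforms moved from mindspore.dataset.transforms.vision to mindspore.dataset.vision, and ComposeOp was replaced by transforms.py_transforms.Compose, which is passed to map() directly instead of being called first. A minimal sketch of the eval-side pipeline with the new module layout; the MindRecord source, the "label" column name and the Resize(256) size are assumptions for illustration, not taken from this hunk.

```python
# Eval preprocessing under the MindSpore 1.0.0 module layout (sizes/column names assumed).
import mindspore.common.dtype as mstype
import mindspore.dataset.engine as de
import mindspore.dataset.vision.py_transforms as P          # was mindspore.dataset.transforms.vision.py_transforms
import mindspore.dataset.transforms.py_transforms as P2     # Compose replaces the old P.ComposeOp
import mindspore.dataset.transforms.c_transforms as C2

composeop = P2.Compose([
    P.Decode(),
    P.Resize(256),                      # resize size assumed
    P.CenterCrop(224),
    P.ToTensor(),
    P.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])

ds = de.MindDataset("~/Pets/test.mindrecord", columns_list=["image", "label"], shuffle=False)
# 1.0.0: pass the Compose object itself; the old ComposeOp had to be called to obtain the op.
ds = ds.map(input_columns="image", operations=composeop)
ds = ds.map(input_columns="label", operations=C2.TypeCast(mstype.int32))
ds = ds.batch(100, drop_remainder=True)
```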

View File

@ -327,21 +327,21 @@ class ResNet(nn.Cell):
        for _, m in self.cells_and_names():
            if isinstance(m, (nn.Conv2d)):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
-                m.weight.set_parameter_data(Tensor(np.random.normal(0, np.sqrt(2. / n),
-                                                                     m.weight.data.shape).astype("float32")))
+                m.weight.set_data(Tensor(np.random.normal(0, np.sqrt(2. / n),
+                                                           m.weight.data.shape).astype("float32")))
                if m.bias is not None:
-                    m.bias.set_parameter_data(
+                    m.bias.set_data(
                        Tensor(np.zeros(m.bias.data.shape, dtype="float32")))
            elif isinstance(m, nn.BatchNorm2d):
-                m.gamma.set_parameter_data(
+                m.gamma.set_data(
                    Tensor(np.ones(m.gamma.data.shape, dtype="float32")))
-                m.beta.set_parameter_data(
+                m.beta.set_data(
                    Tensor(np.zeros(m.beta.data.shape, dtype="float32")))
            elif isinstance(m, nn.Dense):
-                m.weight.set_parameter_data(Tensor(np.random.normal(
+                m.weight.set_data(Tensor(np.random.normal(
                    0, 0.01, m.weight.data.shape).astype("float32")))
                if m.bias is not None:
-                    m.bias.set_parameter_data(
+                    m.bias.set_data(
                        Tensor(np.zeros(m.bias.data.shape, dtype="float32")))
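
The only change in this file is the MindSpore 1.0.0 rename of Parameter.set_parameter_data to Parameter.set_data; the call still takes a Tensor matching the parameter's shape and dtype. A tiny standalone illustration on a single Dense layer, with made-up layer sizes.

```python
# Minimal demo of Parameter.set_data, the 1.0.0 replacement for set_parameter_data.
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor

fc = nn.Dense(2048, 37)  # illustrative sizes only
fc.weight.set_data(Tensor(np.random.normal(0, 0.01, fc.weight.data.shape).astype("float32")))
if fc.bias is not None:
    fc.bias.set_data(Tensor(np.zeros(fc.bias.data.shape, dtype="float32")))
```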