# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
##############test googlenet example on cifar10#################
python eval.py
"""
import os
import time

import mindspore.nn as nn
from mindspore import context
from mindspore.nn.optim.momentum import Momentum
from mindspore.train.model import Model
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.common import set_seed

from src.dataset import create_dataset_cifar10, create_dataset_imagenet

from src.googlenet import GoogleNet
from src.CrossEntropySmooth import CrossEntropySmooth

from model_utils.config import config
from model_utils.moxing_adapter import moxing_wrapper
from model_utils.device_adapter import get_device_id, get_device_num

set_seed(1)
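

# ModelArts helper: when config.need_modelarts_dataset_unzip is set, the first
# device on each server extracts the dataset archive into config.data_path and
# then creates a lock file; the other devices wait for that lock file before
# evaluation starts.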
def modelarts_pre_process():
    '''modelarts pre process function.'''
    def unzip(zip_file, save_dir):
        import zipfile
        s_time = time.time()
        if not os.path.exists(os.path.join(save_dir, config.modelarts_dataset_unzip_name)):
            zip_isexist = zipfile.is_zipfile(zip_file)
            if zip_isexist:
                fz = zipfile.ZipFile(zip_file, 'r')
                data_num = len(fz.namelist())
                print("Extract Start...")
                print("unzip file num: {}".format(data_num))
                data_print = int(data_num / 100) if data_num > 100 else 1
                i = 0
                for file in fz.namelist():
                    if i % data_print == 0:
                        print("unzip percent: {}%".format(int(i * 100 / data_num)), flush=True)
                    i += 1
                    fz.extract(file, save_dir)
                print("cost time: {}min:{}s.".format(int((time.time() - s_time) / 60),
                                                     int(int(time.time() - s_time) % 60)))
                print("Extract Done.")
            else:
                print("This is not a zip file.")
        else:
            print("Zip has been extracted.")

    if config.need_modelarts_dataset_unzip:
        zip_file_1 = os.path.join(config.data_path, config.modelarts_dataset_unzip_name + ".zip")
        save_dir_1 = os.path.join(config.data_path)

        sync_lock = "/tmp/unzip_sync.lock"

        # Each server contains at most 8 devices.
        if get_device_id() % min(get_device_num(), 8) == 0 and not os.path.exists(sync_lock):
            print("Zip file path: ", zip_file_1)
            print("Unzip file save dir: ", save_dir_1)
            unzip(zip_file_1, save_dir_1)
            print("===Finish extract data synchronization===")
            try:
                os.mknod(sync_lock)
            except IOError:
                pass
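
        # All devices (including the one that extracted the data) wait here
        # until the lock file exists, so evaluation starts with the data ready.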
        while True:
            if os.path.exists(sync_lock):
                break
            time.sleep(1)

        print("Device: {}, Finish sync unzip data from {} to {}.".format(get_device_id(), zip_file_1, save_dir_1))
@moxing_wrapper(pre_process=modelarts_pre_process)
def run_eval():
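    '''Evaluate GoogleNet on the configured dataset with a trained checkpoint.'''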
    if config.dataset_name == 'cifar10':
        dataset = create_dataset_cifar10(config.val_data_path, 1, False, cifar_cfg=config)
        loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
        net = GoogleNet(num_classes=config.num_classes)
        opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01, config.momentum,
                       weight_decay=config.weight_decay)
        model = Model(net, loss_fn=loss, optimizer=opt, metrics={'acc'})

    elif config.dataset_name == "imagenet":
        dataset = create_dataset_imagenet(config.val_data_path, 1, False, imagenet_cfg=config)
        if not config.use_label_smooth:
            config.label_smooth_factor = 0.0
        loss = CrossEntropySmooth(sparse=True, reduction="mean",
                                  smooth_factor=config.label_smooth_factor, num_classes=config.num_classes)
        net = GoogleNet(num_classes=config.num_classes)
        model = Model(net, loss_fn=loss, metrics={'top_1_accuracy', 'top_5_accuracy'})

    else:
        raise ValueError("dataset is not supported.")
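
    # Run in graph mode on the configured backend; on Ascend, bind this process
    # to its assigned device id.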
    device_target = config.device_target
    context.set_context(mode=context.GRAPH_MODE, device_target=device_target)
    if device_target == "Ascend":
        context.set_context(device_id=get_device_id())

    param_dict = load_checkpoint(config.checkpoint_path)
    print("load checkpoint from [{}].".format(config.checkpoint_path))
    load_param_into_net(net, param_dict)
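
    # set_train(False) puts the network in inference mode (e.g. dropout layers are disabled).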
    net.set_train(False)
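
    # Run the validation dataset through the model and report the configured metrics.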
    acc = model.eval(dataset)
    print("accuracy: ", acc)


if __name__ == '__main__':
    run_eval()