forked from mindspore-Ecosystem/mindspore
gpu update resnet in modelzoo
This commit is contained in:
parent 6c4ee3f3d1
commit 73806e0c4b
@@ -241,7 +241,7 @@ result: {'top_5_accuracy': 0.9429417413572343, 'top_1_accuracy': 0.7853513124199
 ### Running on GPU
 ```
 # distributed training example
-mpirun -n 8 python train.py ---net=resnet50 --dataset=cifar10 -dataset_path=~/cifar-10-batches-bin --device_target="GPU" --run_distribute=True
+mpirun -n 8 python train.py --net=resnet50 --dataset=cifar10 --dataset_path=~/cifar-10-batches-bin --device_target="GPU" --run_distribute=True

 # standalone training example
 python train.py --net=resnet50 --dataset=cifar10 --dataset_path=~/cifar-10-batches-bin --device_target="GPU"
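The corrected command drops the stray extra dash in `--net` and the missing dash in `--dataset_path`. Under `mpirun -n 8`, eight copies of train.py start, one per GPU, and `--run_distribute=True` tells the script to initialize NCCL-backed data parallelism and shard the dataset by rank. A minimal sketch of that per-process setup, for illustration only (the exact calls and parallel-context options in train.py may differ across MindSpore versions):

```python
# Sketch of the per-process setup that --run_distribute=True implies on GPU
# (an illustration, not the exact code in train.py).
from mindspore import context
from mindspore.communication.management import init, get_rank, get_group_size

context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
init("nccl")                    # one process per GPU, launched by `mpirun -n 8`
rank_id = get_rank()            # this process's index, 0..7
device_num = get_group_size()   # total number of processes, 8 here
# the dataset is then sharded with num_shards=device_num, shard_id=rank_id
```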
@@ -54,8 +54,10 @@ if __name__ == '__main__':
     target = args_opt.device_target

     # init context
-    device_id = int(os.getenv('DEVICE_ID'))
-    context.set_context(mode=context.GRAPH_MODE, device_target=target, save_graphs=False, device_id=device_id)
+    context.set_context(mode=context.GRAPH_MODE, device_target=target, save_graphs=False)
+    if target != "GPU":
+        device_id = int(os.getenv('DEVICE_ID'))
+        context.set_context(device_id=device_id)

     # create dataset
     dataset = create_dataset(dataset_path=args_opt.dataset_path, do_train=False, batch_size=config.batch_size,
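This evaluation-side hunk stops reading `DEVICE_ID` unconditionally: on GPU the visible device is chosen by the launcher (for example via `CUDA_VISIBLE_DEVICES` under `mpirun`), and `int(os.getenv('DEVICE_ID'))` raises a `TypeError` when the variable is unset. The rewritten logic only binds a device id for non-GPU targets. A small self-contained sketch of the same pattern, wrapped in a hypothetical helper (`init_context` is not in the original, and the `'0'` fallback is added here for illustration):

```python
import os
from mindspore import context

def init_context(target):
    # Same pattern as the hunk above: set the common options first, then bind
    # a device id from DEVICE_ID only when the target is not GPU.
    context.set_context(mode=context.GRAPH_MODE, device_target=target, save_graphs=False)
    if target != "GPU":
        device_id = int(os.getenv('DEVICE_ID', '0'))  # '0' fallback is illustrative only
        context.set_context(device_id=device_id)

# e.g. init_context(args_opt.device_target)
```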
@@ -143,13 +143,21 @@ if __name__ == '__main__':
                       amp_level="O2", keep_batchnorm_fp32=False)
     else:
         # GPU target
-        loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean", is_grad=False,
-                                             smooth_factor=config.label_smooth_factor, num_classes=config.class_num)
+        if args_opt.dataset == "imagenet2012":
+            if not config.use_label_smooth:
+                config.label_smooth_factor = 0.0
+            loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean", is_grad=False,
+                                                 smooth_factor=config.label_smooth_factor, num_classes=config.class_num)
+        else:
+            loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean", is_grad=False,
+                                                 num_classes=config.class_num)
         ## fp32 training
         opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr, config.momentum, config.weight_decay)
         model = Model(net, loss_fn=loss, optimizer=opt, metrics={'acc'})
         ##Mixed precision
-        #model = Model(net, loss_fn=loss, optimizer=opt, metrics={'acc'},
-        #              amp_level="O2", keep_batchnorm_fp32=True)
+        # # Mixed precision
+        # loss_scale = FixedLossScaleManager(config.loss_scale, drop_overflow_update=False)
+        # opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr, config.momentum, config.weight_decay, config.loss_scale)
+        # model = Model(net, loss_fn=loss, optimizer=opt, loss_scale_manager=loss_scale, metrics={'acc'}, amp_level="O2")

         # define callbacks
         time_cb = TimeMonitor(data_size=step_size)
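On the GPU branch the loss now applies label smoothing only for imagenet2012, and only when `config.use_label_smooth` is set, while cifar10 keeps plain sparse cross-entropy; the optimizer stays in fp32 and the O2 mixed-precision path remains commented out. A self-contained sketch of the two loss constructions, using the same MindSpore 0.x-era signature as this diff (`is_grad` and `smooth_factor` were removed in later releases); the tensors and values below are made up for illustration:

```python
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor

num_classes = 10
logits = Tensor(np.random.randn(4, num_classes).astype(np.float32))
labels = Tensor(np.array([1, 3, 5, 7], dtype=np.int32))

# imagenet2012 path: label smoothing folded into the loss via smooth_factor
loss_smooth = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean", is_grad=False,
                                               smooth_factor=0.1, num_classes=num_classes)
# cifar10 path: plain sparse softmax cross-entropy
loss_plain = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean", is_grad=False,
                                              num_classes=num_classes)
print(loss_smooth(logits, labels), loss_plain(logits, labels))
```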