From 75d77806c094f3540fb90b96d59c6e1ea62c922d Mon Sep 17 00:00:00 2001
From: unknown
Date: Fri, 29 May 2020 12:52:36 +0800
Subject: [PATCH] fix pylint

---
 model_zoo/deeplabv3/README.md     | 5 +----
 model_zoo/deeplabv3/src/losses.py | 2 +-
 model_zoo/deeplabv3/train.py      | 6 +++---
 3 files changed, 5 insertions(+), 8 deletions(-)

diff --git a/model_zoo/deeplabv3/README.md b/model_zoo/deeplabv3/README.md
index fa9c8808102..b178a83e6da 100644
--- a/model_zoo/deeplabv3/README.md
+++ b/model_zoo/deeplabv3/README.md
@@ -1,10 +1,7 @@
 # Deeplab-V3 Example
 
 ## Description
-- This is an example of training DeepLabv3 with PASCAL VOC 2012 dataset in MindSpore.
-- Paper Rethinking Atrous Convolution for Semantic Image Segmentation
-Liang-Chieh Chen, George Papandreou, Florian Schroff, Hartwig Adam
-
+This is an example of training DeepLabv3 with PASCAL VOC 2012 dataset in MindSpore.
 ## Requirements
 
 - Install [MindSpore](https://www.mindspore.cn/install/en).
diff --git a/model_zoo/deeplabv3/src/losses.py b/model_zoo/deeplabv3/src/losses.py
index 051cc1b1f93..af782c2de9c 100644
--- a/model_zoo/deeplabv3/src/losses.py
+++ b/model_zoo/deeplabv3/src/losses.py
@@ -50,7 +50,7 @@ class OhemLoss(nn.Cell):
         losses = self.cross_entropy(logits, one_hot_labels)[0]
         weights = self.cast(self.not_equal(labels, self.ignore_label), mstype.float32) * self.loss_weight
         weighted_losses = self.mul(losses, weights)
-        loss = self.reduce_sum(weighted_losses,(0,))
+        loss = self.reduce_sum(weighted_losses, (0,))
         zeros = self.fill(mstype.float32, self.shape(weights), 0.0)
         ones = self.fill(mstype.float32, self.shape(weights), 1.0)
         present = self.select(self.equal(weights, zeros), zeros, ones)
diff --git a/model_zoo/deeplabv3/train.py b/model_zoo/deeplabv3/train.py
index a0f5b01d0d7..2135b0abf55 100644
--- a/model_zoo/deeplabv3/train.py
+++ b/model_zoo/deeplabv3/train.py
@@ -80,9 +80,9 @@ if __name__ == "__main__":
     ckpoint_cb = ModelCheckpoint(prefix='checkpoint_deeplabv3', config=config_ck)
     callback.append(ckpoint_cb)
     net = deeplabv3_resnet50(config.seg_num_classes, [args_opt.batch_size, 3, args_opt.crop_size, args_opt.crop_size],
-                            infer_scale_sizes=config.eval_scales, atrous_rates=config.atrous_rates,
-                            decoder_output_stride=config.decoder_output_stride, output_stride=config.output_stride,
-                            fine_tune_batch_norm=config.fine_tune_batch_norm, image_pyramid=config.image_pyramid)
+                             infer_scale_sizes=config.eval_scales, atrous_rates=config.atrous_rates,
+                             decoder_output_stride=config.decoder_output_stride, output_stride=config.output_stride,
+                             fine_tune_batch_norm=config.fine_tune_batch_norm, image_pyramid=config.image_pyramid)
     net.set_train()
     model_fine_tune(args_opt, net, 'layer')
     loss = OhemLoss(config.seg_num_classes, config.ignore_label)
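
The losses.py and train.py hunks are pure style fixes for pylint: a space after the comma in the reduce_sum call, and wrapped keyword arguments re-aligned with the opening parenthesis of the deeplabv3_resnet50 call. A minimal sketch of the two conventions, using a hypothetical build_net helper rather than the real network constructor:

    # Hypothetical helper, used only to illustrate the pylint style this
    # patch enforces; it is not part of the DeepLabv3 code.
    def build_net(num_classes, input_shape, atrous_rates=None, output_stride=16):
        return {"classes": num_classes, "shape": input_shape,
                "rates": atrous_rates, "stride": output_stride}

    # A space after every comma, and continuation lines aligned with the
    # opening parenthesis of the call being wrapped.
    net = build_net(21, [16, 3, 513, 513],
                    atrous_rates=[6, 12, 18],
                    output_stride=16)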