!674 [pylint] clean pylint warning

Merge pull request !674 from jinyaohui/clean_pylint_0425
Authored by mindspore-ci-bot on 2020-04-26 09:31:28 +08:00, committed via Gitee
commit e40dc39a14
9 changed files with 37 additions and 33 deletions

View File

@@ -18,8 +18,8 @@ from __future__ import division
import os
import numpy as np
from PIL import Image
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb
from PIL import Image
import mindspore.dataset as de
from mindspore.mindrecord import FileWriter
import mindspore.dataset.transforms.vision.c_transforms as C
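
The hunk above only regroups the module header so the third-party imports sit together, which is the layout pylint's wrong-import-order and ungrouped-imports checks ask for. Below is a minimal sketch of that grouping, using only the module names visible in the hunk; the relative order of the two third-party lines is one plausible arrangement, not necessarily the one the commit chose.

# Standard-library imports come first.
import os

# Third-party imports form the next group, kept together.
import numpy as np
from PIL import Image
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb

# Framework / project imports come last.
import mindspore.dataset as de
import mindspore.dataset.transforms.vision.c_transforms as C
from mindspore.mindrecord import FileWriter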

View File

@@ -16,6 +16,9 @@
from __future__ import absolute_import as _abs
import sys
import os
from .op_build import op_build
from .message import compilewithjson
def AKGAddPath():
"""_akg add path."""
@@ -58,6 +61,3 @@ class AKGMetaPathLoader:
sys.meta_path.insert(0, AKGMetaPathFinder())
from .op_build import op_build
from .message import compilewithjson
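
This pair of hunks moves "from .op_build import op_build" and "from .message import compilewithjson" from the bottom of the module to the top (pylint's wrong-import-position warning) while keeping the sys.meta_path.insert(0, AKGMetaPathFinder()) hook. For context, here is a small generic sketch of that meta-path mechanism; it is not MindSpore's AKG finder, and the virtual_pkg prefix plus the json redirect target are invented for illustration.

# Generic illustration of a sys.meta_path import hook (not the AKG classes above).
# Importing anything under the made-up name virtual_pkg is redirected to the real
# standard-library json package, e.g. "import virtual_pkg.decoder" yields json.decoder.
import sys
import importlib
import importlib.abc
import importlib.util


class AliasFinder(importlib.abc.MetaPathFinder):
    """Claim imports whose name starts with virtual_pkg and alias them to json."""

    def find_spec(self, fullname, path, target=None):
        if fullname == "virtual_pkg" or fullname.startswith("virtual_pkg."):
            real_name = "json" + fullname[len("virtual_pkg"):]
            return importlib.util.spec_from_loader(fullname, AliasLoader(real_name))
        return None  # let the normal import machinery handle everything else


class AliasLoader(importlib.abc.Loader):
    def __init__(self, real_name):
        self.real_name = real_name

    def create_module(self, spec):
        # Import the real module and hand it back under the aliased name.
        return importlib.import_module(self.real_name)

    def exec_module(self, module):
        pass  # the real import above already initialised the module


sys.meta_path.insert(0, AliasFinder())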

View File

@@ -14,7 +14,6 @@
# ============================================================================
"""FTRL"""
from mindspore.ops import functional as F, composite as C, operations as P
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.common import Tensor
import mindspore.common.dtype as mstype
@@ -23,6 +22,8 @@ from mindspore._checkparam import Rel
from .optimizer import Optimizer, apply_decay, grad_scale
ftrl_opt = C.MultitypeFuncGraph("ftrl_opt")
@ftrl_opt.register("Function", "Tensor", "Number", "Number", "Number", "Tensor", "Tensor", "Tensor", "Tensor")
def _tensor_run_opt(opt, learning_rate, l1, l2, lr_power, linear, gradient, weight, moment):
"""Apply ftrl optimizer to the weight parameter."""
@@ -30,8 +31,10 @@ def _tensor_run_opt(opt, learning_rate, l1, l2, lr_power, linear, gradient, weig
success = F.depend(success, opt(weight, moment, linear, gradient, learning_rate, l1, l2, lr_power))
return success
def _check_param(initial_accum, learning_rate, lr_power, l1, l2, use_locking, loss_scale=1.0, weight_decay=0.0,
prim_name=None):
"""Check param."""
validator.check_value_type("initial_accum", initial_accum, [float], prim_name)
validator.check_number("initial_accum", initial_accum, 0.0, Rel.GE, prim_name)
@@ -118,5 +121,6 @@ class FTRL(Optimizer):
if self.reciprocal_scale != 1.0:
grads = self.hyper_map(F.partial(grad_scale, self.reciprocal_scale), grads)
lr = self.learning_rate
success = self.hyper_map(F.partial(ftrl_opt, self.opt, lr, self.l1, self.l2, self.lr_power), linear, grads, params, moments)
success = self.hyper_map(F.partial(ftrl_opt, self.opt, lr, self.l1, self.l2, self.lr_power),
linear, grads, params, moments)
return success
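
Besides trimming the import block and wrapping the long hyper_map call onto two lines, these hunks keep the pattern of registering _tensor_run_opt on a MultitypeFuncGraph and broadcasting it over the tuples of optimizer state via self.hyper_map(F.partial(...), linear, grads, params, moments). The plain-Python analogy below shows what that combination does conceptually; it is not MindSpore API, and the arithmetic is a placeholder, since the real FTRL update happens inside the fused self.opt primitive.

# Plain-Python analogy of the F.partial + hyper_map pattern above: bind the per-step
# constants once, then apply the bound update elementwise across parallel sequences
# of optimizer state. Placeholder arithmetic only.
from functools import partial


def tensor_run_opt(lr, l1, l2, lr_power, linear, gradient, weight, moment):
    """Stand-in for the registered _tensor_run_opt kernel (placeholder update rule)."""
    return weight - lr * gradient


def hyper_map(fn, *sequences):
    """Apply fn across parallel sequences, like HyperMap over tuples of tensors."""
    return tuple(fn(*args) for args in zip(*sequences))


linear, grads, params, moments = [0.0], [0.5], [1.0], [0.0]
bound = partial(tensor_run_opt, 0.1, 0.0, 0.0, -0.5)  # lr, l1, l2, lr_power
print(hyper_map(bound, linear, grads, params, moments))  # (0.95,)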

View File

@@ -41,6 +41,7 @@ class OutputTo16(nn.Cell):
def _do_keep_batchnorm_fp32(network):
"""Do keep batchnorm fp32."""
cells = network.name_cells()
change = False
for name in cells:
@@ -68,6 +69,7 @@ _config_level = {
def _check_kwargs(key_words):
"""Check kwargs."""
for arg in key_words:
if arg not in ['cast_model_type', 'keep_batchnorm_fp32', 'loss_scale_manager']:
raise ValueError(f"Unsupported arg '{arg}'")
@@ -84,6 +86,7 @@ def _check_kwargs(key_words):
def _add_loss_network(network, loss_fn, cast_model_type):
"""Add loss network."""
class WithLossCell(nn.Cell):
"Wrap loss for amp. Cast network output back to float32"

View File

@@ -683,6 +683,7 @@ class LossMonitor(Callback):
class TimeMonitor(Callback):
"""Time Monitor."""
def __init__(self, data_size):
super(TimeMonitor, self).__init__()
self.data_size = data_size
@@ -701,4 +702,3 @@ class TimeMonitor(Callback):
def step_end(self, run_context):
step_mseconds = (time.time() - self.step_time) * 1000
print('step time', step_mseconds, flush=True)
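
TimeMonitor measures per-step time and prints the elapsed milliseconds from step_end, as the lines above show. The sketch below is a callback in the same style that times whole epochs instead; the class name is made up, and the mindspore.train.callback import path is an assumption based on the classes this hunk edits.

# Epoch-level timer in the same style as TimeMonitor (illustrative, not part of the diff).
import time

from mindspore.train.callback import Callback


class EpochTimeMonitor(Callback):
    """Print how long each training epoch takes, in milliseconds."""

    def __init__(self):
        super(EpochTimeMonitor, self).__init__()
        self.epoch_time = 0.0

    def epoch_begin(self, run_context):
        self.epoch_time = time.time()

    def epoch_end(self, run_context):
        epoch_mseconds = (time.time() - self.epoch_time) * 1000
        print('epoch time', epoch_mseconds, flush=True)

Such callbacks are handed to training through the callbacks argument of Model.train.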

View File

@@ -174,8 +174,7 @@ test_sets = [
embedding_shape=[1, 128, 768],
use_one_hot_embeddings=True,
initializer_range=0.02), 1, 1), {
'init_param_with': lambda shp: np.ones(shp).astype(np.float32)
}),
'init_param_with': lambda shp: np.ones(shp).astype(np.float32)}),
'desc_inputs': [input_ids],
'desc_bprop': [[128]]}),
('EmbeddingLookup_multi_outputs_init_param', {
@@ -184,8 +183,7 @@ test_sets = [
embedding_shape=[1, 128, 768],
use_one_hot_embeddings=False,
initializer_range=0.02), {
'init_param_with': lambda shp: np.ones(shp).astype(np.float32)
}),
'init_param_with': lambda shp: np.ones(shp).astype(np.float32)}),
'desc_inputs': [input_ids],
'desc_bprop': [[1, 128, 768], [128]]}),
('EmbeddingLookup_multi_outputs_grad_with_no_sens', {
@@ -194,8 +192,7 @@ test_sets = [
embedding_shape=[1, 128, 768],
use_one_hot_embeddings=False,
initializer_range=0.02), {
'init_param_with': lambda shp: np.ones(shp).astype(np.float32)
}),
'init_param_with': lambda shp: np.ones(shp).astype(np.float32)}),
'desc_inputs': [input_ids]}),
('GetMaskedLMOutput_grad_with_no_sens', {
'block': GetMaskedLMOutput(BertConfig(batch_size=1)),