forked from mindspore-Ecosystem/mindspore
!674 [pylint] clean pylint warning
Merge pull request !674 from jinyaohui/clean_pylint_0425
Commit e40dc39a14
@@ -18,8 +18,8 @@ from __future__ import division

 import os
 import numpy as np
-from PIL import Image
 from matplotlib.colors import rgb_to_hsv, hsv_to_rgb
+from PIL import Image
 import mindspore.dataset as de
 from mindspore.mindrecord import FileWriter
 import mindspore.dataset.transforms.vision.c_transforms as C
@@ -16,6 +16,9 @@
 from __future__ import absolute_import as _abs
 import sys
 import os
+from .op_build import op_build
+from .message import compilewithjson
+

 def AKGAddPath():
     """_akg add path."""
@@ -58,6 +61,3 @@ class AKGMetaPathLoader:


 sys.meta_path.insert(0, AKGMetaPathFinder())
-
-from .op_build import op_build
-from .message import compilewithjson
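For context: the two hunks above hoist `from .op_build import op_build` and `from .message import compilewithjson` from the bottom of the module to the top, presumably to satisfy pylint's import-position checks. The line left in context, `sys.meta_path.insert(0, AKGMetaPathFinder())`, installs a custom import hook. The sketch below shows the general shape of such a hook; it uses the legacy find_module/load_module protocol that code of this era relied on (removed in Python 3.12), and the class and prefix names are invented, not the _akg implementation.

# Illustrative sketch only: a sys.meta_path hook in the spirit of
# AKGMetaPathFinder/AKGMetaPathLoader above. Uses the legacy
# find_module/load_module protocol (as code of this era did); Python 3.12
# dropped that protocol. 'alias_pkg' and the class names are invented.
import importlib
import sys


class AliasFinder:
    """Claim imports of 'alias_pkg.<name>' and hand them to AliasLoader."""

    def find_module(self, fullname, path=None):
        if fullname.startswith("alias_pkg."):
            return AliasLoader(fullname[len("alias_pkg."):])
        return None  # let the normal import machinery handle everything else


class AliasLoader:
    def __init__(self, real_name):
        self.real_name = real_name

    def load_module(self, fullname):
        # Load the real module and register it under the aliased name,
        # as the loader protocol requires.
        module = importlib.import_module(self.real_name)
        sys.modules[fullname] = module
        return module


# Highest priority, exactly like sys.meta_path.insert(0, AKGMetaPathFinder()).
sys.meta_path.insert(0, AliasFinder())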
@@ -14,7 +14,6 @@
 # ============================================================================
 """FTRL"""
 from mindspore.ops import functional as F, composite as C, operations as P
-from mindspore.common.initializer import initializer
 from mindspore.common.parameter import Parameter
 from mindspore.common import Tensor
 import mindspore.common.dtype as mstype
@@ -23,6 +22,8 @@ from mindspore._checkparam import Rel
 from .optimizer import Optimizer, apply_decay, grad_scale

 ftrl_opt = C.MultitypeFuncGraph("ftrl_opt")
+
+
 @ftrl_opt.register("Function", "Tensor", "Number", "Number", "Number", "Tensor", "Tensor", "Tensor", "Tensor")
 def _tensor_run_opt(opt, learning_rate, l1, l2, lr_power, linear, gradient, weight, moment):
     """Apply ftrl optimizer to the weight parameter."""
@@ -30,8 +31,10 @@ def _tensor_run_opt(opt, learning_rate, l1, l2, lr_power, linear, gradient, weight, moment):
     success = F.depend(success, opt(weight, moment, linear, gradient, learning_rate, l1, l2, lr_power))
     return success

+
 def _check_param(initial_accum, learning_rate, lr_power, l1, l2, use_locking, loss_scale=1.0, weight_decay=0.0,
                  prim_name=None):
+    """Check param."""
     validator.check_value_type("initial_accum", initial_accum, [float], prim_name)
     validator.check_number("initial_accum", initial_accum, 0.0, Rel.GE, prim_name)

@@ -118,5 +121,6 @@ class FTRL(Optimizer):
         if self.reciprocal_scale != 1.0:
             grads = self.hyper_map(F.partial(grad_scale, self.reciprocal_scale), grads)
         lr = self.learning_rate
-        success = self.hyper_map(F.partial(ftrl_opt, self.opt, lr, self.l1, self.l2, self.lr_power), linear, grads, params, moments)
+        success = self.hyper_map(F.partial(ftrl_opt, self.opt, lr, self.l1, self.l2, self.lr_power),
+                                 linear, grads, params, moments)
         return success
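For context: in the FTRL hunks above, `ftrl_opt` is a MultitypeFuncGraph, `_tensor_run_opt` is one overload registered on it, and `FTRL.construct` applies it across all parameters with `self.hyper_map(F.partial(ftrl_opt, ...), linear, grads, params, moments)`. The sketch below shows that dispatch-and-map mechanism with made-up names; it is illustrative, not the optimizer code, and assumes a MindSpore version in which MultitypeFuncGraph and HyperMap can be called eagerly as in the official examples.

# Minimal sketch of the MultitypeFuncGraph + HyperMap pattern used by ftrl_opt
# above. 'add' and its overloads are invented names for illustration.
import numpy as np
from mindspore import Tensor
from mindspore.ops import composite as C

add = C.MultitypeFuncGraph("add")


@add.register("Number", "Number")
def _add_numbers(x, y):
    # Selected when both arguments are Python numbers.
    return x + y


@add.register("Tensor", "Tensor")
def _add_tensors(x, y):
    # Selected when both arguments are Tensors.
    return x + y


hyper_map = C.HyperMap()
# Applies 'add' pairwise across the two tuples, the same way FTRL.construct
# maps ftrl_opt over (linear, grads, params, moments). FTRL additionally uses
# F.partial to bind the shared scalars (lr, l1, l2, lr_power) up front.
scalar_out = hyper_map(add, (1, 2), (3, 4))
tensor_out = add(Tensor(np.ones((2, 2), np.float32)), Tensor(np.ones((2, 2), np.float32)))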
@@ -41,6 +41,7 @@ class OutputTo16(nn.Cell):


 def _do_keep_batchnorm_fp32(network):
+    """Do keep batchnorm fp32."""
     cells = network.name_cells()
     change = False
     for name in cells:
@@ -68,6 +69,7 @@ _config_level = {


 def _check_kwargs(key_words):
+    """Check kwargs."""
     for arg in key_words:
         if arg not in ['cast_model_type', 'keep_batchnorm_fp32', 'loss_scale_manager']:
             raise ValueError(f"Unsupported arg '{arg}'")
@@ -84,6 +86,7 @@ def _check_kwargs(key_words):


 def _add_loss_network(network, loss_fn, cast_model_type):
+    """Add loss network."""
     class WithLossCell(nn.Cell):
         "Wrap loss for amp. Cast network output back to float32"

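For context: the amp hunks above only add the one-line docstrings pylint asked for. The `_check_kwargs` allow-list pattern itself is plain Python; here is a standalone sketch of the same shape (the names are generic, not the mindspore amp API):

# Standalone sketch of the allow-list check that _check_kwargs performs above.
# 'ALLOWED_KEYS' and 'check_kwargs' are illustrative names.
ALLOWED_KEYS = ('cast_model_type', 'keep_batchnorm_fp32', 'loss_scale_manager')


def check_kwargs(key_words):
    """Reject any keyword argument that is not on the allow-list."""
    for arg in key_words:
        if arg not in ALLOWED_KEYS:
            raise ValueError(f"Unsupported arg '{arg}'")


check_kwargs({'keep_batchnorm_fp32': True})  # accepted
try:
    check_kwargs({'keep_batchnorm_fp32': True, 'typo_arg': 1})
except ValueError as err:
    print(err)  # Unsupported arg 'typo_arg'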
@@ -683,6 +683,7 @@ class LossMonitor(Callback):


 class TimeMonitor(Callback):
+    """Time Monitor."""
     def __init__(self, data_size):
         super(TimeMonitor, self).__init__()
         self.data_size = data_size
@@ -701,4 +702,3 @@ class TimeMonitor(Callback):
     def step_end(self, run_context):
         step_mseconds = (time.time() - self.step_time) * 1000
         print('step time', step_mseconds, flush=True)
-
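For context: `TimeMonitor.step_end` above reads `self.step_time`, which has to be set by a `step_begin` hook that this diff does not show. Below is a self-contained sketch of a step-timing callback in the same spirit; the class name is invented, and the `step_begin` method is an inference about the surrounding class, not part of the hunk.

# Sketch of a step-timing callback modelled on TimeMonitor above. The
# step_begin hook is assumed; only __init__ and step_end appear in the diff.
import time

from mindspore.train.callback import Callback


class StepTimer(Callback):
    """Print how long each training step takes, in milliseconds."""

    def __init__(self, data_size):
        super(StepTimer, self).__init__()
        self.data_size = data_size      # steps per epoch, kept for reporting
        self.step_time = time.time()    # fallback if step_begin never fires

    def step_begin(self, run_context):
        self.step_time = time.time()

    def step_end(self, run_context):
        step_mseconds = (time.time() - self.step_time) * 1000
        print('step time', step_mseconds, flush=True)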
@@ -174,8 +174,7 @@ test_sets = [
             embedding_shape=[1, 128, 768],
             use_one_hot_embeddings=True,
             initializer_range=0.02), 1, 1), {
-            'init_param_with': lambda shp: np.ones(shp).astype(np.float32)
-        }),
+            'init_param_with': lambda shp: np.ones(shp).astype(np.float32)}),
     'desc_inputs': [input_ids],
     'desc_bprop': [[128]]}),
    ('EmbeddingLookup_multi_outputs_init_param', {
@@ -184,8 +183,7 @@ test_sets = [
             embedding_shape=[1, 128, 768],
             use_one_hot_embeddings=False,
             initializer_range=0.02), {
-            'init_param_with': lambda shp: np.ones(shp).astype(np.float32)
-        }),
+            'init_param_with': lambda shp: np.ones(shp).astype(np.float32)}),
     'desc_inputs': [input_ids],
     'desc_bprop': [[1, 128, 768], [128]]}),
    ('EmbeddingLookup_multi_outputs_grad_with_no_sens', {
@@ -194,8 +192,7 @@ test_sets = [
             embedding_shape=[1, 128, 768],
             use_one_hot_embeddings=False,
             initializer_range=0.02), {
-            'init_param_with': lambda shp: np.ones(shp).astype(np.float32)
-        }),
+            'init_param_with': lambda shp: np.ones(shp).astype(np.float32)}),
     'desc_inputs': [input_ids]}),
    ('GetMaskedLMOutput_grad_with_no_sens', {
     'block': GetMaskedLMOutput(BertConfig(batch_size=1)),