diff --git a/mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py b/mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py index e95a9ba069a..c626778b594 100644 --- a/mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +++ b/mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py @@ -244,8 +244,8 @@ def check_supported(input_x1, input_x2, bias=None, output_y={}, trans_a=False, t return True - -# pylint: disable=locally-disabled,too-many-arguments, too-many-locals, too-many-statements +# pylint: disable=locally-disabled,too-many-arguments, too-many-locals, too-many-statements, +# pylint: disable=inconsistent-return-statements # @util.check_input_type(dict, dict, (dict, NoneType), dict, bool, bool, str) @op_info_register(matmul_cube_dense_left_op_info) def CusMatMulCubeDenseLeft(input_x1, input_x2, bias=None, output_y={}, trans_a=False, trans_b=False, diff --git a/mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py b/mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py index 4a1982738d6..c349c5705cf 100644 --- a/mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py +++ b/mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py @@ -40,6 +40,7 @@ matmul_cube_dense_right_op_info = TBERegOp("CusMatMulCubeDenseRight") \ .get_op_info() +# pylint: disable=inconsistent-return-statements @op_info_register(matmul_cube_dense_right_op_info) def CusMatMulCubeDenseRight(input_x1, input_x2, input_x3, bias=None, output_y={}, trans_a=False, trans_b=False, kernel_name="matmulcube"): diff --git a/model_zoo/Transformer/src/transformer_for_train.py b/model_zoo/Transformer/src/transformer_for_train.py index ac54aee7f9b..758ac65ab50 100644 --- a/model_zoo/Transformer/src/transformer_for_train.py +++ b/model_zoo/Transformer/src/transformer_for_train.py @@ -31,6 +31,8 @@ from .transformer_model import TransformerModel GRADIENT_CLIP_TYPE = 1 GRADIENT_CLIP_VALUE = 5.0 + +# pylint: disable=consider-using-in class ClipGradients(nn.Cell): """ Clip gradients. 
@@ -48,11 +50,12 @@ class ClipGradients(nn.Cell): self.clip_by_norm = nn.ClipByNorm() self.cast = P.Cast() self.dtype = P.DType() + def construct(self, grads, clip_type, clip_value): - #return grads + # return grads if clip_type != 0 and clip_type != 1: return grads @@ -83,8 +86,8 @@ class TransformerTrainingLoss(nn.Cell): super(TransformerTrainingLoss, self).__init__(auto_prefix=False) self.vocab_size = config.vocab_size self.onehot = P.OneHot() - self.on_value = Tensor(float(1-config.label_smoothing), mstype.float32) - self.off_value = Tensor(config.label_smoothing/float(self.vocab_size-1), mstype.float32) + self.on_value = Tensor(float(1 - config.label_smoothing), mstype.float32) + self.off_value = Tensor(config.label_smoothing / float(self.vocab_size - 1), mstype.float32) self.reduce_sum = P.ReduceSum() self.reduce_mean = P.ReduceMean() self.reshape = P.Reshape() @@ -92,7 +95,7 @@ class TransformerTrainingLoss(nn.Cell): self.flatten = P.Flatten() self.neg = P.Neg() self.cast = P.Cast() - self.flat_shape = (config.batch_size*config.seq_length,) + self.flat_shape = (config.batch_size * config.seq_length,) def construct(self, prediction_scores, label_ids, label_weights): """Defines the computation performed.""" @@ -217,10 +220,12 @@ class TransformerTrainOneStepCell(nn.Cell): grad_scale = C.MultitypeFuncGraph("grad_scale") reciprocal = P.Reciprocal() + @grad_scale.register("Tensor", "Tensor") def tensor_grad_scale(scale, grad): return grad * F.cast(reciprocal(scale), F.dtype(grad)) + class TransformerTrainOneStepWithLossScaleCell(nn.Cell): """ Encapsulation class of Transformer network training. diff --git a/model_zoo/bert/src/bert_for_pre_training.py b/model_zoo/bert/src/bert_for_pre_training.py index 4732cc795fd..600512b4a77 100644 --- a/model_zoo/bert/src/bert_for_pre_training.py +++ b/model_zoo/bert/src/bert_for_pre_training.py @@ -34,6 +34,9 @@ GRADIENT_CLIP_VALUE = 1.0 _nn_clip_by_norm = nn.ClipByNorm() clip_grad = C.MultitypeFuncGraph("clip_grad") + + +# pylint: disable=consider-using-in @clip_grad.register("Number", "Number", "Tensor") def _clip_grad(clip_type, clip_value, grad): """ @@ -57,6 +60,7 @@ def _clip_grad(clip_type, clip_value, grad): new_grad = _nn_clip_by_norm(grad, F.cast(F.tuple_to_array((clip_value,)), dt)) return new_grad + class GetMaskedLMOutput(nn.Cell): """ Get masked lm output. 
@@ -377,6 +381,7 @@ class BertTrainOneStepWithLossScaleCell(nn.Cell): self.loss_scale = Parameter(Tensor(scale_update_cell.get_loss_scale(), dtype=mstype.float32), name="loss_scale") self.add_flags(has_effect=True) + def construct(self, input_ids, input_mask, diff --git a/tests/mindspore_test_framework/apps/test_bert_parts.py b/tests/mindspore_test_framework/apps/test_bert_parts.py index c45e4b2ab73..b1c10564630 100644 --- a/tests/mindspore_test_framework/apps/test_bert_parts.py +++ b/tests/mindspore_test_framework/apps/test_bert_parts.py @@ -15,14 +15,15 @@ """Test bert submodules.""" -import numpy as np import os -from mindspore import Tensor -from mindspore import nn, context +import numpy as np from mindspore.model_zoo.Bert_NEZHA import EmbeddingLookup, GetMaskedLMOutput, \ BertConfig, BertPreTraining, BertNetworkWithLoss from mindspore.model_zoo.Bert_NEZHA.bert_model import BertModel + +from mindspore import Tensor +from mindspore import nn, context from ..mindspore_test import mindspore_test from ..pipeline.forward.compile_forward import pipeline_for_compile_forward_anf_graph_for_case_by_case_config, \ pipeline_for_compile_forward_ge_graph_for_case_by_case_config diff --git a/tests/mindspore_test_framework/components/executor/check_exceptions.py b/tests/mindspore_test_framework/components/executor/check_exceptions.py index 450f95bc612..29f7d136c25 100644 --- a/tests/mindspore_test_framework/components/executor/check_exceptions.py +++ b/tests/mindspore_test_framework/components/executor/check_exceptions.py @@ -15,9 +15,10 @@ """Component that Check if the function raises the expected Exception.""" -import pytest import sys +import pytest + from ...components.icomponent import IExectorComponent from ...utils import keyword diff --git a/tests/mindspore_test_framework/utils/check_gradient.py b/tests/mindspore_test_framework/utils/check_gradient.py index 660395e2986..cadb0207d61 100644 --- a/tests/mindspore_test_framework/utils/check_gradient.py +++ b/tests/mindspore_test_framework/utils/check_gradient.py @@ -16,9 +16,10 @@ """Implementation of Numerical gradients checking.""" # pylint: disable=missing-docstring +from typing import Callable, List, Any + import mindspore._c_expression as _c_expression import numpy as np -from typing import Callable, List, Any from mindspore import ParameterTuple from mindspore import Tensor diff --git a/tests/mindspore_test_framework/utils/dataset_util.py b/tests/mindspore_test_framework/utils/dataset_util.py index 334c7248f6a..82e991ff098 100644 --- a/tests/mindspore_test_framework/utils/dataset_util.py +++ b/tests/mindspore_test_framework/utils/dataset_util.py @@ -15,9 +15,10 @@ """Dataset utils.""" -import numpy as np import random +import numpy as np + from mindspore import Tensor diff --git a/tests/mindspore_test_framework/utils/debug_util.py b/tests/mindspore_test_framework/utils/debug_util.py index ee826fa21a2..b345bac2993 100644 --- a/tests/mindspore_test_framework/utils/debug_util.py +++ b/tests/mindspore_test_framework/utils/debug_util.py @@ -24,8 +24,7 @@ from mindspore.ops import operations as P from mindspore.ops._grad.grad_base import bprop_getters from mindspore.ops.primitive import prim_attr_register, PrimitiveWithInfer -logging.basicConfig(level=logging.DEBUG, format= -'[%(levelname)s] %(asctime)s %(pathname)s:%(lineno)d %(message)s') +logging.basicConfig(level=logging.DEBUG, format='[%(levelname)s] %(asctime)s %(pathname)s:%(lineno)d %(message)s') logger = logging.getLogger(__name__) diff --git 
a/tests/mindspore_test_framework/utils/other_util.py b/tests/mindspore_test_framework/utils/other_util.py index 1698eefd1df..bfa7531be64 100644 --- a/tests/mindspore_test_framework/utils/other_util.py +++ b/tests/mindspore_test_framework/utils/other_util.py @@ -14,9 +14,8 @@ # ============================================================================ """Other utils.""" - -import mindspore._c_expression as _c_expression import numpy as np +import mindspore._c_expression as _c_expression from mindspore.common.tensor import Tensor diff --git a/tests/st/networks/models/bert/src/bert_for_pre_training.py b/tests/st/networks/models/bert/src/bert_for_pre_training.py index 4732cc795fd..600512b4a77 100644 --- a/tests/st/networks/models/bert/src/bert_for_pre_training.py +++ b/tests/st/networks/models/bert/src/bert_for_pre_training.py @@ -34,6 +34,9 @@ GRADIENT_CLIP_VALUE = 1.0 _nn_clip_by_norm = nn.ClipByNorm() clip_grad = C.MultitypeFuncGraph("clip_grad") + + +# pylint: disable=consider-using-in @clip_grad.register("Number", "Number", "Tensor") def _clip_grad(clip_type, clip_value, grad): """ @@ -57,6 +60,7 @@ def _clip_grad(clip_type, clip_value, grad): new_grad = _nn_clip_by_norm(grad, F.cast(F.tuple_to_array((clip_value,)), dt)) return new_grad + class GetMaskedLMOutput(nn.Cell): """ Get masked lm output. @@ -377,6 +381,7 @@ class BertTrainOneStepWithLossScaleCell(nn.Cell): self.loss_scale = Parameter(Tensor(scale_update_cell.get_loss_scale(), dtype=mstype.float32), name="loss_scale") self.add_flags(has_effect=True) + def construct(self, input_ids, input_mask, diff --git a/tests/st/ops/ascend/test_autocast.py b/tests/st/ops/ascend/test_autocast.py index b91a169a967..af8d043a5f8 100644 --- a/tests/st/ops/ascend/test_autocast.py +++ b/tests/st/ops/ascend/test_autocast.py @@ -23,35 +23,41 @@ from mindspore.ops import functional as F, composite as C import mindspore.context as context import pytest + class TensorIntAutoCast(nn.Cell): - def __init__(self,): + def __init__(self, ): super(TensorIntAutoCast, self).__init__() self.i = 2 + def construct(self, t): z = F.tensor_mul(t, self.i) return z class TensorFPAutoCast(nn.Cell): - def __init__(self,): + def __init__(self, ): super(TensorFPAutoCast, self).__init__() self.f = 1.2 + def construct(self, t): z = F.tensor_mul(t, self.f) return z class TensorBoolAutoCast(nn.Cell): - def __init__(self,): + def __init__(self, ): super(TensorBoolAutoCast, self).__init__() self.f = True + def construct(self, t): z = F.tensor_mul(t, self.f) return z + class TensorAutoCast(nn.Cell): - def __init__(self,): + def __init__(self, ): super(TensorAutoCast, self).__init__() + def construct(self, t1, t2): z = F.tensor_mul(t1, t2) return z @@ -68,7 +74,7 @@ def test_tensor_auto_cast(): t_fp16 = Tensor(np.ones([2, 1, 2, 2]), mstype.float16) t_fp32 = Tensor(np.ones([2, 1, 2, 2]), mstype.float32) t_fp64 = Tensor(np.ones([2, 1, 2, 2]), mstype.float64) - net = TensorAutoCast() + net = TensorAutoCast() rs = net(t_uint8, t_int8) assert rs.dtype() == mstype.int16 rs = net(t_uint8, t_int16) @@ -96,7 +102,7 @@ def test_tensor_auto_cast(): assert rs.dtype() == mstype.float64 rs = net(t_fp32, t_fp64) assert rs.dtype() == mstype.float64 - + rs = net(t_uint8, t_fp16) assert rs.dtype() == mstype.float16 rs = net(t_uint8, t_fp32) @@ -210,7 +216,6 @@ def test_tensor_auto_cast(): with pytest.raises(TypeError): net(t_uint64, t_fp64) - with pytest.raises(TypeError): tfp(t_uint16) with pytest.raises(TypeError): diff --git a/tests/st/ops/ascend/test_ops_infer.py 
b/tests/st/ops/ascend/test_ops_infer.py index 350116eb9f7..7b1489de3d5 100644 --- a/tests/st/ops/ascend/test_ops_infer.py +++ b/tests/st/ops/ascend/test_ops_infer.py @@ -21,6 +21,7 @@ import mindspore.common.dtype as mstype from mindspore import Tensor from mindspore.ops import operations as P from mindspore import context + context.set_context(mode=context.GRAPH_MODE, save_graphs=True) @@ -29,14 +30,16 @@ def test_cast_op_attr(): def __init__(self): super(CastNet, self).__init__() self.cast = P.Cast() + def construct(self, x, t): return self.cast(x, t) - + class CastTypeTest(nn.Cell): def __init__(self, net): super(CastTypeTest, self).__init__() self.net = net self.cast = P.Cast() + def construct(self, x, y, z): cast_op = self.cast t1 = cast_op(x, mstype.float32) @@ -46,6 +49,7 @@ def test_cast_op_attr(): t4 = cast_net(y, mstype.int32) t5 = cast_net(z, mstype.float16) return (t1, t2, t3, t4, t5) + net = CastTypeTest(CastNet()) t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.int32)) t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32)) diff --git a/tests/st/ops/cpu/test_transpose_op.py b/tests/st/ops/cpu/test_transpose_op.py index 812d1f37057..bda85ca90fd 100644 --- a/tests/st/ops/cpu/test_transpose_op.py +++ b/tests/st/ops/cpu/test_transpose_op.py @@ -142,4 +142,6 @@ def test_transpose(): assert (output[1].asnumpy() == expect1).all() assert (output[2].asnumpy() == expect2).all() assert (output[3].asnumpy() == expect3).all() -test_transpose() \ No newline at end of file + + +test_transpose() diff --git a/tests/ut/cpp/python_input/gtest_input/optimizer/opt_test.py b/tests/ut/cpp/python_input/gtest_input/optimizer/opt_test.py index 28543043e78..66b442668b4 100644 --- a/tests/ut/cpp/python_input/gtest_input/optimizer/opt_test.py +++ b/tests/ut/cpp/python_input/gtest_input/optimizer/opt_test.py @@ -1043,6 +1043,7 @@ def test_print_tuple_wrapper(tag): return fns[tag] +# pylint: disable=unnecessary-semicolon def test_constant_duplicate_mul(tag): fns = FnDict() Mul = Primitive('Mul'); diff --git a/tests/ut/python/dtype/test_dictionary.py b/tests/ut/python/dtype/test_dictionary.py index 26c955d89de..14033873f76 100644 --- a/tests/ut/python/dtype/test_dictionary.py +++ b/tests/ut/python/dtype/test_dictionary.py @@ -152,7 +152,7 @@ def test_dict_set_item(): x = Tensor(np.ones([2, 2, 3], np.float32)) net = DictSetNet() - out = net(x) + _ = net(x) # if the dictionary item does not exist, create a new one @@ -168,4 +168,4 @@ def test_dict_set_item_create_new(): return my_dict x = Tensor(np.ones([2, 2, 3], np.float32)) net = DictSetNet() - out = net(x) + _ = net(x) diff --git a/tests/ut/python/dtype/test_hypermap.py b/tests/ut/python/dtype/test_hypermap.py index 2627114b075..da3721060a2 100644 --- a/tests/ut/python/dtype/test_hypermap.py +++ b/tests/ut/python/dtype/test_hypermap.py @@ -81,31 +81,3 @@ def test_hypermap_func_const(): net = NetMap() assert net() == (8, 12, 16) - - -""" -def test_hypermap_func_variable(): - class NetMap(Cell): - def __init__(self): - super(NetMap, self).__init__() - - def double(self, x): - return 2 * x - - def triple(self, x): - return 3 * x - - def square(self, x): - return x * x - - def construct(self, x): - _list = [self.double, self.triple, self.square] - return map(lambda f: f(x), _list) - - x = Tensor(np.ones([3, 2, 3], np.float32)) - net = NetMap() - - with pytest.raises(RuntimeError) as ex: - net(x) - assert "HyperMap don't support Closure with free variable yet" in str(ex.value) -""" diff --git a/tests/ut/python/dtype/test_list.py 
b/tests/ut/python/dtype/test_list.py index d5e316bed1b..f9ddb1a16c6 100644 --- a/tests/ut/python/dtype/test_list.py +++ b/tests/ut/python/dtype/test_list.py @@ -133,7 +133,7 @@ def test_list_append_2(): class ListOperate(nn.Cell): - def __init__(self, ): + def __init__(self,): super(ListOperate, self).__init__() def construct(self, t, l): @@ -153,7 +153,7 @@ class ListOperate(nn.Cell): class InListNet(nn.Cell): - def __init__(self, ): + def __init__(self,): super(InListNet, self).__init__() self.list_ = [1, 2, 3, 4, 5, "ok"] diff --git a/tests/ut/python/dtype/test_tuple.py b/tests/ut/python/dtype/test_tuple.py index 4e20bef25da..2f4ef7e5086 100644 --- a/tests/ut/python/dtype/test_tuple.py +++ b/tests/ut/python/dtype/test_tuple.py @@ -53,7 +53,7 @@ class NestTupleGraphNet(nn.Cell): class InTupleNet(nn.Cell): - def __init__(self, ): + def __init__(self,): super(InTupleNet, self).__init__() self.tuple_ = (1, 2, 3, 4, 5, "ok") diff --git a/tests/ut/python/exec/test_AssignAdd.py b/tests/ut/python/exec/test_AssignAdd.py index 502b7b92f1d..6fed26b7267 100644 --- a/tests/ut/python/exec/test_AssignAdd.py +++ b/tests/ut/python/exec/test_AssignAdd.py @@ -99,4 +99,4 @@ def test_assignadd_scalar_cast(): net = AssignAddNet() x = Tensor(np.ones([1]).astype(np.int64) * 102) # _executor.compile(net, 1) - result = net(x) + _ = net(x) diff --git a/tests/ut/python/ir/test_tensor.py b/tests/ut/python/ir/test_tensor.py index 9cc7208d3f5..3786505a50f 100644 --- a/tests/ut/python/ir/test_tensor.py +++ b/tests/ut/python/ir/test_tensor.py @@ -429,9 +429,9 @@ def test_tensor_dtype_np_int64(): def test_tensor_dtype_fp32_to_bool(): with pytest.raises(RuntimeError): - input = np.random.randn(2, 3, 4, 5).astype(np.float32) - input = ms.Tensor(input) - input_me = ms.Tensor(input, dtype=ms.bool_) + input_ = np.random.randn(2, 3, 4, 5).astype(np.float32) + input_ = ms.Tensor(input_) + _ = ms.Tensor(input_, dtype=ms.bool_) def test_tensor_operation(): diff --git a/tests/ut/python/keep_order/test_keep_order.py b/tests/ut/python/keep_order/test_keep_order.py index 25d88d378e6..1cf2b8e19ab 100644 --- a/tests/ut/python/keep_order/test_keep_order.py +++ b/tests/ut/python/keep_order/test_keep_order.py @@ -41,10 +41,10 @@ class Func(nn.Cell): def construct(self, x, y): init = self.alloc_status() - sum = add(x, y) + sum_ = add(x, y) product = mul1(x, y) flag = self.get_status(init) - out = add2(sum, product) + out = add2(sum_, product) clear = self.clear_status(flag) out = F.depend(out, clear) return out @@ -88,7 +88,7 @@ def test_sens(): sens = Tensor(np.ones([3, 3]).astype(np.float32)) net = Net() net.add_flags(has_effect=True) - out = net(x, y, sens) + _ = net(x, y, sens) class Net_hyper(nn.Cell): @@ -119,7 +119,7 @@ def test_hyper_add(): sens = Tensor(np.ones([3, 3]).astype(np.float32)) net = Net_hyper() net.add_flags(has_effect=True) - out = net(x, y, sens) + _ = net(x, y, sens) def test_keep_order_io_effect_exception_return_dtype(): diff --git a/tests/ut/python/model/test_mix_precision.py b/tests/ut/python/model/test_mix_precision.py index 9a57160c98a..30c6002be8b 100644 --- a/tests/ut/python/model/test_mix_precision.py +++ b/tests/ut/python/model/test_mix_precision.py @@ -148,9 +148,6 @@ def test_cast(): _executor.compile(net, x) -"""test grad of PReLU, which cause AddN(generated by grad) fail""" - - class IRBlockZ(nn.Cell): def __init__(self, inplanes, planes): super(IRBlockZ, self).__init__() diff --git a/tests/ut/python/nn/test_pooling.py b/tests/ut/python/nn/test_pooling.py index 0967fc3bfcb..38b0b55e556 100644 --- 
a/tests/ut/python/nn/test_pooling.py +++ b/tests/ut/python/nn/test_pooling.py @@ -46,6 +46,7 @@ class MaxNet(nn.Cell): kernel_size, stride=None, padding=0): + _ = padding super(MaxNet, self).__init__() self.maxpool = nn.MaxPool2d(kernel_size, stride) @@ -73,5 +74,5 @@ class Avg1dNet(nn.Cell): def test_avg1d(): net = Avg1dNet(6, 1) - input = Tensor(np.random.randint(0, 255, [1, 3, 6]).astype(np.float32)) - _executor.compile(net, input) + input_ = Tensor(np.random.randint(0, 255, [1, 3, 6]).astype(np.float32)) + _executor.compile(net, input_) diff --git a/tests/ut/python/nn/test_psnr.py b/tests/ut/python/nn/test_psnr.py index caed021c66a..b045516442a 100644 --- a/tests/ut/python/nn/test_psnr.py +++ b/tests/ut/python/nn/test_psnr.py @@ -52,19 +52,19 @@ def test_compile_psnr_grayscale(): def test_psnr_max_val_negative(): max_val = -1 with pytest.raises(ValueError): - net = PSNRNet(max_val) + _ = PSNRNet(max_val) def test_psnr_max_val_bool(): max_val = True with pytest.raises(TypeError): - net = PSNRNet(max_val) + _ = PSNRNet(max_val) def test_psnr_max_val_zero(): max_val = 0 with pytest.raises(ValueError): - net = PSNRNet(max_val) + _ = PSNRNet(max_val) def test_psnr_different_shape(): diff --git a/tests/ut/python/nn/test_ssim.py b/tests/ut/python/nn/test_ssim.py index 319cc991f9f..9fd98069556 100644 --- a/tests/ut/python/nn/test_ssim.py +++ b/tests/ut/python/nn/test_ssim.py @@ -51,59 +51,59 @@ def test_compile_grayscale(): def test_ssim_max_val_negative(): max_val = -1 with pytest.raises(ValueError): - net = SSIMNet(max_val) + _ = SSIMNet(max_val) def test_ssim_max_val_bool(): max_val = True with pytest.raises(TypeError): - net = SSIMNet(max_val) + _ = SSIMNet(max_val) def test_ssim_max_val_zero(): max_val = 0 with pytest.raises(ValueError): - net = SSIMNet(max_val) + _ = SSIMNet(max_val) def test_ssim_filter_size_float(): with pytest.raises(TypeError): - net = SSIMNet(filter_size=1.1) + _ = SSIMNet(filter_size=1.1) def test_ssim_filter_size_zero(): with pytest.raises(ValueError): - net = SSIMNet(filter_size=0) + _ = SSIMNet(filter_size=0) def test_ssim_filter_sigma_zero(): with pytest.raises(ValueError): - net = SSIMNet(filter_sigma=0.0) + _ = SSIMNet(filter_sigma=0.0) def test_ssim_filter_sigma_negative(): with pytest.raises(ValueError): - net = SSIMNet(filter_sigma=-0.1) + _ = SSIMNet(filter_sigma=-0.1) def test_ssim_k1_k2_wrong_value(): with pytest.raises(ValueError): - net = SSIMNet(k1=1.1) + _ = SSIMNet(k1=1.1) with pytest.raises(ValueError): - net = SSIMNet(k1=1.0) + _ = SSIMNet(k1=1.0) with pytest.raises(ValueError): - net = SSIMNet(k1=0.0) + _ = SSIMNet(k1=0.0) with pytest.raises(ValueError): - net = SSIMNet(k1=-1.0) + _ = SSIMNet(k1=-1.0) with pytest.raises(ValueError): - net = SSIMNet(k2=1.1) + _ = SSIMNet(k2=1.1) with pytest.raises(ValueError): - net = SSIMNet(k2=1.0) + _ = SSIMNet(k2=1.0) with pytest.raises(ValueError): - net = SSIMNet(k2=0.0) + _ = SSIMNet(k2=0.0) with pytest.raises(ValueError): - net = SSIMNet(k2=-1.0) + _ = SSIMNet(k2=-1.0) def test_ssim_different_shape(): diff --git a/tests/ut/python/onnx/test_onnx.py b/tests/ut/python/onnx/test_onnx.py index e7d04fcfc70..019ecfe9c20 100644 --- a/tests/ut/python/onnx/test_onnx.py +++ b/tests/ut/python/onnx/test_onnx.py @@ -64,13 +64,13 @@ class BatchNormTester(nn.Cell): def test_batchnorm_train_onnx_export(): "test onnx export interface does not modify trainable flag of a network" - input = Tensor(np.ones([1, 3, 32, 32]).astype(np.float32) * 0.01) + input_ = Tensor(np.ones([1, 3, 32, 32]).astype(np.float32) * 0.01) net = 
BatchNormTester(3) net.set_train() if not net.training: raise ValueError('netowrk is not in training mode') onnx_file = 'batch_norm.onnx' - export(net, input, file_name=onnx_file, file_format='ONNX') + export(net, input_, file_name=onnx_file, file_format='ONNX') if not net.training: raise ValueError('netowrk is not in training mode') @@ -172,6 +172,7 @@ net_cfgs = [ def get_id(cfg): + _ = cfg return list(map(lambda x: x[0], net_cfgs)) diff --git a/tests/ut/python/ops/test_math_ops_check.py b/tests/ut/python/ops/test_math_ops_check.py index 7ab9853965a..fd4cd1d3f32 100755 --- a/tests/ut/python/ops/test_math_ops_check.py +++ b/tests/ut/python/ops/test_math_ops_check.py @@ -28,7 +28,7 @@ from ....mindspore_test_framework.pipeline.forward.compile_forward \ class AssignAddNet(nn.Cell): - def __init__(self,): + def __init__(self, ): super(AssignAddNet, self).__init__() self.op = P.AssignAdd() self.inputdata = Parameter(Tensor(np.zeros([1]).astype(np.bool_), mstype.bool_), name="assign_add1") @@ -39,7 +39,7 @@ class AssignAddNet(nn.Cell): class AssignSubNet(nn.Cell): - def __init__(self,): + def __init__(self, ): super(AssignSubNet, self).__init__() self.op = P.AssignSub() self.inputdata = Parameter(Tensor(np.zeros([1]).astype(np.bool_), mstype.bool_), name="assign_sub1") @@ -635,7 +635,7 @@ test_case_math_ops = [ 'skip': ['backward']}), # type of x and y not match ('Greater1', { - 'block': P.Greater(), + 'block': P.Greater(), 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))], 'skip': ['backward']}), # type of x and y not match @@ -660,6 +660,7 @@ test_case_math_ops = [ 'skip': ['backward']}), ] + @mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config_exception) def test_check_exception(): return raise_set diff --git a/tests/ut/python/ops/test_ops_attr_infer.py b/tests/ut/python/ops/test_ops_attr_infer.py index 92bd23245f2..d4f6d8e2993 100644 --- a/tests/ut/python/ops/test_ops_attr_infer.py +++ b/tests/ut/python/ops/test_ops_attr_infer.py @@ -21,21 +21,25 @@ import mindspore.context as context from mindspore import Tensor from mindspore.ops import functional as F from mindspore.ops import prim_attr_register, PrimitiveWithInfer + context.set_context(mode=context.GRAPH_MODE, save_graphs=True) + class FakeOp(PrimitiveWithInfer): @prim_attr_register def __init__(self): """""" + def infer_shape(self, x, y): self.second_shape = y self.add_prim_attr("second_shape", y) return x - + def infer_dtype(self, x, y): return x -# test the normal case that should generate independent primitive because of different + +# test the normal case that should generate independent primitive because of different # generated attributes after inference def test_conv2d_same_primitive(): class Conv2DSameNet(nn.Cell): @@ -43,15 +47,18 @@ def test_conv2d_same_primitive(): super(Conv2DSameNet, self).__init__() self.conv1 = nn.Conv2d(16, 64, (1, 41), (1, 4), "same", 0, 1, has_bias=True) self.conv2 = nn.Conv2d(16, 64, (1, 41), (1, 4), "same", 0, 1, has_bias=True) + def construct(self, x, y): r1 = self.conv1(x) r2 = self.conv2(y) return (r1, r2) + t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32)) t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32)) net = Conv2DSameNet() net(t1, t2) + # test cell as high order argument # The graph with free variables used as argument is not supported yet # because of the limit of inference specialize system @@ -59,18 +66,22 @@ def Xtest_conv2d_op_with_arg(): class Conv2dNet(nn.Cell): def __init__(self): 
super(Conv2dNet, self).__init__() + def construct(self, op, x): return op(x) + class OpsNet(nn.Cell): def __init__(self, net): super(OpsNet, self).__init__() self.opnet = net self.conv2 = nn.Conv2d(16, 64, (1, 41), (1, 4), "same", 0, 1, has_bias=True) + def construct(self, x, y): conv_op = self.conv2 a = self.opnet(conv_op, x) b = self.opnet(conv_op, y) return (a, b) + t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32)) t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32)) net = OpsNet(Conv2dNet()) @@ -82,23 +93,29 @@ def test_conv2d_op_with_arg(): def __init__(self): super(FackOpNet, self).__init__() self.op = FakeOp() + def construct(self, x, y): return self.op(x, y) + class OpNet(nn.Cell): def __init__(self): super(OpNet, self).__init__() + def construct(self, op, x, y): return op(x, y) + class OpsNet(nn.Cell): def __init__(self, net): super(OpsNet, self).__init__() self.opnet = net self.op = FackOpNet() + def construct(self, x, y): op = self.op a = self.opnet(op, x, y) b = self.opnet(op, y, x) return (a, b) + t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32)) t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32)) net = OpsNet(OpNet()) @@ -110,63 +127,77 @@ def test_conv2d_op_with_arg_same_input(): def __init__(self): super(FackOpNet, self).__init__() self.op = FakeOp() + def construct(self, x, y): return self.op(x, y) + class OpNet(nn.Cell): def __init__(self): super(OpNet, self).__init__() + def construct(self, op, x, y): return op(x, y) + class OpsNet(nn.Cell): def __init__(self, net): super(OpsNet, self).__init__() self.opnet = net self.op = FackOpNet() + def construct(self, x, y): op = self.op a = self.opnet(op, x, x) b = self.opnet(op, y, x) return (a, b) + t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32)) t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32)) net = OpsNet(OpNet()) net(t1, t2) + # test op with partial def test_op_as_partial(): class OpAsPartial(nn.Cell): def __init__(self): super(OpAsPartial, self).__init__() self.op = FakeOp() + def construct(self, x, y, z): partial_op = F.partial(self.op, x) a = partial_op(y) b = partial_op(z) return a, b + t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32)) t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32)) t3 = Tensor(np.ones([1, 16, 1, 1234]).astype(np.float32)) net = OpAsPartial() net(t1, t2, t3) + # test op with partial def test_op_as_partial_inside(): class OpAsPartial(nn.Cell): def __init__(self): super(OpAsPartial, self).__init__() self.op = FakeOp() + def construct(self, x, y, z): partial_op = F.partial(self.op, x) a = partial_op(y) b = partial_op(z) return a, b + class OuterNet(nn.Cell): def __init__(self): super(OuterNet, self).__init__() self.net = OpAsPartial() + def construct(self, x, y, z): a, b = self.net(x, y, z) return a, b + t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32)) t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32)) t3 = Tensor(np.ones([1, 16, 1, 1234]).astype(np.float32)) @@ -180,12 +211,14 @@ def test_op_as_partial_independent(): def __init__(self): super(OpAsPartial, self).__init__() self.op = FakeOp() + def construct(self, x, y, z): partial_op1 = F.partial(self.op, x) a = partial_op1(y) partial_op2 = F.partial(self.op, x) b = partial_op2(z) return a, b + t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32)) t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32)) t3 = Tensor(np.ones([1, 16, 1, 1234]).astype(np.float32)) @@ -198,6 +231,7 @@ def test_nest_partial(): def __init__(self): super(NestPartial, self).__init__() self.op = 
FakeOp() + def construct(self, x, y, z): partial_op1 = F.partial(self.op) partial_op2 = F.partial(partial_op1, x) @@ -206,54 +240,65 @@ def test_nest_partial(): partial_op4 = F.partial(partial_op3, x) b = partial_op4(z) return a, b + t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32)) t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32)) t3 = Tensor(np.ones([1, 16, 1, 1234]).astype(np.float32)) net = NestPartial() net(t1, t2, t3) - + + # high order argument # op and op args as network arguments def test_op_with_arg_as_input(): class WithOpArgNet(nn.Cell): def __init__(self): super(WithOpArgNet, self).__init__() + def construct(self, op, x, y): return op(x, y) + class OpsNet(nn.Cell): def __init__(self, net): super(OpsNet, self).__init__() self.opnet = net self.op = FakeOp() + def construct(self, x, y, z): op = self.op a = self.opnet(op, x, z) b = self.opnet(op, x, y) return (a, b) + t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32)) t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32)) t3 = Tensor(np.ones([1, 16, 1, 1234]).astype(np.float32)) net = OpsNet(WithOpArgNet()) net(t1, t2, t3) + # The partial application used as argument is not supported yet # because of the limit of inference specialize system def Xtest_partial_as_arg(): class PartialArgNet(nn.Cell): def __init__(self): super(PartialArgNet, self).__init__() + def construct(self, partial_op, y): return partial_op(y) + class OpsNet(nn.Cell): def __init__(self, net): super(OpsNet, self).__init__() self.partial_net = net self.op = FakeOp() + def construct(self, x, y, z): partial_op = F.partial(self.op, x) a = self.partial_net(partial_op, z) b = self.partial_net(partial_op, y) return (a, b) + t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32)) t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32)) t3 = Tensor(np.ones([1, 16, 1, 1234]).astype(np.float32)) diff --git a/tests/ut/python/ops/test_ops_check.py b/tests/ut/python/ops/test_ops_check.py index 6dc4ac6b04b..c7bcb555e3a 100644 --- a/tests/ut/python/ops/test_ops_check.py +++ b/tests/ut/python/ops/test_ops_check.py @@ -32,6 +32,7 @@ from ....mindspore_test_framework.pipeline.forward.verify_exception \ logging.basicConfig(level=logging.WARNING) +# pylint: disable=abstract-method class NetMissConstruct(nn.Cell): """ NetMissConstruct definition """ @@ -46,7 +47,6 @@ class NetMissConstruct(nn.Cell): self.max_pool2d = nn.MaxPool2d(kernel_size=2) self.flatten = P.Flatten() - # pylint: disable=abstract-method # TestCase: Mis-spelled 'construct' to 'construtc' def construtc(self, x): x = self.max_pool2d(self.relu(self.conv1(x))) diff --git a/tests/ut/python/optimizer/test_debug_location.py b/tests/ut/python/optimizer/test_debug_location.py index 2848b4771be..f35a8e3fc3b 100644 --- a/tests/ut/python/optimizer/test_debug_location.py +++ b/tests/ut/python/optimizer/test_debug_location.py @@ -44,7 +44,7 @@ class MockNeg(PrimitiveWithInfer): def infer_dtype(self, input_x): raise TypeError("InferError") - return input_x + # return input_x class MockSub(PrimitiveWithInfer): @@ -79,8 +79,8 @@ class Net(nn.Cell): self.matmul = P.MatMul() self.add = P.TensorAdd() - def construct(self, input): - output = self.add(self.matmul(input, self.weight), self.bias) + def construct(self, input_): + output = self.add(self.matmul(input_, self.weight), self.bias) return output @@ -93,9 +93,9 @@ class NetFP16(nn.Cell): self.add = P.TensorAdd() self.cast = P.Cast() - def construct(self, input): + def construct(self, input_): output = self.cast( - self.add(self.matmul(self.cast(input, 
mstype.float16), self.cast(self.weight, mstype.float16)), + self.add(self.matmul(self.cast(input_, mstype.float16), self.cast(self.weight, mstype.float16)), self.cast(self.bias, mstype.float16)), mstype.float32) return output diff --git a/tests/ut/python/optimizer/test_optimize_with_loss_scale.py b/tests/ut/python/optimizer/test_optimize_with_loss_scale.py index 95e9da3230d..ea60f1f09b8 100644 --- a/tests/ut/python/optimizer/test_optimize_with_loss_scale.py +++ b/tests/ut/python/optimizer/test_optimize_with_loss_scale.py @@ -42,10 +42,10 @@ class MindDataSet(MindData): if self._size < self._iter_num: raise StopIteration self._iter_num += 1 - next = [] - for shape, type in zip(self._output_shapes, self._np_types): - next.append(Tensor(np.ones(shape).astype(type))) - return tuple(next) + lst = [] + for shape_, type_ in zip(self._output_shapes, self._np_types): + lst.append(Tensor(np.ones(shape_).astype(type_))) + return tuple(lst) class Net(nn.Cell): @@ -56,8 +56,8 @@ class Net(nn.Cell): self.matmul = P.MatMul() self.add = P.TensorAdd() - def construct(self, input): - output = self.add(self.matmul(input, self.weight), self.bias) + def construct(self, input_): + output = self.add(self.matmul(input_, self.weight), self.bias) return output @@ -70,9 +70,9 @@ class NetFP16(nn.Cell): self.add = P.TensorAdd() self.cast = P.Cast() - def construct(self, input): + def construct(self, input_): output = self.cast( - self.add(self.matmul(self.cast(input, mstype.float16), self.cast(self.weight, mstype.float16)), + self.add(self.matmul(self.cast(input_, mstype.float16), self.cast(self.weight, mstype.float16)), self.cast(self.bias, mstype.float16)), mstype.float32) return output diff --git a/tests/ut/python/parallel/test_alltoall.py b/tests/ut/python/parallel/test_alltoall.py index a03a83b0ba3..96ff8435046 100644 --- a/tests/ut/python/parallel/test_alltoall.py +++ b/tests/ut/python/parallel/test_alltoall.py @@ -97,8 +97,8 @@ def test_all_to_all(): print(strategys) expect_dict = {'Default/network-_VirtualDatasetCell/_backbone-WithLossCell/_loss_fn-SoftmaxCrossEntropyWithLogits' '/SoftmaxCrossEntropyWithLogits-op3': [[8, 1], [8, 1]], - 'Default/network-_VirtualDatasetCell/_backbone-WithLossCell/_loss_fn-SoftmaxCrossEntropyWithLogits/OneHot-op4': [ - [8, 1], [], []], + 'Default/network-_VirtualDatasetCell/_backbone-WithLossCell/_loss_fn-SoftmaxCrossEntropyWithLogits/' + 'OneHot-op4': [[8, 1], [], []], 'Default/network-_VirtualDatasetCell/_backbone-WithLossCell/_backbone-AllToAllNet/Transpose-op1': [ [8, 1]], 'Default/network-_VirtualDatasetCell/_backbone-WithLossCell/_backbone-AllToAllNet/MatMul-op0': [ diff --git a/tests/ut/python/parallel/test_two_matmul.py b/tests/ut/python/parallel/test_two_matmul.py index 6489cc90a8d..871e77f0044 100644 --- a/tests/ut/python/parallel/test_two_matmul.py +++ b/tests/ut/python/parallel/test_two_matmul.py @@ -170,4 +170,4 @@ def test_matmul_forward_reduce_scatter_transpose(): x = Tensor(np.ones([128, 32]), dtype=ms.float32) y = Tensor(np.ones([64, 32]), dtype=ms.float32) b = Tensor(np.ones([128, 64]), dtype=ms.float32) - compile_net(net, x, y, b) \ No newline at end of file + compile_net(net, x, y, b) diff --git a/tests/ut/python/parameter_feature/test_parameter.py b/tests/ut/python/parameter_feature/test_parameter.py index b739b96781d..289fd35e818 100644 --- a/tests/ut/python/parameter_feature/test_parameter.py +++ b/tests/ut/python/parameter_feature/test_parameter.py @@ -280,4 +280,4 @@ def test_mixed_precision_const_parameter(): x = Tensor(np.ones((1, 3, 28, 28), 
np.float32)) y = Tensor(np.ones((1, 3, 14, 14), np.float32)) z = Tensor(np.ones((1, 3, 28, 28), np.float32)) - out = net(x, y, z) + _ = net(x, y, z) diff --git a/tests/ut/python/parameter_feature/test_var_grad.py b/tests/ut/python/parameter_feature/test_var_grad.py index 05b0289d257..7a332b1c3ba 100644 --- a/tests/ut/python/parameter_feature/test_var_grad.py +++ b/tests/ut/python/parameter_feature/test_var_grad.py @@ -39,7 +39,7 @@ def test_net_vargs_expand(): y = Tensor(np.random.normal(0, 1, [3, 4, 5]).astype(np.float32)) sens = Tensor(np.random.normal(0, 1, [3, 4, 5]).astype(np.float32)) net = AddNet() - out = C.grad_all_with_sens(net, net.trainable_params())(x, y, sens) + _ = C.grad_all_with_sens(net, net.trainable_params())(x, y, sens) class VarNet(Cell): @@ -111,7 +111,7 @@ def test_all_var_args_grad_with_sens(): sens = Tensor(1.0, dtype=mstype.float32) net = VarNet(SecondNet()) grad_net = GradNet(net) - out = grad_net(x, y, sens) + _ = grad_net(x, y, sens) def test_grad_list_var_args(): @@ -128,7 +128,7 @@ def test_grad_list_var_args(): y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) net = VarNet(SecondNet()) grad_net = GradNet(net) - out = grad_net(x, y) + _ = grad_net(x, y) def test_grad_all_var_args(): @@ -145,7 +145,7 @@ def test_grad_all_var_args(): y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) net = VarNet(SecondNet()) grad_net = GradNet(net) - out = grad_net(x, y) + _ = grad_net(x, y) def test_grad_all_var_args_with_sens(): @@ -163,7 +163,7 @@ def test_grad_all_var_args_with_sens(): sens = Tensor(1.0, dtype=mstype.float32) net = VarNet(SecondNet()) grad_net = GradNet(net) - out = grad_net(x, y, sens) + _ = grad_net(x, y, sens) def test_grad_var_args_with_sens(): @@ -181,7 +181,7 @@ def test_grad_var_args_with_sens(): sens = Tensor(1.0, dtype=mstype.float32) net = VarNet(SecondNet()) grad_net = GradNet(net) - out = grad_net(x, y, sens) + _ = grad_net(x, y, sens) def test_var_args_grad(): @@ -219,7 +219,7 @@ def test_var_args_grad(): sens = Tensor(1.0, dtype=mstype.float32) net = VarNet(SecondNet()) grad_net = GradNet(net) - out = grad_net(x, y, sens) + _ = grad_net(x, y, sens) def test_var_args_positional(): @@ -253,7 +253,7 @@ def test_var_args_positional(): y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) net = VarNet(SecondNet()) grad_net = GradNet(net) - out = grad_net(x, y) + _ = grad_net(x, y) def test_grad_within_if_else(): @@ -271,7 +271,7 @@ def test_grad_within_if_else(): x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) - sens = Tensor(1.0, dtype=mstype.float32) + _ = Tensor(1.0, dtype=mstype.float32) net = VarNet(SecondNet()) grad_net = GradNet(net) out = grad_net(x, y) @@ -316,10 +316,10 @@ def test_grad_for_concat(): net = Concat(axis=self.axis) grad_net = GradNet(net) grad_net.set_train() - input_grad = grad_net(*inputs, Tensor(self.out_grad_np)) + _ = grad_net(*inputs, Tensor(self.out_grad_np)) def grad_cmp(self): - input_grad_mindspore = self.grad_mindspore_impl() + self.grad_mindspore_impl() fact = ConcatFactory(input_shape=( (2, 184320, 1), (2, 46080, 1), (2, 11520, 1), (2, 2880, 1), (2, 720, 1)), axis=1) diff --git a/tests/ut/python/pipeline/parse/test_cont_break.py b/tests/ut/python/pipeline/parse/test_cont_break.py index 1098e278971..578df0c287d 100644 --- a/tests/ut/python/pipeline/parse/test_cont_break.py +++ b/tests/ut/python/pipeline/parse/test_cont_break.py @@ -84,7 +84,7 @@ class for_loop_with_cont_break(Cell): if i > 5: x *= 3 break - x *= 2 + # x *= 2 x = x * 2 pass return x 
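# Reviewer note (not part of the patch): many hunks in this change follow two recurring
# pylint-driven conventions -- results computed only for their side effects are bound to `_`
# instead of a named variable (silencing unused-variable), and names that shadow Python
# builtins (`input`, `sum`, `next`, `type`) gain a trailing underscore or a descriptive name.
# Below is a minimal sketch of both conventions; `net` and `add` are made-up stand-ins, not
# the MindSpore cells touched above.
def add(x, y):
    return x + y

def run_for_side_effects(net, x, y):
    # Before: `out = net(x, y)` -- pylint flags `out` as assigned but never used.
    # After: bind to `_` to state that the result is intentionally discarded.
    _ = net(x, y)

def accumulate(values):
    # Before: `sum = 0` and `for input in values` shadow builtins (redefined-builtin).
    # After: trailing underscores keep the intent without redefining builtins.
    sum_ = 0
    for input_ in values:
        sum_ = add(sum_, input_)
    return sum_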
diff --git a/tests/ut/python/pynative_mode/test_framstruct.py b/tests/ut/python/pynative_mode/test_framstruct.py index 17db2f5b5d4..0e27aee70a0 100644 --- a/tests/ut/python/pynative_mode/test_framstruct.py +++ b/tests/ut/python/pynative_mode/test_framstruct.py @@ -123,6 +123,7 @@ def sub(x, y): return x - y +# pylint: disable=using-constant-test @ms_function def if_always_true(x): """ if_always_true """ @@ -870,6 +871,7 @@ def test_grad_refactor_14(): assert C.grad_all(grad_refactor_14)(2, 3) == (3, 9) +# pylint: disable=using-constant-test class IfDeferInline(nn.Cell): def __init__(self, mul_size): super().__init__() diff --git a/tests/ut/python/pynative_mode/test_hook.py b/tests/ut/python/pynative_mode/test_hook.py index f25fe97cb25..6cfab67ce8c 100644 --- a/tests/ut/python/pynative_mode/test_hook.py +++ b/tests/ut/python/pynative_mode/test_hook.py @@ -1,12 +1,10 @@ import numpy as np import mindspore.nn as nn import mindspore.ops.operations as P -from mindspore import context from mindspore.ops import composite as C -from mindspore.common import dtype as mstype from mindspore import context, Tensor, ParameterTuple from mindspore.common.initializer import TruncatedNormal -from mindspore.nn import Dense, WithLossCell, SoftmaxCrossEntropyWithLogits, Momentum +from mindspore.nn import WithLossCell, Momentum context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU") @@ -18,25 +16,28 @@ def conv(in_channels, out_channels, kernel_size, stride=1, padding=0): kernel_size=kernel_size, stride=stride, padding=padding, weight_init=weight, has_bias=False, pad_mode="valid") + def fc_with_initialize(input_channels, out_channels): """weight initial for fc layer""" weight = weight_variable() bias = weight_variable() return nn.Dense(input_channels, out_channels, weight, bias) + def weight_variable(): """weight initial""" return TruncatedNormal(0.02) + def cell_hook_function(cell_id, grad_input, grad_output): print(cell_id) - assert(grad_output[0].asnumpy().shape == (32, 6, 14, 14)) - assert(grad_input[0].asnumpy().shape == (32, 16, 10, 10)) + assert (grad_output[0].asnumpy().shape == (32, 6, 14, 14)) + assert (grad_input[0].asnumpy().shape == (32, 16, 10, 10)) def var_hook_function(grad_out): print("grad:", grad_out) - assert(grad_out[0].asnumpy().shape == (32, 120)) + assert (grad_out[0].asnumpy().shape == (32, 120)) class LeNet5(nn.Cell): @@ -82,7 +83,7 @@ class LeNet5(nn.Cell): x = self.fc3(x) return x - + class GradWrap(nn.Cell): """ GradWrap definition """ def __init__(self, network): @@ -94,6 +95,7 @@ class GradWrap(nn.Cell): weights = self.weights return C.GradOperation('get_by_list', get_by_list=True)(self.network, weights)(x, label) + def test_hook(): net = LeNet5() optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.1, 0.9) @@ -101,7 +103,7 @@ def test_hook(): net_with_criterion = WithLossCell(net, criterion) train_network = GradWrap(net_with_criterion) train_network.set_train() - + input_data = Tensor(np.ones([net.batch_size, 1, 32, 32]).astype(np.float32) * 0.01) label = Tensor(np.ones([net.batch_size, net.num_class]).astype(np.float32)) output = net(Tensor(input_data)) @@ -111,8 +113,6 @@ def test_hook(): print(loss_output.asnumpy().shape) - - class MulAdd(nn.Cell): def __init__(self): super(MulAdd, self).__init__() @@ -121,12 +121,13 @@ class MulAdd(nn.Cell): return 2 * x + y def bprop(self, x, y, out, dout): - assert(x == 1) - assert(y == 2) - assert(out == 4) - assert(dout == 1) + assert (x == 1) + assert (y == 2) + assert (out == 4) + assert (dout == 
1) return 3 * dout, 2 * y + def test_custom_bprop(): mul_add = MulAdd() mul_add.bprop_debug = True diff --git a/tests/ut/python/pynative_mode/test_stop_gradient.py b/tests/ut/python/pynative_mode/test_stop_gradient.py index 9690138fb95..a94f80adf09 100644 --- a/tests/ut/python/pynative_mode/test_stop_gradient.py +++ b/tests/ut/python/pynative_mode/test_stop_gradient.py @@ -18,10 +18,9 @@ import pytest import mindspore.common.dtype as mstype import mindspore.nn as nn -from mindspore import Parameter, ParameterTuple, Tensor +from mindspore import Parameter, ParameterTuple from mindspore import Tensor from mindspore import context -from mindspore import context from mindspore.common.api import ms_function from mindspore.ops import composite as C from mindspore.ops import operations as P diff --git a/tests/ut/python/train/quant/mobilenetv2.py b/tests/ut/python/train/quant/mobilenetv2.py index 38daaf314b3..e214953b979 100644 --- a/tests/ut/python/train/quant/mobilenetv2.py +++ b/tests/ut/python/train/quant/mobilenetv2.py @@ -60,6 +60,7 @@ class InvertedResidual(nn.Cell): class MobileNetV2(nn.Cell): def __init__(self, num_class=1000, input_size=224, width_mul=1.): super(MobileNetV2, self).__init__() + _ = input_size block = InvertedResidual input_channel = 32 last_channel = 1280 diff --git a/tests/ut/python/train/quant/mobilenetv2_combined.py b/tests/ut/python/train/quant/mobilenetv2_combined.py index 86c9847a637..5ae241c0f20 100644 --- a/tests/ut/python/train/quant/mobilenetv2_combined.py +++ b/tests/ut/python/train/quant/mobilenetv2_combined.py @@ -68,6 +68,7 @@ class InvertedResidual(nn.Cell): class MobileNetV2(nn.Cell): def __init__(self, num_class=1000, input_size=224, width_mul=1.): super(MobileNetV2, self).__init__() + _ = input_size block = InvertedResidual input_channel = 32 last_channel = 1280 diff --git a/tests/ut/python/train/quant/test_quant.py b/tests/ut/python/train/quant/test_quant.py index b11ec32ed8b..91698e01380 100644 --- a/tests/ut/python/train/quant/test_quant.py +++ b/tests/ut/python/train/quant/test_quant.py @@ -63,33 +63,3 @@ class LeNet5(nn.Cell): x = self.fc2(x) x = self.fc3(x) return x - -""" -def test_qat_lenet(): - net = LeNet5() - net = qat.convert_quant_network( - net, quant_delay=0, bn_fold=False, freeze_bn=10000, weight_bits=8, act_bits=8) - - -def test_qat_mobile(): - net = MobileNetV2() - img = Tensor(np.ones((1, 3, 224, 224)).astype(np.float32)) - net = qat.convert_quant_network( - net, quant_delay=0, bn_fold=False, freeze_bn=10000, weight_bits=8, act_bits=8) - net(img) - - -def test_qat_mobile_train(): - net = MobileNetV2(num_class=10) - img = Tensor(np.ones((1, 3, 224, 224)).astype(np.float32)) - label = Tensor(np.ones((1, 10)).astype(np.float32)) - net = qat.convert_quant_network( - net, quant_delay=0, bn_fold=False, freeze_bn=10000, weight_bits=8, act_bits=8) - - loss = nn.SoftmaxCrossEntropyWithLogits(reduction='mean') - optimizer = nn.Momentum(net.trainable_params(), - learning_rate=0.1, momentum=0.9) - net = nn.WithLossCell(net, loss) - net = nn.TrainOneStepCell(net, optimizer) - net(img, label) -""" \ No newline at end of file diff --git a/tests/ut/python/train/summary/summary_reader.py b/tests/ut/python/train/summary/summary_reader.py index 67653ff7a5a..647c25f25c5 100644 --- a/tests/ut/python/train/summary/summary_reader.py +++ b/tests/ut/python/train/summary/summary_reader.py @@ -13,9 +13,10 @@ # limitations under the License. 
# ============================================================================ """Summary reader.""" -import mindspore.train.summary_pb2 as summary_pb2 import struct +import mindspore.train.summary_pb2 as summary_pb2 + _HEADER_SIZE = 8 _HEADER_CRC_SIZE = 4 _DATA_CRC_SIZE = 4 diff --git a/tests/ut/python/train/test_amp.py b/tests/ut/python/train/test_amp.py index fe08809be1e..5325aad593a 100644 --- a/tests/ut/python/train/test_amp.py +++ b/tests/ut/python/train/test_amp.py @@ -25,6 +25,7 @@ from ....dataset_mock import MindData def setup_module(module): + _ = module context.set_context(mode=context.GRAPH_MODE) @@ -56,7 +57,7 @@ def test_amp_o0(): optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) train_network = amp.build_train_network(net, optimizer, level="O0") - output = train_network(inputs, label) + _ = train_network(inputs, label) def test_amp_o2(): @@ -66,7 +67,7 @@ def test_amp_o2(): optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) train_network = amp.build_train_network(net, optimizer, level="O2") - output = train_network(inputs, label) + _ = train_network(inputs, label) def test_amp_o2_loss(): @@ -76,7 +77,7 @@ def test_amp_o2_loss(): loss = nn.MSELoss() optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) train_network = amp.build_train_network(net, optimizer, loss, level="O2") - output = train_network(inputs, label) + _ = train_network(inputs, label) def test_amp_o0_loss(): @@ -86,7 +87,7 @@ def test_amp_o0_loss(): loss = nn.MSELoss() optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) train_network = amp.build_train_network(net, optimizer, loss) - output = train_network(inputs, label) + _ = train_network(inputs, label) class MindDataSet(MindData): @@ -100,10 +101,10 @@ class MindDataSet(MindData): if self._size < self._iter_num: raise StopIteration self._iter_num += 1 - next = [] - for shape, type in zip(self._output_shapes, self._np_types): - next.append(Tensor(np.ones(shape).astype(type))) - return tuple(next) + lst = [] + for shape_, type_ in zip(self._output_shapes, self._np_types): + lst.append(Tensor(np.ones(shape_).astype(type_))) + return tuple(lst) def test_compile_model_train_O0(): diff --git a/tests/ut/python/train/test_training.py b/tests/ut/python/train/test_training.py index 5d7c9004302..92625e54f97 100644 --- a/tests/ut/python/train/test_training.py +++ b/tests/ut/python/train/test_training.py @@ -151,7 +151,7 @@ def test_eval(): with pytest.raises(ValueError): model2.eval(dataset) - net3 = LossNet() + _ = LossNet() model3 = Model(net2, eval_network=net2, metrics={"loss"}) with pytest.raises(ValueError): model3.eval(dataset) diff --git a/tests/ut/python/utils/test_callback.py b/tests/ut/python/utils/test_callback.py index e9f8c4e7089..26ad1a22772 100644 --- a/tests/ut/python/utils/test_callback.py +++ b/tests/ut/python/utils/test_callback.py @@ -15,6 +15,7 @@ """test callback function.""" import os import stat + import numpy as np import pytest @@ -283,14 +284,14 @@ def test_build_callbacks(): callbacks = [ck_obj, loss_cb_1, 'Error', None] with pytest.raises(TypeError): - callback_list = _build_callbacks(callbacks) + _ = _build_callbacks(callbacks) def test_RunContext(): """Test RunContext.""" context_err = 666 with pytest.raises(TypeError): - context = RunContext(context_err) + _ = RunContext(context_err) cb_params = _InternalCallbackParam() cb_params.member1 = 1 diff --git a/tests/vm_impl/nn_ops_vm_impl.py b/tests/vm_impl/nn_ops_vm_impl.py 
index 2468314a958..f22a5c93b30 100644 --- a/tests/vm_impl/nn_ops_vm_impl.py +++ b/tests/vm_impl/nn_ops_vm_impl.py @@ -223,6 +223,7 @@ def vm_impl_avg_pool_grad(self): return vm_impl +# pylint: disable=function-redefined @vm_impl_getters.register(G.FusedBatchNormGrad) def vm_impl_fused_batch_norm_grad(self): """Generate vm_impl function for FusedBatchNormGrad""" @@ -239,6 +240,7 @@ def vm_impl_fused_batch_norm_grad(self): return vm_impl +# pylint: disable=function-redefined @vm_impl_getters.register(G.BatchNormGrad) def vm_impl_fused_batch_norm_grad(self): """Generate vm_impl function for BatchNormGrad"""
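# Reviewer note (not part of the patch): the two `function-redefined` suppressions above are
# needed because nn_ops_vm_impl.py registers vm implementations for FusedBatchNormGrad and
# BatchNormGrad through decorated functions that share one name, so pylint reports the second
# `def` as redefining the first even though the decorator has already captured it. Below is a
# minimal, self-contained sketch of that decorator-registration pattern; the registry and op
# names are illustrative only, not the real `vm_impl_getters` API.
_impl_getters = {}

def register(op_name):
    """Store the decorated getter in the registry keyed by op name."""
    def deco(fn):
        _impl_getters[op_name] = fn
        return fn
    return deco

@register("FusedBatchNormGrad")
def vm_impl_batch_norm_grad(op):
    def vm_impl(*inputs):
        return ("fused-bn-grad", op, inputs)
    return vm_impl

# pylint: disable=function-redefined
@register("BatchNormGrad")
def vm_impl_batch_norm_grad(op):  # same name as above; only the registry keeps both alive
    def vm_impl(*inputs):
        return ("bn-grad", op, inputs)
    return vm_impl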