diff --git a/tests/perf_test/bert/test_bert_train.py b/tests/perf_test/bert/test_bert_train.py index f5d6f0e32f7..39803d68600 100644 --- a/tests/perf_test/bert/test_bert_train.py +++ b/tests/perf_test/bert/test_bert_train.py @@ -18,13 +18,13 @@ # pylint: disable=missing-docstring, arguments-differ, W0612 import os + import mindspore.common.dtype as mstype import mindspore.context as context from mindspore import Tensor -from mindspore.nn.optim import AdamWeightDecayDynamicLR from mindspore.model_zoo.Bert_NEZHA import BertConfig, BertNetworkWithLoss, BertTrainOneStepCell, \ BertTrainOneStepWithLossScaleCell -from mindspore.nn.wrap.loss_scale import FixedLossScaleUpdateCell +from mindspore.nn.optim import AdamWeightDecayDynamicLR from mindspore.train.loss_scale_manager import DynamicLossScaleManager from ...dataset_mock import MindData from ...ops_common import nn, np, batch_tuple_tensor, build_construct_graph diff --git a/tests/perf_test/mindrecord/imagenet/imagenet_to_mindrecord.py b/tests/perf_test/mindrecord/imagenet/imagenet_to_mindrecord.py index cc3b6d78b06..f9f186ec834 100644 --- a/tests/perf_test/mindrecord/imagenet/imagenet_to_mindrecord.py +++ b/tests/perf_test/mindrecord/imagenet/imagenet_to_mindrecord.py @@ -13,7 +13,6 @@ # limitations under the License. # ============================================================================ """use ImageNetToMR tool to generate mindrecord""" -import os from mindspore.mindrecord import ImageNetToMR IMAGENET_MAP_FILE = "../../../ut/data/mindrecord/testImageNetDataWhole/labels_map.txt" @@ -21,6 +20,7 @@ IMAGENET_IMAGE_DIR = "../../../ut/data/mindrecord/testImageNetDataWhole/images" MINDRECORD_FILE = "./imagenet.mindrecord" PARTITION_NUMBER = 16 + def imagenet_to_mindrecord(): imagenet_transformer = ImageNetToMR(IMAGENET_MAP_FILE, IMAGENET_IMAGE_DIR, @@ -28,5 +28,6 @@ def imagenet_to_mindrecord(): PARTITION_NUMBER) imagenet_transformer.transform() + if __name__ == '__main__': imagenet_to_mindrecord() diff --git a/tests/perf_test/mindrecord/imagenet/imagenet_to_tfrecord.py b/tests/perf_test/mindrecord/imagenet/imagenet_to_tfrecord.py index 86d18a7d941..024ed885d1f 100644 --- a/tests/perf_test/mindrecord/imagenet/imagenet_to_tfrecord.py +++ b/tests/perf_test/mindrecord/imagenet/imagenet_to_tfrecord.py @@ -15,6 +15,7 @@ """generate tfrecord""" import collections import os + import tensorflow as tf IMAGENET_MAP_FILE = "../../../ut/data/mindrecord/testImageNetDataWhole/labels_map.txt" @@ -22,6 +23,7 @@ IMAGENET_IMAGE_DIR = "../../../ut/data/mindrecord/testImageNetDataWhole/images" TFRECORD_FILE = "./imagenet.tfrecord" PARTITION_NUMBER = 16 + def get_imagenet_filename_label_pic(map_file, image_dir): """ Get data from imagenet. 
@@ -69,18 +71,22 @@ def get_imagenet_filename_label_pic(map_file, image_dir): continue yield str(file_name), int(label), image_bytes + def create_int_feature(values): feature = tf.train.Feature(int64_list=tf.train.Int64List(value=[values])) return feature + def create_string_feature(values): feature = tf.train.Feature(bytes_list=tf.train.BytesList(value=[bytes(values, encoding='utf-8')])) return feature + def create_bytes_feature(values): feature = tf.train.Feature(bytes_list=tf.train.BytesList(value=[values])) return feature + def imagenet_to_tfrecord(): writers = [] for i in range(PARTITION_NUMBER): @@ -109,5 +115,6 @@ def imagenet_to_tfrecord(): print("Write {} total examples".format(total_written)) + if __name__ == '__main__': imagenet_to_tfrecord() diff --git a/tests/perf_test/mindrecord/imagenet/perf_read_imagenet.py b/tests/perf_test/mindrecord/imagenet/perf_read_imagenet.py index fa5baef266d..f4ce3a904fa 100644 --- a/tests/perf_test/mindrecord/imagenet/perf_read_imagenet.py +++ b/tests/perf_test/mindrecord/imagenet/perf_read_imagenet.py @@ -14,17 +14,20 @@ # ============================================================================ """test dataset performance about mindspore.MindDataset, mindspore.TFRecordDataset, tf.data.TFRecordDataset""" import time -import mindspore.dataset as ds -from mindspore.mindrecord import FileReader import tensorflow as tf +import mindspore.dataset as ds +from mindspore.mindrecord import FileReader + print_step = 5000 + def print_log(count): if count % print_step == 0: print("Read {} rows ...".format(count)) + def use_filereader(mindrecord): start = time.time() columns_list = ["data", "label"] @@ -38,6 +41,7 @@ def use_filereader(mindrecord): end = time.time() print("Read by FileReader - total rows: {}, cost time: {}s".format(num_iter, end - start)) + def use_minddataset(mindrecord): start = time.time() columns_list = ["data", "label"] @@ -51,6 +55,7 @@ def use_minddataset(mindrecord): end = time.time() print("Read by MindDataset - total rows: {}, cost time: {}s".format(num_iter, end - start)) + def use_tfrecorddataset(tfrecord): start = time.time() columns_list = ["data", "label"] @@ -66,8 +71,10 @@ def use_tfrecorddataset(tfrecord): end = time.time() print("Read by TFRecordDataset - total rows: {}, cost time: {}s".format(num_iter, end - start)) + def use_tensorflow_tfrecorddataset(tfrecord): start = time.time() + def _parse_record(example_photo): features = { 'file_name': tf.io.FixedLenFeature([], tf.string), @@ -87,6 +94,7 @@ def use_tensorflow_tfrecorddataset(tfrecord): end = time.time() print("Read by TensorFlow TFRecordDataset - total rows: {}, cost time: {}s".format(num_iter, end - start)) + if __name__ == '__main__': # use MindDataset mindrecord = './imagenet.mindrecord00' diff --git a/tests/perf_test/test_lenet.py b/tests/perf_test/test_lenet.py index d071a653306..ef526e1fc25 100644 --- a/tests/perf_test/test_lenet.py +++ b/tests/perf_test/test_lenet.py @@ -18,15 +18,14 @@ import numpy as np import mindspore.nn as nn -from mindspore.common.api import _executor -from mindspore import Tensor -from mindspore.model_zoo.lenet import LeNet -from mindspore import context import mindspore.ops.composite as C +from mindspore import Tensor +from mindspore import context +from mindspore.common.api import _executor +from mindspore.model_zoo.lenet import LeNet context.set_context(mode=context.GRAPH_MODE) - batch_size = 1 channel = 1 height = 32 @@ -36,6 +35,7 @@ num_class = 10 class LeNetGrad(nn.Cell): """Backward of LeNet""" + def __init__(self, network): 
super(LeNetGrad, self).__init__() self.grad_op = C.grad_all_with_sens diff --git a/tests/perf_test/test_resnet_infer.py b/tests/perf_test/test_resnet_infer.py index 65fdad6e0d2..a12160f49ad 100644 --- a/tests/perf_test/test_resnet_infer.py +++ b/tests/perf_test/test_resnet_infer.py @@ -17,10 +17,11 @@ import numpy as np -from mindspore.common.api import _executor from mindspore import Tensor +from mindspore.common.api import _executor from .resnet_example import resnet50 + def test_compile(): net = resnet50() inp = Tensor(np.ones([1, 3, 224, 224]).astype(np.float32)) diff --git a/tests/perf_test/test_resnet_pynative.py b/tests/perf_test/test_resnet_pynative.py index 1b7262c1fb3..156977ee325 100644 --- a/tests/perf_test/test_resnet_pynative.py +++ b/tests/perf_test/test_resnet_pynative.py @@ -20,9 +20,9 @@ import numpy as np from mindspore import Tensor -from ..train_step_wrap import train_step_without_opt from .resnet_example import resnet50 -from ..vm_impl import * +from ..train_step_wrap import train_step_without_opt + def test_resnet50_pynative(): net = train_step_without_opt(resnet50()) diff --git a/tests/perf_test/test_resnet_train.py b/tests/perf_test/test_resnet_train.py index e500d8f31c2..7524c9bf43f 100644 --- a/tests/perf_test/test_resnet_train.py +++ b/tests/perf_test/test_resnet_train.py @@ -17,13 +17,15 @@ import numpy as np -from mindspore.common.api import _executor import mindspore.context as context from mindspore import Tensor -from ..train_step_wrap import train_step_with_loss_warp +from mindspore.common.api import _executor from .resnet_example import resnet50 +from ..train_step_wrap import train_step_with_loss_warp + context.set_context(mode=context.GRAPH_MODE) + def test_train_step(): net = train_step_with_loss_warp(resnet50()) net.set_train() diff --git a/tests/train_step_wrap.py b/tests/train_step_wrap.py index d48e25b8371..842b924198a 100644 --- a/tests/train_step_wrap.py +++ b/tests/train_step_wrap.py @@ -16,15 +16,15 @@ train step wrap """ import mindspore.nn as nn -from mindspore.ops import functional as F +from mindspore import ParameterTuple from mindspore.ops import composite as C -from mindspore.ops import operations as P -from mindspore import Parameter, ParameterTuple + class TrainStepWrap(nn.Cell): """ TrainStepWrap definition """ + def __init__(self, network): super(TrainStepWrap, self).__init__() self.network = network @@ -39,10 +39,12 @@ class TrainStepWrap(nn.Cell): grads = self.grad(self.network, weights)(x, label) return self.optimizer(grads) + class NetWithLossClass(nn.Cell): """ NetWithLossClass definition """ + def __init__(self, network): super(NetWithLossClass, self).__init__(auto_prefix=False) self.loss = nn.SoftmaxCrossEntropyWithLogits() @@ -61,6 +63,7 @@ class TrainStepWrap2(nn.Cell): """ TrainStepWrap2 definition """ + def __init__(self, network, sens): super(TrainStepWrap2, self).__init__() self.network = network @@ -76,13 +79,16 @@ class TrainStepWrap2(nn.Cell): grads = self.grad(self.network, weights)(x, self.sens) return self.optimizer(grads) + def train_step_with_sens(network, sens): return TrainStepWrap2(network, sens) + class TrainStepWrapWithoutOpt(nn.Cell): """ TrainStepWrapWithoutOpt definition """ + def __init__(self, network): super(TrainStepWrapWithoutOpt, self).__init__() self.network = network @@ -93,5 +99,6 @@ class TrainStepWrapWithoutOpt(nn.Cell): grads = self.grad(self.network, self.weights)(x, label) return grads + def train_step_without_opt(network): return TrainStepWrapWithoutOpt(NetWithLossClass(network)) diff --git 
a/tests/ut/python/dtype/test_dictionary.py b/tests/ut/python/dtype/test_dictionary.py index a6638aa0774..4535fb82d02 100644 --- a/tests/ut/python/dtype/test_dictionary.py +++ b/tests/ut/python/dtype/test_dictionary.py @@ -28,6 +28,7 @@ context.set_context(mode=context.GRAPH_MODE) def Xtest_arg_dict(): class DictNet(Cell): """DictNet definition""" + def __init__(self): super(DictNet, self).__init__() self.max = P.Maximum() @@ -48,6 +49,7 @@ def Xtest_arg_dict(): def test_const_dict(): class DictNet(Cell): """DictNet1 definition""" + def __init__(self): super(DictNet, self).__init__() self.max = P.Maximum() @@ -58,6 +60,7 @@ def test_const_dict(): a = self.max(self.dictionary["x"], self.dictionary["y"]) b = self.min(self.dictionary["x"], self.dictionary["y"]) return a + b + net = DictNet() net() @@ -65,6 +68,7 @@ def test_const_dict(): def test_dict_set_or_get_item(): class DictNet(Cell): """DictNet1 definition""" + def __init__(self): super(DictNet, self).__init__() self.dict_ = {"x": 1, "y": 2} @@ -91,6 +95,7 @@ def test_dict_set_or_get_item(): def test_dict_set_or_get_item_2(): class DictNet(Cell): """DictNet1 definition""" + def __init__(self): super(DictNet, self).__init__() @@ -117,6 +122,7 @@ def test_dict_set_or_get_item_2(): def test_dict_set_or_get_item_3(): class DictNet(Cell): """DictNet1 definition""" + def __init__(self): super(DictNet, self).__init__() self.dict_ = {"x": Tensor(np.ones([2, 2, 3], np.float32)), "y": 1} @@ -130,5 +136,3 @@ def test_dict_set_or_get_item_3(): net = DictNet() assert net() == Tensor(np.ones([4, 2, 3], np.float32)) - - diff --git a/tests/ut/python/dtype/test_hypermap.py b/tests/ut/python/dtype/test_hypermap.py index 7bbccb0b22b..2627114b075 100644 --- a/tests/ut/python/dtype/test_hypermap.py +++ b/tests/ut/python/dtype/test_hypermap.py @@ -13,7 +13,6 @@ # limitations under the License. 
# ============================================================================ import numpy as np -import pytest from mindspore import Tensor, context from mindspore.nn import Cell diff --git a/tests/ut/python/exec/__init__.py b/tests/ut/python/exec/__init__.py index 5443c0ca48e..9f7610e25c4 100644 --- a/tests/ut/python/exec/__init__.py +++ b/tests/ut/python/exec/__init__.py @@ -15,6 +15,7 @@ """setup for pytest""" import mindspore.context as context + # pylint: disable=unused-argument def setup_module(module): context.set_context(mode=context.GRAPH_MODE) diff --git a/tests/ut/python/exec/resnet_example.py b/tests/ut/python/exec/resnet_example.py index 913e90a0bb5..816849f4e62 100644 --- a/tests/ut/python/exec/resnet_example.py +++ b/tests/ut/python/exec/resnet_example.py @@ -16,6 +16,7 @@ resnet50 example """ import numpy as np + import mindspore.nn as nn from mindspore import Tensor from mindspore.ops import operations as P diff --git a/tests/ut/python/exec/test_AssignAdd.py b/tests/ut/python/exec/test_AssignAdd.py index 6b7fc78802d..a7244c5639c 100644 --- a/tests/ut/python/exec/test_AssignAdd.py +++ b/tests/ut/python/exec/test_AssignAdd.py @@ -16,19 +16,21 @@ test assign add """ import numpy as np -import mindspore.nn as nn -from mindspore.ops import operations as P -from mindspore.common.initializer import initializer -from mindspore import Tensor, Parameter + import mindspore as ms -from ..ut_filter import non_graph_engine -from mindspore.common.api import _executor import mindspore.context as context -import pytest +import mindspore.nn as nn +from mindspore import Tensor, Parameter +from mindspore.common.initializer import initializer +from mindspore.ops import operations as P +from ..ut_filter import non_graph_engine + context.set_context(mode=context.GRAPH_MODE) + class Net(nn.Cell): """Net definition""" + def __init__(self): super(Net, self).__init__() self.AssignAdd = P.AssignAdd() @@ -39,18 +41,19 @@ class Net(nn.Cell): out = self.AssignAdd(self.inputdata, x) return out + @non_graph_engine def test_AssignAdd_1(): """test AssignAdd 1""" import mindspore.context as context context.set_context(mode=context.GRAPH_MODE) net = Net() - x = Tensor(np.ones([1]).astype(np.int64)*100) + x = Tensor(np.ones([1]).astype(np.int64) * 100) print("MyPrintResult dataX:", x) result = net(x) print("MyPrintResult data::", result) - expect = np.ones([1]).astype(np.int64)*101 + expect = np.ones([1]).astype(np.int64) * 101 diff = result.asnumpy() - expect print("MyPrintExpect:", expect) @@ -58,18 +61,19 @@ def test_AssignAdd_1(): error = np.ones(shape=[1]) * 1.0e-3 assert np.all(diff < error) + @non_graph_engine def test_AssignAdd_2(): """test AssignAdd 2""" import mindspore.context as context context.set_context(mode=context.GRAPH_MODE) net = Net() - x = Tensor(np.ones([1]).astype(np.int64)*102) + x = Tensor(np.ones([1]).astype(np.int64) * 102) print("MyPrintResult dataX:", x) result = net(x) print("MyPrintResult data::", result.asnumpy()) - expect = np.ones([1]).astype(np.int64)*103 + expect = np.ones([1]).astype(np.int64) * 103 diff = result.asnumpy() - expect print("MyPrintExpect:", expect) @@ -77,8 +81,10 @@ def test_AssignAdd_2(): error = np.ones(shape=[1]) * 1.0e-3 assert np.all(diff < error) + class AssignAddNet(nn.Cell): """Net definition""" + def __init__(self): super(AssignAddNet, self).__init__() self.AssignAdd = P.AssignAdd() @@ -89,9 +95,10 @@ class AssignAddNet(nn.Cell): z1 = self.AssignAdd(self.inputdata, self.one) return z1 + @non_graph_engine def test_assignadd_scalar_cast(): net = 
AssignAddNet() - x = Tensor(np.ones([1]).astype(np.int64)*102) - #_executor.compile(net, 1) + x = Tensor(np.ones([1]).astype(np.int64) * 102) + # _executor.compile(net, 1) result = net(x) diff --git a/tests/ut/python/exec/test_activation.py b/tests/ut/python/exec/test_activation.py index 6f41bb986a1..601b058e8c0 100644 --- a/tests/ut/python/exec/test_activation.py +++ b/tests/ut/python/exec/test_activation.py @@ -14,6 +14,7 @@ # ============================================================================ """ test Activations """ import numpy as np + import mindspore.nn as nn from mindspore import Tensor from ..ut_filter import non_graph_engine diff --git a/tests/ut/python/exec/test_assign_sub.py b/tests/ut/python/exec/test_assign_sub.py index 5ff0e9e9e06..ee4d9ee38bd 100644 --- a/tests/ut/python/exec/test_assign_sub.py +++ b/tests/ut/python/exec/test_assign_sub.py @@ -16,15 +16,17 @@ test assign sub """ import numpy as np + +import mindspore.context as context import mindspore.nn as nn import mindspore.ops.operations as P from mindspore import Tensor -import mindspore.context as context from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter context.set_context(mode=context.GRAPH_MODE) + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() diff --git a/tests/ut/python/exec/test_batchnorm.py b/tests/ut/python/exec/test_batchnorm.py index 7524bb8ee21..682f57a2880 100644 --- a/tests/ut/python/exec/test_batchnorm.py +++ b/tests/ut/python/exec/test_batchnorm.py @@ -14,6 +14,7 @@ # ============================================================================ """ut for batchnorm layer""" import numpy as np + import mindspore.nn as nn from mindspore import Tensor from ..ut_filter import non_graph_engine diff --git a/tests/ut/python/exec/test_bias_add.py b/tests/ut/python/exec/test_bias_add.py index 75a15f93bfb..a80be608fd0 100644 --- a/tests/ut/python/exec/test_bias_add.py +++ b/tests/ut/python/exec/test_bias_add.py @@ -14,14 +14,17 @@ # ============================================================================ """ test BiasAdd """ import numpy as np + import mindspore.nn as nn -from mindspore.ops import operations as P -from mindspore.common.initializer import initializer from mindspore import Tensor, Parameter +from mindspore.common.initializer import initializer +from mindspore.ops import operations as P from ..ut_filter import non_graph_engine + class Net(nn.Cell): """Net definition""" + def __init__(self, output_channels, bias_init='zeros', diff --git a/tests/ut/python/exec/test_conv.py b/tests/ut/python/exec/test_conv.py index bbddcd99bee..6109f4e2fde 100644 --- a/tests/ut/python/exec/test_conv.py +++ b/tests/ut/python/exec/test_conv.py @@ -14,6 +14,7 @@ # ============================================================================ """test conv""" import numpy as np + import mindspore.nn as nn from mindspore import Tensor from ..ut_filter import non_graph_engine @@ -25,6 +26,7 @@ out_channels = 64 class Net(nn.Cell): """Net definition""" + def __init__(self, cin, cout, @@ -70,6 +72,7 @@ def test_compile2(): output = net(input_data) print(output.asnumpy()) + @non_graph_engine def test_compile3(): net = Net(3, 1, (3, 3), weight_init='ONES') diff --git a/tests/ut/python/exec/test_dense.py b/tests/ut/python/exec/test_dense.py index c9c09c50cfe..dae010fe6e3 100644 --- a/tests/ut/python/exec/test_dense.py +++ b/tests/ut/python/exec/test_dense.py @@ -14,12 +14,15 @@ # 
============================================================================ """ test Dense """ import numpy as np + import mindspore.nn as nn from mindspore import Tensor from ..ut_filter import non_graph_engine + class Net(nn.Cell): """Net definition""" + def __init__(self, input_channels, output_channels, diff --git a/tests/ut/python/exec/test_eval.py b/tests/ut/python/exec/test_eval.py index 0edf9cb7489..6d6e0ba20fc 100644 --- a/tests/ut/python/exec/test_eval.py +++ b/tests/ut/python/exec/test_eval.py @@ -14,11 +14,12 @@ # ============================================================================ """test eval""" import numpy as np + import mindspore as ms import mindspore.nn as nn -from mindspore.common.api import _executor from mindspore import Tensor from mindspore import context +from mindspore.common.api import _executor from ..ut_filter import non_graph_engine diff --git a/tests/ut/python/exec/test_flatten.py b/tests/ut/python/exec/test_flatten.py index dfb3e181e77..d1c16482c2c 100644 --- a/tests/ut/python/exec/test_flatten.py +++ b/tests/ut/python/exec/test_flatten.py @@ -16,8 +16,8 @@ import numpy as np import mindspore.nn as nn -from mindspore.ops import operations as P from mindspore import Tensor +from mindspore.ops import operations as P from ..ut_filter import non_graph_engine diff --git a/tests/ut/python/exec/test_pooling.py b/tests/ut/python/exec/test_pooling.py index 0e526ff8d68..9204f27c8aa 100644 --- a/tests/ut/python/exec/test_pooling.py +++ b/tests/ut/python/exec/test_pooling.py @@ -15,12 +15,12 @@ """ test pooling api """ -import numpy as np import mindspore.nn as nn -from mindspore import Tensor + class MaxNet(nn.Cell): """MaxNet definition""" + def __init__(self, kernel_size, stride=None): diff --git a/tests/ut/python/exec/test_softmax.py b/tests/ut/python/exec/test_softmax.py index b3144723e38..e6f1c61d0bc 100644 --- a/tests/ut/python/exec/test_softmax.py +++ b/tests/ut/python/exec/test_softmax.py @@ -16,9 +16,11 @@ test softmax api """ import numpy as np + import mindspore.nn as nn from mindspore import Tensor + class Net(nn.Cell): def __init__(self, dim): super(Net, self).__init__() diff --git a/tests/ut/python/exec/test_tensor_add.py b/tests/ut/python/exec/test_tensor_add.py index 14cebd8c8f4..85ea68f6e7a 100644 --- a/tests/ut/python/exec/test_tensor_add.py +++ b/tests/ut/python/exec/test_tensor_add.py @@ -14,10 +14,12 @@ # ============================================================================ """ test TensorAdd """ import numpy as np + import mindspore.nn as nn from mindspore import Tensor from mindspore.ops import operations as P + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() diff --git a/tests/ut/python/exec/test_train.py b/tests/ut/python/exec/test_train.py index 549fe372c60..9f0e2671976 100644 --- a/tests/ut/python/exec/test_train.py +++ b/tests/ut/python/exec/test_train.py @@ -14,29 +14,33 @@ # ============================================================================ """ test model train """ import numpy as np + import mindspore.nn as nn -from mindspore.ops import operations as P -from mindspore.common.initializer import initializer from mindspore import Tensor, Parameter, Model +from mindspore.common.initializer import initializer from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits from mindspore.nn.optim import Momentum +from mindspore.ops import operations as P + # fn is a funcation use i as input def lr_gen(fn, epoch_size): for i in range(epoch_size): yield fn(i) + def me_train_tensor(net, input_np, label_np, 
epoch_size=2): """me_train_tensor""" loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean") - opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr_gen(lambda i: 0.1, epoch_size), 0.9, 0.01, 1024) + opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr_gen(lambda i: 0.1, epoch_size), 0.9, + 0.01, 1024) Model(net, loss, opt) _network = nn.WithLossCell(net, loss) _train_net = nn.TrainOneStepCell(_network, opt) _train_net.set_train() label_np = np.argmax(label_np, axis=-1).astype(np.int32) for epoch in range(0, epoch_size): - print(f"epoch %d"%(epoch)) + print(f"epoch %d" % (epoch)) _train_net(Tensor(input_np), Tensor(label_np)) @@ -52,6 +56,7 @@ def test_bias_add(test_with_simu): class Net(nn.Cell): """Net definition""" + def __init__(self, output_channels, bias_init='zeros', @@ -87,6 +92,7 @@ def test_conv(test_with_simu): class Net(nn.Cell): """Net definition""" + def __init__(self, cin, cout, @@ -116,6 +122,7 @@ def test_net(): class Net(nn.Cell): """Net definition""" + def __init__(self): super(Net, self).__init__() Tensor(np.ones([64, 3, 7, 7]).astype(np.float32) * 0.01) @@ -141,6 +148,7 @@ def test_net(): label_np = np.ones([32, 12]).astype(np.int32) me_train_tensor(net, input_np, label_np) + def test_bn(): """test_bn""" import mindspore.context as context @@ -151,6 +159,7 @@ def test_bn(): class Net(nn.Cell): """Net definition""" + def __init__(self, cin, cout): super(Net, self).__init__() self.bn = nn.BatchNorm2d(cin) diff --git a/tests/ut/python/exec/test_train_with_lars.py b/tests/ut/python/exec/test_train_with_lars.py index d4ca2ed8c3a..4d3621b3b28 100644 --- a/tests/ut/python/exec/test_train_with_lars.py +++ b/tests/ut/python/exec/test_train_with_lars.py @@ -23,6 +23,7 @@ from mindspore.ops import composite as C from mindspore.ops import functional as F from mindspore.ops import operations as P + def get_reordered_parameters(parameters): """get_reordered_parameters""" # put the bias parameter to the end @@ -36,12 +37,15 @@ def get_reordered_parameters(parameters): reordered_params = tuple(non_bias_param + bias_param) return len(non_bias_param), len(reordered_params), reordered_params + def get_net_trainable_reordered_params(net): params = net.trainable_params() return get_reordered_parameters(params) + class TrainOneStepWithLarsCell(nn.Cell): """TrainOneStepWithLarsCell definition""" + def __init__(self, network, optimizer, sens=1.0): super(TrainOneStepWithLarsCell, self).__init__(auto_prefix=False) self.network = network @@ -66,11 +70,13 @@ class TrainOneStepWithLarsCell(nn.Cell): new_grads = lars_grads + bias_grads return F.depend(loss, self.optimizer(new_grads)) + # fn is a function that uses i as input def lr_gen(fn, epoch_size): for i in range(epoch_size): yield fn(i) + def me_train_tensor(net, input_np, label_np, epoch_size=2): """me_train_tensor""" loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True) diff --git a/tests/ut/python/ir/test_dtype.py b/tests/ut/python/ir/test_dtype.py index 31a44585409..ef1f660207f 100644 --- a/tests/ut/python/ir/test_dtype.py +++ b/tests/ut/python/ir/test_dtype.py @@ -14,12 +14,14 @@ # ============================================================================ """test_dtype""" from dataclasses import dataclass + import numpy as np import pytest import mindspore as ms from mindspore.common import dtype + def test_dtype_to_nptype(): """test_dtype2nptype""" assert ms.dtype_to_nptype(ms.bool_) == np.bool_ @@ -59,6 +61,7 @@ def test_dtype_to_pytype(): @dataclass class Foo: 
x: int + def inf(self): return self.x diff --git a/tests/ut/python/ir/test_tensor.py b/tests/ut/python/ir/test_tensor.py index b7bf1bebf5e..65922f0159f 100644 --- a/tests/ut/python/ir/test_tensor.py +++ b/tests/ut/python/ir/test_tensor.py @@ -25,25 +25,27 @@ import mindspore as ms import mindspore.common.api as me import mindspore.nn as nn from mindspore import Tensor -from mindspore.common.parameter import Parameter from mindspore.common.initializer import initializer +from mindspore.common.parameter import Parameter from ..ut_filter import non_graph_engine - ndarr = np.ones((2, 3)) + def test_tensor_flatten(): with pytest.raises(AttributeError): - lst = [1, 2, 3, 4,] + lst = [1, 2, 3, 4, ] tensor_list = ms.Tensor(lst, ms.float32) tensor_list = tensor_list.Flatten() print(tensor_list) + def test_tensor_list(): lst = [[1.0, 2.0, 1.0], [1.0, 10.0, 9.0]] tensor_list = ms.Tensor(lst, ms.float32) print(tensor_list) + def test_tensor(): """test_tensor""" t1 = ms.Tensor(ndarr) @@ -63,6 +65,7 @@ def test_tensor(): assert isinstance(t4, ms.Tensor) assert t4.dtype() == ms.int64 + def test_tensor_type_float16(): t_float16 = ms.Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float16)) assert isinstance(t_float16, ms.Tensor) @@ -107,6 +110,7 @@ def test_tensor_type_float64_user_define(): assert t_float64.shape() == (2, 3) assert t_float64.dtype() == ms.float64 + def test_tensor_type_bool(): # init a tensor with bool type ts_bool_array = ms.Tensor(np.zeros([2, 3], np.bool), ms.bool_) @@ -122,6 +126,7 @@ def test_tensor_type_bool(): assert t_bool_array.shape() == (2, 3) assert t_bool_array.dtype() == ms.bool_ + def test_tensor_type_int8(): t_int8_array = ms.Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int8)) assert isinstance(t_int8_array, ms.Tensor) @@ -154,6 +159,7 @@ def test_tensor_type_int64(): assert t_int64.shape() == (2, 3) assert t_int64.dtype() == ms.int64 + def test_tensor_type_uint8(): t_uint8_array = ms.Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8)) assert isinstance(t_uint8_array, ms.Tensor) @@ -181,6 +187,7 @@ def test_tensor_type_uint64(): assert t_uint64.shape() == (2, 3) assert t_uint64.dtype() == ms.uint64 + def test_set_type(): t = ms.Tensor(ndarr) t.set_dtype(ms.float32) @@ -202,15 +209,17 @@ def test_sub(): z = x - y assert isinstance(z, ms.Tensor) + @non_graph_engine def test_div(): - x = ms.Tensor(np.array([[2,6,10],[12, 4, 8]]).astype(np.float32)) - y = ms.Tensor(np.array([[2,2,5],[6, 1, 2]]).astype(np.float32)) + x = ms.Tensor(np.array([[2, 6, 10], [12, 4, 8]]).astype(np.float32)) + y = ms.Tensor(np.array([[2, 2, 5], [6, 1, 2]]).astype(np.float32)) z = x / y z2 = x / 2 assert isinstance(z, ms.Tensor) assert isinstance(z2, ms.Tensor) + @non_graph_engine def test_parameter(): x = Parameter(initializer(1, [1], ms.float32), name="beta1_power") @@ -220,6 +229,7 @@ def test_parameter(): class Net(nn.Cell): """Net definition""" + def __init__(self, dim): super(Net, self).__init__() self.dim = dim @@ -266,6 +276,7 @@ def test_tensor_contiguous(): assert True, rt_f.flags['C_CONTIGUOUS'] print("rt_f flags = ", rt_f.flags) + def test_tensor_contiguous2(): input_data = np.random.randn(32, 112, 112, 3).astype(np.float32) input_me = input_data.transpose(0, 3, 1, 2) @@ -274,36 +285,43 @@ def test_tensor_contiguous2(): out_f = tensor_f_float32.asnumpy() print("out_f flags = ", out_f.flags) + def test_tensor_input_string(): with pytest.raises(TypeError): input_data = 'ccc' ms.Tensor(input_data) + def test_tensor_input_tuple_string(): with pytest.raises(TypeError): input_data = 
(2, 3, '4', 5) ms.Tensor(input_data) + def test_tensor_input_list_string(): with pytest.raises(TypeError): input_data = [[2, 3, '4', 5], [1, 2, 3, 4]] ms.Tensor(input_data) + def test_tensor_input_none(): with pytest.raises(TypeError): input_data = None ms.Tensor(input_data, np.int64) + # pylint: disable=no-value-for-parameter def test_tensor_input_empty(): with pytest.raises(TypeError): ms.Tensor() + def test_tensor_input_ndarray_str(): with pytest.raises(TypeError): inp = np.array(["88", 2, 4]) ms.Tensor(inp) + def test_tensor_input_ndarray_bool(): inp = np.array([True, 2, 4]) ms.Tensor(inp) @@ -311,86 +329,103 @@ def test_tensor_input_ndarray_bool(): inp = np.array([False, 2, 4]) ms.Tensor(inp) + def test_tensor_input_ndarray_complex(): with pytest.raises(TypeError): inp = np.array([20j, 2, 4]) ms.Tensor(inp) + def test_tensor_input_ndarray_none(): with pytest.raises(TypeError): inp = np.array([None, 2, 4]) ms.Tensor(inp) + def test_tensor_input_ndarray_dict(): with pytest.raises(TypeError): inp = {'a': 6, 'b': 7} ms.Tensor(inp) + def test_tensor_input_np_nan(): with pytest.raises(TypeError): input_data = (1, 2, 3, np.nan) ms.Tensor(input_data, np.int64) + def test_tensor_input_tuple_inf(): with pytest.raises(TypeError): input_data = (1, 2, 3, float("inf")) ms.Tensor(input_data, np.int64) + def test_tensor_input_dict(): with pytest.raises(TypeError): input_data = {'a': 6, 'b': 7} ms.Tensor(input_data, np.int64) + def test_tensor_input_complex(): with pytest.raises(TypeError): input_data = (1, 2j, 3) ms.Tensor(input_data, np.int64) + def test_tensor_dtype_np_float(): with pytest.raises(TypeError): input_data = np.random.randn(32, 112, 112, 3).astype(np.float) ms.Tensor(input_data, np.float) + def test_tensor_dtype_np_float16(): with pytest.raises(TypeError): input_data = np.random.randn(32, 112, 112, 3).astype(np.float16) ms.Tensor(input_data, np.float16) + def test_tensor_dtype_np_float32(): with pytest.raises(TypeError): input_data = np.random.randn(32, 112, 112, 3).astype(np.float32) ms.Tensor(input_data, np.float32) + def test_tensor_dtype_np_float64(): with pytest.raises(TypeError): input_data = np.random.randn(32, 112, 112, 3).astype(np.float64) ms.Tensor(input_data, np.float64) + def test_tensor_dtype_np_int(): with pytest.raises(TypeError): input_data = np.random.randn(32, 112, 112, 3).astype(np.int) ms.Tensor(input_data, np.int) + def test_tensor_dtype_np_int8(): with pytest.raises(TypeError): input_data = np.random.randn(32, 112, 112, 3).astype(np.int8) ms.Tensor(input_data, np.int8) + def test_tensor_dtype_np_int16(): with pytest.raises(TypeError): input_data = np.random.randn(32, 112, 112, 3).astype(np.int16) ms.Tensor(input_data, np.int16) + def test_tensor_dtype_np_int32(): with pytest.raises(TypeError): input_data = np.random.randn(32, 112, 112, 3).astype(np.int32) ms.Tensor(input_data, np.int32) + def test_tensor_dtype_np_int64(): with pytest.raises(TypeError): input_data = np.random.randn(32, 112, 112, 3).astype(np.int64) ms.Tensor(input_data, np.int64) + def test_tensor_dtype_fp32_to_bool(): with pytest.raises(RuntimeError): input = np.random.randn(2, 3, 4, 5).astype(np.float32) @@ -399,7 +434,7 @@ def test_tensor_dtype_fp32_to_bool(): def test_tensor_operation(): - x = Tensor(np.ones((3,3)) * 4) + x = Tensor(np.ones((3, 3)) * 4) res = x + 1 assert np.all(res.asnumpy() == np.ones((3, 3)) * 5) res = 1 + x diff --git a/tests/ut/python/ir/test_tensor_py.py b/tests/ut/python/ir/test_tensor_py.py index 9954326027a..389730413bc 100644 --- 
a/tests/ut/python/ir/test_tensor_py.py +++ b/tests/ut/python/ir/test_tensor_py.py @@ -14,10 +14,11 @@ # ============================================================================ """test tensor py""" import numpy as np + +import mindspore as ms +from mindspore.common.api import _executor from mindspore.nn import Cell from mindspore.ops import operations as P -from mindspore.common.api import _executor -import mindspore as ms from ..ut_filter import non_graph_engine @@ -93,6 +94,7 @@ def test_float(): def test_tensor_method_sub(): """test_tensor_method_sub""" + class Net(Cell): def __init__(self): super(Net, self).__init__() @@ -111,6 +113,7 @@ def test_tensor_method_sub(): def test_tensor_method_mul(): """test_tensor_method_mul""" + class Net(Cell): def __init__(self): super(Net, self).__init__() @@ -129,6 +132,7 @@ def test_tensor_method_mul(): def test_tensor_method_div(): """test_tensor_method_div""" + class Net(Cell): def __init__(self): super(Net, self).__init__() diff --git a/tests/ut/python/keep_order/test_keep_order.py b/tests/ut/python/keep_order/test_keep_order.py index 45f21198643..25d88d378e6 100644 --- a/tests/ut/python/keep_order/test_keep_order.py +++ b/tests/ut/python/keep_order/test_keep_order.py @@ -13,24 +13,25 @@ # limitations under the License. # ============================================================================ import numpy as np -from mindspore.common.api import ms_function -from mindspore.common.tensor import Tensor -from mindspore.ops import operations as P -import mindspore.ops.functional as F + import mindspore.context as context -from mindspore.ops import composite as C -from mindspore.ops.composite import core -from mindspore.common import dtype as mstype import mindspore.nn as nn +import mindspore.ops.functional as F +from mindspore.common import dtype as mstype +from mindspore.common.tensor import Tensor +from mindspore.ops import composite as C +from mindspore.ops import operations as P context.set_context(mode=context.GRAPH_MODE) add1 = P.TensorAdd() mul1 = P.MatMul() add2 = P.TensorAdd() + def add(x, y): return add1(x, y) + class Func(nn.Cell): def __init__(self): super(Func, self).__init__() @@ -48,7 +49,10 @@ class Func(nn.Cell): out = F.depend(out, clear) return out + grad_s = C.GradOperation('grad_with_sens', get_all=True, sens_param=True) + + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() @@ -69,6 +73,7 @@ class Net(nn.Cell): out = F.depend(out, clear) return out + def test_add(): x = Tensor(np.ones([3, 3]).astype(np.float32)) y = Tensor(np.ones([3, 3]).astype(np.float32)) @@ -76,6 +81,7 @@ def test_add(): func.add_flags(has_effect=True) func(x, y) + def test_sens(): x = Tensor(np.ones([3, 3]).astype(np.float32)) y = Tensor(np.ones([3, 3]).astype(np.float32)) @@ -84,6 +90,7 @@ def test_sens(): net.add_flags(has_effect=True) out = net(x, y, sens) + class Net_hyper(nn.Cell): def __init__(self): super(Net_hyper, self).__init__() @@ -105,6 +112,7 @@ class Net_hyper(nn.Cell): out = F.depend(out, clear) return out + def test_hyper_add(): x = Tensor(np.ones([3, 3]).astype(np.float32)) y = Tensor(np.ones([3, 3]).astype(np.float32)) @@ -113,10 +121,11 @@ def test_hyper_add(): net.add_flags(has_effect=True) out = net(x, y, sens) + def test_keep_order_io_effect_exception_return_dtype(): class Net(nn.Cell): def __init__(self): - super().__init__() + super().__init__() self.alloc_status = P.NPUAllocFloatStatus() self.get_status = P.NPUGetFloatStatus() self.clear_status = P.NPUClearFloatStatus() @@ -126,16 +135,16 @@ def 
test_keep_order_io_effect_exception_return_dtype(): self.neg = P.Neg() self.add_flags(has_effect=True) - def construct(self, x): + def construct(self, x): init = self.alloc_status() self.clear_status(init) - res = self.sub(x, self.neg(x)) + res = self.sub(x, self.neg(x)) self.get_status(init) dtype = self.dtype(res) return dtype - value = 655 + value = 655 data = np.full((8, 5, 3, 1), value, dtype=np.float16) x = Tensor(data, dtype=mstype.float16) net = Net() - data = net(x) \ No newline at end of file + data = net(x) diff --git a/tests/ut/python/metrics/test_accuracy.py b/tests/ut/python/metrics/test_accuracy.py index bedde410bf5..47a4dcfbd13 100644 --- a/tests/ut/python/metrics/test_accuracy.py +++ b/tests/ut/python/metrics/test_accuracy.py @@ -14,10 +14,13 @@ # ============================================================================ """test accuracy""" import math + import numpy as np import pytest -from mindspore.nn.metrics import Accuracy + from mindspore import Tensor +from mindspore.nn.metrics import Accuracy + def test_classification_accuracy(): """test_classification_accuracy""" @@ -29,8 +32,9 @@ def test_classification_accuracy(): metric.update(x, y) accuracy = metric.eval() accuracy2 = metric(x, y2) - assert math.isclose(accuracy, 2/3) - assert math.isclose(accuracy2, 2/3) + assert math.isclose(accuracy, 2 / 3) + assert math.isclose(accuracy2, 2 / 3) + def test_multilabel_accuracy(): x = Tensor(np.array([[0, 1, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])) @@ -39,7 +43,8 @@ def test_multilabel_accuracy(): metric.clear() metric.update(x, y) accuracy = metric.eval() - assert accuracy == 1/3 + assert accuracy == 1 / 3 + def test_shape_accuracy(): x = Tensor(np.array([[0, 1, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])) @@ -49,6 +54,7 @@ def test_shape_accuracy(): with pytest.raises(ValueError): metric.update(x, y) + def test_shape_accuracy2(): x = Tensor(np.array([[0, 1, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])) y = Tensor(np.array([0, 1, 1, 1])) @@ -57,6 +63,7 @@ def test_shape_accuracy2(): with pytest.raises(ValueError): metric.update(x, y) + def test_shape_accuracy3(): x = Tensor(np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]])) y = Tensor(np.array([[1, 0, 1], [1, 1, 1]])) @@ -65,6 +72,7 @@ def test_shape_accuracy3(): with pytest.raises(ValueError): metric.update(x, y) + def test_shape_accuracy4(): x = Tensor(np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]])) y = Tensor(np.array(1)) @@ -73,6 +81,7 @@ def test_shape_accuracy4(): with pytest.raises(ValueError): metric.update(x, y) + def test_type_accuracy(): with pytest.raises(TypeError): Accuracy('test') diff --git a/tests/ut/python/metrics/test_error.py b/tests/ut/python/metrics/test_error.py index a50c458d18b..2e6b37591d2 100644 --- a/tests/ut/python/metrics/test_error.py +++ b/tests/ut/python/metrics/test_error.py @@ -14,10 +14,12 @@ # ============================================================================ """test error""" import math + import numpy as np import pytest -from mindspore.nn.metrics import MAE, MSE + from mindspore import Tensor +from mindspore.nn.metrics import MAE, MSE def test_MAE(): @@ -27,7 +29,7 @@ def test_MAE(): error.clear() error.update(x, y) result = error.eval() - assert math.isclose(result, 0.15/4) + assert math.isclose(result, 0.15 / 4) def test_input_MAE(): @@ -52,7 +54,7 @@ def test_MSE(): error.clear() error.update(x, y) result = error.eval() - assert math.isclose(result, 0.0125/4) + assert math.isclose(result, 0.0125 / 4) def test_input_MSE(): diff --git a/tests/ut/python/metrics/test_fbeta.py 
b/tests/ut/python/metrics/test_fbeta.py index 1f4838a0b0e..855cd2a0eb0 100755 --- a/tests/ut/python/metrics/test_fbeta.py +++ b/tests/ut/python/metrics/test_fbeta.py @@ -13,11 +13,11 @@ # limitations under the License. # ============================================================================ # """test_fbeta""" -import math import numpy as np import pytest -from mindspore.nn.metrics import get_metric_fn, Fbeta + from mindspore import Tensor +from mindspore.nn.metrics import get_metric_fn, Fbeta def test_classification_fbeta(): @@ -32,9 +32,9 @@ def test_classification_fbeta(): fbeta_mean = metric.eval(True) fbeta2 = metric(x, y2) - assert np.allclose(fbeta, np.array([2/3, 2/3])) - assert np.allclose(fbeta2, np.array([2/3, 2/3])) - assert np.allclose(fbeta_mean, 2/3) + assert np.allclose(fbeta, np.array([2 / 3, 2 / 3])) + assert np.allclose(fbeta2, np.array([2 / 3, 2 / 3])) + assert np.allclose(fbeta_mean, 2 / 3) def test_fbeta_update1(): @@ -46,6 +46,7 @@ def test_fbeta_update1(): with pytest.raises(ValueError): metric.update(x, y) + def test_fbeta_update2(): x1 = Tensor(np.array([[0.2, 0.5, 0.7], [0.3, 0.1, 0.2], [0.9, 0.6, 0.5]])) y1 = Tensor(np.array([1, 0, 2])) diff --git a/tests/ut/python/metrics/test_loss.py b/tests/ut/python/metrics/test_loss.py index bfdcffdbd6d..15ea2985638 100644 --- a/tests/ut/python/metrics/test_loss.py +++ b/tests/ut/python/metrics/test_loss.py @@ -15,8 +15,9 @@ """test loss""" import numpy as np import pytest -from mindspore.nn.metrics import Loss + from mindspore import Tensor +from mindspore.nn.metrics import Loss def test_loss_inputs_error(): diff --git a/tests/ut/python/metrics/test_metric_factory.py b/tests/ut/python/metrics/test_metric_factory.py index cfbf4c77e3e..c7300238a1a 100644 --- a/tests/ut/python/metrics/test_metric_factory.py +++ b/tests/ut/python/metrics/test_metric_factory.py @@ -14,9 +14,11 @@ # ============================================================================ """test_metric_factory""" import math + import numpy as np -from mindspore.nn.metrics import get_metric_fn + from mindspore import Tensor +from mindspore.nn.metrics import get_metric_fn def test_classification_accuracy(): @@ -26,7 +28,7 @@ def test_classification_accuracy(): metric.clear() metric.update(x, y) accuracy = metric.eval() - assert math.isclose(accuracy, 2/3) + assert math.isclose(accuracy, 2 / 3) def test_classification_accuracy_by_alias(): @@ -36,7 +38,7 @@ def test_classification_accuracy_by_alias(): metric.clear() metric.update(x, y) accuracy = metric.eval() - assert math.isclose(accuracy, 2/3) + assert math.isclose(accuracy, 2 / 3) def test_classification_precision(): diff --git a/tests/ut/python/metrics/test_precision.py b/tests/ut/python/metrics/test_precision.py index 37e5ab1286a..6eac9474b40 100644 --- a/tests/ut/python/metrics/test_precision.py +++ b/tests/ut/python/metrics/test_precision.py @@ -14,10 +14,12 @@ # ============================================================================ """test_precision""" import math + import numpy as np import pytest -from mindspore.nn.metrics import Precision + from mindspore import Tensor +from mindspore.nn.metrics import Precision def test_classification_precision(): @@ -43,7 +45,7 @@ def test_multilabel_precision(): metric.update(x, y) precision = metric.eval() - assert np.equal(precision, np.array([1, 2/3, 1])).all() + assert np.equal(precision, np.array([1, 2 / 3, 1])).all() def test_average_precision(): @@ -54,7 +56,7 @@ def test_average_precision(): metric.update(x, y) precision = metric.eval(True) - 
assert math.isclose(precision, (1 + 2/3 + 1) / 3) + assert math.isclose(precision, (1 + 2 / 3 + 1) / 3) def test_num_precision(): diff --git a/tests/ut/python/metrics/test_recall.py b/tests/ut/python/metrics/test_recall.py index dcf42c1f720..6ef3c93cca9 100644 --- a/tests/ut/python/metrics/test_recall.py +++ b/tests/ut/python/metrics/test_recall.py @@ -14,10 +14,12 @@ # ============================================================================ """test recall""" import math + import numpy as np import pytest -from mindspore.nn.metrics import Recall + from mindspore import Tensor +from mindspore.nn.metrics import Recall def test_classification_recall(): @@ -43,7 +45,7 @@ def test_multilabel_recall(): metric.update(x, y) recall = metric.eval() - assert np.equal(recall, np.array([2/3, 2/3, 1])).all() + assert np.equal(recall, np.array([2 / 3, 2 / 3, 1])).all() def test_average_recall(): @@ -54,7 +56,7 @@ def test_average_recall(): metric.update(x, y) recall = metric.eval(True) - assert math.isclose(recall, (2/3 + 2/3 + 1) / 3) + assert math.isclose(recall, (2 / 3 + 2 / 3 + 1) / 3) def test_num_recall(): diff --git a/tests/ut/python/metrics/test_topk.py b/tests/ut/python/metrics/test_topk.py index 022a80e8919..5f13bec7f6a 100644 --- a/tests/ut/python/metrics/test_topk.py +++ b/tests/ut/python/metrics/test_topk.py @@ -14,10 +14,12 @@ # ============================================================================ """test topk""" import math + import numpy as np import pytest -from mindspore.nn.metrics import TopKCategoricalAccuracy, Top1CategoricalAccuracy, Top5CategoricalAccuracy + from mindspore import Tensor +from mindspore.nn.metrics import TopKCategoricalAccuracy, Top1CategoricalAccuracy, Top5CategoricalAccuracy def test_type_topk(): @@ -54,8 +56,8 @@ def test_topk(): topk.update(x, y) result = topk.eval() result2 = topk(x, y2) - assert math.isclose(result, 2/3) - assert math.isclose(result2, 2/3) + assert math.isclose(result, 2 / 3) + assert math.isclose(result2, 2 / 3) def test_zero_topk(): @@ -79,8 +81,8 @@ def test_top1(): topk.update(x, y) result = topk.eval() result2 = topk(x, y2) - assert math.isclose(result, 1/3) - assert math.isclose(result2, 1/3) + assert math.isclose(result, 1 / 3) + assert math.isclose(result2, 1 / 3) def test_top5(): @@ -97,5 +99,5 @@ def test_top5(): topk.update(x, y) result = topk.eval() result2 = topk(x, y2) - assert math.isclose(result, 2/3) - assert math.isclose(result2, 2/3) + assert math.isclose(result, 2 / 3) + assert math.isclose(result2, 2 / 3) diff --git a/tests/ut/python/model/__init__.py b/tests/ut/python/model/__init__.py index 5443c0ca48e..9f7610e25c4 100644 --- a/tests/ut/python/model/__init__.py +++ b/tests/ut/python/model/__init__.py @@ -15,6 +15,7 @@ """setup for pytest""" import mindspore.context as context + # pylint: disable=unused-argument def setup_module(module): context.set_context(mode=context.GRAPH_MODE) diff --git a/tests/ut/python/model/res18_example.py b/tests/ut/python/model/res18_example.py index 88753334655..ac7d77fc480 100644 --- a/tests/ut/python/model/res18_example.py +++ b/tests/ut/python/model/res18_example.py @@ -17,10 +17,10 @@ resnet50 example """ import numpy as np -from mindspore.common.api import _executor +import mindspore.nn as nn # pylint: disable=C0414 from mindspore import Tensor +from mindspore.common.api import _executor from mindspore.ops.operations import TensorAdd -import mindspore.nn as nn # pylint: disable=C0414 from ...train_step_wrap import train_step_with_loss_warp diff --git 
a/tests/ut/python/model/test_bert.py b/tests/ut/python/model/test_bert.py index 1964330c7c0..840f594be35 100644 --- a/tests/ut/python/model/test_bert.py +++ b/tests/ut/python/model/test_bert.py @@ -15,9 +15,8 @@ """ test bert cell """ import numpy as np import pytest -from mindspore import Model -from mindspore.nn.optim import AdamWeightDecay -from mindspore.model_zoo.Bert_NEZHA import BertConfig, BertModel, BertNetworkWithLoss, BertTrainOneStepCell + +from mindspore.model_zoo.Bert_NEZHA import BertConfig, BertModel from ....dataset_mock import MindData diff --git a/tests/ut/python/model/test_bert_cell.py b/tests/ut/python/model/test_bert_cell.py index 2cb642c75fd..a7257dced0c 100644 --- a/tests/ut/python/model/test_bert_cell.py +++ b/tests/ut/python/model/test_bert_cell.py @@ -14,26 +14,30 @@ # ============================================================================ """ test bert of graph compile """ import functools + import numpy as np + import mindspore.common.dtype as mstype +import mindspore.nn as nn +import mindspore.ops.composite as C +from mindspore.common.initializer import TruncatedNormal +from mindspore.common.parameter import ParameterTuple from mindspore.common.tensor import Tensor +from mindspore.model_zoo.Bert_NEZHA import BertPretrainingLoss, GetNextSentenceOutput +from mindspore.model_zoo.Bert_NEZHA.bert_for_pre_training import ClipGradients from mindspore.model_zoo.Bert_NEZHA.bert_model import BertConfig, \ EmbeddingLookup, EmbeddingPostprocessor, BertOutput, RelaPosMatrixGenerator, \ RelaPosEmbeddingsGenerator, SaturateCast, BertAttention, BertSelfAttention, \ BertEncoderCell, BertTransformer, CreateAttentionMaskFromInputMask, BertModel from mindspore.nn.layer.basic import Norm -from mindspore.model_zoo.Bert_NEZHA import BertPretrainingLoss, GetNextSentenceOutput -import mindspore.nn as nn -from mindspore.common.initializer import TruncatedNormal -from mindspore.common.parameter import ParameterTuple from mindspore.nn.optim import AdamWeightDecay, AdamWeightDecayDynamicLR -from mindspore.model_zoo.Bert_NEZHA.bert_for_pre_training import ClipGradients -import mindspore.ops.composite as C -from mindspore.ops import functional as F -from ....ops_common import convert from ....mindspore_test_framework.mindspore_test import mindspore_test -from ....mindspore_test_framework.pipeline.forward.compile_forward import pipeline_for_compile_forward_ge_graph_for_case_by_case_config -from ....mindspore_test_framework.pipeline.gradient.compile_gradient import pipeline_for_compile_grad_ge_graph_for_case_by_case_config +from ....mindspore_test_framework.pipeline.forward.compile_forward import \ + pipeline_for_compile_forward_ge_graph_for_case_by_case_config +from ....mindspore_test_framework.pipeline.gradient.compile_gradient import \ + pipeline_for_compile_grad_ge_graph_for_case_by_case_config +from ....ops_common import convert + def bert_trans(): """bert_trans""" @@ -53,10 +57,12 @@ def bert_trans(): net.set_train() return net + def set_train(net): net.set_train() return net + class NetForAdam(nn.Cell): def __init__(self): super(NetForAdam, self).__init__() @@ -66,8 +72,10 @@ class NetForAdam(nn.Cell): x = self.dense(x) return x + class TrainStepWrapForAdam(nn.Cell): """TrainStepWrapForAdam definition""" + def __init__(self, network): super(TrainStepWrapForAdam, self).__init__() self.network = network @@ -81,8 +89,10 @@ class TrainStepWrapForAdam(nn.Cell): grads = self.clip_gradients(grads, 1, 1.0) return self.optimizer(grads) + class TrainStepWrapForAdamDynamicLr(nn.Cell): 
"""TrainStepWrapForAdamDynamicLr definition""" + def __init__(self, network): super(TrainStepWrapForAdamDynamicLr, self).__init__() self.network = network @@ -95,16 +105,19 @@ class TrainStepWrapForAdamDynamicLr(nn.Cell): grads = C.grad_by_list_with_sens(self.network, weights)(x, self.sens) return self.optimizer(grads) + class TempC2Wrap(nn.Cell): - def __init__(self, op, c1=None, c2=None,): + def __init__(self, op, c1=None, c2=None, ): super(TempC2Wrap, self).__init__() self.op = op self.c1 = c1 self.c2 = c2 + def construct(self, x1): x = self.op(x1, self.c1, self.c2) return x + test_case_cell_ops = [ ('Norm_keepdims', { 'block': Norm(keep_dims=True), @@ -373,7 +386,7 @@ test_case_cell_ops = [ 'block': set_train(nn.Dense(in_channels=768, out_channels=3072, activation='gelu', - weight_init=TruncatedNormal(0.02),)), + weight_init=TruncatedNormal(0.02), )), 'desc_inputs': [[3, 768]], 'desc_bprop': [[3, 3072]]}), ('GetNextSentenceOutput', { @@ -396,26 +409,28 @@ test_case_cell_ops = [ 'block': TempC2Wrap(ClipGradients(), 1, 1.0), 'desc_inputs': [tuple(convert(shp) for shp in [[1], [1], [1]])], 'skip': ['backward', 'exec']}), - ] +] -test_case = functools.reduce(lambda x, y: x+y, [test_case_cell_ops]) +test_case = functools.reduce(lambda x, y: x + y, [test_case_cell_ops]) # use -k to select certain testcast # pytest tests/python/ops/test_ops.py::test_backward -k LayerNorm test_exec_case = filter(lambda x: 'skip' not in x[1] or - 'exec' not in x[1]['skip'], test_case) + 'exec' not in x[1]['skip'], test_case) test_backward_exec_case = filter(lambda x: 'skip' not in x[1] or - 'backward' not in x[1]['skip'] and 'backward_exec' - not in x[1]['skip'], test_case) + 'backward' not in x[1]['skip'] and 'backward_exec' + not in x[1]['skip'], test_case) test_check_gradient_case = filter(lambda x: 'skip' not in x[1] or - 'backward' not in x[1]['skip'] and 'backward_exec' - not in x[1]['skip'], test_case) + 'backward' not in x[1]['skip'] and 'backward_exec' + not in x[1]['skip'], test_case) + @mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config) def test_exec(): return test_exec_case + @mindspore_test(pipeline_for_compile_grad_ge_graph_for_case_by_case_config) def test_backward_exec(): return test_backward_exec_case diff --git a/tests/ut/python/model/test_lenet.py b/tests/ut/python/model/test_lenet.py index 228ff281e05..cc679c29740 100644 --- a/tests/ut/python/model/test_lenet.py +++ b/tests/ut/python/model/test_lenet.py @@ -15,16 +15,19 @@ """test lenet""" import numpy as np -import mindspore.nn as nn -from mindspore.common.api import _executor -from mindspore import Tensor -from mindspore.ops import operations as P import mindspore.context as context +import mindspore.nn as nn +from mindspore import Tensor +from mindspore.common.api import _executor +from mindspore.ops import operations as P from ....train_step_wrap import train_step_with_loss_warp, train_step_with_sens context.set_context(mode=context.GRAPH_MODE) + + class LeNet5(nn.Cell): """LeNet5 definition""" + def __init__(self): super(LeNet5, self).__init__() self.conv1 = nn.Conv2d(1, 6, 5, pad_mode='valid') diff --git a/tests/ut/python/model/test_lenet_core_after_exception.py b/tests/ut/python/model/test_lenet_core_after_exception.py index b13234395fe..fdc6e81ab7b 100644 --- a/tests/ut/python/model/test_lenet_core_after_exception.py +++ b/tests/ut/python/model/test_lenet_core_after_exception.py @@ -14,8 +14,8 @@ # ============================================================================ 
"""test_lenet_core_after_exception""" import numpy as np - import pytest + import mindspore.nn as nn from mindspore.common.api import _executor from mindspore.common.tensor import Tensor @@ -25,6 +25,7 @@ from ....train_step_wrap import train_step_with_loss_warp class LeNet5(nn.Cell): """LeNet5 definition""" + def __init__(self): super(LeNet5, self).__init__() self.conv1 = nn.Conv2d(3, 6, 5, pad_mode="valid") diff --git a/tests/ut/python/model/test_mix_precision.py b/tests/ut/python/model/test_mix_precision.py index 0c762f42b92..9a57160c98a 100644 --- a/tests/ut/python/model/test_mix_precision.py +++ b/tests/ut/python/model/test_mix_precision.py @@ -15,23 +15,24 @@ """test_mix_precision""" import numpy as np -import mindspore.nn as nn import mindspore.common.dtype as mstype +import mindspore.nn as nn +from mindspore import Tensor, context +from mindspore.common import ParameterTuple from mindspore.common.api import _executor from mindspore.common.parameter import Parameter -from mindspore.common import ParameterTuple -from mindspore import Tensor, context -from mindspore.ops import operations as P -from mindspore.ops import composite as C -from mindspore.nn import TrainOneStepCell, WithLossCell from mindspore.nn import Momentum -from ....train_step_wrap import train_step_with_loss_warp -from tests.ops_common import convert +from mindspore.nn import TrainOneStepCell, WithLossCell +from mindspore.ops import composite as C +from mindspore.ops import operations as P from mindspore.train.parallel_utils import ParallelMode +from tests.ops_common import convert +from ....train_step_wrap import train_step_with_loss_warp class LeNet5(nn.Cell): """LeNet5""" + def __init__(self): super(LeNet5, self).__init__() self.conv1 = nn.Conv2d(1, 6, 5, pad_mode='valid') @@ -148,10 +149,13 @@ def test_cast(): """test grad of PReLU, which cause AddN(generated by grad) fail""" + + class IRBlockZ(nn.Cell): def __init__(self, inplanes, planes): super(IRBlockZ, self).__init__() - self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=1, pad_mode="same", group=1, has_bias=False, dilation=1) + self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=1, pad_mode="same", group=1, has_bias=False, + dilation=1) self.act_layer = nn.PReLU(planes) def construct(self, x): diff --git a/tests/ut/python/model/test_utils.py b/tests/ut/python/model/test_utils.py index e2a66275b8b..cd130524cc1 100644 --- a/tests/ut/python/model/test_utils.py +++ b/tests/ut/python/model/test_utils.py @@ -14,6 +14,7 @@ # ============================================================================ """test_dataset_utils""" import pytest + import mindspore as ms from mindspore.train._utils import _construct_tensor_list diff --git a/tests/ut/python/nn/__init__.py b/tests/ut/python/nn/__init__.py index 5443c0ca48e..9f7610e25c4 100644 --- a/tests/ut/python/nn/__init__.py +++ b/tests/ut/python/nn/__init__.py @@ -15,6 +15,7 @@ """setup for pytest""" import mindspore.context as context + # pylint: disable=unused-argument def setup_module(module): context.set_context(mode=context.GRAPH_MODE) diff --git a/tests/ut/python/nn/optim/test_adam.py b/tests/ut/python/nn/optim/test_adam.py index 269f276376e..e47a0d67043 100644 --- a/tests/ut/python/nn/optim/test_adam.py +++ b/tests/ut/python/nn/optim/test_adam.py @@ -15,16 +15,18 @@ """ test adam """ import numpy as np import pytest + import mindspore.nn as nn -from mindspore.common.api import _executor from mindspore import Tensor, Parameter +from mindspore.common.api import _executor from mindspore.nn 
import TrainOneStepCell, WithLossCell -from mindspore.ops import operations as P from mindspore.nn.optim import AdamWeightDecay, AdamWeightDecayDynamicLR +from mindspore.ops import operations as P class Net(nn.Cell): """ Net definition """ + def __init__(self): super(Net, self).__init__() self.weight = Parameter(Tensor(np.ones([64, 10]).astype(np.float32)), name="weight") diff --git a/tests/ut/python/nn/optim/test_ftrl.py b/tests/ut/python/nn/optim/test_ftrl.py index adddfa05bf9..cbaa2a4520b 100644 --- a/tests/ut/python/nn/optim/test_ftrl.py +++ b/tests/ut/python/nn/optim/test_ftrl.py @@ -15,6 +15,7 @@ """ test FTRL """ import numpy as np + import mindspore.nn as nn from mindspore import Tensor, Parameter from mindspore.common.api import _executor @@ -47,4 +48,3 @@ def test_ftrl(): net_with_loss = WithLossCell(net, loss) train_network = TrainOneStepCell(net_with_loss, optimizer) _executor.compile(train_network, inputs, label) - diff --git a/tests/ut/python/nn/optim/test_lamb.py b/tests/ut/python/nn/optim/test_lamb.py index 502c7feed11..2d18207e0ec 100644 --- a/tests/ut/python/nn/optim/test_lamb.py +++ b/tests/ut/python/nn/optim/test_lamb.py @@ -14,16 +14,18 @@ # ============================================================================ """ test lamb """ import numpy as np + import mindspore.nn as nn -from mindspore.common.api import _executor from mindspore import Tensor, Parameter +from mindspore.common.api import _executor from mindspore.nn import TrainOneStepCell, WithLossCell -from mindspore.ops import operations as P from mindspore.nn.optim import Lamb +from mindspore.ops import operations as P class Net(nn.Cell): """ Net definition """ + def __init__(self): super(Net, self).__init__() self.weight = Parameter(Tensor(np.ones([64, 10]).astype(np.float32)), name="weight") @@ -38,6 +40,7 @@ class Net(nn.Cell): class NetWithoutWeight(nn.Cell): """ NetWithoutWeight definition """ + def __init__(self): super(NetWithoutWeight, self).__init__() self.matmul = P.MatMul() diff --git a/tests/ut/python/nn/optim/test_lars.py b/tests/ut/python/nn/optim/test_lars.py index 17bbe69fe6d..2088f65f934 100644 --- a/tests/ut/python/nn/optim/test_lars.py +++ b/tests/ut/python/nn/optim/test_lars.py @@ -12,15 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================ +from collections import Counter + import numpy as np + import mindspore.nn as nn from mindspore import Tensor, Parameter +from mindspore.common import dtype as mstype from mindspore.common.api import _executor from mindspore.nn import TrainOneStepCell, WithLossCell from mindspore.nn.optim import LARS, Momentum from mindspore.ops import operations as P -from mindspore.common import dtype as mstype -from collections import Counter def multisteplr(total_steps, milestone, base_lr=0.9, gamma=0.1, dtype=mstype.float32): @@ -56,7 +58,7 @@ def test_lars_multi_step_lr(): lr = multisteplr(10, [2, 6]) SGD = Momentum(net.trainable_params(), lr, 0.9) optimizer = LARS(SGD, epsilon=1e-08, hyperpara=0.02, decay_filter=lambda x: 'bn' not in x.name, - lars_filter=lambda x: 'bn' not in x.name) + lars_filter=lambda x: 'bn' not in x.name) net_with_loss = WithLossCell(net, loss) train_network = TrainOneStepCell(net_with_loss, optimizer) @@ -77,4 +79,4 @@ def test_lars_float_lr(): net_with_loss = WithLossCell(net, loss) train_network = TrainOneStepCell(net_with_loss, optimizer) - _executor.compile(train_network, inputs, label) \ No newline at end of file + _executor.compile(train_network, inputs, label) diff --git a/tests/ut/python/nn/optim/test_lr_schedule.py b/tests/ut/python/nn/optim/test_lr_schedule.py index fc2b8a3f4d4..c1a3a5e5bfe 100644 --- a/tests/ut/python/nn/optim/test_lr_schedule.py +++ b/tests/ut/python/nn/optim/test_lr_schedule.py @@ -14,19 +14,17 @@ # ============================================================================ """ test_lr_schedule """ import numpy as np -from mindspore.nn import Cell -from mindspore.ops.operations import BiasAdd, MatMul + from mindspore import Parameter, ParameterTuple, Tensor -from mindspore.nn import WithLossCell -from mindspore.nn.optim import Momentum -from mindspore.nn import SoftmaxCrossEntropyWithLogits -from mindspore.ops.composite import grad_by_list -from mindspore.ops import functional as F +from mindspore.nn import Cell from mindspore.nn.optim import Optimizer +from mindspore.ops.composite import grad_by_list +from mindspore.ops.operations import BiasAdd, MatMul class Net(Cell): """ Net definition """ + def __init__(self): super(Net, self).__init__() self.weight = Parameter(Tensor(np.ones([64, 10])), name="weight") @@ -41,6 +39,7 @@ class Net(Cell): class _TrainOneStepCell(Cell): """ _TrainOneStepCell definition """ + def __init__(self, network, optimizer): """ Append an optimizer to the training network after that the construct @@ -67,4 +66,3 @@ class _TrainOneStepCell(Cell): if self.lr_schedule: self.schedule.update_lr(*args) return self.optimizer(grads) - diff --git a/tests/ut/python/nn/optim/test_momentum.py b/tests/ut/python/nn/optim/test_momentum.py index 60fee6cc922..d3f1c4c218a 100644 --- a/tests/ut/python/nn/optim/test_momentum.py +++ b/tests/ut/python/nn/optim/test_momentum.py @@ -14,16 +14,18 @@ # ============================================================================ """ test momentum """ import numpy as np + import mindspore.nn as nn -from mindspore.common.api import _executor from mindspore import Tensor, Parameter +from mindspore.common.api import _executor from mindspore.nn import TrainOneStepCell, WithLossCell -from mindspore.ops import operations as P from mindspore.nn.optim import Momentum +from mindspore.ops import operations as P class Net(nn.Cell): """ Net definition """ + def __init__(self): super(Net, self).__init__() self.weight = 
Parameter(Tensor(np.ones([64, 10]).astype(np.float32)), name="weight") diff --git a/tests/ut/python/nn/optim/test_optimizer.py b/tests/ut/python/nn/optim/test_optimizer.py index 9f1ec9a36f0..6594550687d 100644 --- a/tests/ut/python/nn/optim/test_optimizer.py +++ b/tests/ut/python/nn/optim/test_optimizer.py @@ -15,9 +15,10 @@ """ test optimizer """ import numpy as np import pytest + from mindspore import Tensor -from mindspore.nn.optim import Optimizer, SGD, Adam, AdamWeightDecay, AdamWeightDecayDynamicLR from mindspore.common.parameter import Parameter +from mindspore.nn.optim import Optimizer, SGD, Adam, AdamWeightDecay, AdamWeightDecayDynamicLR class IterableObjc: @@ -30,6 +31,7 @@ class IterableObjc: params = IterableObjc() + class TestOptimizer(): def test_init(self): Optimizer(0.5, params) @@ -44,6 +46,7 @@ class TestOptimizer(): class TestAdam(): """ TestAdam definition """ + def test_init(self): Adam(params, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-8, use_locking=False, use_nesterov=False, weight_decay=0.0, loss_scale=1.0) @@ -58,6 +61,7 @@ class TestAdam(): class TestSGD(): """ TestSGD definition """ + def test_init(self): with pytest.raises(ValueError): SGD(params, learning_rate=0.1, momentum=-0.1, dampening=0, weight_decay=0, nesterov=False) @@ -68,6 +72,7 @@ class TestSGD(): class TestNullParam(): """ TestNullParam definition """ + def test_optim_init(self): with pytest.raises(ValueError): Optimizer(0.1, None) @@ -84,8 +89,10 @@ class TestNullParam(): with pytest.raises(ValueError): SGD(None) + class TestUnsupportParam(): """ TestUnsupportParam definition """ + def test_optim_init(self): with pytest.raises(ValueError): Optimizer(0.1, (1, 2, 3)) diff --git a/tests/ut/python/nn/optim/test_rmsprop.py b/tests/ut/python/nn/optim/test_rmsprop.py index 647f1e8d45b..2e3fc90f5f1 100644 --- a/tests/ut/python/nn/optim/test_rmsprop.py +++ b/tests/ut/python/nn/optim/test_rmsprop.py @@ -15,16 +15,18 @@ """ test rmsprop """ import numpy as np import pytest + import mindspore.nn as nn -from mindspore.common.api import _executor from mindspore import Tensor, Parameter +from mindspore.common.api import _executor from mindspore.nn import TrainOneStepCell, WithLossCell -from mindspore.ops import operations as P from mindspore.nn.optim import RMSProp +from mindspore.ops import operations as P class Net(nn.Cell): """ Net definition """ + def __init__(self): super(Net, self).__init__() self.weight = Parameter(Tensor(np.ones([64, 10]).astype(np.float32)), name="weight") @@ -59,4 +61,3 @@ def test_rmsprop_e(): with pytest.raises(TypeError): RMSProp(net.get_parameters(), momentum=1, learning_rate=0.1) - diff --git a/tests/ut/python/nn/test_activation.py b/tests/ut/python/nn/test_activation.py index d035e2971de..2ec8bb37c7d 100755 --- a/tests/ut/python/nn/test_activation.py +++ b/tests/ut/python/nn/test_activation.py @@ -14,11 +14,13 @@ # ============================================================================ """ test Activations """ import numpy as np + import mindspore.nn as nn -from mindspore.common.api import _executor from mindspore import Tensor +from mindspore.common.api import _executor from ..ut_filter import non_graph_engine + class SoftmaxNet(nn.Cell): def __init__(self, dim): super(SoftmaxNet, self).__init__() diff --git a/tests/ut/python/nn/test_batchnorm.py b/tests/ut/python/nn/test_batchnorm.py index e73b7ebbf0f..100a91a44b0 100644 --- a/tests/ut/python/nn/test_batchnorm.py +++ b/tests/ut/python/nn/test_batchnorm.py @@ -17,8 +17,8 @@ import numpy as np import pytest import
mindspore.nn as nn -from mindspore.common.api import _executor from mindspore import Tensor, Parameter +from mindspore.common.api import _executor def test_bn_pars_valid1(): @@ -62,11 +62,12 @@ class GroupNet(nn.Cell): def __init__(self): super(GroupNet, self).__init__() self.group_bn = nn.GroupNorm() + def construct(self, x): return self.group_bn(x) def test_compile_groupnorm(): net = nn.GroupNorm(16, 64) - input_data = Tensor(np.random.rand(1,64,256,256).astype(np.float32)) + input_data = Tensor(np.random.rand(1, 64, 256, 256).astype(np.float32)) _executor.compile(net, input_data) diff --git a/tests/ut/python/nn/test_cell.py b/tests/ut/python/nn/test_cell.py index c583b27c1d9..30066ee8556 100644 --- a/tests/ut/python/nn/test_cell.py +++ b/tests/ut/python/nn/test_cell.py @@ -15,11 +15,10 @@ """ test cell """ import numpy as np import pytest -import mindspore.context as context + import mindspore.nn as nn from mindspore import Tensor, Parameter from mindspore.common.api import _executor -from ..ut_filter import non_graph_engine class ModA(nn.Cell): @@ -90,7 +89,7 @@ class ConvNet(nn.Cell): self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same") self.flatten = nn.Flatten() self.fc = nn.Dense( - int(ConvNet.image_h*ConvNet.image_w*ConvNet.output_ch/(4*4)), + int(ConvNet.image_h * ConvNet.image_w * ConvNet.output_ch / (4 * 4)), num_classes) def construct(self, x): diff --git a/tests/ut/python/nn/test_cell_wrapper.py b/tests/ut/python/nn/test_cell_wrapper.py index 148d42ab64b..0fddf7ff680 100755 --- a/tests/ut/python/nn/test_cell_wrapper.py +++ b/tests/ut/python/nn/test_cell_wrapper.py @@ -12,14 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ -import pytest import numpy as np +import pytest + import mindspore.nn as nn from mindspore import Tensor, Parameter +from mindspore.common import dtype as mstype from mindspore.common.api import _executor from mindspore.nn import TrainOneStepCell, WithLossCell, ParameterUpdate from mindspore.nn.optim import Momentum -from mindspore.common import dtype as mstype from mindspore.ops import operations as P diff --git a/tests/ut/python/nn/test_checkparameter.py b/tests/ut/python/nn/test_checkparameter.py index e53cdaf6d28..b640dd2e29e 100644 --- a/tests/ut/python/nn/test_checkparameter.py +++ b/tests/ut/python/nn/test_checkparameter.py @@ -14,10 +14,10 @@ # ============================================================================ """ test checkparameter """ import pytest + from mindspore._checkparam import check_int, check_int_positive, \ check_input_format, check_bool, twice - kernel_size = 5 kernel_size1 = twice(kernel_size) assert kernel_size1 == (5, 5) diff --git a/tests/ut/python/nn/test_clip_by_norm.py b/tests/ut/python/nn/test_clip_by_norm.py index 54b586f8486..ff7d1281081 100644 --- a/tests/ut/python/nn/test_clip_by_norm.py +++ b/tests/ut/python/nn/test_clip_by_norm.py @@ -14,11 +14,12 @@ # ============================================================================ """ test clip_by_norm """ import numpy as np -import pytest + import mindspore.nn as nn from mindspore import Tensor from ..ut_filter import non_graph_engine + @non_graph_engine def test_clip_by_norm(): clip_by_norm = nn.ClipByNorm() diff --git a/tests/ut/python/nn/test_container.py b/tests/ut/python/nn/test_container.py index b055d020d86..4103f203fa9 100644 --- a/tests/ut/python/nn/test_container.py +++ 
b/tests/ut/python/nn/test_container.py @@ -14,12 +14,13 @@ # ============================================================================ """ test container """ from collections import OrderedDict + import numpy as np import pytest + import mindspore.nn as nn from mindspore import Tensor - weight = Tensor(np.ones([2, 2])) conv2 = nn.Conv2d(3, 64, (3, 3), stride=2, padding=0) @@ -31,6 +32,7 @@ avg_pool = nn.AvgPool2d(kernel_size, stride) class TestSequentialCell(): """ TestSequentialCell """ + def test_SequentialCell_init(self): m = nn.SequentialCell() assert type(m).__name__ == 'SequentialCell' @@ -86,6 +88,7 @@ class TestSequentialCell(): class TestCellList(): """ TestCellList """ + def test_init1(self): cell_list = nn.CellList([conv2, avg_pool]) assert len(cell_list) == 2 @@ -118,7 +121,6 @@ class TestCellList(): cell = item assert type(cell).__name__ == 'AvgPool2d' - def test_add(self): cell_list = nn.CellList([conv2, avg_pool]) cell_list += [conv2] diff --git a/tests/ut/python/nn/test_dense.py b/tests/ut/python/nn/test_dense.py index 0845983bb0a..f1583169fef 100644 --- a/tests/ut/python/nn/test_dense.py +++ b/tests/ut/python/nn/test_dense.py @@ -15,10 +15,11 @@ """ test nn.Dense """ import numpy as np import pytest -import mindspore.nn as nn -from mindspore.common.api import _executor + import mindspore.context as context +import mindspore.nn as nn from mindspore import Tensor +from mindspore.common.api import _executor from ..ut_filter import non_graph_engine @@ -68,6 +69,7 @@ def test_dense_channels_error(): class Net(nn.Cell): """ Net definition """ + def __init__(self, input_channels, output_channels, diff --git a/tests/ut/python/nn/test_dropout.py b/tests/ut/python/nn/test_dropout.py index ec67a4c77bf..93cb9c81edf 100644 --- a/tests/ut/python/nn/test_dropout.py +++ b/tests/ut/python/nn/test_dropout.py @@ -15,12 +15,14 @@ """ Test Dropout """ import numpy as np import pytest + import mindspore.nn as nn from mindspore import Tensor from mindspore import context context.set_context(device_target="Ascend") + def test_check_dropout_3(): Tensor(np.ones([20, 16, 50]).astype(np.int32)) with pytest.raises(ValueError): diff --git a/tests/ut/python/nn/test_dynamic_lr.py b/tests/ut/python/nn/test_dynamic_lr.py index 8d03be17661..c53f28d5f7f 100644 --- a/tests/ut/python/nn/test_dynamic_lr.py +++ b/tests/ut/python/nn/test_dynamic_lr.py @@ -14,7 +14,7 @@ # ============================================================================ """ Test Dynamic Learning Rate """ import pytest -import mindspore + from mindspore.nn import dynamic_lr as dr milestone = [10, 20, 30] @@ -29,8 +29,9 @@ min_lr = 0.01 max_lr = 0.1 power = 0.5 + class TestInputs: - def test_milestone1(self): + def test_milestone1(self): milestone1 = 1 with pytest.raises(TypeError): dr.piecewise_constant_lr(milestone1, learning_rates) @@ -58,7 +59,7 @@ class TestInputs: lr = True with pytest.raises(TypeError): dr.exponential_decay_lr(lr, decay_rate, total_step, step_per_epoch, decay_epoch) - + with pytest.raises(TypeError): dr.polynomial_decay_lr(lr, end_learning_rate, total_step, step_per_epoch, decay_epoch, power) @@ -71,7 +72,7 @@ class TestInputs: dr.polynomial_decay_lr(lr, end_learning_rate, total_step, step_per_epoch, decay_epoch, power) def test_end_learning_rate_type(self): - lr = True + lr = True with pytest.raises(TypeError): dr.polynomial_decay_lr(learning_rate, lr, total_step, step_per_epoch, decay_epoch, power) @@ -127,7 +128,7 @@ class TestInputs: step_per_epoch1 = -1 with pytest.raises(ValueError): 
dr.exponential_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch1, decay_epoch) - + with pytest.raises(ValueError): dr.cosine_decay_lr(min_lr, max_lr, total_step, step_per_epoch1, decay_epoch) @@ -226,9 +227,10 @@ def test_cosine_decay(): lr = dr.cosine_decay_lr(min_lr, max_lr, total_step, step_per_epoch, decay_epoch) assert len(lr) == total_step + def test_polynomial_decay(): lr1 = dr.polynomial_decay_lr(learning_rate, end_learning_rate, total_step, step_per_epoch, decay_epoch, power) assert len(lr1) == total_step lr2 = dr.polynomial_decay_lr(learning_rate, end_learning_rate, total_step, step_per_epoch, decay_epoch, power, - True) + True) assert len(lr2) == total_step diff --git a/tests/ut/python/nn/test_embedding.py b/tests/ut/python/nn/test_embedding.py index 3e1c46d223c..e0f2f78f572 100644 --- a/tests/ut/python/nn/test_embedding.py +++ b/tests/ut/python/nn/test_embedding.py @@ -14,11 +14,10 @@ # ============================================================================ """ test_embedding """ import numpy as np -import pytest -from mindspore.model_zoo.Bert_NEZHA import EmbeddingLookup, EmbeddingPostprocessor from mindspore import Tensor from mindspore import dtype as mstype +from mindspore.model_zoo.Bert_NEZHA import EmbeddingLookup, EmbeddingPostprocessor from ..ut_filter import non_graph_engine @@ -39,6 +38,7 @@ def test_check_embedding_lookup_2(): use_one_hot_embeddings=True) m(Tensor(np.ones([128]), mstype.int32)) + @non_graph_engine def test_check_embedding_lookup_3(): m = EmbeddingLookup(vocab_size=32000, @@ -48,11 +48,12 @@ def test_check_embedding_lookup_3(): initializer_range=0.01) m(Tensor(np.ones([128]), mstype.int32)) + @non_graph_engine def test_embedding_post_1(): m = EmbeddingPostprocessor(embedding_size=768, - embedding_shape=[1, 128, 768], - use_token_type=True) + embedding_shape=[1, 128, 768], + use_token_type=True) m(Tensor(np.ones([128]), mstype.int32), Tensor(np.ones([1, 128, 768]), mstype.float32)) diff --git a/tests/ut/python/nn/test_flatten.py b/tests/ut/python/nn/test_flatten.py index 0045f811c81..c4b4cbefc37 100644 --- a/tests/ut/python/nn/test_flatten.py +++ b/tests/ut/python/nn/test_flatten.py @@ -16,9 +16,10 @@ test flatten api """ import numpy as np + import mindspore.nn as nn -from mindspore.common.api import _executor from mindspore import Tensor +from mindspore.common.api import _executor class Net(nn.Cell): diff --git a/tests/ut/python/nn/test_image_gradients.py b/tests/ut/python/nn/test_image_gradients.py index e268ceb9d9b..7971a8deb07 100644 --- a/tests/ut/python/nn/test_image_gradients.py +++ b/tests/ut/python/nn/test_image_gradients.py @@ -15,14 +15,17 @@ """ test image gradients """ import numpy as np import pytest -import mindspore.nn as nn -import mindspore.context as context + import mindspore.common.dtype as mstype +import mindspore.context as context +import mindspore.nn as nn from mindspore import Tensor from mindspore.common.api import _executor from mindspore.common.api import ms_function context.set_context(device_target="Ascend") + + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() @@ -32,9 +35,10 @@ class Net(nn.Cell): def construct(self, x): return self.image_gradients(x) + def test_compile(): # input shape 1 x 1 x 2 x 2 - image = Tensor(np.array([[[[1,2],[3,4]]]]), dtype=mstype.int32) + image = Tensor(np.array([[[[1, 2], [3, 4]]]]), dtype=mstype.int32) net = Net() _executor.compile(net, image) @@ -42,16 +46,17 @@ def test_compile(): def test_compile_multi_channel(): # input shape 4 x 2 x 2 x 2 dtype = 
mstype.int32 - image = Tensor(np.array([[[[1,2],[3,4]], [[5,6],[7,8]]], - [[[3,5],[7,9]], [[11,13],[15,17]]], - [[[5,10],[15,20]], [[25,30],[35,40]]], - [[[10,20],[30,40]], [[50,60],[70,80]]]]), dtype=dtype) + image = Tensor(np.array([[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], + [[[3, 5], [7, 9]], [[11, 13], [15, 17]]], + [[[5, 10], [15, 20]], [[25, 30], [35, 40]]], + [[[10, 20], [30, 40]], [[50, 60], [70, 80]]]]), dtype=dtype) net = Net() _executor.compile(net, image) + def test_invalid_5d_input(): dtype = mstype.float32 image = Tensor(np.random.random([4, 1, 16, 16, 1]), dtype=dtype) net = Net() with pytest.raises(ValueError): - _executor.compile(net, image) \ No newline at end of file + _executor.compile(net, image) diff --git a/tests/ut/python/nn/test_loss.py b/tests/ut/python/nn/test_loss.py index e5e6c438855..4139e32d742 100644 --- a/tests/ut/python/nn/test_loss.py +++ b/tests/ut/python/nn/test_loss.py @@ -14,12 +14,12 @@ # ============================================================================ """ test loss """ import numpy as np -import pytest + import mindspore.nn as nn from mindspore import Tensor from mindspore.common.api import _executor from ..ut_filter import non_graph_engine -import mindspore + def test_L1Loss(): loss = nn.L1Loss() @@ -60,5 +60,5 @@ def test_SoftmaxCrossEntropyExpand(): loss = nn.SoftmaxCrossEntropyExpand() logits = Tensor(np.random.randint(0, 9, [100, 10]).astype(np.float32)) - labels = Tensor(np.random.randint(0, 9, [10,]).astype(np.float32)) + labels = Tensor(np.random.randint(0, 9, [10, ]).astype(np.float32)) _executor.compile(loss, logits, labels) diff --git a/tests/ut/python/nn/test_lstm.py b/tests/ut/python/nn/test_lstm.py index 18a904dc99b..b5f7d88c522 100644 --- a/tests/ut/python/nn/test_lstm.py +++ b/tests/ut/python/nn/test_lstm.py @@ -14,6 +14,7 @@ # ============================================================================ """ test lstm """ import pytest + import mindspore.context as context from mindspore import nn from ..ut_filter import run_on_gpu @@ -22,6 +23,7 @@ from ....ops_common import convert class LstmTestNet(nn.Cell): """ LstmTestNet definition """ + def __init__(self, input_size, hidden_size, num_layers, has_bias, batch_first, bidirectional): super(LstmTestNet, self).__init__() self.lstm = nn.LSTM(input_size=input_size, @@ -32,7 +34,6 @@ class LstmTestNet(nn.Cell): bidirectional=bidirectional, dropout=0.0) - def construct(self, inp, h0, c0): return self.lstm(inp, (h0, c0)) @@ -86,6 +87,7 @@ def test_compile(args): out = net(*inputs) print(f"out: {out}") + @run_on_gpu @pytest.mark.parametrize('args', test_case_cell_ops, ids=lambda x: x[0]) def test_execute(args): diff --git a/tests/ut/python/nn/test_nn_embedding.py b/tests/ut/python/nn/test_nn_embedding.py index 5be91aee218..91da404a2b1 100755 --- a/tests/ut/python/nn/test_nn_embedding.py +++ b/tests/ut/python/nn/test_nn_embedding.py @@ -14,10 +14,11 @@ # ============================================================================ """ test nn embedding """ import numpy as np + from mindspore import Tensor from mindspore.common import dtype -from mindspore.nn import Embedding from mindspore.common.api import _executor +from mindspore.nn import Embedding from ..ut_filter import non_graph_engine @@ -41,6 +42,7 @@ def test_check_embedding_3(): input_data = Tensor(np.ones([8, 128]), dtype.int32) _executor.compile(net, input_data) + @non_graph_engine def test_print_embedding(): net = Embedding(20000, 768, False) diff --git a/tests/ut/python/nn/test_nn_pad.py 
b/tests/ut/python/nn/test_nn_pad.py index a8b66bae5cc..5e0f7108d60 100644 --- a/tests/ut/python/nn/test_nn_pad.py +++ b/tests/ut/python/nn/test_nn_pad.py @@ -13,13 +13,12 @@ # limitations under the License. # ============================================================================ """ test nn pad """ -from mindspore import Tensor -from mindspore.ops import operations as P -import mindspore.nn as nn -from mindspore.ops.composite import GradOperation -from mindspore.common.api import ms_function import numpy as np -import mindspore.context as context + +import mindspore.nn as nn +from mindspore import Tensor +from mindspore.common.api import ms_function +from mindspore.ops.composite import GradOperation class Net(nn.Cell): diff --git a/tests/ut/python/nn/test_norm.py b/tests/ut/python/nn/test_norm.py index 72d18962964..a46ff5e3e0a 100644 --- a/tests/ut/python/nn/test_norm.py +++ b/tests/ut/python/nn/test_norm.py @@ -14,6 +14,7 @@ # ============================================================================ """ test norm """ import numpy as np + import mindspore.nn as nn from mindspore import Tensor from mindspore.common.api import _executor diff --git a/tests/ut/python/nn/test_parameter.py b/tests/ut/python/nn/test_parameter.py index d6bc40ba02c..6d384614d98 100644 --- a/tests/ut/python/nn/test_parameter.py +++ b/tests/ut/python/nn/test_parameter.py @@ -15,10 +15,11 @@ """ test parameter """ import numpy as np import pytest + from mindspore import Tensor, Parameter, ParameterTuple +from mindspore._checkparam import _check_str_by_regular from mindspore.common import dtype as mstype from mindspore.common.initializer import initializer -from mindspore._checkparam import _check_str_by_regular def test_parameter_init(): @@ -30,7 +31,7 @@ def test_parameter_init(): def test_parameter_tuple_illegal(): p1 = Parameter(initializer(0, [1], mstype.int32), name="global_step1") p2 = Parameter(initializer(0, [1], mstype.int32), name="global_step2") - plist = [p1,p2] + plist = [p1, p2] plist2 = [p1, "str"] ptuple = (p1, p2) ptuple_str = ("2", "1") @@ -100,21 +101,21 @@ def test_parameter_init_illegal(): with pytest.raises(TypeError): Parameter(tensor, name=data_str, requires_grad=data_tuple) - Parameter(tensor, name=data_str, requires_grad=data_bool,layerwise_parallel=data_bool) + Parameter(tensor, name=data_str, requires_grad=data_bool, layerwise_parallel=data_bool) with pytest.raises(TypeError): - Parameter(tensor, name=data_str, requires_grad=data_bool,layerwise_parallel=dat) + Parameter(tensor, name=data_str, requires_grad=data_bool, layerwise_parallel=dat) with pytest.raises(TypeError): - Parameter(tensor, name=data_str, requires_grad=data_bool,layerwise_parallel=tensor) + Parameter(tensor, name=data_str, requires_grad=data_bool, layerwise_parallel=tensor) with pytest.raises(TypeError): - Parameter(tensor, name=data_str, requires_grad=data_bool,layerwise_parallel=data_none) + Parameter(tensor, name=data_str, requires_grad=data_bool, layerwise_parallel=data_none) with pytest.raises(TypeError): - Parameter(tensor, name=data_str, requires_grad=data_bool,layerwise_parallel=data_str) + Parameter(tensor, name=data_str, requires_grad=data_bool, layerwise_parallel=data_str) with pytest.raises(TypeError): - Parameter(tensor, name=data_str, requires_grad=data_bool,layerwise_parallel=data_int) + Parameter(tensor, name=data_str, requires_grad=data_bool, layerwise_parallel=data_int) with pytest.raises(TypeError): - Parameter(tensor, name=data_str, requires_grad=data_bool,layerwise_parallel=data_list) + 
Parameter(tensor, name=data_str, requires_grad=data_bool, layerwise_parallel=data_list) with pytest.raises(TypeError): - Parameter(tensor, name=data_str, requires_grad=data_bool,layerwise_parallel=data_tuple) + Parameter(tensor, name=data_str, requires_grad=data_bool, layerwise_parallel=data_tuple) def test_check_str_by_regular(): diff --git a/tests/ut/python/nn/test_pooling.py b/tests/ut/python/nn/test_pooling.py index 863ffc555ff..0967fc3bfcb 100644 --- a/tests/ut/python/nn/test_pooling.py +++ b/tests/ut/python/nn/test_pooling.py @@ -16,9 +16,10 @@ test pooling api """ import numpy as np + import mindspore.nn as nn -from mindspore.common.api import _executor from mindspore import Tensor +from mindspore.common.api import _executor class AvgNet(nn.Cell): @@ -40,6 +41,7 @@ def test_compile_avg(): class MaxNet(nn.Cell): """ MaxNet definition """ + def __init__(self, kernel_size, stride=None, @@ -68,7 +70,8 @@ class Avg1dNet(nn.Cell): def construct(self, x): return self.avg1d(x) + def test_avg1d(): net = Avg1dNet(6, 1) input = Tensor(np.random.randint(0, 255, [1, 3, 6]).astype(np.float32)) - _executor.compile(net, input) \ No newline at end of file + _executor.compile(net, input) diff --git a/tests/ut/python/nn/test_psnr.py b/tests/ut/python/nn/test_psnr.py index c07d2468106..caed021c66a 100644 --- a/tests/ut/python/nn/test_psnr.py +++ b/tests/ut/python/nn/test_psnr.py @@ -17,11 +17,11 @@ test psnr """ import numpy as np import pytest + import mindspore.nn as nn +from mindspore import Tensor from mindspore.common import dtype as mstype from mindspore.common.api import _executor -from mindspore import Tensor - class PSNRNet(nn.Cell): @@ -40,6 +40,7 @@ def test_compile_psnr(): img2 = Tensor(np.random.random((8, 3, 16, 16))) _executor.compile(net, img1, img2) + def test_compile_psnr_grayscale(): max_val = 255 net = PSNRNet(max_val) @@ -47,21 +48,25 @@ def test_compile_psnr_grayscale(): img2 = Tensor(np.random.randint(0, 256, (8, 1, 16, 16), np.uint8)) _executor.compile(net, img1, img2) + def test_psnr_max_val_negative(): max_val = -1 with pytest.raises(ValueError): net = PSNRNet(max_val) + def test_psnr_max_val_bool(): max_val = True with pytest.raises(TypeError): net = PSNRNet(max_val) + def test_psnr_max_val_zero(): max_val = 0 with pytest.raises(ValueError): net = PSNRNet(max_val) + def test_psnr_different_shape(): shape_1 = (8, 3, 16, 16) shape_2 = (8, 3, 8, 8) @@ -71,6 +76,7 @@ def test_psnr_different_shape(): with pytest.raises(ValueError): _executor.compile(net, img1, img2) + def test_psnr_different_dtype(): dtype_1 = mstype.float32 dtype_2 = mstype.float16 @@ -80,6 +86,7 @@ def test_psnr_different_dtype(): with pytest.raises(TypeError): _executor.compile(net, img1, img2) + def test_psnr_invalid_5d_input(): shape_1 = (8, 3, 16, 16) shape_2 = (8, 3, 8, 8) diff --git a/tests/ut/python/nn/test_ssim.py b/tests/ut/python/nn/test_ssim.py index 7389c2dbdad..319cc991f9f 100644 --- a/tests/ut/python/nn/test_ssim.py +++ b/tests/ut/python/nn/test_ssim.py @@ -17,10 +17,11 @@ test ssim """ import numpy as np import pytest -import mindspore.nn as nn + import mindspore.common.dtype as mstype -from mindspore.common.api import _executor +import mindspore.nn as nn from mindspore import Tensor +from mindspore.common.api import _executor class SSIMNet(nn.Cell): @@ -38,44 +39,53 @@ def test_compile(): img2 = Tensor(np.random.random((8, 3, 16, 16))) _executor.compile(net, img1, img2) + def test_compile_grayscale(): max_val = 255 - net = SSIMNet(max_val = max_val) + net = SSIMNet(max_val=max_val) img1 = 
Tensor(np.random.randint(0, 256, (8, 1, 16, 16), np.uint8)) img2 = Tensor(np.random.randint(0, 256, (8, 1, 16, 16), np.uint8)) _executor.compile(net, img1, img2) + def test_ssim_max_val_negative(): max_val = -1 with pytest.raises(ValueError): net = SSIMNet(max_val) + def test_ssim_max_val_bool(): max_val = True with pytest.raises(TypeError): net = SSIMNet(max_val) + def test_ssim_max_val_zero(): max_val = 0 with pytest.raises(ValueError): net = SSIMNet(max_val) + def test_ssim_filter_size_float(): with pytest.raises(TypeError): net = SSIMNet(filter_size=1.1) + def test_ssim_filter_size_zero(): with pytest.raises(ValueError): net = SSIMNet(filter_size=0) + def test_ssim_filter_sigma_zero(): with pytest.raises(ValueError): net = SSIMNet(filter_sigma=0.0) + def test_ssim_filter_sigma_negative(): with pytest.raises(ValueError): net = SSIMNet(filter_sigma=-0.1) + def test_ssim_k1_k2_wrong_value(): with pytest.raises(ValueError): net = SSIMNet(k1=1.1) @@ -95,6 +105,7 @@ def test_ssim_k1_k2_wrong_value(): with pytest.raises(ValueError): net = SSIMNet(k2=-1.0) + def test_ssim_different_shape(): shape_1 = (8, 3, 16, 16) shape_2 = (8, 3, 8, 8) @@ -104,6 +115,7 @@ def test_ssim_different_shape(): with pytest.raises(ValueError): _executor.compile(net, img1, img2) + def test_ssim_different_dtype(): dtype_1 = mstype.float32 dtype_2 = mstype.float16 @@ -113,6 +125,7 @@ def test_ssim_different_dtype(): with pytest.raises(TypeError): _executor.compile(net, img1, img2) + def test_ssim_invalid_5d_input(): shape_1 = (8, 3, 16, 16) shape_2 = (8, 3, 8, 8) diff --git a/tests/ut/python/nn/test_structure_output.py b/tests/ut/python/nn/test_structure_output.py index f5f6d77a670..5f83dd0aa77 100644 --- a/tests/ut/python/nn/test_structure_output.py +++ b/tests/ut/python/nn/test_structure_output.py @@ -17,9 +17,9 @@ test_structure_output """ import numpy as np +import mindspore.ops.operations as P from mindspore import Tensor, context from mindspore.nn import Cell -import mindspore.ops.operations as P from mindspore.ops.functional import depend context.set_context(mode=context.GRAPH_MODE) diff --git a/tests/ut/python/optimizer/__init__.py b/tests/ut/python/optimizer/__init__.py index 6d831431f51..652c7d94daa 100644 --- a/tests/ut/python/optimizer/__init__.py +++ b/tests/ut/python/optimizer/__init__.py @@ -13,4 +13,3 @@ # limitations under the License. # ============================================================================ """ init vm impl """ -from ....vm_impl import vm diff --git a/tests/ut/python/optimizer/test_debug_location.py b/tests/ut/python/optimizer/test_debug_location.py index 80793f37a13..78486c7a6c3 100644 --- a/tests/ut/python/optimizer/test_debug_location.py +++ b/tests/ut/python/optimizer/test_debug_location.py @@ -13,27 +13,26 @@ # limitations under the License. 
# ============================================================================ import numpy as np -import mindspore.nn as nn import pytest -from mindspore import context + +import mindspore.nn as nn from mindspore import Tensor, Parameter -from mindspore.nn.wrap.cell_wrapper import WithLossCell -from mindspore.train.loss_scale_manager import FixedLossScaleManager, DynamicLossScaleManager -from mindspore.nn.wrap.loss_scale import TrainOneStepWithLossScaleCell -from mindspore.ops import operations as P -from mindspore.nn.optim import Momentum -from mindspore.ops import functional as F +from mindspore import context from mindspore.common import dtype as mstype -from mindspore.train import Model -from ....dataset_mock import MindData -from mindspore.nn.optim import Lamb -from mindspore.ops._utils import _get_broadcast_shape -from mindspore.ops.primitive import Primitive, PrimitiveWithInfer, prim_attr_register +from mindspore.nn.optim import Momentum +from mindspore.nn.wrap.cell_wrapper import WithLossCell +from mindspore.nn.wrap.loss_scale import TrainOneStepWithLossScaleCell +from mindspore.ops import functional as F +from mindspore.ops import operations as P from mindspore.ops._grad.grad_base import bprop_getters from mindspore.ops._grad.grad_math_ops import binop_grad_common +from mindspore.ops._utils import _get_broadcast_shape +from mindspore.ops.primitive import PrimitiveWithInfer, prim_attr_register +from mindspore.train.loss_scale_manager import DynamicLossScaleManager context.set_context(mode=context.GRAPH_MODE) + class MockNeg(PrimitiveWithInfer): @prim_attr_register def __init__(self): @@ -47,6 +46,7 @@ class MockNeg(PrimitiveWithInfer): raise TypeError("InferError") return input_x + class MockSub(PrimitiveWithInfer): @prim_attr_register def __init__(self): @@ -59,6 +59,7 @@ class MockSub(PrimitiveWithInfer): def infer_dtype(self, x_dtype, y_dtype): return x_dtype + @bprop_getters.register(MockSub) def get_bprop_mock_sub(self): """Grad definition for `MockSub` operation.""" @@ -66,8 +67,10 @@ def get_bprop_mock_sub(self): def bprop(x, y, out, dout): return binop_grad_common(x, y, dout, neg_func(dout)) + return bprop + class Net(nn.Cell): def __init__(self, in_features, out_features): super(Net, self).__init__() @@ -80,6 +83,7 @@ class Net(nn.Cell): output = self.add(self.matmul(input, self.weight), self.bias) return output + class NetFP16(nn.Cell): def __init__(self, in_features, out_features): super(NetFP16, self).__init__() @@ -90,16 +94,19 @@ class NetFP16(nn.Cell): self.cast = P.Cast() def construct(self, input): - output = self.cast(self.add(self.matmul(self.cast(input, mstype.float16), self.cast(self.weight, mstype.float16)), - self.cast(self.bias, mstype.float16)), mstype.float32) + output = self.cast( + self.add(self.matmul(self.cast(input, mstype.float16), self.cast(self.weight, mstype.float16)), + self.cast(self.bias, mstype.float16)), mstype.float32) return output + def get_axis(x): shape = F.shape(x) length = F.tuple_len(shape) perm = F.make_range(0, length) return perm + class MSELoss(nn.Cell): def __init__(self): super(MSELoss, self).__init__() @@ -107,17 +114,21 @@ class MSELoss(nn.Cell): self.square = P.Square() self.reduce_mean = P.ReduceMean() self.sub = MockSub() + def construct(self, data, label): diff = self.sub(data, label) return self.reduce_mean(self.square(diff), get_axis(diff)) + class NegCell(nn.Cell): def __init__(self): super(NegCell, self).__init__() self.neg = MockNeg() + def construct(self, x): return self.neg(x) + class Net3(nn.Cell): def 
__init__(self): super().__init__() @@ -146,6 +157,7 @@ class SequenceNet(nn.Cell): x = self.seq(x) + bbb return x + def test_sequential_resolve_error(): input_np = np.random.randn(2, 3, 4, 5).astype(np.float32) input_me = Tensor(input_np) @@ -153,6 +165,7 @@ def test_sequential_resolve_error(): with pytest.raises(RuntimeError) as e: net(input_me) + def test_compile_grad_error(): inputs = Tensor(np.ones([16, 16]).astype(np.float32)) label = Tensor(np.zeros([16, 16]).astype(np.float32)) @@ -164,9 +177,8 @@ def test_compile_grad_error(): net_with_loss = WithLossCell(net, loss) scale_manager = DynamicLossScaleManager() update_cell = scale_manager.get_update_cell() - train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell = update_cell) + train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell=update_cell) train_network.set_train() with pytest.raises(TypeError) as e: train_network(inputs, label) - print (e) - + print(e) diff --git a/tests/ut/python/optimizer/test_optimize_with_loss_scale.py b/tests/ut/python/optimizer/test_optimize_with_loss_scale.py index b61181adda3..95e9da3230d 100644 --- a/tests/ut/python/optimizer/test_optimize_with_loss_scale.py +++ b/tests/ut/python/optimizer/test_optimize_with_loss_scale.py @@ -13,19 +13,20 @@ # limitations under the License. # ============================================================================ import numpy as np + import mindspore.nn as nn -from mindspore import context from mindspore import Tensor, Parameter +from mindspore import context +from mindspore.common import dtype as mstype +from mindspore.nn.optim import Lamb +from mindspore.nn.optim import Momentum, Adam from mindspore.nn.wrap.cell_wrapper import WithLossCell from mindspore.nn.wrap.loss_scale import TrainOneStepWithLossScaleCell -from mindspore.train.loss_scale_manager import FixedLossScaleManager, DynamicLossScaleManager -from mindspore.ops import operations as P -from mindspore.nn.optim import Momentum, Adam from mindspore.ops import functional as F -from mindspore.common import dtype as mstype +from mindspore.ops import operations as P from mindspore.train import Model +from mindspore.train.loss_scale_manager import FixedLossScaleManager, DynamicLossScaleManager from ....dataset_mock import MindData -from mindspore.nn.optim import Lamb context.set_context(mode=context.GRAPH_MODE) @@ -36,6 +37,7 @@ class MindDataSet(MindData): np_types=dataset_types, output_shapes=dataset_shapes, input_indexs=(0, 1)) + def __next__(self): if self._size < self._iter_num: raise StopIteration @@ -45,6 +47,7 @@ class MindDataSet(MindData): next.append(Tensor(np.ones(shape).astype(type))) return tuple(next) + class Net(nn.Cell): def __init__(self, in_features, out_features): super(Net, self).__init__() @@ -57,6 +60,7 @@ class Net(nn.Cell): output = self.add(self.matmul(input, self.weight), self.bias) return output + class NetFP16(nn.Cell): def __init__(self, in_features, out_features): super(NetFP16, self).__init__() @@ -67,10 +71,12 @@ class NetFP16(nn.Cell): self.cast = P.Cast() def construct(self, input): - output = self.cast(self.add(self.matmul(self.cast(input, mstype.float16), self.cast(self.weight, mstype.float16)), - self.cast(self.bias, mstype.float16)), mstype.float32) + output = self.cast( + self.add(self.matmul(self.cast(input, mstype.float16), self.cast(self.weight, mstype.float16)), + self.cast(self.bias, mstype.float16)), mstype.float32) return output + def get_axis(x): shape_op = P.Shape() shape = shape_op(x) @@ -78,6 
+84,7 @@ def get_axis(x): perm = F.make_range(0, length) return perm + class MSELoss(nn.Cell): def __init__(self): super(MSELoss, self).__init__() @@ -89,6 +96,7 @@ class MSELoss(nn.Cell): diff = data - label return self.reduce_mean(self.square(diff), get_axis(diff)) + def test_momentum_compile(): inputs = Tensor(np.ones([15, 1]).astype(np.float32)) label = Tensor(np.zeros([15, 1]).astype(np.float32)) @@ -104,6 +112,7 @@ def test_momentum_compile(): output = train_network(inputs, label, scaling_sens) print("the result is ", output) + def test_compile_fp16_not_overflow(): inputs = Tensor(np.ones([16, 16]).astype(np.float32)) label = Tensor(np.zeros([16, 16]).astype(np.float32)) @@ -119,6 +128,7 @@ def test_compile_fp16_not_overflow(): output = train_network(inputs, label, scaling_sens) print("the result is ", output) + def test_compile_fp16_lr_overflow(): inputs = Tensor(np.ones([16, 16]).astype(np.float32)) label = Tensor(np.zeros([16, 16]).astype(np.float32)) @@ -134,6 +144,7 @@ def test_compile_fp16_lr_overflow(): output = train_network(inputs, label, scaling_sens) print("the result is ", output) + def test_compile_fp16_overflow(): inputs = Tensor(np.ones([16, 16]).astype(np.float32)) label = Tensor(np.zeros([16, 16]).astype(np.float32)) @@ -148,6 +159,7 @@ def test_compile_fp16_overflow(): output = train_network(inputs, label, scaling_sens) print("the result is ", output) + def test_compile_fp16_lr_overflow_with_lossscale_update(): inputs = Tensor(np.ones([16, 16]).astype(np.float32)) label = Tensor(np.zeros([16, 16]).astype(np.float32)) @@ -165,6 +177,7 @@ def test_compile_fp16_lr_overflow_with_lossscale_update(): output = train_network(inputs, label, scaling_sens) print("the result is ", output) + def test_compile_f16_model_train(): dataset_types = (np.float32, np.float32) dataset_shapes = ((16, 16), (16, 16)) @@ -205,11 +218,12 @@ def test_compile_fp16_lr_overflow_fixed_feed(): net_with_loss = WithLossCell(net, loss) scale_manager = FixedLossScaleManager() update_cell = scale_manager.get_update_cell() - train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell = update_cell) + train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell=update_cell) train_network.set_train() output = train_network(inputs, label, scaling_sens) print("the result is ", output) + def test_compile_fp16_lr_overflow_dynamic_feed(): inputs = Tensor(np.ones([16, 16]).astype(np.float32)) label = Tensor(np.zeros([16, 16]).astype(np.float32)) @@ -222,11 +236,12 @@ def test_compile_fp16_lr_overflow_dynamic_feed(): net_with_loss = WithLossCell(net, loss) scale_manager = DynamicLossScaleManager() update_cell = scale_manager.get_update_cell() - train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell = update_cell) + train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell=update_cell) train_network.set_train() output = train_network(inputs, label, scaling_sens) print("the result is ", output) + def test_compile_fp16_lr_overflow_fixed_graph(): inputs = Tensor(np.ones([16, 16]).astype(np.float32)) label = Tensor(np.zeros([16, 16]).astype(np.float32)) @@ -238,11 +253,12 @@ def test_compile_fp16_lr_overflow_fixed_graph(): net_with_loss = WithLossCell(net, loss) scale_manager = FixedLossScaleManager(drop_overflow_update=True) update_cell = scale_manager.get_update_cell() - train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell = update_cell) + train_network = 
TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell=update_cell) train_network.set_train() output = train_network(inputs, label) print("the result is ", output) + def test_compile_fp16_lr_overflow_dynamic_graph(): inputs = Tensor(np.ones([16, 16]).astype(np.float32)) label = Tensor(np.zeros([16, 16]).astype(np.float32)) @@ -254,11 +270,12 @@ def test_compile_fp16_lr_overflow_dynamic_graph(): net_with_loss = WithLossCell(net, loss) scale_manager = DynamicLossScaleManager() update_cell = scale_manager.get_update_cell() - train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell = update_cell) + train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell=update_cell) train_network.set_train() output = train_network(inputs, label) print("the result is ", output) + def test_adam_compile(): inputs = Tensor(np.ones([15, 1]).astype(np.float32)) label = Tensor(np.zeros([15, 1]).astype(np.float32)) diff --git a/tests/ut/python/optimizer/test_optimize_with_parameter_groups.py b/tests/ut/python/optimizer/test_optimize_with_parameter_groups.py index 24ee9254a99..1e1fc452da9 100644 --- a/tests/ut/python/optimizer/test_optimize_with_parameter_groups.py +++ b/tests/ut/python/optimizer/test_optimize_with_parameter_groups.py @@ -14,20 +14,22 @@ # ============================================================================ import numpy as np import pytest + import mindspore.common.dtype as mstype import mindspore.nn as nn -from mindspore.nn.optim import Momentum, SGD, RMSProp, Adam from mindspore import context from mindspore.common.api import _executor from mindspore.common.tensor import Tensor -from mindspore.ops import operations as P from mindspore.nn import TrainOneStepCell, WithLossCell +from mindspore.nn.optim import Momentum, SGD, RMSProp, Adam +from mindspore.ops import operations as P context.set_context(mode=context.GRAPH_MODE) class LeNet5(nn.Cell): """ LeNet5 definition """ + def __init__(self): super(LeNet5, self).__init__() self.conv1 = nn.Conv2d(1, 6, 5, pad_mode='valid') diff --git a/tests/ut/python/parameter_feature/test_parameter.py b/tests/ut/python/parameter_feature/test_parameter.py index 1409fef386d..18a8d7365d6 100644 --- a/tests/ut/python/parameter_feature/test_parameter.py +++ b/tests/ut/python/parameter_feature/test_parameter.py @@ -13,16 +13,17 @@ # limitations under the License. 
# ============================================================================ import numpy as np + import mindspore.context as context +import mindspore.ops.composite as C from mindspore import Tensor, Parameter from mindspore.nn import Cell from mindspore.ops import operations as P -import mindspore.ops.composite as C context.set_context(mode=context.GRAPH_MODE, save_graphs=True) -def test_parser_three_default_mixed_args_subnet(): +def test_parser_three_default_mixed_args_subnet(): class SubNetDefaultMixedArgs(Cell): def __init__(self): super().__init__() @@ -55,7 +56,7 @@ def test_net_vararg_kwonlyarg_kwarg(): super(FirstNet, self).__init__() self.net = SecondNet() - def construct(self, x=1, z=2+2+4, y=3): + def construct(self, x=1, z=2 + 2 + 4, y=3): c = self.net(22, 33, x, y, z, 2, 3, 4, 5, key1=10, key2=20, key3=30, key4=40) return c @@ -74,13 +75,14 @@ def test_net_vararg_kwonlyarg_kwarg(): net = FirstNet() net() + def test_net_vararg_normal_input(): class FirstNet(Cell): def __init__(self): super(FirstNet, self).__init__() self.net = SecondNet() - def construct(self, x=1, z=2+2+4, y=3): + def construct(self, x=1, z=2 + 2 + 4, y=3): c = self.net(22, 33, x, y, z, 2, 3, 4, 5, key1=10, key2=20, key3=30, key4=40) return c @@ -95,10 +97,12 @@ def test_net_vararg_normal_input(): d = var[0] * var[1] * var[2] * var[3] e = key1 - key2 - kwargs["key3"] + kwargs["key4"] return a + b + c + d + e + x = Tensor(np.ones((2, 3, 4), np.int32)) net = FirstNet() net(x, x, x) + def test_prim_vararg_kwonlyarg(): class FirstNet(Cell): def __init__(self): @@ -201,9 +205,11 @@ def test_net_variable_and_weights(): z = Tensor(np.ones((4,), np.float32)) net(x, y, z) + def test_net_vargs_expand(): class InputBackward(Cell): """ InputBackward definition """ + def __init__(self, network, c1=None, c2=None): super(InputBackward, self).__init__() self.network = network @@ -214,9 +220,11 @@ def test_net_vargs_expand(): def construct(self, *inputs): return self.grad(self.network)(*inputs) + class AddNet(Cell): def __init__(self): super(AddNet, self).__init__() + def construct(self, x, y): return x + y @@ -227,7 +235,7 @@ def test_net_vargs_expand(): net.set_train() net(x, y, sens) - + def test_mixed_precision_const_parameter(): class NetLoss(Cell): @@ -237,6 +245,7 @@ def test_mixed_precision_const_parameter(): self.up_sample1 = P.ResizeBilinear((14, 14)) self.up_sample2 = P.ResizeBilinear((28, 28)) self.up_sample3 = P.ResizeBilinear((36, 36)) + def construct(self, x, y, z, *args): ret = 0 if args[0] == self.shape(z)[2]: @@ -250,20 +259,23 @@ def test_mixed_precision_const_parameter(): ret = x * y ret = ret * z return ret + class NetMain(Cell): def __init__(self, loss_fn): super(NetMain, self).__init__() self.loss_fn = loss_fn self.shape = P.Shape() + def construct(self, x, y, z): size_x = self.shape(x)[2] size_y = self.shape(y)[2] ret = self.loss_fn(x, y, z, size_x, size_y) return ret + loss_fn = NetLoss() net = NetMain(loss_fn) net.add_flags_recursive(fp32=True) x = Tensor(np.ones((1, 3, 28, 28), np.float32)) y = Tensor(np.ones((1, 3, 14, 14), np.float32)) z = Tensor(np.ones((1, 3, 28, 28), np.float32)) - out = net(x, y, z) \ No newline at end of file + out = net(x, y, z) diff --git a/tests/ut/python/parameter_feature/test_var_grad.py b/tests/ut/python/parameter_feature/test_var_grad.py index 528456d02e6..05b0289d257 100644 --- a/tests/ut/python/parameter_feature/test_var_grad.py +++ b/tests/ut/python/parameter_feature/test_var_grad.py @@ -13,14 +13,14 @@ # limitations under the License. 
# ============================================================================ import numpy as np -from mindspore import context + +import mindspore.ops.composite as C from mindspore import Tensor, Parameter +from mindspore import context +from mindspore.common import dtype as mstype +from mindspore.common.parameter import ParameterTuple from mindspore.nn import Cell from mindspore.ops import operations as P -import mindspore.ops.composite as C -from mindspore.common.api import _executor -from mindspore.common.parameter import ParameterTuple -from mindspore.common import dtype as mstype context.set_context(mode=context.GRAPH_MODE) @@ -34,6 +34,7 @@ def test_net_vargs_expand(): def construct(self, x, y): return x + y + x = Tensor(np.random.normal(0, 1, [3, 4, 5]).astype(np.float32)) y = Tensor(np.random.normal(0, 1, [3, 4, 5]).astype(np.float32)) sens = Tensor(np.random.normal(0, 1, [3, 4, 5]).astype(np.float32)) @@ -51,7 +52,7 @@ class VarNet(Cell): self.net = net def construct(self, *args): - return self.net(*args)*self.w + self.b + return self.net(*args) * self.w + self.b class SecondNet(Cell): @@ -95,6 +96,7 @@ class Bprop(Cell): def test_all_var_args_grad_with_sens(): """"test grad_by_list_with_sens with all var args input""" + class GradNet(Cell): def __init__(self, net): super(GradNet, self).__init__() @@ -103,6 +105,7 @@ def test_all_var_args_grad_with_sens(): def construct(self, *inputs): return C.grad_by_list_with_sens(self.net, self.weights)(*inputs) + x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) sens = Tensor(1.0, dtype=mstype.float32) @@ -120,6 +123,7 @@ def test_grad_list_var_args(): def construct(self, *inputs): return C.grad_by_list(self.net, self.weights)(*inputs) + x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) net = VarNet(SecondNet()) @@ -136,6 +140,7 @@ def test_grad_all_var_args(): def construct(self, *inputs): return C.grad_all(self.net)(*inputs) + x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) net = VarNet(SecondNet()) @@ -152,6 +157,7 @@ def test_grad_all_var_args_with_sens(): def construct(self, *inputs): return C.grad_all_with_sens(self.net)(*inputs) + x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) sens = Tensor(1.0, dtype=mstype.float32) @@ -169,6 +175,7 @@ def test_grad_var_args_with_sens(): def construct(self, *inputs): return C.grad_with_sens(self.net)(*inputs) + x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) sens = Tensor(1.0, dtype=mstype.float32) @@ -206,6 +213,7 @@ def test_var_args_grad(): def construct(self, x, y, sens): return C.grad_by_list_with_sens(self.net, self.weights)(x, y, sens) + x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) sens = Tensor(1.0, dtype=mstype.float32) @@ -216,13 +224,14 @@ def test_var_args_grad(): def test_var_args_positional(): """"test grad_all with var args in inner graph""" + class VarNet(Cell): def __init__(self, net): super(VarNet, self).__init__() self.net = net def construct(self, x, y): - return self.net(x, y)*x + return self.net(x, y) * x class SecondNet(Cell): def __init__(self): @@ -239,6 +248,7 @@ def test_var_args_positional(): def construct(self, x, y): return C.grad_all(self.net)(x, y) + x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) y = Tensor(np.ones([3, 
4, 5]), dtype=mstype.float32) net = VarNet(SecondNet()) @@ -258,6 +268,7 @@ def test_grad_within_if_else(): def construct(self, *inputs): return self.grad(*inputs) + x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) sens = Tensor(1.0, dtype=mstype.float32) @@ -309,6 +320,7 @@ def test_grad_for_concat(): def grad_cmp(self): input_grad_mindspore = self.grad_mindspore_impl() + fact = ConcatFactory(input_shape=( (2, 184320, 1), (2, 46080, 1), (2, 11520, 1), (2, 2880, 1), (2, 720, 1)), axis=1) fact.grad_cmp() diff --git a/tests/ut/python/test_log.py b/tests/ut/python/test_log.py index 04be0bd0b9f..9d1b135e8e6 100644 --- a/tests/ut/python/test_log.py +++ b/tests/ut/python/test_log.py @@ -15,12 +15,13 @@ """ log test """ +import logging import os -import sys -import time import re import shutil -import logging +import sys +import time + def test_log_stdout(): # Clean up environment variables @@ -50,8 +51,8 @@ def test_log_setlevel(): _rm_env_config() os.environ['GLOG_v'] = '0' from mindspore import log as logger - #logger_instance = logger._get_logger() - #del logger_instance + # logger_instance = logger._get_logger() + # del logger_instance loglevel = logger.get_level() log_str = 'print debug informations' logger.debug("5 test log message debug:%s", log_str) @@ -87,7 +88,7 @@ def test_log_file(): "\[.*:.*[0-9]\] test log message warning" match_obj = re.match(pattern, result) - #Clear test file + # Clear test file if os.path.exists(file_path): shutil.rmtree(file_path) @@ -100,7 +101,7 @@ def test_log_backup_count(): """ test backup count """ - #logger.reset_log_config(level=logging.INFO, console=False, + # logger.reset_log_config(level=logging.INFO, console=False, # filepath=file_path, maxBytes=1000, backupCount=10) _rm_env_config() file_path = '/tmp/log/mindspore_test' @@ -141,12 +142,12 @@ def test_log_verify_envconfig(): # level is not a number _rm_env_config() os.environ['GLOG_v'] = 'test' - verify_dict_0 = logger._get_env_config() + verify_dict_0 = logger._get_env_config() # level is not in range _rm_env_config() os.environ['GLOG_v'] = '100' - verify_dict_1 = logger._get_env_config() + verify_dict_1 = logger._get_env_config() # console is not a number _rm_env_config() @@ -236,7 +237,6 @@ def test_log_repeated_print(): logger._global_logger = None - def test_log_getconfig(): _rm_env_config() os.environ['GLOG_v'] = '3' @@ -307,7 +307,7 @@ def test_log_ms_import(): targetdict = {'GLOG_v': '2', 'GLOG_logtostderr': '1'} level = ms.get_level() assert configdict == targetdict and level == '2' - + def _rm_env_config(): envlist = ['GLOG_v', 'GLOG_logtostderr', 'GLOG_log_dir', 'logger_maxBytes', 'logger_backupCount'] diff --git a/tests/ut/python/train/summary/__init__.py b/tests/ut/python/train/summary/__init__.py index 5443c0ca48e..9f7610e25c4 100644 --- a/tests/ut/python/train/summary/__init__.py +++ b/tests/ut/python/train/summary/__init__.py @@ -15,6 +15,7 @@ """setup for pytest""" import mindspore.context as context + # pylint: disable=unused-argument def setup_module(module): context.set_context(mode=context.GRAPH_MODE) diff --git a/tests/ut/python/train/summary/test_graph_summary.py b/tests/ut/python/train/summary/test_graph_summary.py index cebf7064656..c69cedbad3f 100644 --- a/tests/ut/python/train/summary/test_graph_summary.py +++ b/tests/ut/python/train/summary/test_graph_summary.py @@ -13,15 +13,16 @@ # limitations under the License. 
 # ============================================================================
 """ test_graph_summary """
-import os
 import logging
+import os
+
 import numpy as np
-import pytest
+
 import mindspore.nn as nn
-from mindspore.nn.optim import Momentum
 from mindspore import Model, context
-from mindspore.train.summary.summary_record import SummaryRecord
+from mindspore.nn.optim import Momentum
 from mindspore.train.callback import SummaryStep
+from mindspore.train.summary.summary_record import SummaryRecord
 from .....dataset_mock import MindData
 
 CUR_DIR = os.getcwd()
diff --git a/tests/ut/python/train/summary/test_histogram_summary.py b/tests/ut/python/train/summary/test_histogram_summary.py
index 53c62990b1c..12fd3d573e4 100644
--- a/tests/ut/python/train/summary/test_histogram_summary.py
+++ b/tests/ut/python/train/summary/test_histogram_summary.py
@@ -21,8 +21,8 @@ import tempfile
 import numpy as np
 
 from mindspore.common.tensor import Tensor
-from mindspore.train.summary.summary_record import SummaryRecord, _cache_summary_tensor_data
 from mindspore.train.summary._summary_adapter import _calc_histogram_bins
+from mindspore.train.summary.summary_record import SummaryRecord, _cache_summary_tensor_data
 from .summary_reader import SummaryReader
 
 CUR_DIR = os.getcwd()
diff --git a/tests/ut/python/train/summary/test_image_summary.py b/tests/ut/python/train/summary/test_image_summary.py
index 2eb35d3f58a..7ebfee880a7 100644
--- a/tests/ut/python/train/summary/test_image_summary.py
+++ b/tests/ut/python/train/summary/test_image_summary.py
@@ -18,16 +18,18 @@
 @Date : 2019-07-4
 @Desc : test summary function
 """
-import os
 import logging
+import os
+
 import numpy as np
+
 import mindspore.nn as nn
-from mindspore.train.summary.summary_record import SummaryRecord, \
-    _cache_summary_tensor_data
+from mindspore import Model, context
 from mindspore import Tensor
 from mindspore.nn.optim import Momentum
-from mindspore import Model, context
 from mindspore.train.callback import SummaryStep
+from mindspore.train.summary.summary_record import SummaryRecord, \
+    _cache_summary_tensor_data
 from .....dataset_mock import MindData
 
 CUR_DIR = os.getcwd()
diff --git a/tests/ut/python/train/summary/test_summary.py b/tests/ut/python/train/summary/test_summary.py
index 82287c4290d..394ae31f08e 100644
--- a/tests/ut/python/train/summary/test_summary.py
+++ b/tests/ut/python/train/summary/test_summary.py
@@ -18,16 +18,18 @@
 @Date : 2019-07-4
 @Desc : test summary function
 """
-import os
 import logging
+import os
 import random
+
 import numpy as np
 import pytest
-from mindspore.train.summary.summary_record import SummaryRecord, _cache_summary_tensor_data
-from mindspore.train.callback import SummaryStep
-from mindspore.common.tensor import Tensor
+
 import mindspore.nn as nn
+from mindspore.common.tensor import Tensor
 from mindspore.ops import operations as P
+from mindspore.train.callback import SummaryStep
+from mindspore.train.summary.summary_record import SummaryRecord, _cache_summary_tensor_data
 
 CUR_DIR = os.getcwd()
 SUMMARY_DIR = CUR_DIR + "/test_temp_summary_event_file/"
@@ -129,7 +131,8 @@ def test_scalar_summary_sample_with_shape_1():
 # Test: test with ge
 class SummaryDemo(nn.Cell):
     """ SummaryDemo definition """
-    def __init__(self,):
+
+    def __init__(self, ):
         super(SummaryDemo, self).__init__()
         self.s = P.ScalarSummary()
         self.histogram_summary = P.HistogramSummary()
@@ -218,9 +221,9 @@ def test_validate():
     with pytest.raises(ValueError):
         sr.record(2.0)
     with pytest.raises(ValueError):
-        sr.record((1,3))
+        sr.record((1, 3))
     with pytest.raises(ValueError):
-        sr.record([2,3])
+        sr.record([2, 3])
     with pytest.raises(ValueError):
         sr.record("str")
     with pytest.raises(ValueError):
@@ -235,8 +238,8 @@ def test_validate():
     with pytest.raises(ValueError):
         SummaryStep(sr, "str")
     with pytest.raises(ValueError):
-        SummaryStep(sr, (1,2))
+        SummaryStep(sr, (1, 2))
     with pytest.raises(ValueError):
-        SummaryStep(sr, [3,4])
+        SummaryStep(sr, [3, 4])
     with pytest.raises(ValueError):
         SummaryStep(sr, sr)
diff --git a/tests/ut/python/train/summary/test_summary_abnormal_input.py b/tests/ut/python/train/summary/test_summary_abnormal_input.py
index cf6ae7b884b..40c65e5f8ce 100644
--- a/tests/ut/python/train/summary/test_summary_abnormal_input.py
+++ b/tests/ut/python/train/summary/test_summary_abnormal_input.py
@@ -18,11 +18,13 @@
 @Date : 2019-08-5
 @Desc : test summary function of abnormal input
 """
-import os
 import logging
+import os
+
 import numpy as np
-from mindspore.train.summary.summary_record import SummaryRecord
+
 from mindspore.common.tensor import Tensor
+from mindspore.train.summary.summary_record import SummaryRecord
 
 CUR_DIR = os.getcwd()
 SUMMARY_DIR = CUR_DIR + "/test_temp_summary_event_file/"
@@ -65,6 +67,7 @@ def test_summaryrecord_input_null_string():
         assert False
     log.debug("finished test_summaryrecord_input_null_string")
 
+
 def test_summaryrecord_input_None():
     log.debug("begin test_summaryrecord_input_None")
     # step 0: create the thread
@@ -76,6 +79,7 @@ def test_summaryrecord_input_None():
         assert False
     log.debug("finished test_summaryrecord_input_None")
 
+
 def test_summaryrecord_input_relative_dir_1():
     log.debug("begin test_summaryrecord_input_relative_dir_1")
     # step 0: create the thread
diff --git a/tests/ut/python/train/summary/test_summary_ops_params_valid_check.py b/tests/ut/python/train/summary/test_summary_ops_params_valid_check.py
index ab1eb88d96c..a91855e24a5 100644
--- a/tests/ut/python/train/summary/test_summary_ops_params_valid_check.py
+++ b/tests/ut/python/train/summary/test_summary_ops_params_valid_check.py
@@ -18,16 +18,17 @@
 @Date : 2019-08-5
 @Desc : test summary function of ops params valid check
 """
-import os
 import logging
+import os
 import random
+
 import numpy as np
 import pytest
-from mindspore.train.summary.summary_record import SummaryRecord
-from mindspore.common.tensor import Tensor
-import mindspore.nn as nn
-from mindspore.ops import operations as P
 
+import mindspore.nn as nn
+from mindspore.common.tensor import Tensor
+from mindspore.ops import operations as P
+from mindspore.train.summary.summary_record import SummaryRecord
 
 CUR_DIR = os.getcwd()
 SUMMARY_DIR = CUR_DIR + "/test_temp_summary_event_file/"
@@ -38,6 +39,7 @@ log.setLevel(level=logging.ERROR)
 
 class SummaryDemoTag(nn.Cell):
     """ SummaryDemoTag definition """
+
     def __init__(self, tag1, tag2, tag3):
         super(SummaryDemoTag, self).__init__()
         self.s = P.ScalarSummary()
@@ -58,6 +60,7 @@ class SummaryDemoTag(nn.Cell):
 
 class SummaryDemoTagForSet(nn.Cell):
     """ SummaryDemoTagForSet definition """
+
     def __init__(self, tag_tuple):
         super(SummaryDemoTagForSet, self).__init__()
         self.s = P.ScalarSummary()
@@ -75,6 +78,7 @@ class SummaryDemoTagForSet(nn.Cell):
 
 class SummaryDemoValue(nn.Cell):
     """ SummaryDemoValue definition """
+
     def __init__(self, value):
         super(SummaryDemoValue, self).__init__()
         self.s = P.ScalarSummary()
@@ -88,8 +92,10 @@ class SummaryDemoValue(nn.Cell):
         self.s("y", self.v)
         return z
 
+
 class SummaryDemoValueForSet(nn.Cell):
     """ SummaryDemoValueForSet definition """
+
     def __init__(self, value, tag_tuple):
         super(SummaryDemoValueForSet, self).__init__()
         self.s = P.ScalarSummary()
@@ -106,11 +112,12 @@ class SummaryDemoValueForSet(nn.Cell):
 
 class HistogramSummaryNet(nn.Cell):
     "HistogramSummaryNet definition"
+
     def __init__(self, value):
+        super(HistogramSummaryNet, self).__init__()  # nn.Cell must be initialized before operator attributes are set
         self.histogram_summary = P.HistogramSummary()
         self.add = P.TensorAdd()
         self.value = value
-    
+
     def construct(self, tensors1, tensor2):
         self.histogram_summary("value", self.value)
         return self.add(tensors1, tensor2)
@@ -246,7 +253,7 @@ def test_histogram_summary_use_valid_value():
     """Test histogram summary with valid value"""
     log.debug("Begin test_histogram_summary_use_valid_value")
     try:
-        net = HistogramSummaryNet(Tensor(np.array([1,2,3])))
+        net = HistogramSummaryNet(Tensor(np.array([1, 2, 3])))
         run_case(net)
     except:
         assert True
diff --git a/tests/ut/python/train/summary/test_tensor_summary.py b/tests/ut/python/train/summary/test_tensor_summary.py
index 8f83dd55969..34d71a704eb 100644
--- a/tests/ut/python/train/summary/test_tensor_summary.py
+++ b/tests/ut/python/train/summary/test_tensor_summary.py
@@ -18,13 +18,15 @@
 @Date : 2019-07-4
 @Desc : test summary function
 """
-import os
 import logging
+import os
+
 import numpy as np
-from mindspore.train.summary.summary_record import SummaryRecord, _cache_summary_tensor_data
-from mindspore.common.tensor import Tensor
+
 import mindspore.nn as nn
+from mindspore.common.tensor import Tensor
 from mindspore.ops import operations as P
+from mindspore.train.summary.summary_record import SummaryRecord, _cache_summary_tensor_data
 
 CUR_DIR = os.getcwd()
 SUMMARY_DIR = CUR_DIR + "/test_temp_summary_event_file/"
@@ -93,7 +95,6 @@ def test_tensor_summary_sample():
     log.debug("finished test_tensor_summary_sample")
 
 
-
 def get_test_data_check(step):
     """ get_test_data_check """
     test_data_list = []
@@ -111,7 +112,8 @@ def get_test_data_check(step):
 # Test: test with ge
 class SummaryDemo(nn.Cell):
     """ SummaryDemo definition """
-    def __init__(self,):
+
+    def __init__(self, ):
         super(SummaryDemo, self).__init__()
         self.s = P.TensorSummary()
         self.add = P.TensorAdd()
@@ -123,6 +125,7 @@ class SummaryDemo(nn.Cell):
         self.s("y1", y)
         return z
 
+
 def test_tensor_summary_with_ge():
     """ test_tensor_summary_with_ge """
     log.debug("begin test_tensor_summary_with_ge")
@@ -140,7 +143,7 @@ def test_tensor_summary_with_ge():
     steps = 100
     for i in range(1, steps):
         x = Tensor(np.array([[i], [i]]).astype(np.float32))
-        y = Tensor(np.array([[i+1], [i+1]]).astype(np.float32))
+        y = Tensor(np.array([[i + 1], [i + 1]]).astype(np.float32))
         net(x, y)
         test_writer.record(i)
diff --git a/tests/ut/python/train/test_amp.py b/tests/ut/python/train/test_amp.py
index 2afb1e00b55..fe08809be1e 100644
--- a/tests/ut/python/train/test_amp.py
+++ b/tests/ut/python/train/test_amp.py
@@ -15,12 +15,11 @@
 """ auto mixed precision """
 import numpy as np
 import pytest
+
+import mindspore.context as context
+from mindspore import Tensor
 from mindspore import amp
 from mindspore import nn
-from mindspore import Tensor
-from mindspore.common import dtype as mstype
-import mindspore.context as context
-from mindspore.model_zoo.resnet import resnet50
 from mindspore.train import Model
 from ....dataset_mock import MindData
 
@@ -96,6 +95,7 @@ class MindDataSet(MindData):
                          np_types=dataset_types,
                          output_shapes=dataset_shapes,
                          input_indexs=(0, 1))
+
     def __next__(self):
         if self._size < self._iter_num:
             raise StopIteration
@@ -122,6 +122,7 @@ def test_compile_model_train_O0():
     # not actual run, the metrics step will fail, check if compile ok.
     model.eval(dataset)
 
+
 def test_compile_model_train_O2():
     dataset_types = (np.float32, np.float32)
     dataset_shapes = ((16, 16), (16, 16))
diff --git a/tests/ut/python/train/test_run_config.py b/tests/ut/python/train/test_run_config.py
index 00572d62b36..fc56cbab8d4 100644
--- a/tests/ut/python/train/test_run_config.py
+++ b/tests/ut/python/train/test_run_config.py
@@ -14,6 +14,7 @@
 # ============================================================================
 """ test_run_config """
 import pytest
+
 from mindspore.train.callback import CheckpointConfig
 
 
diff --git a/tests/ut/python/train/test_training.py b/tests/ut/python/train/test_training.py
index c7ba01e8bd5..81bcd1b9d7b 100644
--- a/tests/ut/python/train/test_training.py
+++ b/tests/ut/python/train/test_training.py
@@ -14,12 +14,14 @@
 # ============================================================================
 """ test_training """
 import logging
+
 import numpy as np
 import pytest
+
 import mindspore.nn as nn
+from mindspore import Model, context
 from mindspore import Tensor
 from mindspore.nn.optim import Momentum
-from mindspore import Model, context
 from mindspore.train.callback import SummaryStep
 from ..ut_filter import non_graph_engine
 from ....dataset_mock import MindData
diff --git a/tests/ut/python/transform/test_transform.py b/tests/ut/python/transform/test_transform.py
index 5a40e329115..36f970cd390 100644
--- a/tests/ut/python/transform/test_transform.py
+++ b/tests/ut/python/transform/test_transform.py
@@ -19,12 +19,12 @@
 @Desc : test mindspore compile method
 """
 import logging
-import numpy as np
-import mindspore.nn as nn
-from mindspore import Tensor, Parameter, Model
-from mindspore.ops import operations as P
-from ..ut_filter import non_graph_engine
 
+import numpy as np
+
+import mindspore.nn as nn
+from mindspore import Tensor, Parameter
+from mindspore.ops import operations as P
 
 log = logging.getLogger("test")
 log.setLevel(level=logging.ERROR)
@@ -104,6 +104,7 @@ class ResidualBlock(nn.Cell):
 
 class ResNet(nn.Cell):
     """ ResNet definition """
+
     def __init__(self, tensor):
         super(ResNet, self).__init__()
         self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)
@@ -118,6 +119,7 @@ class ResNet(nn.Cell):
 
 class LeNet(nn.Cell):
     """ LeNet definition """
+
     def __init__(self):
         super(LeNet, self).__init__()
         self.relu = nn.ReLU()
@@ -165,4 +167,3 @@ class Net(nn.Cell):
 
     def construct(self, input_x):
         return self.softmax(input_x)
-
diff --git a/tests/ut/python/utils/test_callback.py b/tests/ut/python/utils/test_callback.py
index 8c10c8886d0..f605185d7f6 100644
--- a/tests/ut/python/utils/test_callback.py
+++ b/tests/ut/python/utils/test_callback.py
@@ -15,19 +15,19 @@
 """test callback function."""
 import os
 import stat
+
 import numpy as np
 import pytest
 
-import mindspore.nn as nn
 import mindspore.common.dtype as mstype
-from mindspore import context
+import mindspore.nn as nn
+from mindspore.common.api import ms_function
 from mindspore.common.tensor import Tensor
-from mindspore.nn.optim import Momentum
 from mindspore.nn import TrainOneStepCell, WithLossCell
+from mindspore.nn.optim import Momentum
 from mindspore.train.callback import ModelCheckpoint, _check_file_name_prefix, RunContext, _checkpoint_cb_for_save_op, \
     LossMonitor, _InternalCallbackParam, _chg_ckpt_file_name_if_same_exist, \
     _build_callbacks, CheckpointConfig, _set_cur_net
-from mindspore.common.api import ms_function
 
 
 class Net(nn.Cell):
diff --git a/tests/ut/python/utils/test_checkparam.py b/tests/ut/python/utils/test_checkparam.py
index 9289d26f487..4a5aeadf1b2 100644
--- a/tests/ut/python/utils/test_checkparam.py
+++ b/tests/ut/python/utils/test_checkparam.py
@@ -15,6 +15,7 @@
 """ test_checkparam """
 import numpy as np
 import pytest
+
 import mindspore
 import mindspore.nn as nn
 from mindspore import Model, context
@@ -23,6 +24,7 @@ from mindspore.common.tensor import Tensor
 
 class LeNet5(nn.Cell):
     """ LeNet5 definition """
+
     def __init__(self):
         super(LeNet5, self).__init__()
         self.conv1 = nn.Conv2d(3, 6, 5, pad_mode="valid")
diff --git a/tests/ut/python/utils/test_initializer.py b/tests/ut/python/utils/test_initializer.py
index 31d24343417..19b0e0e62d1 100644
--- a/tests/ut/python/utils/test_initializer.py
+++ b/tests/ut/python/utils/test_initializer.py
@@ -15,19 +15,24 @@
 """ test_initializer """
 import math
 from functools import reduce
+
 import numpy as np
-from scipy import stats
 import pytest as py
-import mindspore.common.initializer as init
+from scipy import stats
+
 import mindspore as ms
+import mindspore.common.initializer as init
 from mindspore import context
 from mindspore.nn import Conv2d
 from ..ut_filter import non_graph_engine
+
+
 # pylint: disable=W0212
 # W0212: protected-access
 class InitTwo(init.Initializer):
     """Initialize the array to two."""
+
     def _initialize(self, arr):
         init._assignment(arr, 2)
 
@@ -44,7 +49,7 @@ def _check_value(tensor, value_min, value_max):
 def _check_uniform(tensor, boundary_a, boundary_b):
     samples = tensor.asnumpy().reshape((-1))
     _, p = stats.kstest(samples, 'uniform', (boundary_a, (boundary_b - boundary_a)))
-    print("p-value is %f"%p)
+    print("p-value is %f" % p)
     return p > 0.0001
diff --git a/tests/ut/python/utils/test_initializer_fuzz.py b/tests/ut/python/utils/test_initializer_fuzz.py
index 0073396bc33..3a6fb4833fb 100644
--- a/tests/ut/python/utils/test_initializer_fuzz.py
+++ b/tests/ut/python/utils/test_initializer_fuzz.py
@@ -14,12 +14,14 @@
 # ============================================================================
 """ test_initializer_fuzz """
 import pytest
+
 import mindspore.nn as nn
-from mindspore import Model, context
+from mindspore import Model
 
 
 class Net(nn.Cell):
     """ Net definition """
+
     def __init__(self, in_str):
         a, b, c, d, e, f, g, h = in_str.strip().split()
         a = int(a)
@@ -56,6 +58,7 @@ def test_shape_error():
 
 class LeNet5(nn.Cell):
     """ LeNet5 definition """
+
     def __init__(self, in_str):
         super(LeNet5, self).__init__()
diff --git a/tests/ut/python/utils/test_serialize.py b/tests/ut/python/utils/test_serialize.py
index 2cb27cadfd1..bf12d76c1a9 100644
--- a/tests/ut/python/utils/test_serialize.py
+++ b/tests/ut/python/utils/test_serialize.py
@@ -16,29 +16,30 @@
 import os
 import stat
 import time
+
 import numpy as np
 import pytest
 
-import mindspore.nn as nn
 import mindspore.common.dtype as mstype
-from mindspore.common.tensor import Tensor
-from mindspore.common.parameter import Parameter
-from mindspore.ops import operations as P
-from mindspore.nn import SoftmaxCrossEntropyWithLogits
-from mindspore.nn.optim.momentum import Momentum
-from mindspore.nn import WithLossCell, TrainOneStepCell
-from mindspore.train.callback import _CheckpointManager
-from mindspore.train.serialization import save_checkpoint, load_checkpoint,load_param_into_net, \
-    _exec_save_checkpoint, export, _save_graph
-from ..ut_filter import run_on_onnxruntime, non_graph_engine
+import mindspore.nn as nn
 from mindspore import context
-
+from mindspore.common.parameter import Parameter
+from mindspore.common.tensor import Tensor
+from mindspore.nn import SoftmaxCrossEntropyWithLogits
+from mindspore.nn import WithLossCell, TrainOneStepCell
+from mindspore.nn.optim.momentum import Momentum
+from mindspore.ops import operations as P
+from mindspore.train.callback import _CheckpointManager
+from mindspore.train.serialization import save_checkpoint, load_checkpoint, load_param_into_net, \
+    _exec_save_checkpoint, export, _save_graph
+from ..ut_filter import run_on_onnxruntime, non_graph_engine
 
 context.set_context(mode=context.GRAPH_MODE)
 
 
 class Net(nn.Cell):
     """Net definition."""
+
     def __init__(self, num_classes=10):
         super(Net, self).__init__()
         self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=0, weight_init="zeros")
@@ -46,7 +47,7 @@ class Net(nn.Cell):
         self.relu = nn.ReLU()
         self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2)
         self.flatten = nn.Flatten()
-        self.fc = nn.Dense(int(224*224*64/16), num_classes)
+        self.fc = nn.Dense(int(224 * 224 * 64 / 16), num_classes)
 
     def construct(self, x):
         x = self.conv1(x)
@@ -289,13 +290,14 @@ def test_load_checkpoint_empty_file():
 
 class MYNET(nn.Cell):
     """ NET definition """
+
     def __init__(self):
         super(MYNET, self).__init__()
         self.conv = nn.Conv2d(3, 64, 3, has_bias=False, weight_init='normal', pad_mode='valid')
         self.bn = nn.BatchNorm2d(64)
         self.relu = nn.ReLU()
         self.flatten = nn.Flatten()
-        self.fc = nn.Dense(64*222*222, 3)  # padding=0
+        self.fc = nn.Dense(64 * 222 * 222, 3)  # padding=0
 
     def construct(self, x):
         x = self.conv(x)
@@ -310,11 +312,12 @@ class MYNET(nn.Cell):
 def test_export():
     net = MYNET()
     input_data = Tensor(np.random.randint(0, 255, [1, 3, 224, 224]).astype(np.float32))
-    export(net, input_data, file_name="./me_export.pb", file_format="GEIR")    
+    export(net, input_data, file_name="./me_export.pb", file_format="GEIR")
 
 
 class BatchNormTester(nn.Cell):
     "used to test exporting network in training mode in onnx format"
+
     def __init__(self, num_features):
         super(BatchNormTester, self).__init__()
         self.bn = nn.BatchNorm2d(num_features)
@@ -339,6 +342,7 @@ class DepthwiseConv2dAndReLU6(nn.Cell):
         x = self.relu6(x)
         return x
 
+
 def test_batchnorm_train_onnx_export():
     input = Tensor(np.ones([1, 3, 32, 32]).astype(np.float32) * 0.01)
     net = BatchNormTester(3)
@@ -352,6 +356,7 @@ def test_batchnorm_train_onnx_export():
 
 class LeNet5(nn.Cell):
     """LeNet5 definition"""
+
     def __init__(self):
         super(LeNet5, self).__init__()
         self.conv1 = nn.Conv2d(1, 6, 5, pad_mode='valid')
@@ -378,8 +383,10 @@ def test_lenet5_onnx_export():
     net = LeNet5()
     export(net, input, file_name='lenet5.onnx', file_format='ONNX')
 
+
 class DefinedNet(nn.Cell):
     """simple Net definition with maxpoolwithargmax."""
+
     def __init__(self, num_classes=10):
         super(DefinedNet, self).__init__()
         self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=0, weight_init="zeros")
@@ -387,7 +394,7 @@ class DefinedNet(nn.Cell):
         self.relu = nn.ReLU()
         self.maxpool = P.MaxPoolWithArgmax(padding="same", ksize=2, strides=2)
         self.flatten = nn.Flatten()
-        self.fc = nn.Dense(int(56*56*64), num_classes)
+        self.fc = nn.Dense(int(56 * 56 * 64), num_classes)
 
     def construct(self, x):
         x = self.conv1(x)
@@ -398,6 +405,7 @@ class DefinedNet(nn.Cell):
         x = self.fc(x)
         return x
 
+
 def test_net_onnx_maxpoolwithargmax_export():
     input = Tensor(np.ones([1, 3, 224, 224]).astype(np.float32) * 0.01)
     net = DefinedNet()
@@ -426,7 +434,7 @@ def test_lenet5_onnx_load_run():
 
     print('------------------ onnxruntime run ------------------')
     ort_session = ort.InferenceSession(onnx_file)
-    input_map = {'x' : input.asnumpy()}
+    input_map = {'x': input.asnumpy()}
     # provide only input x to run model
     outputs = ort_session.run(None, input_map)
     print(outputs[0])
@@ -459,7 +467,7 @@ def test_depthwiseconv_relu6_onnx_load_run():
 
     print('------------------ onnxruntime run ------------------')
     ort_session = ort.InferenceSession(onnx_file)
-    input_map = {'x' : input.asnumpy()}
+    input_map = {'x': input.asnumpy()}
     # provide only input x to run model
     outputs = ort_session.run(None, input_map)
     print(outputs[0])
@@ -469,6 +477,7 @@ def test_depthwiseconv_relu6_onnx_load_run():
     outputs = ort_session.run(None, input_map)
     print(outputs[0])
 
+
 def teardown_module():
     files = ['parameters.ckpt', 'new_ckpt.ckpt', 'lenet5.onnx', 'batch_norm.onnx', 'empty.ckpt']
     for item in files:
diff --git a/tests/vm_impl/__init__.py b/tests/vm_impl/__init__.py
index aff30eccc16..501a28ff4a7 100644
--- a/tests/vm_impl/__init__.py
+++ b/tests/vm_impl/__init__.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 # ============================================================================
 """Vm implementation."""
+from . import array_ops_vm_impl
 from . import math_ops_vm_impl
 from . import nn_ops_vm_impl
-from . import array_ops_vm_impl
 from .vm_interface import vm
diff --git a/tests/vm_impl/array_ops_vm_impl.py b/tests/vm_impl/array_ops_vm_impl.py
index 38c613012e8..7236d9e9199 100644
--- a/tests/vm_impl/array_ops_vm_impl.py
+++ b/tests/vm_impl/array_ops_vm_impl.py
@@ -15,38 +15,45 @@
 """Generate vm_impl function for array ops"""
 import numpy as np
 
-from mindspore.ops import operations as P
-from mindspore.common.tensor import Tensor
 import mindspore.common.dtype as mstype
+from mindspore.common.tensor import Tensor
+from mindspore.ops import operations as P
 from mindspore.ops.vm_impl_registry import vm_impl_registry as vm_impl_getters
 from .vm_interface import vm
+
+
 # pylint: disable=unused-argument
 @vm_impl_getters.register(P.ExpandDims)
 def vm_impl_expand_dims(self):
     """Generate vm_impl function for ExpandDims"""
+
     def vm_impl(x, axis):
         if isinstance(x, float):
             x = Tensor(np.array([x]))
         x = x.asnumpy()
         out = vm.expand_dims(x, axis)
         return Tensor(out)
+
     return vm_impl
 
 
 @vm_impl_getters.register(P.DType)
 def vm_impl_dType(self):
     """Generate vm_impl function for DType"""
+
     def vm_impl(x):
         # update the src type
         return x.dtype()
+
     return vm_impl
 
 
 @vm_impl_getters.register(P.Cast)
 def vm_impl_cast(self):
     """Generate vm_impl function for Cast"""
+
     def vm_impl(x, t):
         if isinstance(t, type(mstype.tensor)):
             t = t.element_type()
@@ -54,164 +61,197 @@ def vm_impl_cast(self):
         x = x.asnumpy()
         out = x.astype(mstype.dtype_to_nptype(t))
         return Tensor(out)
+
     return vm_impl
 
 
 @vm_impl_getters.register(P.Reshape)
 def vm_impl_reshape(self):
     """Generate vm_impl function for Reshape"""
+
     def vm_impl(x, shp):
         x = x.asnumpy()
         out = vm.reshape(x, shp)
         return Tensor(out)
+
     return vm_impl
 
 
 @vm_impl_getters.register(P.Shape)
 def vm_impl_shape(self):
     """Generate vm_impl function for Shape"""
+
     def vm_impl(x):
         shp = vm.shape(x.asnumpy())
         return shp
+
     return vm_impl
 
 
 @vm_impl_getters.register(P.Squeeze)
 def vm_impl_squeeze(self):
     """Generate vm_impl function for Squeeze"""
+
     def vm_impl(x):
         x = x.asnumpy()
         out = vm.squeeze(x, self.axis)
         return Tensor(out)
+
     return vm_impl
 
 
 @vm_impl_getters.register(P.Transpose)
 def vm_impl_transpose(self):
     """Generate vm_impl function for Transpose"""
+
     def vm_impl(x, perm=None):
         x = x.asnumpy()
         if perm is None:
             perm = [i for i in reversed(range(len(x.shape)))]
         out = vm.transpose(x, perm)
         return Tensor(out)
+
     return vm_impl
 
 
 @vm_impl_getters.register(P.Split)
 def vm_impl_split(self):
     """Generate vm_impl function for Split"""
+
     def vm_impl(x):
         x = x.asnumpy()
         output = np.array_split(x, (self.pos,))
         return Tensor(output[0]), Tensor(output[1])
+
     return vm_impl
 
 
 @vm_impl_getters.register(P.Fill)
 def vm_impl_fill(self):
     """Generate vm_impl function for Fill"""
+
     def vm_impl(dims, x):
         if isinstance(x, int):
             ret = np.full(dims, x, np.int32)
         else:
             ret = np.full(dims, x, np.float32)
         return Tensor(ret)
+
     return vm_impl
 
 
 @vm_impl_getters.register(P.Eye)
 def vm_impl_eye(self):
     """Generate vm_impl function for Eye"""
+
     def vm_impl(n, m, t):
         np_type = mstype.dtype_to_nptype(t)
         ret = np.eye(n, m, dtype=np_type)
         return Tensor(ret)
+
     return vm_impl
 
 
 @vm_impl_getters.register(P.InvertPermutation)
 def vm_impl_invert_permutation(self):
     """Generate vm_impl function for InvertPermutation"""
+
     def vm_impl(x):
         out = vm.invert_permutation(x)
         return out
+
     return vm_impl
 
 
 @vm_impl_getters.register(P.Argmax)
 def vm_impl_argmax(self):
     """Generate vm_impl function for Argmax"""
+
     def vm_impl(x):
         output = np.argmax(x.asnumpy(), axis=self.axis)
         return Tensor(output.ravel())
+
     return vm_impl
 
+
 @vm_impl_getters.register(P.Tile)
 def vm_impl_tile(self):
     """Generate vm_impl function for Tile"""
+
     def vm_impl(x, multiples):
         x = x.asnumpy()
         multiples = multiples.asnumpy()
         out = vm.Tile(x, multiples)
         return Tensor(out)
+
     return vm_impl
 
 
 @vm_impl_getters.register(P.ReduceAll)
 def vm_impl_all(self):
     """Generate vm_impl function for All"""
+
     def vm_impl(x, axis):
         x = x.asnumpy()
         out = vm.all(x, axis)
         return Tensor(out)
+
     return vm_impl
 
 
 @vm_impl_getters.register(P.Concat)
 def vm_impl_concatV2(self):
     """Generate vm_impl function for Concat"""
+
     def vm_impl(x):
         x = x.asnumpy()
         out = vm.Concat(x, self.axis)
         return Tensor(out)
+
     return vm_impl
 
 
 @vm_impl_getters.register(P.Slice)
 def vm_impl_slice(self):
     """Generate vm_impl function for Slice"""
+
     def vm_impl(x, begin, size):
         x = x.asnumpy()
         begin = begin.asnumpy()
         size = size.asnumpy()
         out = vm.Slice(x, begin, size)
         return Tensor(out)
+
     return vm_impl
 
 
 @vm_impl_getters.register(P._grad_ops.ConcatOffset)
 def vm_impl_concatOffset(self):
     """Generate vm_impl function for ConcatOffset"""
+
     def vm_impl(x):
-        out = vm.ConcatOffset(x) # out is tuple
+        out = vm.ConcatOffset(x)  # out is tuple
         return out
+
     return vm_impl
 
 
 @vm_impl_getters.register(P.ReduceSum)
 def vm_impl_sum(self):
     """Generate vm_impl function for Sum"""
+
     def vm_impl(x, axis):
         x = x.asnumpy()
         out = vm.sum(x, axis)
         return Tensor(np.array(out))
+
     return vm_impl
 
 
 @vm_impl_getters.register(P.Select)
 def vm_impl_select(self):
     """Generate vm_impl function for Select"""
+
     def vm_impl(cond, x, y):
         """
         Args:
@@ -224,13 +264,16 @@ def vm_impl_select(self):
         y = y.asnumpy()
         out = vm.select(cond, x, y)
         return Tensor(out)
+
     return vm_impl
 
 
 @vm_impl_getters.register(P.Square)
 def vm_impl_square(self):
     """Generate vm_impl function for Square"""
+
     def vm_impl(x):
         x = x.asnumpy()
         return Tensor(x * x)
+
     return vm_impl
diff --git a/tests/vm_impl/math_ops_vm_impl.py b/tests/vm_impl/math_ops_vm_impl.py
index e42ba92d5e8..d674faed74f 100644
--- a/tests/vm_impl/math_ops_vm_impl.py
+++ b/tests/vm_impl/math_ops_vm_impl.py
@@ -14,33 +14,42 @@
 # ============================================================================
 """Generate vm_impl function for math ops"""
 import copy
+
 import numpy as np
-from mindspore.ops import operations as P
-from mindspore.common.tensor import Tensor
-from mindspore.ops.vm_impl_registry import vm_impl_registry as vm_impl_getters
+
 from mindspore.common.dtype import dtype_to_nptype
+from mindspore.common.tensor import Tensor
+from mindspore.ops import operations as P
+from mindspore.ops.vm_impl_registry import vm_impl_registry as vm_impl_getters
 from .vm_interface import vm
+
+
 # pylint: disable=unused-argument
 @vm_impl_getters.register(P.TensorAdd)
 def vm_impl_tensor_add(self):
     """Generate vm_impl function for TensorAdd."""
+
     def vm_impl(x, y):
         x = x.asnumpy()
         y = y.asnumpy()
         return Tensor(x + y)
+
     return vm_impl
 
+
 @vm_impl_getters.register(P.LogicalNot)
 def vm_impl_logical_not(self):
-    x = x.asnumpy()
-    out = vm.logical_not(x)
-    return Tensor(out)
+    """Generate vm_impl function for LogicalNot."""
+
+    def vm_impl(x):  # the body must live in a closure, as in every other getter here
+        x = x.asnumpy()
+        out = vm.logical_not(x)
+        return Tensor(out)
+
+    return vm_impl
 
+
 @vm_impl_getters.register(P.MatMul)
 def vm_impl_mat_mul(self):
     """Generate vm_impl function for MatMul."""
+
     def vm_impl(x, w):
         x = x.asnumpy()
         w = w.asnumpy()
@@ -48,192 +57,232 @@ def vm_impl_mat_mul(self):
         if self.transpose_a:
             x = x.transpose()
         if self.transpose_b:
             w = w.transpose()
-        z = x@w
+        z = x @ w
         return Tensor(z)
+
     return vm_impl
 
 
 @vm_impl_getters.register(P.AddN)
 def vm_impl_addn(self):
     """Generate vm_impl function for AddN."""
+
     def vm_impl(inputs):
         added = copy.deepcopy(inputs[0].asnumpy())
         for x in inputs[1:]:
             added += x.asnumpy()
         return Tensor(added)
+
     return vm_impl
 
 
 @vm_impl_getters.register(P.Neg)
 def vm_impl_neg(self):
     """Generate vm_impl function for Neg."""
+
     def vm_impl(x):
         x = x.asnumpy()
         return Tensor(-x)
+
     return vm_impl
 
 
 @vm_impl_getters.register(P.Sub)
 def vm_impl_Sub(self):
     """Generate vm_impl function for Sub."""
+
     def vm_impl(x, y):
         x = x.asnumpy()
         y = y.asnumpy()
         return Tensor(x - y)
+
     return vm_impl
 
 
 @vm_impl_getters.register(P.Mul)
 def vm_impl_mul(self):
     """Generate vm_impl function for Mul."""
+
     def vm_impl(x, y):
         x = x.asnumpy()
         y = y.asnumpy()
         return Tensor(x * y)
+
     return vm_impl
 
 
 @vm_impl_getters.register(P.Square)
 def vm_impl_square(self):
     """Generate vm_impl function for Square."""
+
     def vm_impl(x):
         x = x.asnumpy()
         return Tensor(x * x)
+
     return vm_impl
 
 
 @vm_impl_getters.register(P.Sqrt)
 def vm_impl_sqrt(self):
     """Generate vm_impl function for Sqrt."""
+
     def vm_impl(x):
         x = x.asnumpy()
         res = vm.sqrt(x)
         return Tensor(res)
+
     return vm_impl
 
 
 @vm_impl_getters.register(P.Pow)
 def vm_impl_pow(self):
     """Generate vm_impl function for Pow."""
+
     def vm_impl(x, y):
         x = x.asnumpy()
         y = y.asnumpy()
         res = vm.power(x, y)
         return Tensor(res)
+
     return vm_impl
 
 
 @vm_impl_getters.register(P.Exp)
 def vm_impl_exp(self):
     """Generate vm_impl function for Exp."""
+
     def vm_impl(x):
         x = x.asnumpy()
         res = vm.exp(x)
         return Tensor(res)
+
     return vm_impl
 
 
 @vm_impl_getters.register(P.RealDiv)
 def vm_impl_real_div(self):
     """Generate vm_impl function for RealDiv."""
+
     def vm_impl(x, y):
         x = x.asnumpy()
         y = y.asnumpy()
         out = x / y
         out = np.array(out, x.dtype)
         return Tensor(out)
+
     return vm_impl
 
 
 @vm_impl_getters.register(P.Div)
 def vm_impl_div(self):
     """Generate vm_impl function for Div."""
+
     def vm_impl(x, y):
         x = x.asnumpy()
         y = y.asnumpy()
         return Tensor(x / y)
+
     return vm_impl
 
 
 @vm_impl_getters.register(P.ReduceMean)
 def vm_impl_reduce_mean(self):
     """Generate vm_impl function for ReduceMean."""
+
     def vm_impl(x, axis):
         x = x.asnumpy()
         out = vm.mean(x, axis)
         return Tensor(out)
+
     return vm_impl
 
 
 @vm_impl_getters.register(P.Equal)
 def vm_impl_equal(self):
     """Generate vm_impl function for Equal."""
+
     def vm_impl(x, y):
         x = x.asnumpy()
         y = y.asnumpy()
         out = vm.equal(x, y)
         return Tensor(np.array(out))
+
     return vm_impl
 
 
 @vm_impl_getters.register(P.NotEqual)
 def vm_impl_not_equal(self):
     """Generate vm_impl function for NotEqual."""
+
     def vm_impl(x, y):
         x = x.asnumpy()
         y = y.asnumpy()
         out = vm.not_equal(x, y)
         return Tensor(np.array(out))
+
     return vm_impl
 
 
 @vm_impl_getters.register(P.Greater)
 def vm_impl_greater(self):
     """Generate vm_impl function for Greater."""
+
     def vm_impl(x, y):
         x = x.asnumpy()
         y = y.asnumpy()
         out = vm.greater(x, y)
         return Tensor(np.array(out))
+
     return vm_impl
 
+
 @vm_impl_getters.register(P.Maximum)
 def vm_impl_maximum(self):
     """Generate vm_impl function for Maximum."""
+
     def vm_impl(x, y):
         x = x.asnumpy()
         y = y.asnumpy()
         out = vm.maximum(x, y)
         return Tensor(out)
+
     return vm_impl
 
 
 @vm_impl_getters.register(P.Minimum)
 def vm_impl_minimum(self):
     """Generate vm_impl function for Minimum."""
+
     def vm_impl(x, y):
         x = x.asnumpy()
         y = y.asnumpy()
         out = vm.minimum(x, y)
         return Tensor(out)
+
     return vm_impl
 
+
 @vm_impl_getters.register(P.Less)
 def vm_impl_less(self):
     """Generate vm_impl function for Less"""
+
     def vm_impl(x, y):
         x = x.asnumpy()
         y = y.asnumpy()
         out = vm.less(x, y)
         return Tensor(np.array(out))
+
     return vm_impl
 
+
 @vm_impl_getters.register(P.ScalarCast)
 def vm_impl_scalar_cast(self):
     """Generate vm_impl function for ScalarCast"""
+
     def vm_impl(x, t):
         np_type = dtype_to_nptype(t)
         value = np_type(x)
         cast_value = value.item()
         return cast_value
+
     return vm_impl
diff --git a/tests/vm_impl/nn_ops_vm_impl.py b/tests/vm_impl/nn_ops_vm_impl.py
index 0df4b5fbaa6..2468314a958 100644
--- a/tests/vm_impl/nn_ops_vm_impl.py
+++ b/tests/vm_impl/nn_ops_vm_impl.py
@@ -14,9 +14,10 @@
 # ============================================================================
 """Generate vm_impl function for nn ops"""
 import numpy as np
+
+from mindspore.common.tensor import Tensor
 from mindspore.ops import operations as P
 from mindspore.ops.operations import _grad_ops as G
-from mindspore.common.tensor import Tensor
 from mindspore.ops.vm_impl_registry import vm_impl_registry as vm_impl_getters
 from .vm_interface import vm
diff --git a/tests/vm_impl/vm_me.py b/tests/vm_impl/vm_me.py
index 82b0324fb57..d9973787ba9 100644
--- a/tests/vm_impl/vm_me.py
+++ b/tests/vm_impl/vm_me.py
@@ -15,6 +15,7 @@
 """VM implementations based on numpy."""
 
 import numpy as np
+
 from mindspore._checkparam import Rel
 from mindspore._checkparam import Validator as validator
@@ -34,11 +35,11 @@ def avg_pooling(x, pool_h, pool_w, stride):
     """
     validator.check_integer("stride", stride, 0, Rel.GT, None)
     num, channel, height, width = x.shape
-    out_h = (height - pool_h)//stride + 1
-    out_w = (width - pool_w)//stride + 1
+    out_h = (height - pool_h) // stride + 1
+    out_w = (width - pool_w) // stride + 1
 
     col = im2col(x, pool_h, pool_w, stride)
-    col = col.reshape(-1, pool_h*pool_w)
+    col = col.reshape(-1, pool_h * pool_w)
 
     out = np.mean(col, axis=1)
     out = out.reshape((num, out_h, out_w, channel)).transpose(0, 3, 1, 2)
@@ -65,7 +66,7 @@ def avg_pool_grad(dout, origin_shape, pool_h, pool_w, stride):
     dx = np.zeros(origin_shape)
     for i in range(height):
         for j in range(width):
-            dx[:, :, i:(i+pool_h), j:(j+pool_w)] += np.ones((pool_h, pool_w))
+            dx[:, :, i:(i + pool_h), j:(j + pool_w)] += np.ones((pool_h, pool_w))
     return dx
 
 
@@ -89,14 +90,14 @@ def _batch_norm(x, scale, shift, running_mean=None, running_var=None,
         x_var = np.var(x, axis=0)
 
         # Normalization followed by Affine transformation
-        x_norm = (x - x_mean)/np.sqrt(x_var + eps)
+        x_norm = (x - x_mean) / np.sqrt(x_var + eps)
 
         # Estimate running average of mean and variance to use at test time
         running_mean = momentum * running_mean + (1 - momentum) * x_mean
         running_var = momentum * running_var + (1 - momentum) * x_var
     else:
         # normalize using running average
-        x_norm = (x - running_mean)/np.sqrt(running_var + eps)
+        x_norm = (x - running_mean) / np.sqrt(running_var + eps)
         x_mean = running_mean
         x_var = running_var
 
@@ -132,11 +133,11 @@ def _batch_norm_grad(dout, x, scale, save_mean, save_inv_variance, \
                                            eps=eps, momentum=momentum, is_training=is_training)
     batch_size = x.shape[0]
     dx_norm = scale * dout
-    dvar = np.sum(dx_norm*(x - x_mean)*((x_var + eps)**(-3.0/2))*(-1.0/2), axis=0)
-    dmean = np.sum(dx_norm*(-1.0/np.sqrt(x_var + eps)), axis=0) \
-            + dvar*(np.sum(-2*(x - x_mean), axis=0)*(1.0/batch_size))
-    dx = dx_norm*(1.0/np.sqrt(x_var + eps)) + dvar*(2.0*(x - x_mean)/batch_size) + dmean*(1.0/batch_size)
-    dgamma = np.sum(dout*x_norm, axis=0)
+    dvar = np.sum(dx_norm * (x - x_mean) * ((x_var + eps) ** (-3.0 / 2)) * (-1.0 / 2), axis=0)
+    dmean = np.sum(dx_norm * (-1.0 / np.sqrt(x_var + eps)), axis=0) \
+            + dvar * (np.sum(-2 * (x - x_mean), axis=0) * (1.0 / batch_size))
+    dx = dx_norm * (1.0 / np.sqrt(x_var + eps)) + dvar * (2.0 * (x - x_mean) / batch_size) + dmean * (1.0 / batch_size)
+    dgamma = np.sum(dout * x_norm, axis=0)
     dbeta = np.sum(dout, axis=0)
     return dx, dgamma, dbeta
@@ -169,20 +170,20 @@ def col2im(col, input_shape, filter_h, filter_w, stride=1, pad=0):
                          f"a tuple of two or four int numbers, but got {stride}")
 
     batch_num, channel, height, width = input_shape
-    out_h = (height + 2*pad - filter_h)//stride_h + 1
-    out_w = (width + 2*pad - filter_w)//stride_w + 1
+    out_h = (height + 2 * pad - filter_h) // stride_h + 1
+    out_w = (width + 2 * pad - filter_w) // stride_w + 1
     col = col.reshape(batch_num, out_h, out_w, channel, filter_h, filter_w) \
-        .transpose(0, 3, 4, 5, 1, 2)
+        .transpose(0, 3, 4, 5, 1, 2)
 
     img = np.zeros((batch_num,
                     channel,
-                    height + 2*pad + stride_h - 1,
-                    width + 2*pad + stride_w - 1)) \
-        .astype(col.dtype)
+                    height + 2 * pad + stride_h - 1,
+                    width + 2 * pad + stride_w - 1)) \
+        .astype(col.dtype)
     for y in range(filter_h):
-        y_max = y + stride_h*out_h
+        y_max = y + stride_h * out_h
         for x in range(filter_w):
-            x_max = x + stride_h*out_w
+            x_max = x + stride_h * out_w
             img[:, :, y:y_max:stride_h, x:x_max:stride_h] += col[:, :, y, x, :, :]
 
     return img[:, :, pad:height + pad, pad:width + pad]
@@ -223,8 +224,8 @@ def conv2d(x, weight, bias=None, stride=1, pad=0,
     elif len(stride) == 4:
         stride = (stride[2], stride[3])
     if len(stride) != 2 or (not isinstance(stride[0], int)) or \
-        (not isinstance(stride[1], int)) or \
-        stride[0] < 1 or stride[1] < 1:
+            (not isinstance(stride[1], int)) or \
+            stride[0] < 1 or stride[1] < 1:
         raise ValueError(f"The \'stride\' of \'conv2d\' should be an positive int number or "
                          f"a tuple of two positive int numbers, but got {stride}")
     stride_h = stride[0]
@@ -235,8 +236,8 @@ def conv2d(x, weight, bias=None, stride=1, pad=0,
     elif len(dilation) == 4:
         dilation = (dilation[2], dilation[3])
     if len(dilation) != 2 or (not isinstance(dilation[0], int)) or \
-        (not isinstance(dilation[1], int)) or \
-        dilation[0] < 1 or dilation[1] < 1:
+            (not isinstance(dilation[1], int)) or \
+            dilation[0] < 1 or dilation[1] < 1:
         raise ValueError(f"The \'dilation\' of \'conv2d\' should be an positive int number or "
                          f"a tuple of two positive int numbers, but got {dilation}")
     dilation_h = dilation[0]
@@ -348,19 +349,19 @@ def im2col(img, filter_h, filter_w, stride=1, pad=0, dilation=1):
                          f"a tuple of two or four int numbers, but got {dilation}")
 
     batch_num, channel, height, width = img.shape
-    out_h = (height + 2*pad - filter_h- (filter_h - 1) * (dilation_h - 1))//stride_h + 1
-    out_w = (width + 2*pad - filter_w- (filter_w - 1) * (dilation_w - 1))//stride_w + 1
+    out_h = (height + 2 * pad - filter_h - (filter_h - 1) * (dilation_h - 1)) // stride_h + 1
+    out_w = (width + 2 * pad - filter_w - (filter_w - 1) * (dilation_w - 1)) // stride_w + 1
 
     img = np.pad(img, [(0, 0), (0, 0), (pad, pad), (pad, pad)], 'constant')
     col = np.zeros((batch_num, channel, filter_h, filter_w, out_h, out_w)).astype(img.dtype)
 
     for y in range(filter_h):
-        y_max = y + stride_h*out_h
+        y_max = y + stride_h * out_h
         for x in range(filter_w):
-            x_max = x + stride_h*out_w
+            x_max = x + stride_h * out_w
             col[:, :, y, x, :, :] = img[:, :, y:y_max:stride_h, x:x_max:stride_h]
 
-    col = col.transpose(0, 4, 5, 1, 2, 3).reshape(batch_num*out_h*out_w, -1)
+    col = col.transpose(0, 4, 5, 1, 2, 3).reshape(batch_num * out_h * out_w, -1)
     return col
@@ -386,11 +387,11 @@ def max_pooling(x, pool_h, pool_w, stride):
     """Max pooling."""
     validator.check_integer("stride", stride, 0, Rel.GT, None)
     num, channel, height, width = x.shape
-    out_h = (height - pool_h)//stride + 1
-    out_w = (width - pool_w)//stride + 1
+    out_h = (height - pool_h) // stride + 1
+    out_w = (width - pool_w) // stride + 1
 
     col = im2col(x, pool_h, pool_w, stride)
-    col = col.reshape(-1, pool_h*pool_w)
+    col = col.reshape(-1, pool_h * pool_w)
 
     out = np.max(col, axis=1)
     out = out.reshape((num, out_h, out_w, channel)).transpose(0, 3, 1, 2)
@@ -404,11 +405,11 @@ def max_pool_grad(x, dout, pool_h, pool_w, stride):
     pool_size = pool_h * pool_w
     dmax = np.zeros((dout.size, pool_size))
     col = im2col(x, pool_h, pool_w, stride)
-    col = col.reshape(-1, pool_h*pool_w)
+    col = col.reshape(-1, pool_h * pool_w)
     arg_max = np.argmax(col, axis=1)
     dmax[np.arange(arg_max.size), arg_max.flatten()] = dout.flatten()
     dmax = dmax.reshape(dout.shape + (pool_size,))
-    dcol = dmax.reshape(dmax.shape[0]*dmax.shape[1]*dmax.shape[2], -1)
+    dcol = dmax.reshape(dmax.shape[0] * dmax.shape[1] * dmax.shape[2], -1)
     dx = col2im(dcol, x.shape, pool_h, pool_w, stride)
     return dx
@@ -420,7 +421,7 @@ def max_pool_grad_with_argmax(x, dout, arg_max, pool_h, pool_w, stride):
     dmax = np.zeros((dout.size, pool_size))
     dmax[np.arange(arg_max.size), arg_max.flatten()] = dout.flatten()
     dmax = dmax.reshape(dout.shape + (pool_size,))
-    dcol = dmax.reshape(dmax.shape[0]*dmax.shape[1]*dmax.shape[2], -1)
+    dcol = dmax.reshape(dmax.shape[0] * dmax.shape[1] * dmax.shape[2], -1)
     dx = col2im(dcol, x.shape, pool_h, pool_w, stride)
     return dx
@@ -429,10 +430,10 @@ def max_pool_with_argmax(x, pool_h, pool_w, stride):
     """Max pooling with argmax."""
     validator.check_integer("stride", stride, 0, Rel.GT, None)
     num, channel, height, width = x.shape
-    out_h = (height - pool_h)//stride + 1
-    out_w = (width - pool_w)//stride + 1
+    out_h = (height - pool_h) // stride + 1
+    out_w = (width - pool_w) // stride + 1
     col = im2col(x, pool_h, pool_w, stride)
-    col = col.reshape(-1, pool_h*pool_w)
+    col = col.reshape(-1, pool_h * pool_w)
     out = np.max(col, axis=1)
     out_argmax = np.argmax(col, axis=1)
     out = out.reshape((num, out_h, out_w, channel)).transpose(0, 3, 1, 2)
@@ -515,7 +516,7 @@ def softmax_cross_entropy_with_logits(logits, labels):
     sample_num = labels.shape[0]
     prob = softmax(logits)
     log_likelihood = -np.log(prob[range(sample_num)]) * labels
-    #loss = np.sum(log_likelihood)
+    # loss = np.sum(log_likelihood)
     loss = log_likelihood
 
     dx = prob.copy()
@@ -704,6 +705,7 @@ def greater(x, y):
     """
     return np.greater(x, y)
 
+
 def less(x, y):
     """
     Get the truth value of (x < y) element-wise.
@@ -718,8 +720,6 @@ def less(x, y):
     return np.less(x, y)
 
 
-
-
 def logical_not(x):
     """
     Gets the truth value of NOT x element-wise.