forked from mindspore-Ecosystem/mindspore
pylint warning clean
This commit is contained in:
parent a2d5ad5abe
commit 26fd75895d
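The diff below is a mechanical style cleanup of the kind pylint reports: spaces added around arithmetic operators, over-long calls wrapped, imports regrouped and re-sorted, two blank lines restored between top-level definitions, and "# " comment spacing fixed. As a rough sketch of the pattern (the names here are invented for illustration, not taken from the commit):

# Hypothetical before/after illustration of the fixes applied throughout this
# commit; the function and variable names are made up for this sketch.
import numpy as np


def scaled_ones(factor):
    """Spaces around '*' (was: np.ones([1]).astype(np.int64)*100)."""
    return np.ones([1]).astype(np.int64) * factor


result = scaled_ones(100)  # two blank lines now separate top-level definitions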
@@ -18,13 +18,13 @@
# pylint: disable=missing-docstring, arguments-differ, W0612

import os

import mindspore.common.dtype as mstype
import mindspore.context as context
from mindspore import Tensor
from mindspore.nn.optim import AdamWeightDecayDynamicLR
from mindspore.model_zoo.Bert_NEZHA import BertConfig, BertNetworkWithLoss, BertTrainOneStepCell, \
    BertTrainOneStepWithLossScaleCell
from mindspore.nn.wrap.loss_scale import FixedLossScaleUpdateCell
from mindspore.nn.optim import AdamWeightDecayDynamicLR
from mindspore.train.loss_scale_manager import DynamicLossScaleManager
from ...dataset_mock import MindData
from ...ops_common import nn, np, batch_tuple_tensor, build_construct_graph

@@ -13,7 +13,6 @@
# limitations under the License.
# ============================================================================
"""use ImageNetToMR tool generate mindrecord"""
import os
from mindspore.mindrecord import ImageNetToMR

IMAGENET_MAP_FILE = "../../../ut/data/mindrecord/testImageNetDataWhole/labels_map.txt"
@@ -21,6 +20,7 @@ IMAGENET_IMAGE_DIR = "../../../ut/data/mindrecord/testImageNetDataWhole/images"
MINDRECORD_FILE = "./imagenet.mindrecord"
PARTITION_NUMBER = 16


def imagenet_to_mindrecord():
    imagenet_transformer = ImageNetToMR(IMAGENET_MAP_FILE,
                                        IMAGENET_IMAGE_DIR,
@@ -28,5 +28,6 @@ def imagenet_to_mindrecord():
                                        PARTITION_NUMBER)
    imagenet_transformer.transform()


if __name__ == '__main__':
    imagenet_to_mindrecord()

@@ -15,6 +15,7 @@
"""generate tfrecord"""
import collections
import os

import tensorflow as tf

IMAGENET_MAP_FILE = "../../../ut/data/mindrecord/testImageNetDataWhole/labels_map.txt"
@@ -22,6 +23,7 @@ IMAGENET_IMAGE_DIR = "../../../ut/data/mindrecord/testImageNetDataWhole/images"
TFRECORD_FILE = "./imagenet.tfrecord"
PARTITION_NUMBER = 16


def get_imagenet_filename_label_pic(map_file, image_dir):
    """
    Get data from imagenet.
@@ -69,18 +71,22 @@ def get_imagenet_filename_label_pic(map_file, image_dir):
            continue
        yield str(file_name), int(label), image_bytes


def create_int_feature(values):
    feature = tf.train.Feature(int64_list=tf.train.Int64List(value=[values]))
    return feature


def create_string_feature(values):
    feature = tf.train.Feature(bytes_list=tf.train.BytesList(value=[bytes(values, encoding='utf-8')]))
    return feature


def create_bytes_feature(values):
    feature = tf.train.Feature(bytes_list=tf.train.BytesList(value=[values]))
    return feature


def imagenet_to_tfrecord():
    writers = []
    for i in range(PARTITION_NUMBER):
@@ -109,5 +115,6 @@ def imagenet_to_tfrecord():

    print("Write {} total examples".format(total_written))


if __name__ == '__main__':
    imagenet_to_tfrecord()

@@ -14,17 +14,20 @@
# ============================================================================
"""test dataset performance about mindspore.MindDataset, mindspore.TFRecordDataset, tf.data.TFRecordDataset"""
import time
import mindspore.dataset as ds
from mindspore.mindrecord import FileReader

import tensorflow as tf

import mindspore.dataset as ds
from mindspore.mindrecord import FileReader

print_step = 5000


def print_log(count):
    if count % print_step == 0:
        print("Read {} rows ...".format(count))


def use_filereader(mindrecord):
    start = time.time()
    columns_list = ["data", "label"]
@@ -38,6 +41,7 @@ def use_filereader(mindrecord):
    end = time.time()
    print("Read by FileReader - total rows: {}, cost time: {}s".format(num_iter, end - start))


def use_minddataset(mindrecord):
    start = time.time()
    columns_list = ["data", "label"]
@@ -51,6 +55,7 @@ def use_minddataset(mindrecord):
    end = time.time()
    print("Read by MindDataset - total rows: {}, cost time: {}s".format(num_iter, end - start))


def use_tfrecorddataset(tfrecord):
    start = time.time()
    columns_list = ["data", "label"]
@@ -66,8 +71,10 @@ def use_tfrecorddataset(tfrecord):
    end = time.time()
    print("Read by TFRecordDataset - total rows: {}, cost time: {}s".format(num_iter, end - start))


def use_tensorflow_tfrecorddataset(tfrecord):
    start = time.time()

    def _parse_record(example_photo):
        features = {
            'file_name': tf.io.FixedLenFeature([], tf.string),
@@ -87,6 +94,7 @@ def use_tensorflow_tfrecorddataset(tfrecord):
    end = time.time()
    print("Read by TensorFlow TFRecordDataset - total rows: {}, cost time: {}s".format(num_iter, end - start))


if __name__ == '__main__':
    # use MindDataset
    mindrecord = './imagenet.mindrecord00'

@@ -18,15 +18,14 @@
import numpy as np

import mindspore.nn as nn
from mindspore.common.api import _executor
from mindspore import Tensor
from mindspore.model_zoo.lenet import LeNet
from mindspore import context
import mindspore.ops.composite as C
from mindspore import Tensor
from mindspore import context
from mindspore.common.api import _executor
from mindspore.model_zoo.lenet import LeNet

context.set_context(mode=context.GRAPH_MODE)


batch_size = 1
channel = 1
height = 32
@@ -36,6 +35,7 @@ num_class = 10

class LeNetGrad(nn.Cell):
    """Backward of LeNet"""

    def __init__(self, network):
        super(LeNetGrad, self).__init__()
        self.grad_op = C.grad_all_with_sens

@@ -17,10 +17,11 @@

import numpy as np

from mindspore.common.api import _executor
from mindspore import Tensor
from mindspore.common.api import _executor
from .resnet_example import resnet50


def test_compile():
    net = resnet50()
    inp = Tensor(np.ones([1, 3, 224, 224]).astype(np.float32))

@@ -20,9 +20,9 @@
import numpy as np

from mindspore import Tensor
from ..train_step_wrap import train_step_without_opt
from .resnet_example import resnet50
from ..vm_impl import *
from ..train_step_wrap import train_step_without_opt


def test_resnet50_pynative():
    net = train_step_without_opt(resnet50())

@@ -17,13 +17,15 @@

import numpy as np

from mindspore.common.api import _executor
import mindspore.context as context
from mindspore import Tensor
from ..train_step_wrap import train_step_with_loss_warp
from mindspore.common.api import _executor
from .resnet_example import resnet50
from ..train_step_wrap import train_step_with_loss_warp

context.set_context(mode=context.GRAPH_MODE)


def test_train_step():
    net = train_step_with_loss_warp(resnet50())
    net.set_train()

@@ -16,15 +16,15 @@
train step wrap
"""
import mindspore.nn as nn
from mindspore.ops import functional as F
from mindspore import ParameterTuple
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from mindspore import Parameter, ParameterTuple


class TrainStepWrap(nn.Cell):
    """
    TrainStepWrap definition
    """

    def __init__(self, network):
        super(TrainStepWrap, self).__init__()
        self.network = network
@@ -39,10 +39,12 @@ class TrainStepWrap(nn.Cell):
        grads = self.grad(self.network, weights)(x, label)
        return self.optimizer(grads)


class NetWithLossClass(nn.Cell):
    """
    NetWithLossClass definition
    """

    def __init__(self, network):
        super(NetWithLossClass, self).__init__(auto_prefix=False)
        self.loss = nn.SoftmaxCrossEntropyWithLogits()
@@ -61,6 +63,7 @@ class TrainStepWrap2(nn.Cell):
    """
    TrainStepWrap2 definition
    """

    def __init__(self, network, sens):
        super(TrainStepWrap2, self).__init__()
        self.network = network
@@ -76,13 +79,16 @@ class TrainStepWrap2(nn.Cell):
        grads = self.grad(self.network, weights)(x, self.sens)
        return self.optimizer(grads)


def train_step_with_sens(network, sens):
    return TrainStepWrap2(network, sens)


class TrainStepWrapWithoutOpt(nn.Cell):
    """
    TrainStepWrapWithoutOpt definition
    """

    def __init__(self, network):
        super(TrainStepWrapWithoutOpt, self).__init__()
        self.network = network
@@ -93,5 +99,6 @@ class TrainStepWrapWithoutOpt(nn.Cell):
        grads = self.grad(self.network, self.weights)(x, label)
        return grads


def train_step_without_opt(network):
    return TrainStepWrapWithoutOpt(NetWithLossClass(network))

@@ -28,6 +28,7 @@ context.set_context(mode=context.GRAPH_MODE)
def Xtest_arg_dict():
    class DictNet(Cell):
        """DictNet definition"""

        def __init__(self):
            super(DictNet, self).__init__()
            self.max = P.Maximum()
@@ -48,6 +49,7 @@ def Xtest_arg_dict():
def test_const_dict():
    class DictNet(Cell):
        """DictNet1 definition"""

        def __init__(self):
            super(DictNet, self).__init__()
            self.max = P.Maximum()
@@ -58,6 +60,7 @@ def test_const_dict():
            a = self.max(self.dictionary["x"], self.dictionary["y"])
            b = self.min(self.dictionary["x"], self.dictionary["y"])
            return a + b

    net = DictNet()
    net()

@@ -65,6 +68,7 @@ def test_const_dict():
def test_dict_set_or_get_item():
    class DictNet(Cell):
        """DictNet1 definition"""

        def __init__(self):
            super(DictNet, self).__init__()
            self.dict_ = {"x": 1, "y": 2}
@@ -91,6 +95,7 @@ def test_dict_set_or_get_item():
def test_dict_set_or_get_item_2():
    class DictNet(Cell):
        """DictNet1 definition"""

        def __init__(self):
            super(DictNet, self).__init__()

@@ -117,6 +122,7 @@ def test_dict_set_or_get_item_2():
def test_dict_set_or_get_item_3():
    class DictNet(Cell):
        """DictNet1 definition"""

        def __init__(self):
            super(DictNet, self).__init__()
            self.dict_ = {"x": Tensor(np.ones([2, 2, 3], np.float32)), "y": 1}
@@ -130,5 +136,3 @@ def test_dict_set_or_get_item_3():

    net = DictNet()
    assert net() == Tensor(np.ones([4, 2, 3], np.float32))


@@ -13,7 +13,6 @@
# limitations under the License.
# ============================================================================
import numpy as np
import pytest

from mindspore import Tensor, context
from mindspore.nn import Cell

@@ -15,6 +15,7 @@
"""setup for pytest"""
import mindspore.context as context


# pylint: disable=unused-argument
def setup_module(module):
    context.set_context(mode=context.GRAPH_MODE)

@@ -16,6 +16,7 @@
resnet50 example
"""
import numpy as np

import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P

@@ -16,19 +16,21 @@
test assign add
"""
import numpy as np
import mindspore.nn as nn
from mindspore.ops import operations as P
from mindspore.common.initializer import initializer
from mindspore import Tensor, Parameter

import mindspore as ms
from ..ut_filter import non_graph_engine
from mindspore.common.api import _executor
import mindspore.context as context
import pytest
import mindspore.nn as nn
from mindspore import Tensor, Parameter
from mindspore.common.initializer import initializer
from mindspore.ops import operations as P
from ..ut_filter import non_graph_engine

context.set_context(mode=context.GRAPH_MODE)


class Net(nn.Cell):
    """Net definition"""

    def __init__(self):
        super(Net, self).__init__()
        self.AssignAdd = P.AssignAdd()
@@ -39,18 +41,19 @@ class Net(nn.Cell):
        out = self.AssignAdd(self.inputdata, x)
        return out


@non_graph_engine
def test_AssignAdd_1():
    """test AssignAdd 1"""
    import mindspore.context as context
    context.set_context(mode=context.GRAPH_MODE)
    net = Net()
    x = Tensor(np.ones([1]).astype(np.int64)*100)
    x = Tensor(np.ones([1]).astype(np.int64) * 100)

    print("MyPrintResult dataX:", x)
    result = net(x)
    print("MyPrintResult data::", result)
    expect = np.ones([1]).astype(np.int64)*101
    expect = np.ones([1]).astype(np.int64) * 101
    diff = result.asnumpy() - expect

    print("MyPrintExpect:", expect)
@@ -58,18 +61,19 @@ def test_AssignAdd_1():
    error = np.ones(shape=[1]) * 1.0e-3
    assert np.all(diff < error)


@non_graph_engine
def test_AssignAdd_2():
    """test AssignAdd 2"""
    import mindspore.context as context
    context.set_context(mode=context.GRAPH_MODE)
    net = Net()
    x = Tensor(np.ones([1]).astype(np.int64)*102)
    x = Tensor(np.ones([1]).astype(np.int64) * 102)

    print("MyPrintResult dataX:", x)
    result = net(x)
    print("MyPrintResult data::", result.asnumpy())
    expect = np.ones([1]).astype(np.int64)*103
    expect = np.ones([1]).astype(np.int64) * 103
    diff = result.asnumpy() - expect

    print("MyPrintExpect:", expect)
@@ -77,8 +81,10 @@ def test_AssignAdd_2():
    error = np.ones(shape=[1]) * 1.0e-3
    assert np.all(diff < error)


class AssignAddNet(nn.Cell):
    """Net definition"""

    def __init__(self):
        super(AssignAddNet, self).__init__()
        self.AssignAdd = P.AssignAdd()
@@ -89,9 +95,10 @@ class AssignAddNet(nn.Cell):
        z1 = self.AssignAdd(self.inputdata, self.one)
        return z1


@non_graph_engine
def test_assignadd_scalar_cast():
    net = AssignAddNet()
    x = Tensor(np.ones([1]).astype(np.int64)*102)
    #_executor.compile(net, 1)
    x = Tensor(np.ones([1]).astype(np.int64) * 102)
    # _executor.compile(net, 1)
    result = net(x)

@@ -14,6 +14,7 @@
# ============================================================================
""" test Activations """
import numpy as np

import mindspore.nn as nn
from mindspore import Tensor
from ..ut_filter import non_graph_engine

@@ -16,15 +16,17 @@
test assign sub
"""
import numpy as np

import mindspore.context as context
import mindspore.nn as nn
import mindspore.ops.operations as P
from mindspore import Tensor
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE)


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()

@@ -14,6 +14,7 @@
# ============================================================================
"""ut for batchnorm layer"""
import numpy as np

import mindspore.nn as nn
from mindspore import Tensor
from ..ut_filter import non_graph_engine

@@ -14,14 +14,17 @@
# ============================================================================
""" test BiasAdd """
import numpy as np

import mindspore.nn as nn
from mindspore.ops import operations as P
from mindspore.common.initializer import initializer
from mindspore import Tensor, Parameter
from mindspore.common.initializer import initializer
from mindspore.ops import operations as P
from ..ut_filter import non_graph_engine


class Net(nn.Cell):
    """Net definition"""

    def __init__(self,
                 output_channels,
                 bias_init='zeros',

@@ -14,6 +14,7 @@
# ============================================================================
"""test conv"""
import numpy as np

import mindspore.nn as nn
from mindspore import Tensor
from ..ut_filter import non_graph_engine
@@ -25,6 +26,7 @@ out_channels = 64

class Net(nn.Cell):
    """Net definition"""

    def __init__(self,
                 cin,
                 cout,
@@ -70,6 +72,7 @@ def test_compile2():
    output = net(input_data)
    print(output.asnumpy())


@non_graph_engine
def test_compile3():
    net = Net(3, 1, (3, 3), weight_init='ONES')

@@ -14,12 +14,15 @@
# ============================================================================
""" test Dense """
import numpy as np

import mindspore.nn as nn
from mindspore import Tensor
from ..ut_filter import non_graph_engine


class Net(nn.Cell):
    """Net definition"""

    def __init__(self,
                 input_channels,
                 output_channels,

@@ -14,11 +14,12 @@
# ============================================================================
"""test eval"""
import numpy as np

import mindspore as ms
import mindspore.nn as nn
from mindspore.common.api import _executor
from mindspore import Tensor
from mindspore import context
from mindspore.common.api import _executor
from ..ut_filter import non_graph_engine


@@ -16,8 +16,8 @@
import numpy as np

import mindspore.nn as nn
from mindspore.ops import operations as P
from mindspore import Tensor
from mindspore.ops import operations as P
from ..ut_filter import non_graph_engine


@@ -15,12 +15,12 @@
"""
test pooling api
"""
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor


class MaxNet(nn.Cell):
    """MaxNet definition"""

    def __init__(self,
                 kernel_size,
                 stride=None):

@@ -16,9 +16,11 @@
test softmax api
"""
import numpy as np

import mindspore.nn as nn
from mindspore import Tensor


class Net(nn.Cell):
    def __init__(self, dim):
        super(Net, self).__init__()

@@ -14,10 +14,12 @@
# ============================================================================
""" test TensorAdd """
import numpy as np

import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()

@@ -14,29 +14,33 @@
# ============================================================================
""" test model train """
import numpy as np

import mindspore.nn as nn
from mindspore.ops import operations as P
from mindspore.common.initializer import initializer
from mindspore import Tensor, Parameter, Model
from mindspore.common.initializer import initializer
from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
from mindspore.nn.optim import Momentum
from mindspore.ops import operations as P


# fn is a function that takes i as input
def lr_gen(fn, epoch_size):
    for i in range(epoch_size):
        yield fn(i)


def me_train_tensor(net, input_np, label_np, epoch_size=2):
    """me_train_tensor"""
    loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")
    opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr_gen(lambda i: 0.1, epoch_size), 0.9, 0.01, 1024)
    opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr_gen(lambda i: 0.1, epoch_size), 0.9,
                   0.01, 1024)
    Model(net, loss, opt)
    _network = nn.WithLossCell(net, loss)
    _train_net = nn.TrainOneStepCell(_network, opt)
    _train_net.set_train()
    label_np = np.argmax(label_np, axis=-1).astype(np.int32)
    for epoch in range(0, epoch_size):
        print(f"epoch %d"%(epoch))
        print(f"epoch %d" % (epoch))
        _train_net(Tensor(input_np), Tensor(label_np))


@@ -52,6 +56,7 @@ def test_bias_add(test_with_simu):

    class Net(nn.Cell):
        """Net definition"""

        def __init__(self,
                     output_channels,
                     bias_init='zeros',
@@ -87,6 +92,7 @@ def test_conv(test_with_simu):

    class Net(nn.Cell):
        """Net definition"""

        def __init__(self,
                     cin,
                     cout,
@@ -116,6 +122,7 @@ def test_net():

    class Net(nn.Cell):
        """Net definition"""

        def __init__(self):
            super(Net, self).__init__()
            Tensor(np.ones([64, 3, 7, 7]).astype(np.float32) * 0.01)
@@ -141,6 +148,7 @@ def test_net():
    label_np = np.ones([32, 12]).astype(np.int32)
    me_train_tensor(net, input_np, label_np)


def test_bn():
    """test_bn"""
    import mindspore.context as context
@@ -151,6 +159,7 @@ def test_bn():

    class Net(nn.Cell):
        """Net definition"""

        def __init__(self, cin, cout):
            super(Net, self).__init__()
            self.bn = nn.BatchNorm2d(cin)

@@ -23,6 +23,7 @@ from mindspore.ops import composite as C
from mindspore.ops import functional as F
from mindspore.ops import operations as P


def get_reordered_parameters(parameters):
    """get_reordered_parameters"""
    # put the bias parameter to the end
@@ -36,12 +37,15 @@ def get_reordered_parameters(parameters):
    reordered_params = tuple(non_bias_param + bias_param)
    return len(non_bias_param), len(reordered_params), reordered_params


def get_net_trainable_reordered_params(net):
    params = net.trainable_params()
    return get_reordered_parameters(params)


class TrainOneStepWithLarsCell(nn.Cell):
    """TrainOneStepWithLarsCell definition"""

    def __init__(self, network, optimizer, sens=1.0):
        super(TrainOneStepWithLarsCell, self).__init__(auto_prefix=False)
        self.network = network
@@ -66,11 +70,13 @@ class TrainOneStepWithLarsCell(nn.Cell):
        new_grads = lars_grads + bias_grads
        return F.depend(loss, self.optimizer(new_grads))


# fn is a function that takes i as input
def lr_gen(fn, epoch_size):
    for i in range(epoch_size):
        yield fn(i)


def me_train_tensor(net, input_np, label_np, epoch_size=2):
    """me_train_tensor"""
    loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)

@@ -14,12 +14,14 @@
# ============================================================================
"""test_dtype"""
from dataclasses import dataclass

import numpy as np
import pytest

import mindspore as ms
from mindspore.common import dtype


def test_dtype_to_nptype():
    """test_dtype2nptype"""
    assert ms.dtype_to_nptype(ms.bool_) == np.bool_
@@ -59,6 +61,7 @@ def test_dtype_to_pytype():
    @dataclass
    class Foo:
        x: int

        def inf(self):
            return self.x


@@ -25,25 +25,27 @@ import mindspore as ms
import mindspore.common.api as me
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.parameter import Parameter
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from ..ut_filter import non_graph_engine


ndarr = np.ones((2, 3))


def test_tensor_flatten():
    with pytest.raises(AttributeError):
        lst = [1, 2, 3, 4,]
        lst = [1, 2, 3, 4, ]
        tensor_list = ms.Tensor(lst, ms.float32)
        tensor_list = tensor_list.Flatten()
        print(tensor_list)


def test_tensor_list():
    lst = [[1.0, 2.0, 1.0], [1.0, 10.0, 9.0]]
    tensor_list = ms.Tensor(lst, ms.float32)
    print(tensor_list)


def test_tensor():
    """test_tensor"""
    t1 = ms.Tensor(ndarr)
@@ -63,6 +65,7 @@ def test_tensor():
    assert isinstance(t4, ms.Tensor)
    assert t4.dtype() == ms.int64


def test_tensor_type_float16():
    t_float16 = ms.Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float16))
    assert isinstance(t_float16, ms.Tensor)
@@ -107,6 +110,7 @@ def test_tensor_type_float64_user_define():
    assert t_float64.shape() == (2, 3)
    assert t_float64.dtype() == ms.float64


def test_tensor_type_bool():
    # init a tensor with bool type
    ts_bool_array = ms.Tensor(np.zeros([2, 3], np.bool), ms.bool_)
@@ -122,6 +126,7 @@ def test_tensor_type_bool():
    assert t_bool_array.shape() == (2, 3)
    assert t_bool_array.dtype() == ms.bool_


def test_tensor_type_int8():
    t_int8_array = ms.Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int8))
    assert isinstance(t_int8_array, ms.Tensor)
@@ -154,6 +159,7 @@ def test_tensor_type_int64():
    assert t_int64.shape() == (2, 3)
    assert t_int64.dtype() == ms.int64


def test_tensor_type_uint8():
    t_uint8_array = ms.Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8))
    assert isinstance(t_uint8_array, ms.Tensor)
@@ -181,6 +187,7 @@ def test_tensor_type_uint64():
    assert t_uint64.shape() == (2, 3)
    assert t_uint64.dtype() == ms.uint64


def test_set_type():
    t = ms.Tensor(ndarr)
    t.set_dtype(ms.float32)
@@ -202,15 +209,17 @@ def test_sub():
    z = x - y
    assert isinstance(z, ms.Tensor)


@non_graph_engine
def test_div():
    x = ms.Tensor(np.array([[2,6,10],[12, 4, 8]]).astype(np.float32))
    y = ms.Tensor(np.array([[2,2,5],[6, 1, 2]]).astype(np.float32))
    x = ms.Tensor(np.array([[2, 6, 10], [12, 4, 8]]).astype(np.float32))
    y = ms.Tensor(np.array([[2, 2, 5], [6, 1, 2]]).astype(np.float32))
    z = x / y
    z2 = x / 2
    assert isinstance(z, ms.Tensor)
    assert isinstance(z2, ms.Tensor)


@non_graph_engine
def test_parameter():
    x = Parameter(initializer(1, [1], ms.float32), name="beta1_power")
@@ -220,6 +229,7 @@ def test_parameter():

class Net(nn.Cell):
    """Net definition"""

    def __init__(self, dim):
        super(Net, self).__init__()
        self.dim = dim
@@ -266,6 +276,7 @@ def test_tensor_contiguous():
    assert True, rt_f.flags['C_CONTIGUOUS']
    print("rt_f flags = ", rt_f.flags)


def test_tensor_contiguous2():
    input_data = np.random.randn(32, 112, 112, 3).astype(np.float32)
    input_me = input_data.transpose(0, 3, 1, 2)
@@ -274,36 +285,43 @@ def test_tensor_contiguous2():
    out_f = tensor_f_float32.asnumpy()
    print("out_f flags = ", out_f.flags)


def test_tensor_input_string():
    with pytest.raises(TypeError):
        input_data = 'ccc'
        ms.Tensor(input_data)


def test_tensor_input_tuple_string():
    with pytest.raises(TypeError):
        input_data = (2, 3, '4', 5)
        ms.Tensor(input_data)


def test_tensor_input_list_string():
    with pytest.raises(TypeError):
        input_data = [[2, 3, '4', 5], [1, 2, 3, 4]]
        ms.Tensor(input_data)


def test_tensor_input_none():
    with pytest.raises(TypeError):
        input_data = None
        ms.Tensor(input_data, np.int64)


# pylint: disable=no-value-for-parameter
def test_tensor_input_empty():
    with pytest.raises(TypeError):
        ms.Tensor()


def test_tensor_input_ndarray_str():
    with pytest.raises(TypeError):
        inp = np.array(["88", 2, 4])
        ms.Tensor(inp)


def test_tensor_input_ndarray_bool():
    inp = np.array([True, 2, 4])
    ms.Tensor(inp)
@@ -311,86 +329,103 @@ def test_tensor_input_ndarray_bool():
    inp = np.array([False, 2, 4])
    ms.Tensor(inp)


def test_tensor_input_ndarray_complex():
    with pytest.raises(TypeError):
        inp = np.array([20j, 2, 4])
        ms.Tensor(inp)


def test_tensor_input_ndarray_none():
    with pytest.raises(TypeError):
        inp = np.array([None, 2, 4])
        ms.Tensor(inp)


def test_tensor_input_ndarray_dict():
    with pytest.raises(TypeError):
        inp = {'a': 6, 'b': 7}
        ms.Tensor(inp)


def test_tensor_input_np_nan():
    with pytest.raises(TypeError):
        input_data = (1, 2, 3, np.nan)
        ms.Tensor(input_data, np.int64)


def test_tensor_input_tuple_inf():
    with pytest.raises(TypeError):
        input_data = (1, 2, 3, float("inf"))
        ms.Tensor(input_data, np.int64)


def test_tensor_input_dict():
    with pytest.raises(TypeError):
        input_data = {'a': 6, 'b': 7}
        ms.Tensor(input_data, np.int64)


def test_tensor_input_complex():
    with pytest.raises(TypeError):
        input_data = (1, 2j, 3)
        ms.Tensor(input_data, np.int64)


def test_tensor_dtype_np_float():
    with pytest.raises(TypeError):
        input_data = np.random.randn(32, 112, 112, 3).astype(np.float)
        ms.Tensor(input_data, np.float)


def test_tensor_dtype_np_float16():
    with pytest.raises(TypeError):
        input_data = np.random.randn(32, 112, 112, 3).astype(np.float16)
        ms.Tensor(input_data, np.float16)


def test_tensor_dtype_np_float32():
    with pytest.raises(TypeError):
        input_data = np.random.randn(32, 112, 112, 3).astype(np.float32)
        ms.Tensor(input_data, np.float32)


def test_tensor_dtype_np_float64():
    with pytest.raises(TypeError):
        input_data = np.random.randn(32, 112, 112, 3).astype(np.float64)
        ms.Tensor(input_data, np.float64)


def test_tensor_dtype_np_int():
    with pytest.raises(TypeError):
        input_data = np.random.randn(32, 112, 112, 3).astype(np.int)
        ms.Tensor(input_data, np.int)


def test_tensor_dtype_np_int8():
    with pytest.raises(TypeError):
        input_data = np.random.randn(32, 112, 112, 3).astype(np.int8)
        ms.Tensor(input_data, np.int8)


def test_tensor_dtype_np_int16():
    with pytest.raises(TypeError):
        input_data = np.random.randn(32, 112, 112, 3).astype(np.int16)
        ms.Tensor(input_data, np.int16)


def test_tensor_dtype_np_int32():
    with pytest.raises(TypeError):
        input_data = np.random.randn(32, 112, 112, 3).astype(np.int32)
        ms.Tensor(input_data, np.int32)


def test_tensor_dtype_np_int64():
    with pytest.raises(TypeError):
        input_data = np.random.randn(32, 112, 112, 3).astype(np.int64)
        ms.Tensor(input_data, np.int64)


def test_tensor_dtype_fp32_to_bool():
    with pytest.raises(RuntimeError):
        input = np.random.randn(2, 3, 4, 5).astype(np.float32)
@@ -399,7 +434,7 @@ def test_tensor_dtype_fp32_to_bool():


def test_tensor_operation():
    x = Tensor(np.ones((3,3)) * 4)
    x = Tensor(np.ones((3, 3)) * 4)
    res = x + 1
    assert np.all(res.asnumpy() == np.ones((3, 3)) * 5)
    res = 1 + x

@@ -14,10 +14,11 @@
# ============================================================================
"""test tensor py"""
import numpy as np

import mindspore as ms
from mindspore.common.api import _executor
from mindspore.nn import Cell
from mindspore.ops import operations as P
from mindspore.common.api import _executor
import mindspore as ms
from ..ut_filter import non_graph_engine


@@ -93,6 +94,7 @@ def test_float():

def test_tensor_method_sub():
    """test_tensor_method_sub"""

    class Net(Cell):
        def __init__(self):
            super(Net, self).__init__()
@@ -111,6 +113,7 @@ def test_tensor_method_sub():

def test_tensor_method_mul():
    """test_tensor_method_mul"""

    class Net(Cell):
        def __init__(self):
            super(Net, self).__init__()
@@ -129,6 +132,7 @@ def test_tensor_method_mul():

def test_tensor_method_div():
    """test_tensor_method_div"""

    class Net(Cell):
        def __init__(self):
            super(Net, self).__init__()

@@ -13,24 +13,25 @@
# limitations under the License.
# ============================================================================
import numpy as np
from mindspore.common.api import ms_function
from mindspore.common.tensor import Tensor
from mindspore.ops import operations as P
import mindspore.ops.functional as F

import mindspore.context as context
from mindspore.ops import composite as C
from mindspore.ops.composite import core
from mindspore.common import dtype as mstype
import mindspore.nn as nn
import mindspore.ops.functional as F
from mindspore.common import dtype as mstype
from mindspore.common.tensor import Tensor
from mindspore.ops import composite as C
from mindspore.ops import operations as P

context.set_context(mode=context.GRAPH_MODE)
add1 = P.TensorAdd()
mul1 = P.MatMul()
add2 = P.TensorAdd()


def add(x, y):
    return add1(x, y)


class Func(nn.Cell):
    def __init__(self):
        super(Func, self).__init__()
@@ -48,7 +49,10 @@ class Func(nn.Cell):
        out = F.depend(out, clear)
        return out


grad_s = C.GradOperation('grad_with_sens', get_all=True, sens_param=True)


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
@@ -69,6 +73,7 @@ class Net(nn.Cell):
        out = F.depend(out, clear)
        return out


def test_add():
    x = Tensor(np.ones([3, 3]).astype(np.float32))
    y = Tensor(np.ones([3, 3]).astype(np.float32))
@@ -76,6 +81,7 @@ def test_add():
    func.add_flags(has_effect=True)
    func(x, y)


def test_sens():
    x = Tensor(np.ones([3, 3]).astype(np.float32))
    y = Tensor(np.ones([3, 3]).astype(np.float32))
@@ -84,6 +90,7 @@ def test_sens():
    net.add_flags(has_effect=True)
    out = net(x, y, sens)


class Net_hyper(nn.Cell):
    def __init__(self):
        super(Net_hyper, self).__init__()
@@ -105,6 +112,7 @@ class Net_hyper(nn.Cell):
        out = F.depend(out, clear)
        return out


def test_hyper_add():
    x = Tensor(np.ones([3, 3]).astype(np.float32))
    y = Tensor(np.ones([3, 3]).astype(np.float32))
@@ -113,6 +121,7 @@ def test_hyper_add():
    net.add_flags(has_effect=True)
    out = net(x, y, sens)


def test_keep_order_io_effect_exception_return_dtype():
    class Net(nn.Cell):
        def __init__(self):

@@ -14,10 +14,13 @@
# ============================================================================
"""test accuracy"""
import math

import numpy as np
import pytest
from mindspore.nn.metrics import Accuracy

from mindspore import Tensor
from mindspore.nn.metrics import Accuracy


def test_classification_accuracy():
    """test_classification_accuracy"""
@@ -29,8 +32,9 @@ def test_classification_accuracy():
    metric.update(x, y)
    accuracy = metric.eval()
    accuracy2 = metric(x, y2)
    assert math.isclose(accuracy, 2/3)
    assert math.isclose(accuracy2, 2/3)
    assert math.isclose(accuracy, 2 / 3)
    assert math.isclose(accuracy2, 2 / 3)


def test_multilabel_accuracy():
    x = Tensor(np.array([[0, 1, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]]))
@@ -39,7 +43,8 @@ def test_multilabel_accuracy():
    metric.clear()
    metric.update(x, y)
    accuracy = metric.eval()
    assert accuracy == 1/3
    assert accuracy == 1 / 3


def test_shape_accuracy():
    x = Tensor(np.array([[0, 1, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]]))
@@ -49,6 +54,7 @@ def test_shape_accuracy():
    with pytest.raises(ValueError):
        metric.update(x, y)


def test_shape_accuracy2():
    x = Tensor(np.array([[0, 1, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]]))
    y = Tensor(np.array([0, 1, 1, 1]))
@@ -57,6 +63,7 @@ def test_shape_accuracy2():
    with pytest.raises(ValueError):
        metric.update(x, y)


def test_shape_accuracy3():
    x = Tensor(np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]]))
    y = Tensor(np.array([[1, 0, 1], [1, 1, 1]]))
@@ -65,6 +72,7 @@ def test_shape_accuracy3():
    with pytest.raises(ValueError):
        metric.update(x, y)


def test_shape_accuracy4():
    x = Tensor(np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]]))
    y = Tensor(np.array(1))
@@ -73,6 +81,7 @@ def test_shape_accuracy4():
    with pytest.raises(ValueError):
        metric.update(x, y)


def test_type_accuracy():
    with pytest.raises(TypeError):
        Accuracy('test')

@@ -14,10 +14,12 @@
# ============================================================================
"""test error"""
import math

import numpy as np
import pytest
from mindspore.nn.metrics import MAE, MSE

from mindspore import Tensor
from mindspore.nn.metrics import MAE, MSE


def test_MAE():
@@ -27,7 +29,7 @@ def test_MAE():
    error.clear()
    error.update(x, y)
    result = error.eval()
    assert math.isclose(result, 0.15/4)
    assert math.isclose(result, 0.15 / 4)


def test_input_MAE():
@@ -52,7 +54,7 @@ def test_MSE():
    error.clear()
    error.update(x, y)
    result = error.eval()
    assert math.isclose(result, 0.0125/4)
    assert math.isclose(result, 0.0125 / 4)


def test_input_MSE():

@@ -13,11 +13,11 @@
# limitations under the License.
# ============================================================================
# """test_fbeta"""
import math
import numpy as np
import pytest
from mindspore.nn.metrics import get_metric_fn, Fbeta

from mindspore import Tensor
from mindspore.nn.metrics import get_metric_fn, Fbeta


def test_classification_fbeta():
@@ -32,9 +32,9 @@ def test_classification_fbeta():
    fbeta_mean = metric.eval(True)
    fbeta2 = metric(x, y2)

    assert np.allclose(fbeta, np.array([2/3, 2/3]))
    assert np.allclose(fbeta2, np.array([2/3, 2/3]))
    assert np.allclose(fbeta_mean, 2/3)
    assert np.allclose(fbeta, np.array([2 / 3, 2 / 3]))
    assert np.allclose(fbeta2, np.array([2 / 3, 2 / 3]))
    assert np.allclose(fbeta_mean, 2 / 3)


def test_fbeta_update1():
@@ -46,6 +46,7 @@ def test_fbeta_update1():
    with pytest.raises(ValueError):
        metric.update(x, y)


def test_fbeta_update2():
    x1 = Tensor(np.array([[0.2, 0.5, 0.7], [0.3, 0.1, 0.2], [0.9, 0.6, 0.5]]))
    y1 = Tensor(np.array([1, 0, 2]))

@@ -15,8 +15,9 @@
"""test loss"""
import numpy as np
import pytest
from mindspore.nn.metrics import Loss

from mindspore import Tensor
from mindspore.nn.metrics import Loss


def test_loss_inputs_error():

@@ -14,9 +14,11 @@
# ============================================================================
"""test_metric_factory"""
import math

import numpy as np
from mindspore.nn.metrics import get_metric_fn

from mindspore import Tensor
from mindspore.nn.metrics import get_metric_fn


def test_classification_accuracy():
@@ -26,7 +28,7 @@ def test_classification_accuracy():
    metric.clear()
    metric.update(x, y)
    accuracy = metric.eval()
    assert math.isclose(accuracy, 2/3)
    assert math.isclose(accuracy, 2 / 3)


def test_classification_accuracy_by_alias():
@@ -36,7 +38,7 @@ def test_classification_accuracy_by_alias():
    metric.clear()
    metric.update(x, y)
    accuracy = metric.eval()
    assert math.isclose(accuracy, 2/3)
    assert math.isclose(accuracy, 2 / 3)


def test_classification_precision():

@@ -14,10 +14,12 @@
# ============================================================================
"""test_precision"""
import math

import numpy as np
import pytest
from mindspore.nn.metrics import Precision

from mindspore import Tensor
from mindspore.nn.metrics import Precision


def test_classification_precision():
@@ -43,7 +45,7 @@ def test_multilabel_precision():
    metric.update(x, y)
    precision = metric.eval()

    assert np.equal(precision, np.array([1, 2/3, 1])).all()
    assert np.equal(precision, np.array([1, 2 / 3, 1])).all()


def test_average_precision():
@@ -54,7 +56,7 @@ def test_average_precision():
    metric.update(x, y)
    precision = metric.eval(True)

    assert math.isclose(precision, (1 + 2/3 + 1) / 3)
    assert math.isclose(precision, (1 + 2 / 3 + 1) / 3)


def test_num_precision():

@@ -14,10 +14,12 @@
# ============================================================================
"""test recall"""
import math

import numpy as np
import pytest
from mindspore.nn.metrics import Recall

from mindspore import Tensor
from mindspore.nn.metrics import Recall


def test_classification_recall():
@@ -43,7 +45,7 @@ def test_multilabel_recall():
    metric.update(x, y)
    recall = metric.eval()

    assert np.equal(recall, np.array([2/3, 2/3, 1])).all()
    assert np.equal(recall, np.array([2 / 3, 2 / 3, 1])).all()


def test_average_recall():
@@ -54,7 +56,7 @@ def test_average_recall():
    metric.update(x, y)
    recall = metric.eval(True)

    assert math.isclose(recall, (2/3 + 2/3 + 1) / 3)
    assert math.isclose(recall, (2 / 3 + 2 / 3 + 1) / 3)


def test_num_recall():

@@ -14,10 +14,12 @@
# ============================================================================
"""test topk"""
import math

import numpy as np
import pytest
from mindspore.nn.metrics import TopKCategoricalAccuracy, Top1CategoricalAccuracy, Top5CategoricalAccuracy

from mindspore import Tensor
from mindspore.nn.metrics import TopKCategoricalAccuracy, Top1CategoricalAccuracy, Top5CategoricalAccuracy


def test_type_topk():
@@ -54,8 +56,8 @@ def test_topk():
    topk.update(x, y)
    result = topk.eval()
    result2 = topk(x, y2)
    assert math.isclose(result, 2/3)
    assert math.isclose(result2, 2/3)
    assert math.isclose(result, 2 / 3)
    assert math.isclose(result2, 2 / 3)


def test_zero_topk():
@@ -79,8 +81,8 @@ def test_top1():
    topk.update(x, y)
    result = topk.eval()
    result2 = topk(x, y2)
    assert math.isclose(result, 1/3)
    assert math.isclose(result2, 1/3)
    assert math.isclose(result, 1 / 3)
    assert math.isclose(result2, 1 / 3)


def test_top5():
@@ -97,5 +99,5 @@ def test_top5():
    topk.update(x, y)
    result = topk.eval()
    result2 = topk(x, y2)
    assert math.isclose(result, 2/3)
    assert math.isclose(result2, 2/3)
    assert math.isclose(result, 2 / 3)
    assert math.isclose(result2, 2 / 3)

@@ -15,6 +15,7 @@
"""setup for pytest"""
import mindspore.context as context


# pylint: disable=unused-argument
def setup_module(module):
    context.set_context(mode=context.GRAPH_MODE)

@@ -17,10 +17,10 @@ resnet50 example
"""
import numpy as np

from mindspore.common.api import _executor
from mindspore import Tensor
from mindspore.ops.operations import TensorAdd
import mindspore.nn as nn  # pylint: disable=C0414
from mindspore import Tensor
from mindspore.common.api import _executor
from mindspore.ops.operations import TensorAdd
from ...train_step_wrap import train_step_with_loss_warp


@@ -15,9 +15,8 @@
""" test bert cell """
import numpy as np
import pytest
from mindspore import Model
from mindspore.nn.optim import AdamWeightDecay
from mindspore.model_zoo.Bert_NEZHA import BertConfig, BertModel, BertNetworkWithLoss, BertTrainOneStepCell

from mindspore.model_zoo.Bert_NEZHA import BertConfig, BertModel
from ....dataset_mock import MindData


@@ -14,26 +14,30 @@
# ============================================================================
""" test bert of graph compile """
import functools

import numpy as np

import mindspore.common.dtype as mstype
import mindspore.nn as nn
import mindspore.ops.composite as C
from mindspore.common.initializer import TruncatedNormal
from mindspore.common.parameter import ParameterTuple
from mindspore.common.tensor import Tensor
from mindspore.model_zoo.Bert_NEZHA import BertPretrainingLoss, GetNextSentenceOutput
from mindspore.model_zoo.Bert_NEZHA.bert_for_pre_training import ClipGradients
from mindspore.model_zoo.Bert_NEZHA.bert_model import BertConfig, \
    EmbeddingLookup, EmbeddingPostprocessor, BertOutput, RelaPosMatrixGenerator, \
    RelaPosEmbeddingsGenerator, SaturateCast, BertAttention, BertSelfAttention, \
    BertEncoderCell, BertTransformer, CreateAttentionMaskFromInputMask, BertModel
from mindspore.nn.layer.basic import Norm
from mindspore.model_zoo.Bert_NEZHA import BertPretrainingLoss, GetNextSentenceOutput
import mindspore.nn as nn
from mindspore.common.initializer import TruncatedNormal
from mindspore.common.parameter import ParameterTuple
from mindspore.nn.optim import AdamWeightDecay, AdamWeightDecayDynamicLR
from mindspore.model_zoo.Bert_NEZHA.bert_for_pre_training import ClipGradients
import mindspore.ops.composite as C
from mindspore.ops import functional as F
from ....ops_common import convert
from ....mindspore_test_framework.mindspore_test import mindspore_test
from ....mindspore_test_framework.pipeline.forward.compile_forward import pipeline_for_compile_forward_ge_graph_for_case_by_case_config
from ....mindspore_test_framework.pipeline.gradient.compile_gradient import pipeline_for_compile_grad_ge_graph_for_case_by_case_config
from ....mindspore_test_framework.pipeline.forward.compile_forward import \
    pipeline_for_compile_forward_ge_graph_for_case_by_case_config
from ....mindspore_test_framework.pipeline.gradient.compile_gradient import \
    pipeline_for_compile_grad_ge_graph_for_case_by_case_config
from ....ops_common import convert


def bert_trans():
    """bert_trans"""
@@ -53,10 +57,12 @@ def bert_trans():
    net.set_train()
    return net


def set_train(net):
    net.set_train()
    return net


class NetForAdam(nn.Cell):
    def __init__(self):
        super(NetForAdam, self).__init__()
@@ -66,8 +72,10 @@ class NetForAdam(nn.Cell):
        x = self.dense(x)
        return x


class TrainStepWrapForAdam(nn.Cell):
    """TrainStepWrapForAdam definition"""

    def __init__(self, network):
        super(TrainStepWrapForAdam, self).__init__()
        self.network = network
@@ -81,8 +89,10 @@ class TrainStepWrapForAdam(nn.Cell):
        grads = self.clip_gradients(grads, 1, 1.0)
        return self.optimizer(grads)


class TrainStepWrapForAdamDynamicLr(nn.Cell):
    """TrainStepWrapForAdamDynamicLr definition"""

    def __init__(self, network):
        super(TrainStepWrapForAdamDynamicLr, self).__init__()
        self.network = network
@@ -95,16 +105,19 @@ class TrainStepWrapForAdamDynamicLr(nn.Cell):
        grads = C.grad_by_list_with_sens(self.network, weights)(x, self.sens)
        return self.optimizer(grads)


class TempC2Wrap(nn.Cell):
    def __init__(self, op, c1=None, c2=None,):
    def __init__(self, op, c1=None, c2=None, ):
        super(TempC2Wrap, self).__init__()
        self.op = op
        self.c1 = c1
        self.c2 = c2

    def construct(self, x1):
        x = self.op(x1, self.c1, self.c2)
        return x


test_case_cell_ops = [
    ('Norm_keepdims', {
        'block': Norm(keep_dims=True),
@@ -373,7 +386,7 @@ test_case_cell_ops = [
        'block': set_train(nn.Dense(in_channels=768,
                                    out_channels=3072,
                                    activation='gelu',
                                    weight_init=TruncatedNormal(0.02),)),
                                    weight_init=TruncatedNormal(0.02), )),
        'desc_inputs': [[3, 768]],
        'desc_bprop': [[3, 3072]]}),
    ('GetNextSentenceOutput', {
@@ -396,9 +409,9 @@ test_case_cell_ops = [
        'block': TempC2Wrap(ClipGradients(), 1, 1.0),
        'desc_inputs': [tuple(convert(shp) for shp in [[1], [1], [1]])],
        'skip': ['backward', 'exec']}),
    ]
]

test_case = functools.reduce(lambda x, y: x+y, [test_case_cell_ops])
test_case = functools.reduce(lambda x, y: x + y, [test_case_cell_ops])
# use -k to select certain testcase
# pytest tests/python/ops/test_ops.py::test_backward -k LayerNorm
@@ -412,10 +425,12 @@ test_check_gradient_case = filter(lambda x: 'skip' not in x[1] or
                                  'backward' not in x[1]['skip'] and 'backward_exec'
                                  not in x[1]['skip'], test_case)


@mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config)
def test_exec():
    return test_exec_case


@mindspore_test(pipeline_for_compile_grad_ge_graph_for_case_by_case_config)
def test_backward_exec():
    return test_backward_exec_case

@@ -15,16 +15,19 @@
"""test lenet"""
import numpy as np

import mindspore.nn as nn
from mindspore.common.api import _executor
from mindspore import Tensor
from mindspore.ops import operations as P
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import _executor
from mindspore.ops import operations as P
from ....train_step_wrap import train_step_with_loss_warp, train_step_with_sens

context.set_context(mode=context.GRAPH_MODE)


class LeNet5(nn.Cell):
    """LeNet5 definition"""

    def __init__(self):
        super(LeNet5, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, 5, pad_mode='valid')

@@ -14,8 +14,8 @@
# ============================================================================
"""test_lenet_core_after_exception"""
import numpy as np

import pytest

import mindspore.nn as nn
from mindspore.common.api import _executor
from mindspore.common.tensor import Tensor
@@ -25,6 +25,7 @@ from ....train_step_wrap import train_step_with_loss_warp

class LeNet5(nn.Cell):
    """LeNet5 definition"""

    def __init__(self):
        super(LeNet5, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5, pad_mode="valid")

@@ -15,23 +15,24 @@
"""test_mix_precision"""
import numpy as np

import mindspore.nn as nn
import mindspore.common.dtype as mstype
import mindspore.nn as nn
from mindspore import Tensor, context
from mindspore.common import ParameterTuple
from mindspore.common.api import _executor
from mindspore.common.parameter import Parameter
from mindspore.common import ParameterTuple
from mindspore import Tensor, context
from mindspore.ops import operations as P
from mindspore.ops import composite as C
from mindspore.nn import TrainOneStepCell, WithLossCell
from mindspore.nn import Momentum
from ....train_step_wrap import train_step_with_loss_warp
from tests.ops_common import convert
from mindspore.nn import TrainOneStepCell, WithLossCell
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from mindspore.train.parallel_utils import ParallelMode
from tests.ops_common import convert
from ....train_step_wrap import train_step_with_loss_warp


class LeNet5(nn.Cell):
    """LeNet5"""

    def __init__(self):
        super(LeNet5, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, 5, pad_mode='valid')
@@ -148,10 +149,13 @@ def test_cast():


"""test grad of PReLU, which cause AddN(generated by grad) fail"""


class IRBlockZ(nn.Cell):
    def __init__(self, inplanes, planes):
        super(IRBlockZ, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=1, pad_mode="same", group=1, has_bias=False, dilation=1)
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=1, pad_mode="same", group=1, has_bias=False,
                               dilation=1)
        self.act_layer = nn.PReLU(planes)

    def construct(self, x):

@@ -14,6 +14,7 @@
# ============================================================================
"""test_dataset_utils"""
import pytest

import mindspore as ms
from mindspore.train._utils import _construct_tensor_list

@@ -15,6 +15,7 @@
"""setup for pytest"""
import mindspore.context as context


# pylint: disable=unused-argument
def setup_module(module):
    context.set_context(mode=context.GRAPH_MODE)

@@ -15,16 +15,18 @@
""" test adam """
import numpy as np
import pytest

import mindspore.nn as nn
from mindspore.common.api import _executor
from mindspore import Tensor, Parameter
from mindspore.common.api import _executor
from mindspore.nn import TrainOneStepCell, WithLossCell
from mindspore.ops import operations as P
from mindspore.nn.optim import AdamWeightDecay, AdamWeightDecayDynamicLR
from mindspore.ops import operations as P


class Net(nn.Cell):
    """ Net definition """

    def __init__(self):
        super(Net, self).__init__()
        self.weight = Parameter(Tensor(np.ones([64, 10]).astype(np.float32)), name="weight")

@@ -15,6 +15,7 @@
""" test FTRL """

import numpy as np

import mindspore.nn as nn
from mindspore import Tensor, Parameter
from mindspore.common.api import _executor

@@ -47,4 +48,3 @@ def test_ftrl():
    net_with_loss = WithLossCell(net, loss)
    train_network = TrainOneStepCell(net_with_loss, optimizer)
    _executor.compile(train_network, inputs, label)

@@ -14,16 +14,18 @@
# ============================================================================
""" test lamb """
import numpy as np

import mindspore.nn as nn
from mindspore.common.api import _executor
from mindspore import Tensor, Parameter
from mindspore.common.api import _executor
from mindspore.nn import TrainOneStepCell, WithLossCell
from mindspore.ops import operations as P
from mindspore.nn.optim import Lamb
from mindspore.ops import operations as P


class Net(nn.Cell):
    """ Net definition """

    def __init__(self):
        super(Net, self).__init__()
        self.weight = Parameter(Tensor(np.ones([64, 10]).astype(np.float32)), name="weight")

@@ -38,6 +40,7 @@ class Net(nn.Cell):

class NetWithoutWeight(nn.Cell):
    """ NetWithoutWeight definition """

    def __init__(self):
        super(NetWithoutWeight, self).__init__()
        self.matmul = P.MatMul()

@@ -12,15 +12,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from collections import Counter

import numpy as np

import mindspore.nn as nn
from mindspore import Tensor, Parameter
from mindspore.common import dtype as mstype
from mindspore.common.api import _executor
from mindspore.nn import TrainOneStepCell, WithLossCell
from mindspore.nn.optim import LARS, Momentum
from mindspore.ops import operations as P
from mindspore.common import dtype as mstype
from collections import Counter


def multisteplr(total_steps, milestone, base_lr=0.9, gamma=0.1, dtype=mstype.float32):

@@ -14,19 +14,17 @@
# ============================================================================
""" test_lr_schedule """
import numpy as np
from mindspore.nn import Cell
from mindspore.ops.operations import BiasAdd, MatMul

from mindspore import Parameter, ParameterTuple, Tensor
from mindspore.nn import WithLossCell
from mindspore.nn.optim import Momentum
from mindspore.nn import SoftmaxCrossEntropyWithLogits
from mindspore.ops.composite import grad_by_list
from mindspore.ops import functional as F
from mindspore.nn import Cell
from mindspore.nn.optim import Optimizer
from mindspore.ops.composite import grad_by_list
from mindspore.ops.operations import BiasAdd, MatMul


class Net(Cell):
    """ Net definition """

    def __init__(self):
        super(Net, self).__init__()
        self.weight = Parameter(Tensor(np.ones([64, 10])), name="weight")

@@ -41,6 +39,7 @@ class Net(Cell):

class _TrainOneStepCell(Cell):
    """ _TrainOneStepCell definition """

    def __init__(self, network, optimizer):
        """
        Append an optimizer to the training network after that the construct

@@ -67,4 +66,3 @@ class _TrainOneStepCell(Cell):
        if self.lr_schedule:
            self.schedule.update_lr(*args)
        return self.optimizer(grads)

@@ -14,16 +14,18 @@
# ============================================================================
""" test momentum """
import numpy as np

import mindspore.nn as nn
from mindspore.common.api import _executor
from mindspore import Tensor, Parameter
from mindspore.common.api import _executor
from mindspore.nn import TrainOneStepCell, WithLossCell
from mindspore.ops import operations as P
from mindspore.nn.optim import Momentum
from mindspore.ops import operations as P


class Net(nn.Cell):
    """ Net definition """

    def __init__(self):
        super(Net, self).__init__()
        self.weight = Parameter(Tensor(np.ones([64, 10]).astype(np.float32)), name="weight")

@@ -15,9 +15,10 @@
""" test optimizer """
import numpy as np
import pytest

from mindspore import Tensor
from mindspore.nn.optim import Optimizer, SGD, Adam, AdamWeightDecay, AdamWeightDecayDynamicLR
from mindspore.common.parameter import Parameter
from mindspore.nn.optim import Optimizer, SGD, Adam, AdamWeightDecay, AdamWeightDecayDynamicLR


class IterableObjc:

@@ -30,6 +31,7 @@ class IterableObjc:

params = IterableObjc()


class TestOptimizer():
    def test_init(self):
        Optimizer(0.5, params)

@@ -44,6 +46,7 @@ class TestOptimizer():

class TestAdam():
    """ TestAdam definition """

    def test_init(self):
        Adam(params, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-8, use_locking=False,
             use_nesterov=False, weight_decay=0.0, loss_scale=1.0)

@@ -58,6 +61,7 @@ class TestAdam():

class TestSGD():
    """ TestSGD definition """

    def test_init(self):
        with pytest.raises(ValueError):
            SGD(params, learning_rate=0.1, momentum=-0.1, dampening=0, weight_decay=0, nesterov=False)

@@ -68,6 +72,7 @@ class TestSGD():

class TestNullParam():
    """ TestNullParam definition """

    def test_optim_init(self):
        with pytest.raises(ValueError):
            Optimizer(0.1, None)

@@ -84,8 +89,10 @@ class TestNullParam():
        with pytest.raises(ValueError):
            SGD(None)


class TestUnsupportParam():
    """ TestUnsupportParam definition """

    def test_optim_init(self):
        with pytest.raises(ValueError):
            Optimizer(0.1, (1, 2, 3))

@@ -15,16 +15,18 @@
""" test adam """
import numpy as np
import pytest

import mindspore.nn as nn
from mindspore.common.api import _executor
from mindspore import Tensor, Parameter
from mindspore.common.api import _executor
from mindspore.nn import TrainOneStepCell, WithLossCell
from mindspore.ops import operations as P
from mindspore.nn.optim import RMSProp
from mindspore.ops import operations as P


class Net(nn.Cell):
    """ Net definition """

    def __init__(self):
        super(Net, self).__init__()
        self.weight = Parameter(Tensor(np.ones([64, 10]).astype(np.float32)), name="weight")

@@ -59,4 +61,3 @@ def test_rmsprop_e():

    with pytest.raises(TypeError):
        RMSProp(net.get_parameters(), momentum=1, learning_rate=0.1)

@@ -14,11 +14,13 @@
# ============================================================================
""" test Activations """
import numpy as np

import mindspore.nn as nn
from mindspore.common.api import _executor
from mindspore import Tensor
from mindspore.common.api import _executor
from ..ut_filter import non_graph_engine


class SoftmaxNet(nn.Cell):
    def __init__(self, dim):
        super(SoftmaxNet, self).__init__()

@@ -17,8 +17,8 @@ import numpy as np
import pytest

import mindspore.nn as nn
from mindspore.common.api import _executor
from mindspore import Tensor, Parameter
from mindspore.common.api import _executor


def test_bn_pars_valid1():

@@ -62,11 +62,12 @@ class GroupNet(nn.Cell):
    def __init__(self):
        super(GroupNet, self).__init__()
        self.group_bn = nn.GroupNorm()

    def construct(self, x):
        return self.group_bn(x)


def test_compile_groupnorm():
    net = nn.GroupNorm(16, 64)
    input_data = Tensor(np.random.rand(1,64,256,256).astype(np.float32))
    input_data = Tensor(np.random.rand(1, 64, 256, 256).astype(np.float32))
    _executor.compile(net, input_data)

@@ -15,11 +15,10 @@
""" test cell """
import numpy as np
import pytest
import mindspore.context as context

import mindspore.nn as nn
from mindspore import Tensor, Parameter
from mindspore.common.api import _executor
from ..ut_filter import non_graph_engine


class ModA(nn.Cell):

@@ -90,7 +89,7 @@ class ConvNet(nn.Cell):
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same")
        self.flatten = nn.Flatten()
        self.fc = nn.Dense(
            int(ConvNet.image_h*ConvNet.image_w*ConvNet.output_ch/(4*4)),
            int(ConvNet.image_h * ConvNet.image_w * ConvNet.output_ch / (4 * 4)),
            num_classes)

    def construct(self, x):

@@ -12,14 +12,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
import numpy as np
import pytest

import mindspore.nn as nn
from mindspore import Tensor, Parameter
from mindspore.common import dtype as mstype
from mindspore.common.api import _executor
from mindspore.nn import TrainOneStepCell, WithLossCell, ParameterUpdate
from mindspore.nn.optim import Momentum
from mindspore.common import dtype as mstype
from mindspore.ops import operations as P

@@ -14,10 +14,10 @@
# ============================================================================
""" test checkparameter """
import pytest

from mindspore._checkparam import check_int, check_int_positive, \
    check_input_format, check_bool, twice


kernel_size = 5
kernel_size1 = twice(kernel_size)
assert kernel_size1 == (5, 5)

@@ -14,11 +14,12 @@
# ============================================================================
""" test clip_by_norm """
import numpy as np
import pytest

import mindspore.nn as nn
from mindspore import Tensor
from ..ut_filter import non_graph_engine


@non_graph_engine
def test_clip_by_norm():
    clip_by_norm = nn.ClipByNorm()

@@ -14,12 +14,13 @@
# ============================================================================
""" test container """
from collections import OrderedDict

import numpy as np
import pytest

import mindspore.nn as nn
from mindspore import Tensor


weight = Tensor(np.ones([2, 2]))
conv2 = nn.Conv2d(3, 64, (3, 3), stride=2, padding=0)

@@ -31,6 +32,7 @@ avg_pool = nn.AvgPool2d(kernel_size, stride)

class TestSequentialCell():
    """ TestSequentialCell """

    def test_SequentialCell_init(self):
        m = nn.SequentialCell()
        assert type(m).__name__ == 'SequentialCell'

@@ -86,6 +88,7 @@ class TestSequentialCell():

class TestCellList():
    """ TestCellList """

    def test_init1(self):
        cell_list = nn.CellList([conv2, avg_pool])
        assert len(cell_list) == 2

@@ -118,7 +121,6 @@ class TestCellList():
            cell = item
        assert type(cell).__name__ == 'AvgPool2d'


    def test_add(self):
        cell_list = nn.CellList([conv2, avg_pool])
        cell_list += [conv2]

@@ -15,10 +15,11 @@
""" test nn.Dense """
import numpy as np
import pytest
import mindspore.nn as nn
from mindspore.common.api import _executor

import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import _executor
from ..ut_filter import non_graph_engine


@@ -68,6 +69,7 @@ def test_dense_channels_error():

class Net(nn.Cell):
    """ Net definition """

    def __init__(self,
                 input_channels,
                 output_channels,

@@ -15,12 +15,14 @@
""" Test Dropout """
import numpy as np
import pytest

import mindspore.nn as nn
from mindspore import Tensor
from mindspore import context

context.set_context(device_target="Ascend")


def test_check_dropout_3():
    Tensor(np.ones([20, 16, 50]).astype(np.int32))
    with pytest.raises(ValueError):

@@ -14,7 +14,7 @@
# ============================================================================
""" Test Dynamic Learning Rate """
import pytest
import mindspore

from mindspore.nn import dynamic_lr as dr

milestone = [10, 20, 30]

@@ -29,6 +29,7 @@ min_lr = 0.01
max_lr = 0.1
power = 0.5


class TestInputs:
    def test_milestone1(self):
        milestone1 = 1

@@ -226,6 +227,7 @@ def test_cosine_decay():
    lr = dr.cosine_decay_lr(min_lr, max_lr, total_step, step_per_epoch, decay_epoch)
    assert len(lr) == total_step


def test_polynomial_decay():
    lr1 = dr.polynomial_decay_lr(learning_rate, end_learning_rate, total_step, step_per_epoch, decay_epoch, power)
    assert len(lr1) == total_step

@@ -14,11 +14,10 @@
# ============================================================================
""" test_embedding """
import numpy as np
import pytest

from mindspore.model_zoo.Bert_NEZHA import EmbeddingLookup, EmbeddingPostprocessor
from mindspore import Tensor
from mindspore import dtype as mstype
from mindspore.model_zoo.Bert_NEZHA import EmbeddingLookup, EmbeddingPostprocessor
from ..ut_filter import non_graph_engine


@@ -39,6 +38,7 @@ def test_check_embedding_lookup_2():
                        use_one_hot_embeddings=True)
    m(Tensor(np.ones([128]), mstype.int32))


@non_graph_engine
def test_check_embedding_lookup_3():
    m = EmbeddingLookup(vocab_size=32000,

@@ -48,6 +48,7 @@ def test_check_embedding_lookup_3():
                        initializer_range=0.01)
    m(Tensor(np.ones([128]), mstype.int32))


@non_graph_engine
def test_embedding_post_1():
    m = EmbeddingPostprocessor(embedding_size=768,

@@ -16,9 +16,10 @@
test flatten api
"""
import numpy as np

import mindspore.nn as nn
from mindspore.common.api import _executor
from mindspore import Tensor
from mindspore.common.api import _executor


class Net(nn.Cell):

@@ -15,14 +15,17 @@
""" test image gradients """
import numpy as np
import pytest
import mindspore.nn as nn
import mindspore.context as context

import mindspore.common.dtype as mstype
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import _executor
from mindspore.common.api import ms_function

context.set_context(device_target="Ascend")


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()

@@ -32,9 +35,10 @@ class Net(nn.Cell):
    def construct(self, x):
        return self.image_gradients(x)


def test_compile():
    # input shape 1 x 1 x 2 x 2
    image = Tensor(np.array([[[[1,2],[3,4]]]]), dtype=mstype.int32)
    image = Tensor(np.array([[[[1, 2], [3, 4]]]]), dtype=mstype.int32)
    net = Net()
    _executor.compile(net, image)

@@ -42,13 +46,14 @@ def test_compile():
def test_compile_multi_channel():
    # input shape 4 x 2 x 2 x 2
    dtype = mstype.int32
    image = Tensor(np.array([[[[1,2],[3,4]], [[5,6],[7,8]]],
                             [[[3,5],[7,9]], [[11,13],[15,17]]],
                             [[[5,10],[15,20]], [[25,30],[35,40]]],
                             [[[10,20],[30,40]], [[50,60],[70,80]]]]), dtype=dtype)
    image = Tensor(np.array([[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
                             [[[3, 5], [7, 9]], [[11, 13], [15, 17]]],
                             [[[5, 10], [15, 20]], [[25, 30], [35, 40]]],
                             [[[10, 20], [30, 40]], [[50, 60], [70, 80]]]]), dtype=dtype)
    net = Net()
    _executor.compile(net, image)


def test_invalid_5d_input():
    dtype = mstype.float32
    image = Tensor(np.random.random([4, 1, 16, 16, 1]), dtype=dtype)

@@ -14,12 +14,12 @@
# ============================================================================
""" test loss """
import numpy as np
import pytest

import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import _executor
from ..ut_filter import non_graph_engine
import mindspore


def test_L1Loss():
    loss = nn.L1Loss()

@@ -60,5 +60,5 @@ def test_SoftmaxCrossEntropyExpand():
    loss = nn.SoftmaxCrossEntropyExpand()

    logits = Tensor(np.random.randint(0, 9, [100, 10]).astype(np.float32))
    labels = Tensor(np.random.randint(0, 9, [10,]).astype(np.float32))
    labels = Tensor(np.random.randint(0, 9, [10, ]).astype(np.float32))
    _executor.compile(loss, logits, labels)

@@ -14,6 +14,7 @@
# ============================================================================
""" test lstm """
import pytest

import mindspore.context as context
from mindspore import nn
from ..ut_filter import run_on_gpu

@@ -22,6 +23,7 @@ from ....ops_common import convert

class LstmTestNet(nn.Cell):
    """ LstmTestNet definition """

    def __init__(self, input_size, hidden_size, num_layers, has_bias, batch_first, bidirectional):
        super(LstmTestNet, self).__init__()
        self.lstm = nn.LSTM(input_size=input_size,

@@ -32,7 +34,6 @@ class LstmTestNet(nn.Cell):
                            bidirectional=bidirectional,
                            dropout=0.0)


    def construct(self, inp, h0, c0):
        return self.lstm(inp, (h0, c0))

@@ -86,6 +87,7 @@ def test_compile(args):
    out = net(*inputs)
    print(f"out: {out}")


@run_on_gpu
@pytest.mark.parametrize('args', test_case_cell_ops, ids=lambda x: x[0])
def test_execute(args):

@@ -14,10 +14,11 @@
# ============================================================================
""" test nn embedding """
import numpy as np

from mindspore import Tensor
from mindspore.common import dtype
from mindspore.nn import Embedding
from mindspore.common.api import _executor
from mindspore.nn import Embedding
from ..ut_filter import non_graph_engine


@@ -41,6 +42,7 @@ def test_check_embedding_3():
    input_data = Tensor(np.ones([8, 128]), dtype.int32)
    _executor.compile(net, input_data)


@non_graph_engine
def test_print_embedding():
    net = Embedding(20000, 768, False)

@@ -13,13 +13,12 @@
# limitations under the License.
# ============================================================================
""" test nn pad """
from mindspore import Tensor
from mindspore.ops import operations as P
import mindspore.nn as nn
from mindspore.ops.composite import GradOperation
from mindspore.common.api import ms_function
import numpy as np
import mindspore.context as context

import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.ops.composite import GradOperation


class Net(nn.Cell):

@@ -14,6 +14,7 @@
# ============================================================================
""" test norm """
import numpy as np

import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import _executor

@@ -15,10 +15,11 @@
""" test parameter """
import numpy as np
import pytest

from mindspore import Tensor, Parameter, ParameterTuple
from mindspore._checkparam import _check_str_by_regular
from mindspore.common import dtype as mstype
from mindspore.common.initializer import initializer
from mindspore._checkparam import _check_str_by_regular


def test_parameter_init():

@@ -30,7 +31,7 @@ def test_parameter_init():
def test_parameter_tuple_illegal():
    p1 = Parameter(initializer(0, [1], mstype.int32), name="global_step1")
    p2 = Parameter(initializer(0, [1], mstype.int32), name="global_step2")
    plist = [p1,p2]
    plist = [p1, p2]
    plist2 = [p1, "str"]
    ptuple = (p1, p2)
    ptuple_str = ("2", "1")

@@ -100,21 +101,21 @@ def test_parameter_init_illegal():
    with pytest.raises(TypeError):
        Parameter(tensor, name=data_str, requires_grad=data_tuple)

        Parameter(tensor, name=data_str, requires_grad=data_bool,layerwise_parallel=data_bool)
        Parameter(tensor, name=data_str, requires_grad=data_bool, layerwise_parallel=data_bool)
    with pytest.raises(TypeError):
        Parameter(tensor, name=data_str, requires_grad=data_bool,layerwise_parallel=dat)
        Parameter(tensor, name=data_str, requires_grad=data_bool, layerwise_parallel=dat)
    with pytest.raises(TypeError):
        Parameter(tensor, name=data_str, requires_grad=data_bool,layerwise_parallel=tensor)
        Parameter(tensor, name=data_str, requires_grad=data_bool, layerwise_parallel=tensor)
    with pytest.raises(TypeError):
        Parameter(tensor, name=data_str, requires_grad=data_bool,layerwise_parallel=data_none)
        Parameter(tensor, name=data_str, requires_grad=data_bool, layerwise_parallel=data_none)
    with pytest.raises(TypeError):
        Parameter(tensor, name=data_str, requires_grad=data_bool,layerwise_parallel=data_str)
        Parameter(tensor, name=data_str, requires_grad=data_bool, layerwise_parallel=data_str)
    with pytest.raises(TypeError):
        Parameter(tensor, name=data_str, requires_grad=data_bool,layerwise_parallel=data_int)
        Parameter(tensor, name=data_str, requires_grad=data_bool, layerwise_parallel=data_int)
    with pytest.raises(TypeError):
        Parameter(tensor, name=data_str, requires_grad=data_bool,layerwise_parallel=data_list)
        Parameter(tensor, name=data_str, requires_grad=data_bool, layerwise_parallel=data_list)
    with pytest.raises(TypeError):
        Parameter(tensor, name=data_str, requires_grad=data_bool,layerwise_parallel=data_tuple)
        Parameter(tensor, name=data_str, requires_grad=data_bool, layerwise_parallel=data_tuple)


def test_check_str_by_regular():

@@ -16,9 +16,10 @@
test pooling api
"""
import numpy as np

import mindspore.nn as nn
from mindspore.common.api import _executor
from mindspore import Tensor
from mindspore.common.api import _executor


class AvgNet(nn.Cell):

@@ -40,6 +41,7 @@ def test_compile_avg():

class MaxNet(nn.Cell):
    """ MaxNet definition """

    def __init__(self,
                 kernel_size,
                 stride=None,

@@ -68,6 +70,7 @@ class Avg1dNet(nn.Cell):
    def construct(self, x):
        return self.avg1d(x)


def test_avg1d():
    net = Avg1dNet(6, 1)
    input = Tensor(np.random.randint(0, 255, [1, 3, 6]).astype(np.float32))

@@ -17,11 +17,11 @@ test psnr
"""
import numpy as np
import pytest

import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common import dtype as mstype
from mindspore.common.api import _executor
from mindspore import Tensor




class PSNRNet(nn.Cell):

@@ -40,6 +40,7 @@ def test_compile_psnr():
    img2 = Tensor(np.random.random((8, 3, 16, 16)))
    _executor.compile(net, img1, img2)


def test_compile_psnr_grayscale():
    max_val = 255
    net = PSNRNet(max_val)

@@ -47,21 +48,25 @@ def test_compile_psnr_grayscale():
    img2 = Tensor(np.random.randint(0, 256, (8, 1, 16, 16), np.uint8))
    _executor.compile(net, img1, img2)


def test_psnr_max_val_negative():
    max_val = -1
    with pytest.raises(ValueError):
        net = PSNRNet(max_val)


def test_psnr_max_val_bool():
    max_val = True
    with pytest.raises(TypeError):
        net = PSNRNet(max_val)


def test_psnr_max_val_zero():
    max_val = 0
    with pytest.raises(ValueError):
        net = PSNRNet(max_val)


def test_psnr_different_shape():
    shape_1 = (8, 3, 16, 16)
    shape_2 = (8, 3, 8, 8)

@@ -71,6 +76,7 @@ def test_psnr_different_shape():
    with pytest.raises(ValueError):
        _executor.compile(net, img1, img2)


def test_psnr_different_dtype():
    dtype_1 = mstype.float32
    dtype_2 = mstype.float16

@@ -80,6 +86,7 @@ def test_psnr_different_dtype():
    with pytest.raises(TypeError):
        _executor.compile(net, img1, img2)


def test_psnr_invalid_5d_input():
    shape_1 = (8, 3, 16, 16)
    shape_2 = (8, 3, 8, 8)

@@ -17,10 +17,11 @@ test ssim
"""
import numpy as np
import pytest
import mindspore.nn as nn

import mindspore.common.dtype as mstype
from mindspore.common.api import _executor
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import _executor


class SSIMNet(nn.Cell):

@@ -38,44 +39,53 @@ def test_compile():
    img2 = Tensor(np.random.random((8, 3, 16, 16)))
    _executor.compile(net, img1, img2)


def test_compile_grayscale():
    max_val = 255
    net = SSIMNet(max_val = max_val)
    net = SSIMNet(max_val=max_val)
    img1 = Tensor(np.random.randint(0, 256, (8, 1, 16, 16), np.uint8))
    img2 = Tensor(np.random.randint(0, 256, (8, 1, 16, 16), np.uint8))
    _executor.compile(net, img1, img2)


def test_ssim_max_val_negative():
    max_val = -1
    with pytest.raises(ValueError):
        net = SSIMNet(max_val)


def test_ssim_max_val_bool():
    max_val = True
    with pytest.raises(TypeError):
        net = SSIMNet(max_val)


def test_ssim_max_val_zero():
    max_val = 0
    with pytest.raises(ValueError):
        net = SSIMNet(max_val)


def test_ssim_filter_size_float():
    with pytest.raises(TypeError):
        net = SSIMNet(filter_size=1.1)


def test_ssim_filter_size_zero():
    with pytest.raises(ValueError):
        net = SSIMNet(filter_size=0)


def test_ssim_filter_sigma_zero():
    with pytest.raises(ValueError):
        net = SSIMNet(filter_sigma=0.0)


def test_ssim_filter_sigma_negative():
    with pytest.raises(ValueError):
        net = SSIMNet(filter_sigma=-0.1)


def test_ssim_k1_k2_wrong_value():
    with pytest.raises(ValueError):
        net = SSIMNet(k1=1.1)

@@ -95,6 +105,7 @@ def test_ssim_k1_k2_wrong_value():
    with pytest.raises(ValueError):
        net = SSIMNet(k2=-1.0)


def test_ssim_different_shape():
    shape_1 = (8, 3, 16, 16)
    shape_2 = (8, 3, 8, 8)

@@ -104,6 +115,7 @@ def test_ssim_different_shape():
    with pytest.raises(ValueError):
        _executor.compile(net, img1, img2)


def test_ssim_different_dtype():
    dtype_1 = mstype.float32
    dtype_2 = mstype.float16

@@ -113,6 +125,7 @@ def test_ssim_different_dtype():
    with pytest.raises(TypeError):
        _executor.compile(net, img1, img2)


def test_ssim_invalid_5d_input():
    shape_1 = (8, 3, 16, 16)
    shape_2 = (8, 3, 8, 8)

@@ -17,9 +17,9 @@ test_structure_output
"""
import numpy as np

import mindspore.ops.operations as P
from mindspore import Tensor, context
from mindspore.nn import Cell
import mindspore.ops.operations as P
from mindspore.ops.functional import depend

context.set_context(mode=context.GRAPH_MODE)

@@ -13,4 +13,3 @@
# limitations under the License.
# ============================================================================
""" init vm impl """
from ....vm_impl import vm

@@ -13,27 +13,26 @@
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.nn as nn
import pytest
from mindspore import context

import mindspore.nn as nn
from mindspore import Tensor, Parameter
from mindspore.nn.wrap.cell_wrapper import WithLossCell
from mindspore.train.loss_scale_manager import FixedLossScaleManager, DynamicLossScaleManager
from mindspore.nn.wrap.loss_scale import TrainOneStepWithLossScaleCell
from mindspore.ops import operations as P
from mindspore.nn.optim import Momentum
from mindspore.ops import functional as F
from mindspore import context
from mindspore.common import dtype as mstype
from mindspore.train import Model
from ....dataset_mock import MindData
from mindspore.nn.optim import Lamb
from mindspore.ops._utils import _get_broadcast_shape
from mindspore.ops.primitive import Primitive, PrimitiveWithInfer, prim_attr_register
from mindspore.nn.optim import Momentum
from mindspore.nn.wrap.cell_wrapper import WithLossCell
from mindspore.nn.wrap.loss_scale import TrainOneStepWithLossScaleCell
from mindspore.ops import functional as F
from mindspore.ops import operations as P
from mindspore.ops._grad.grad_base import bprop_getters
from mindspore.ops._grad.grad_math_ops import binop_grad_common
from mindspore.ops._utils import _get_broadcast_shape
from mindspore.ops.primitive import PrimitiveWithInfer, prim_attr_register
from mindspore.train.loss_scale_manager import DynamicLossScaleManager

context.set_context(mode=context.GRAPH_MODE)


class MockNeg(PrimitiveWithInfer):
    @prim_attr_register
    def __init__(self):

@@ -47,6 +46,7 @@ class MockNeg(PrimitiveWithInfer):
            raise TypeError("InferError")
        return input_x


class MockSub(PrimitiveWithInfer):
    @prim_attr_register
    def __init__(self):

@@ -59,6 +59,7 @@ class MockSub(PrimitiveWithInfer):
    def infer_dtype(self, x_dtype, y_dtype):
        return x_dtype


@bprop_getters.register(MockSub)
def get_bprop_mock_sub(self):
    """Grad definition for `MockSub` operation."""

@@ -66,8 +67,10 @@ def get_bprop_mock_sub(self):

    def bprop(x, y, out, dout):
        return binop_grad_common(x, y, dout, neg_func(dout))

    return bprop


class Net(nn.Cell):
    def __init__(self, in_features, out_features):
        super(Net, self).__init__()

@@ -80,6 +83,7 @@ class Net(nn.Cell):
        output = self.add(self.matmul(input, self.weight), self.bias)
        return output


class NetFP16(nn.Cell):
    def __init__(self, in_features, out_features):
        super(NetFP16, self).__init__()

@@ -90,16 +94,19 @@ class NetFP16(nn.Cell):
        self.cast = P.Cast()

    def construct(self, input):
        output = self.cast(self.add(self.matmul(self.cast(input, mstype.float16), self.cast(self.weight, mstype.float16)),
        output = self.cast(
            self.add(self.matmul(self.cast(input, mstype.float16), self.cast(self.weight, mstype.float16)),
                     self.cast(self.bias, mstype.float16)), mstype.float32)
        return output


def get_axis(x):
    shape = F.shape(x)
    length = F.tuple_len(shape)
    perm = F.make_range(0, length)
    return perm


class MSELoss(nn.Cell):
    def __init__(self):
        super(MSELoss, self).__init__()

@@ -107,17 +114,21 @@ class MSELoss(nn.Cell):
        self.square = P.Square()
        self.reduce_mean = P.ReduceMean()
        self.sub = MockSub()

    def construct(self, data, label):
        diff = self.sub(data, label)
        return self.reduce_mean(self.square(diff), get_axis(diff))


class NegCell(nn.Cell):
    def __init__(self):
        super(NegCell, self).__init__()
        self.neg = MockNeg()

    def construct(self, x):
        return self.neg(x)


class Net3(nn.Cell):
    def __init__(self):
        super().__init__()

@@ -146,6 +157,7 @@ class SequenceNet(nn.Cell):
        x = self.seq(x) + bbb
        return x


def test_sequential_resolve_error():
    input_np = np.random.randn(2, 3, 4, 5).astype(np.float32)
    input_me = Tensor(input_np)

@@ -153,6 +165,7 @@ def test_sequential_resolve_error():
    with pytest.raises(RuntimeError) as e:
        net(input_me)


def test_compile_grad_error():
    inputs = Tensor(np.ones([16, 16]).astype(np.float32))
    label = Tensor(np.zeros([16, 16]).astype(np.float32))

@@ -164,9 +177,8 @@ def test_compile_grad_error():
    net_with_loss = WithLossCell(net, loss)
    scale_manager = DynamicLossScaleManager()
    update_cell = scale_manager.get_update_cell()
    train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell = update_cell)
    train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell=update_cell)
    train_network.set_train()
    with pytest.raises(TypeError) as e:
        train_network(inputs, label)
        print (e)

    print(e)

@@ -13,19 +13,20 @@
# limitations under the License.
# ============================================================================
import numpy as np

import mindspore.nn as nn
from mindspore import context
from mindspore import Tensor, Parameter
from mindspore import context
from mindspore.common import dtype as mstype
from mindspore.nn.optim import Lamb
from mindspore.nn.optim import Momentum, Adam
from mindspore.nn.wrap.cell_wrapper import WithLossCell
from mindspore.nn.wrap.loss_scale import TrainOneStepWithLossScaleCell
from mindspore.train.loss_scale_manager import FixedLossScaleManager, DynamicLossScaleManager
from mindspore.ops import operations as P
from mindspore.nn.optim import Momentum, Adam
from mindspore.ops import functional as F
from mindspore.common import dtype as mstype
from mindspore.ops import operations as P
from mindspore.train import Model
from mindspore.train.loss_scale_manager import FixedLossScaleManager, DynamicLossScaleManager
from ....dataset_mock import MindData
from mindspore.nn.optim import Lamb

context.set_context(mode=context.GRAPH_MODE)

@@ -36,6 +37,7 @@ class MindDataSet(MindData):
                         np_types=dataset_types,
                         output_shapes=dataset_shapes,
                         input_indexs=(0, 1))

    def __next__(self):
        if self._size < self._iter_num:
            raise StopIteration

@@ -45,6 +47,7 @@ class MindDataSet(MindData):
            next.append(Tensor(np.ones(shape).astype(type)))
        return tuple(next)


class Net(nn.Cell):
    def __init__(self, in_features, out_features):
        super(Net, self).__init__()

@@ -57,6 +60,7 @@ class Net(nn.Cell):
        output = self.add(self.matmul(input, self.weight), self.bias)
        return output


class NetFP16(nn.Cell):
    def __init__(self, in_features, out_features):
        super(NetFP16, self).__init__()

@@ -67,10 +71,12 @@ class NetFP16(nn.Cell):
        self.cast = P.Cast()

    def construct(self, input):
        output = self.cast(self.add(self.matmul(self.cast(input, mstype.float16), self.cast(self.weight, mstype.float16)),
        output = self.cast(
            self.add(self.matmul(self.cast(input, mstype.float16), self.cast(self.weight, mstype.float16)),
                     self.cast(self.bias, mstype.float16)), mstype.float32)
        return output


def get_axis(x):
    shape_op = P.Shape()
    shape = shape_op(x)

@@ -78,6 +84,7 @@ def get_axis(x):
    perm = F.make_range(0, length)
    return perm


class MSELoss(nn.Cell):
    def __init__(self):
        super(MSELoss, self).__init__()

@@ -89,6 +96,7 @@ class MSELoss(nn.Cell):
        diff = data - label
        return self.reduce_mean(self.square(diff), get_axis(diff))


def test_momentum_compile():
    inputs = Tensor(np.ones([15, 1]).astype(np.float32))
    label = Tensor(np.zeros([15, 1]).astype(np.float32))

@@ -104,6 +112,7 @@ def test_momentum_compile():
    output = train_network(inputs, label, scaling_sens)
    print("the result is ", output)


def test_compile_fp16_not_overflow():
    inputs = Tensor(np.ones([16, 16]).astype(np.float32))
    label = Tensor(np.zeros([16, 16]).astype(np.float32))

@@ -119,6 +128,7 @@ def test_compile_fp16_not_overflow():
    output = train_network(inputs, label, scaling_sens)
    print("the result is ", output)


def test_compile_fp16_lr_overflow():
    inputs = Tensor(np.ones([16, 16]).astype(np.float32))
    label = Tensor(np.zeros([16, 16]).astype(np.float32))

@@ -134,6 +144,7 @@ def test_compile_fp16_lr_overflow():
    output = train_network(inputs, label, scaling_sens)
    print("the result is ", output)


def test_compile_fp16_overflow():
    inputs = Tensor(np.ones([16, 16]).astype(np.float32))
    label = Tensor(np.zeros([16, 16]).astype(np.float32))

@@ -148,6 +159,7 @@ def test_compile_fp16_overflow():
    output = train_network(inputs, label, scaling_sens)
    print("the result is ", output)


def test_compile_fp16_lr_overflow_with_lossscale_update():
    inputs = Tensor(np.ones([16, 16]).astype(np.float32))
    label = Tensor(np.zeros([16, 16]).astype(np.float32))

@@ -165,6 +177,7 @@ def test_compile_fp16_lr_overflow_with_lossscale_update():
    output = train_network(inputs, label, scaling_sens)
    print("the result is ", output)


def test_compile_f16_model_train():
    dataset_types = (np.float32, np.float32)
    dataset_shapes = ((16, 16), (16, 16))

@@ -205,11 +218,12 @@ def test_compile_fp16_lr_overflow_fixed_feed():
    net_with_loss = WithLossCell(net, loss)
    scale_manager = FixedLossScaleManager()
    update_cell = scale_manager.get_update_cell()
    train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell = update_cell)
    train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell=update_cell)
    train_network.set_train()
    output = train_network(inputs, label, scaling_sens)
    print("the result is ", output)


def test_compile_fp16_lr_overflow_dynamic_feed():
    inputs = Tensor(np.ones([16, 16]).astype(np.float32))
    label = Tensor(np.zeros([16, 16]).astype(np.float32))

@@ -222,11 +236,12 @@ def test_compile_fp16_lr_overflow_dynamic_feed():
    net_with_loss = WithLossCell(net, loss)
    scale_manager = DynamicLossScaleManager()
    update_cell = scale_manager.get_update_cell()
    train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell = update_cell)
    train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell=update_cell)
    train_network.set_train()
    output = train_network(inputs, label, scaling_sens)
    print("the result is ", output)


def test_compile_fp16_lr_overflow_fixed_graph():
    inputs = Tensor(np.ones([16, 16]).astype(np.float32))
    label = Tensor(np.zeros([16, 16]).astype(np.float32))

@@ -238,11 +253,12 @@ def test_compile_fp16_lr_overflow_fixed_graph():
    net_with_loss = WithLossCell(net, loss)
    scale_manager = FixedLossScaleManager(drop_overflow_update=True)
    update_cell = scale_manager.get_update_cell()
    train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell = update_cell)
    train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell=update_cell)
    train_network.set_train()
    output = train_network(inputs, label)
    print("the result is ", output)


def test_compile_fp16_lr_overflow_dynamic_graph():
    inputs = Tensor(np.ones([16, 16]).astype(np.float32))
    label = Tensor(np.zeros([16, 16]).astype(np.float32))

@@ -254,11 +270,12 @@ def test_compile_fp16_lr_overflow_dynamic_graph():
    net_with_loss = WithLossCell(net, loss)
    scale_manager = DynamicLossScaleManager()
    update_cell = scale_manager.get_update_cell()
    train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell = update_cell)
    train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell=update_cell)
    train_network.set_train()
    output = train_network(inputs, label)
    print("the result is ", output)


def test_adam_compile():
    inputs = Tensor(np.ones([15, 1]).astype(np.float32))
    label = Tensor(np.zeros([15, 1]).astype(np.float32))

@@ -14,20 +14,22 @@
# ============================================================================
import numpy as np
import pytest

import mindspore.common.dtype as mstype
import mindspore.nn as nn
from mindspore.nn.optim import Momentum, SGD, RMSProp, Adam
from mindspore import context
from mindspore.common.api import _executor
from mindspore.common.tensor import Tensor
from mindspore.ops import operations as P
from mindspore.nn import TrainOneStepCell, WithLossCell
from mindspore.nn.optim import Momentum, SGD, RMSProp, Adam
from mindspore.ops import operations as P

context.set_context(mode=context.GRAPH_MODE)


class LeNet5(nn.Cell):
    """ LeNet5 definition """

    def __init__(self):
        super(LeNet5, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, 5, pad_mode='valid')

@@ -13,16 +13,17 @@
# limitations under the License.
# ============================================================================
import numpy as np

import mindspore.context as context
import mindspore.ops.composite as C
from mindspore import Tensor, Parameter
from mindspore.nn import Cell
from mindspore.ops import operations as P
import mindspore.ops.composite as C

context.set_context(mode=context.GRAPH_MODE, save_graphs=True)

def test_parser_three_default_mixed_args_subnet():


def test_parser_three_default_mixed_args_subnet():
    class SubNetDefaultMixedArgs(Cell):
        def __init__(self):
            super().__init__()

@@ -55,7 +56,7 @@ def test_net_vararg_kwonlyarg_kwarg():
            super(FirstNet, self).__init__()
            self.net = SecondNet()

        def construct(self, x=1, z=2+2+4, y=3):
        def construct(self, x=1, z=2 + 2 + 4, y=3):
            c = self.net(22, 33, x, y, z, 2, 3, 4, 5, key1=10, key2=20, key3=30, key4=40)
            return c

@@ -74,13 +75,14 @@ def test_net_vararg_kwonlyarg_kwarg():
    net = FirstNet()
    net()


def test_net_vararg_normal_input():
    class FirstNet(Cell):
        def __init__(self):
            super(FirstNet, self).__init__()
            self.net = SecondNet()

        def construct(self, x=1, z=2+2+4, y=3):
        def construct(self, x=1, z=2 + 2 + 4, y=3):
            c = self.net(22, 33, x, y, z, 2, 3, 4, 5, key1=10, key2=20, key3=30, key4=40)
            return c

@@ -95,10 +97,12 @@ def test_net_vararg_normal_input():
            d = var[0] * var[1] * var[2] * var[3]
            e = key1 - key2 - kwargs["key3"] + kwargs["key4"]
            return a + b + c + d + e

    x = Tensor(np.ones((2, 3, 4), np.int32))
    net = FirstNet()
    net(x, x, x)


def test_prim_vararg_kwonlyarg():
    class FirstNet(Cell):
        def __init__(self):

@@ -201,9 +205,11 @@ def test_net_variable_and_weights():
    z = Tensor(np.ones((4,), np.float32))
    net(x, y, z)


def test_net_vargs_expand():
    class InputBackward(Cell):
        """ InputBackward definition """

        def __init__(self, network, c1=None, c2=None):
            super(InputBackward, self).__init__()
            self.network = network

@@ -214,9 +220,11 @@ def test_net_vargs_expand():

        def construct(self, *inputs):
            return self.grad(self.network)(*inputs)

    class AddNet(Cell):
        def __init__(self):
            super(AddNet, self).__init__()

        def construct(self, x, y):
            return x + y

@@ -237,6 +245,7 @@ def test_mixed_precision_const_parameter():
            self.up_sample1 = P.ResizeBilinear((14, 14))
            self.up_sample2 = P.ResizeBilinear((28, 28))
            self.up_sample3 = P.ResizeBilinear((36, 36))

        def construct(self, x, y, z, *args):
            ret = 0
            if args[0] == self.shape(z)[2]:

@@ -250,16 +259,19 @@ def test_mixed_precision_const_parameter():
                ret = x * y
            ret = ret * z
            return ret

    class NetMain(Cell):
        def __init__(self, loss_fn):
            super(NetMain, self).__init__()
            self.loss_fn = loss_fn
            self.shape = P.Shape()

        def construct(self, x, y, z):
            size_x = self.shape(x)[2]
            size_y = self.shape(y)[2]
            ret = self.loss_fn(x, y, z, size_x, size_y)
            return ret

    loss_fn = NetLoss()
    net = NetMain(loss_fn)
    net.add_flags_recursive(fp32=True)

@@ -13,14 +13,14 @@
# limitations under the License.
# ============================================================================
import numpy as np
from mindspore import context

import mindspore.ops.composite as C
from mindspore import Tensor, Parameter
from mindspore import context
from mindspore.common import dtype as mstype
from mindspore.common.parameter import ParameterTuple
from mindspore.nn import Cell
from mindspore.ops import operations as P
import mindspore.ops.composite as C
from mindspore.common.api import _executor
from mindspore.common.parameter import ParameterTuple
from mindspore.common import dtype as mstype

context.set_context(mode=context.GRAPH_MODE)

@@ -34,6 +34,7 @@ def test_net_vargs_expand():

        def construct(self, x, y):
            return x + y

    x = Tensor(np.random.normal(0, 1, [3, 4, 5]).astype(np.float32))
    y = Tensor(np.random.normal(0, 1, [3, 4, 5]).astype(np.float32))
    sens = Tensor(np.random.normal(0, 1, [3, 4, 5]).astype(np.float32))

@@ -51,7 +52,7 @@ class VarNet(Cell):
        self.net = net

    def construct(self, *args):
        return self.net(*args)*self.w + self.b
        return self.net(*args) * self.w + self.b


class SecondNet(Cell):

@@ -95,6 +96,7 @@ class Bprop(Cell):

def test_all_var_args_grad_with_sens():
    """"test grad_by_list_with_sens with all var args input"""

    class GradNet(Cell):
        def __init__(self, net):
            super(GradNet, self).__init__()

@@ -103,6 +105,7 @@ def test_all_var_args_grad_with_sens():

        def construct(self, *inputs):
            return C.grad_by_list_with_sens(self.net, self.weights)(*inputs)

    x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)
    y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)
    sens = Tensor(1.0, dtype=mstype.float32)

@@ -120,6 +123,7 @@ def test_grad_list_var_args():

        def construct(self, *inputs):
            return C.grad_by_list(self.net, self.weights)(*inputs)

    x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)
    y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)
    net = VarNet(SecondNet())

@@ -136,6 +140,7 @@ def test_grad_all_var_args():

        def construct(self, *inputs):
            return C.grad_all(self.net)(*inputs)

    x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)
    y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)
    net = VarNet(SecondNet())

@@ -152,6 +157,7 @@ def test_grad_all_var_args_with_sens():

        def construct(self, *inputs):
            return C.grad_all_with_sens(self.net)(*inputs)

    x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)
    y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)
    sens = Tensor(1.0, dtype=mstype.float32)

@@ -169,6 +175,7 @@ def test_grad_var_args_with_sens():

        def construct(self, *inputs):
            return C.grad_with_sens(self.net)(*inputs)

    x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)
    y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)
    sens = Tensor(1.0, dtype=mstype.float32)

@@ -206,6 +213,7 @@ def test_var_args_grad():

        def construct(self, x, y, sens):
            return C.grad_by_list_with_sens(self.net, self.weights)(x, y, sens)

    x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)
    y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)
    sens = Tensor(1.0, dtype=mstype.float32)

@@ -216,13 +224,14 @@ def test_var_args_grad():

def test_var_args_positional():
    """"test grad_all with var args in inner graph"""

    class VarNet(Cell):
        def __init__(self, net):
            super(VarNet, self).__init__()
            self.net = net

        def construct(self, x, y):
            return self.net(x, y)*x
            return self.net(x, y) * x

    class SecondNet(Cell):
        def __init__(self):

@@ -239,6 +248,7 @@ def test_var_args_positional():

        def construct(self, x, y):
            return C.grad_all(self.net)(x, y)

    x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)
    y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)
    net = VarNet(SecondNet())

@@ -258,6 +268,7 @@ def test_grad_within_if_else():

        def construct(self, *inputs):
            return self.grad(*inputs)

    x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)
    y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)
    sens = Tensor(1.0, dtype=mstype.float32)

@@ -309,6 +320,7 @@ def test_grad_for_concat():

        def grad_cmp(self):
            input_grad_mindspore = self.grad_mindspore_impl()

    fact = ConcatFactory(input_shape=(
        (2, 184320, 1), (2, 46080, 1), (2, 11520, 1), (2, 2880, 1), (2, 720, 1)), axis=1)
    fact.grad_cmp()

@@ -15,12 +15,13 @@
"""
log test
"""
import logging
import os
import sys
import time
import re
import shutil
import logging
import sys
import time


def test_log_stdout():
    # Clean up environment variables

@@ -50,8 +51,8 @@ def test_log_setlevel():
    _rm_env_config()
    os.environ['GLOG_v'] = '0'
    from mindspore import log as logger
    #logger_instance = logger._get_logger()
    #del logger_instance
    # logger_instance = logger._get_logger()
    # del logger_instance
    loglevel = logger.get_level()
    log_str = 'print debug informations'
    logger.debug("5 test log message debug:%s", log_str)

@@ -87,7 +88,7 @@ def test_log_file():
              "\[.*:.*[0-9]\] test log message warning"
    match_obj = re.match(pattern, result)

    #Clear test file
    # Clear test file
    if os.path.exists(file_path):
        shutil.rmtree(file_path)

@@ -100,7 +101,7 @@ def test_log_backup_count():
    """
    test backup count
    """
    #logger.reset_log_config(level=logging.INFO, console=False,
    # logger.reset_log_config(level=logging.INFO, console=False,
    #                         filepath=file_path, maxBytes=1000, backupCount=10)
    _rm_env_config()
    file_path = '/tmp/log/mindspore_test'

@@ -236,7 +237,6 @@ def test_log_repeated_print():
    logger._global_logger = None



def test_log_getconfig():
    _rm_env_config()
    os.environ['GLOG_v'] = '3'

@@ -15,6 +15,7 @@
"""setup for pytest"""
import mindspore.context as context


# pylint: disable=unused-argument
def setup_module(module):
    context.set_context(mode=context.GRAPH_MODE)

@@ -13,15 +13,16 @@
# limitations under the License.
# ============================================================================
""" test_graph_summary """
import os
import logging
import os

import numpy as np
import pytest

import mindspore.nn as nn
from mindspore.nn.optim import Momentum
from mindspore import Model, context
from mindspore.train.summary.summary_record import SummaryRecord
from mindspore.nn.optim import Momentum
from mindspore.train.callback import SummaryStep
from mindspore.train.summary.summary_record import SummaryRecord
from .....dataset_mock import MindData

CUR_DIR = os.getcwd()

@@ -21,8 +21,8 @@ import tempfile
import numpy as np

from mindspore.common.tensor import Tensor
from mindspore.train.summary.summary_record import SummaryRecord, _cache_summary_tensor_data
from mindspore.train.summary._summary_adapter import _calc_histogram_bins
from mindspore.train.summary.summary_record import SummaryRecord, _cache_summary_tensor_data
from .summary_reader import SummaryReader

CUR_DIR = os.getcwd()

@@ -18,16 +18,18 @@
@Date : 2019-07-4
@Desc : test summary function
"""
import os
import logging
import os

import numpy as np

import mindspore.nn as nn
from mindspore.train.summary.summary_record import SummaryRecord, \
    _cache_summary_tensor_data
from mindspore import Model, context
from mindspore import Tensor
from mindspore.nn.optim import Momentum
from mindspore import Model, context
from mindspore.train.callback import SummaryStep
from mindspore.train.summary.summary_record import SummaryRecord, \
    _cache_summary_tensor_data
from .....dataset_mock import MindData

CUR_DIR = os.getcwd()

@ -18,16 +18,18 @@
@Date : 2019-07-4
@Desc : test summary function
"""
import os
import logging
import os
import random

import numpy as np
import pytest
from mindspore.train.summary.summary_record import SummaryRecord, _cache_summary_tensor_data
from mindspore.train.callback import SummaryStep
from mindspore.common.tensor import Tensor

import mindspore.nn as nn
from mindspore.common.tensor import Tensor
from mindspore.ops import operations as P
from mindspore.train.callback import SummaryStep
from mindspore.train.summary.summary_record import SummaryRecord, _cache_summary_tensor_data

CUR_DIR = os.getcwd()
SUMMARY_DIR = CUR_DIR + "/test_temp_summary_event_file/"
@ -129,7 +131,8 @@ def test_scalar_summary_sample_with_shape_1():
# Test: test with ge
class SummaryDemo(nn.Cell):
    """ SummaryDemo definition """
    def __init__(self,):

    def __init__(self, ):
        super(SummaryDemo, self).__init__()
        self.s = P.ScalarSummary()
        self.histogram_summary = P.HistogramSummary()
@ -218,9 +221,9 @@ def test_validate():
    with pytest.raises(ValueError):
        sr.record(2.0)
    with pytest.raises(ValueError):
        sr.record((1,3))
        sr.record((1, 3))
    with pytest.raises(ValueError):
        sr.record([2,3])
        sr.record([2, 3])
    with pytest.raises(ValueError):
        sr.record("str")
    with pytest.raises(ValueError):
@ -235,8 +238,8 @@ def test_validate():
    with pytest.raises(ValueError):
        SummaryStep(sr, "str")
    with pytest.raises(ValueError):
        SummaryStep(sr, (1,2))
        SummaryStep(sr, (1, 2))
    with pytest.raises(ValueError):
        SummaryStep(sr, [3,4])
        SummaryStep(sr, [3, 4])
    with pytest.raises(ValueError):
        SummaryStep(sr, sr)
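The paired, near-identical lines in the two test_validate hunks are the whitespace-after-comma fix (pycodestyle E231, surfaced by the repository's lint run): every comma in a tuple or list literal gets a trailing space, and nothing else changes. For example:

sr.record((1,3))    # flagged: missing whitespace after ','
sr.record((1, 3))   # clean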
@ -18,11 +18,13 @@
@Date : 2019-08-5
@Desc : test summary function of abnormal input
"""
import os
import logging
import os

import numpy as np
from mindspore.train.summary.summary_record import SummaryRecord

from mindspore.common.tensor import Tensor
from mindspore.train.summary.summary_record import SummaryRecord

CUR_DIR = os.getcwd()
SUMMARY_DIR = CUR_DIR + "/test_temp_summary_event_file/"
@ -65,6 +67,7 @@ def test_summaryrecord_input_null_string():
        assert False
    log.debug("finished test_summaryrecord_input_null_string")


def test_summaryrecord_input_None():
    log.debug("begin test_summaryrecord_input_None")
    # step 0: create the thread
@ -76,6 +79,7 @@ def test_summaryrecord_input_None():
        assert False
    log.debug("finished test_summaryrecord_input_None")


def test_summaryrecord_input_relative_dir_1():
    log.debug("begin test_summaryrecord_input_relative_dir_1")
    # step 0: create the thread
@ -18,16 +18,17 @@
@Date : 2019-08-5
@Desc : test summary function of ops params valid check
"""
import os
import logging
import os
import random

import numpy as np
import pytest
from mindspore.train.summary.summary_record import SummaryRecord
from mindspore.common.tensor import Tensor
import mindspore.nn as nn
from mindspore.ops import operations as P

import mindspore.nn as nn
from mindspore.common.tensor import Tensor
from mindspore.ops import operations as P
from mindspore.train.summary.summary_record import SummaryRecord

CUR_DIR = os.getcwd()
SUMMARY_DIR = CUR_DIR + "/test_temp_summary_event_file/"
@ -38,6 +39,7 @@ log.setLevel(level=logging.ERROR)

class SummaryDemoTag(nn.Cell):
    """ SummaryDemoTag definition """

    def __init__(self, tag1, tag2, tag3):
        super(SummaryDemoTag, self).__init__()
        self.s = P.ScalarSummary()
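The recurring one-line additions in these class hunks apply PEP 8's blank-line conventions: one blank line between a class docstring and its first method, two blank lines between top-level definitions. Schematically, with a hypothetical Demo cell:

# before: no blank line after the class docstring
class Demo(nn.Cell):
    """ Demo definition """
    def __init__(self):
        super(Demo, self).__init__()

# after: blank line after the docstring, two blank lines before the next top-level def
class Demo(nn.Cell):
    """ Demo definition """

    def __init__(self):
        super(Demo, self).__init__()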
@ -58,6 +60,7 @@ class SummaryDemoTag(nn.Cell):

class SummaryDemoTagForSet(nn.Cell):
    """ SummaryDemoTagForSet definition """

    def __init__(self, tag_tuple):
        super(SummaryDemoTagForSet, self).__init__()
        self.s = P.ScalarSummary()

@ -75,6 +78,7 @@ class SummaryDemoTagForSet(nn.Cell):

class SummaryDemoValue(nn.Cell):
    """ SummaryDemoValue definition """

    def __init__(self, value):
        super(SummaryDemoValue, self).__init__()
        self.s = P.ScalarSummary()

@ -88,8 +92,10 @@ class SummaryDemoValue(nn.Cell):
        self.s("y", self.v)
        return z


class SummaryDemoValueForSet(nn.Cell):
    """ SummaryDemoValueForSet definition """

    def __init__(self, value, tag_tuple):
        super(SummaryDemoValueForSet, self).__init__()
        self.s = P.ScalarSummary()

@ -106,6 +112,7 @@ class SummaryDemoValueForSet(nn.Cell):

class HistogramSummaryNet(nn.Cell):
    "HistogramSummaryNet definition"

    def __init__(self, value):
        self.histogram_summary = P.HistogramSummary()
        self.add = P.TensorAdd()
@ -246,7 +253,7 @@ def test_histogram_summary_use_valid_value():
    """Test histogram summary with valid value"""
    log.debug("Begin test_histogram_summary_use_valid_value")
    try:
        net = HistogramSummaryNet(Tensor(np.array([1,2,3])))
        net = HistogramSummaryNet(Tensor(np.array([1, 2, 3])))
        run_case(net)
    except:
        assert True
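The corrected line above carries the test's whole input pattern. A minimal sketch of what a HistogramSummaryNet-style cell does with it (names taken from the hunks above; run_case is a test-local helper, and the summary op only takes effect inside a compiled graph):

import numpy as np

from mindspore.common.tensor import Tensor
from mindspore.ops import operations as P

value = Tensor(np.array([1, 2, 3]))
hist = P.HistogramSummary()
# inside a Cell's construct(): hist("debug_data", value) records the tensor's histogram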
@ -18,13 +18,15 @@
@Date : 2019-07-4
@Desc : test summary function
"""
import os
import logging
import os

import numpy as np
from mindspore.train.summary.summary_record import SummaryRecord, _cache_summary_tensor_data
from mindspore.common.tensor import Tensor

import mindspore.nn as nn
from mindspore.common.tensor import Tensor
from mindspore.ops import operations as P
from mindspore.train.summary.summary_record import SummaryRecord, _cache_summary_tensor_data

CUR_DIR = os.getcwd()
SUMMARY_DIR = CUR_DIR + "/test_temp_summary_event_file/"
@ -93,7 +95,6 @@ def test_tensor_summary_sample():
    log.debug("finished test_tensor_summary_sample")



def get_test_data_check(step):
    """ get_test_data_check """
    test_data_list = []
@ -111,7 +112,8 @@ def get_test_data_check(step):
# Test: test with ge
class SummaryDemo(nn.Cell):
    """ SummaryDemo definition """
    def __init__(self,):

    def __init__(self, ):
        super(SummaryDemo, self).__init__()
        self.s = P.TensorSummary()
        self.add = P.TensorAdd()
@ -123,6 +125,7 @@ class SummaryDemo(nn.Cell):
        self.s("y1", y)
        return z


def test_tensor_summary_with_ge():
    """ test_tensor_summary_with_ge """
    log.debug("begin test_tensor_summary_with_ge")
@ -140,7 +143,7 @@ def test_tensor_summary_with_ge():
    steps = 100
    for i in range(1, steps):
        x = Tensor(np.array([[i], [i]]).astype(np.float32))
        y = Tensor(np.array([[i+1], [i+1]]).astype(np.float32))
        y = Tensor(np.array([[i + 1], [i + 1]]).astype(np.float32))
        net(x, y)
        test_writer.record(i)
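The loop above is the whole SummaryRecord workflow in miniature. A sketch under the era's API, assuming the imports and the SummaryDemo cell from the hunks above and an explicit close():

test_writer = SummaryRecord(SUMMARY_DIR)
for i in range(1, 100):
    x = Tensor(np.array([[i], [i]]).astype(np.float32))
    y = Tensor(np.array([[i + 1], [i + 1]]).astype(np.float32))
    net(x, y)              # executes the summary ops inside the cell
    test_writer.record(i)  # flushes the cached summaries for step i
test_writer.close()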
@ -15,12 +15,11 @@
""" auto mixed precision """
import numpy as np
import pytest

import mindspore.context as context
from mindspore import Tensor
from mindspore import amp
from mindspore import nn
from mindspore import Tensor
from mindspore.common import dtype as mstype
import mindspore.context as context
from mindspore.model_zoo.resnet import resnet50
from mindspore.train import Model
from ....dataset_mock import MindData
@ -96,6 +95,7 @@ class MindDataSet(MindData):
                         np_types=dataset_types,
                         output_shapes=dataset_shapes,
                         input_indexs=(0, 1))

    def __next__(self):
        if self._size < self._iter_num:
            raise StopIteration
@ -122,6 +122,7 @@ def test_compile_model_train_O0():
    # not actual run, the metrics step will fail, check if compile ok.
    model.eval(dataset)


def test_compile_model_train_O2():
    dataset_types = (np.float32, np.float32)
    dataset_shapes = ((16, 16), (16, 16))
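test_compile_model_train_O0 and test_compile_model_train_O2 compile the same network under different mixed-precision levels. A minimal sketch, assuming the era's amp.build_train_network entry point and illustrative loss/optimizer choices:

from mindspore import amp, nn
from mindspore.model_zoo.resnet import resnet50
from mindspore.nn.optim import Momentum

net = resnet50()
loss = nn.SoftmaxCrossEntropyWithLogits()
opt = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
# "O0" keeps float32 throughout; "O2" casts the network to float16 and adds loss scaling
train_net = amp.build_train_network(net, opt, loss, level="O2")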
@ -14,6 +14,7 @@
# ============================================================================
""" test_run_config """
import pytest

from mindspore.train.callback import CheckpointConfig
@ -14,12 +14,14 @@
# ============================================================================
""" test_training """
import logging

import numpy as np
import pytest

import mindspore.nn as nn
from mindspore import Model, context
from mindspore import Tensor
from mindspore.nn.optim import Momentum
from mindspore import Model, context
from mindspore.train.callback import SummaryStep
from ..ut_filter import non_graph_engine
from ....dataset_mock import MindData
@ -19,12 +19,12 @@
@Desc : test mindspore compile method
"""
import logging
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor, Parameter, Model
from mindspore.ops import operations as P
from ..ut_filter import non_graph_engine

import numpy as np

import mindspore.nn as nn
from mindspore import Tensor, Parameter
from mindspore.ops import operations as P

log = logging.getLogger("test")
log.setLevel(level=logging.ERROR)
@ -104,6 +104,7 @@ class ResidualBlock(nn.Cell):

class ResNet(nn.Cell):
    """ ResNet definition """

    def __init__(self, tensor):
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)
@ -118,6 +119,7 @@ class ResNet(nn.Cell):

class LeNet(nn.Cell):
    """ LeNet definition """

    def __init__(self):
        super(LeNet, self).__init__()
        self.relu = nn.ReLU()
@ -165,4 +167,3 @@ class Net(nn.Cell):

    def construct(self, input_x):
        return self.softmax(input_x)
@ -15,19 +15,19 @@
"""test callback function."""
import os
import stat

import numpy as np
import pytest

import mindspore.nn as nn
import mindspore.common.dtype as mstype
from mindspore import context
import mindspore.nn as nn
from mindspore.common.api import ms_function
from mindspore.common.tensor import Tensor
from mindspore.nn.optim import Momentum
from mindspore.nn import TrainOneStepCell, WithLossCell
from mindspore.nn.optim import Momentum
from mindspore.train.callback import ModelCheckpoint, _check_file_name_prefix, RunContext, _checkpoint_cb_for_save_op, \
    LossMonitor, _InternalCallbackParam, _chg_ckpt_file_name_if_same_exist, \
    _build_callbacks, CheckpointConfig, _set_cur_net
from mindspore.common.api import ms_function


class Net(nn.Cell):
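The callback imports above center on checkpointing. A minimal usage sketch with illustrative values (prefix and directory are arbitrary):

from mindspore.train.callback import CheckpointConfig, ModelCheckpoint

# save every 100 steps, keep at most 10 checkpoint files on disk
config = CheckpointConfig(save_checkpoint_steps=100, keep_checkpoint_max=10)
ckpt_cb = ModelCheckpoint(prefix="test", directory="./checkpoints", config=config)
# ckpt_cb would then go in the callback list passed to Model.train(...)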
@ -15,6 +15,7 @@
""" test_checkparam """
import numpy as np
import pytest

import mindspore
import mindspore.nn as nn
from mindspore import Model, context

@ -23,6 +24,7 @@ from mindspore.common.tensor import Tensor

class LeNet5(nn.Cell):
    """ LeNet5 definition """

    def __init__(self):
        super(LeNet5, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5, pad_mode="valid")
Some files were not shown because too many files have changed in this diff.