forked from mindspore-Ecosystem/mindspore

fix pylint check issues

commit f6c20178d2
parent 848d19207f
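Note: the hunks below are mechanical lint cleanup across the operator tests. The recurring fixes are snake_case renames for variables and test functions (pylint C0103, invalid-name), removal of unused imports (W0611), unused variables and loop values rebound to "_" (W0612), parentheses dropped from assert statements (C0325, superfluous-parens), stray semicolons removed (W0301), bare "expect output" docstrings deleted (W0105, pointless-string-statement), and long continuation lines re-wrapped. A minimal before/after sketch of the patterns (illustrative only, not code from the commit; message IDs per pylint's standard checkers):

    import numpy as np

    output = [np.array([1, 0]), np.array([0, 1])]
    expect = [np.array([1, 0]), np.array([0, 1])]

    # Before (flagged): Result = compute(); ... assert (cond);
    # After: snake_case name, no semicolon, unused loop value spelled "_",
    # and no parentheses around the assert expression.
    result = output
    for i, _ in enumerate(result):
        assert np.all(result[i] == expect[i])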
@@ -48,15 +48,15 @@ def test_argmax():
     expect2 = np.array([1, 0, 0, 0]).astype(np.int32)

     context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
-    Argmax = NetArgmax()
-    output = Argmax(x)
+    argmax = NetArgmax()
+    output = argmax(x)
     assert (output[0].asnumpy() == expect1).all()
     assert (output[1].asnumpy() == expect2).all()
     assert (output[2].asnumpy() == expect2).all()

     context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
-    Argmax1 = NetArgmax()
-    output1 = Argmax(x)
+    argmax1 = NetArgmax()
+    output1 = argmax1(x)
     assert (output1[0].asnumpy() == expect1).all()
     assert (output1[1].asnumpy() == expect2).all()
     assert (output1[2].asnumpy() == expect2).all()
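Note: the second rename above is more than style. In the GRAPH_MODE block the old code built Argmax1 but then called Argmax(x), so the graph-mode assertions re-ran the PYNATIVE_MODE instance; with argmax1 = NetArgmax() and output1 = argmax1(x), each mode exercises its own network. A self-contained sketch of that bug class (hypothetical names, not from the commit):

    class Doubler:
        def __call__(self, v):
            return 2 * v

    doubler = Doubler()    # first instance
    doubler1 = Doubler()   # second instance
    output = doubler(3)
    output1 = doubler1(3)  # the old test effectively kept calling doubler here
    assert output == 6 and output1 == 6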
@@ -23,13 +23,13 @@ from mindspore.ops import operations as P


 class Net(nn.Cell):
-    def __init__(self, value):
+    def __init__(self, param):
         super(Net, self).__init__()
-        self.var = Parameter(value, name="var")
+        self.var = Parameter(param, name="var")
         self.assign = P.Assign()

-    def construct(self, value):
-        return self.assign(self.var, value)
+    def construct(self, param):
+        return self.assign(self.var, param)


 x = np.array([[1.2, 1], [1, 0]]).astype(np.float32)
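Note: the value -> param rename here most likely quiets W0621 (redefined-outer-name), where a function argument reuses a name that also exists at an outer scope. A hypothetical reproduction (not from the commit):

    value = 10  # module-level name


    def scale(param):  # was "def scale(value):", which shadows the outer name
        return param * value


    assert scale(2) == 20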
@@ -20,9 +20,6 @@ import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.common import dtype as mstype
-from mindspore.common.api import ms_function
-from mindspore.common.initializer import initializer
-from mindspore.common.parameter import Parameter
 from mindspore.ops import operations as P

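Note: the three dropped imports are unreferenced in this file, which pylint reports per import as W0611 (unused-import). Minimal reproduction:

    import os   # W0611: Unused import os (unused-import)
    import sys

    print(sys.platform)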
@@ -38,7 +35,7 @@ class BatchMatMulNet(nn.Cell):
         return self.batch_matmul(x, y)


-def test_4D():
+def test_4d():
     input_x = Tensor(np.arange(2 * 4 * 1 * 3).reshape(2, 4, 1, 3), mstype.float32)
     input_y = Tensor(np.arange(2 * 4 * 3 * 4).reshape(2, 4, 3, 4), mstype.float32)

@@ -60,7 +57,7 @@ def test_4D():
 @pytest.mark.level0
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
-def test_4D_transpose_a():
+def test_4d_transpose_a():
     input_x = Tensor(np.arange(2 * 4 * 3 * 1).reshape(2, 4, 3, 1), mstype.float32)
     input_y = Tensor(np.arange(2 * 4 * 3 * 4).reshape(2, 4, 3, 4), mstype.float32)

@@ -82,7 +79,7 @@ def test_4D_transpose_a():
 @pytest.mark.level0
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
-def test_4D_transpose_b():
+def test_4d_transpose_b():
     input_x = Tensor(np.arange(2 * 4 * 1 * 3).reshape(2, 4, 1, 3), mstype.float32)
     input_y = Tensor(np.arange(2 * 4 * 4 * 3).reshape(2, 4, 4, 3), mstype.float32)

@@ -104,7 +101,7 @@ def test_4D_transpose_b():
 @pytest.mark.level0
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
-def test_4D_transpose_ab():
+def test_4d_transpose_ab():
     input_x = Tensor(np.arange(2 * 4 * 3 * 1).reshape(2, 4, 3, 1), mstype.float32)
     input_y = Tensor(np.arange(2 * 4 * 4 * 3).reshape(2, 4, 4, 3), mstype.float32)

@@ -122,17 +119,7 @@ def test_4D_transpose_ab():
                        [[5612, 5810, 6008, 6206]]]]
     assert (output.asnumpy() == expect).all()

-class BatchMatMulNet(nn.Cell):
-    def __init__(self, transpose_a=False, transpose_b=False):
-        super(BatchMatMulNet, self).__init__()
-        self.batch_matmul = P.BatchMatMul(transpose_a, transpose_b)
-
-    def construct(self, x, y):
-        return self.batch_matmul(x, y)
-
-
-def test_4D_fp16():
+def test_4d_fp16():
     input_x = Tensor(np.arange(2 * 4 * 1 * 3).reshape(2, 4, 1, 3), mstype.float16)
     input_y = Tensor(np.arange(2 * 4 * 3 * 4).reshape(2, 4, 3, 4), mstype.float16)

@@ -68,10 +68,10 @@ def test_batchnrom_fold2():
     current_step = np.array([0]).astype('int32')
     output = net(Tensor(x), Tensor(beta), Tensor(gamma), Tensor(batch_std), Tensor(batch_mean),
                  Tensor(running_std), Tensor(running_mean), Tensor(current_step))
-    expect = (x + beta.reshape(-1, 1, 1) - (gamma * running_mean / running_std).reshape(-1, 1,
-                                                                                        1) if current_step >= freeze_bn else
-              x * (running_std / batch_std).reshape(-1, 1, 1) + (beta - gamma * batch_mean / batch_std).reshape(-1, 1,
-                                                                                                                1))
+    expect = ((x + beta.reshape(-1, 1, 1) -
+               (gamma * running_mean / running_std).reshape(-1, 1, 1) if current_step >= freeze_bn else
+               x * (running_std / batch_std).reshape(-1, 1, 1) +
+               (beta - gamma * batch_mean / batch_std).reshape(-1, 1, 1)))
     error = np.ones(shape=expect.shape) * 1.0e-6
     diff = output.asnumpy() - expect
     assert np.all(diff < error)
@@ -80,10 +80,9 @@ def test_batchnrom_fold2():
     current_step = np.array([100000]).astype('int32')
     output = net(Tensor(x), Tensor(beta), Tensor(gamma), Tensor(batch_std), Tensor(batch_mean), Tensor(running_std),
                  Tensor(running_mean), Tensor(current_step))
-    expect = (x + beta.reshape(-1, 1, 1) - (gamma * running_mean / running_std).reshape(-1, 1,
-                                                                                        1) if current_step >= freeze_bn else
-              x * (batch_std / running_std).reshape(-1, 1, 1) + (beta - gamma * batch_mean / batch_std).reshape(-1, 1,
-                                                                                                                1))
+    expect = ((x + beta.reshape(-1, 1, 1) - (gamma * running_mean / running_std).reshape(-1, 1, 1)
+               if current_step >= freeze_bn else x * (batch_std / running_std).reshape(-1, 1, 1) +
+               (beta - gamma * batch_mean / batch_std).reshape(-1, 1, 1)))
     error = np.ones(shape=expect.shape) * 1.0e-6
     diff = output.asnumpy() - expect
     assert np.all(diff < error)
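Note: both hunks above only re-wrap the conditional expression for readability; the computed value is unchanged. Written out as a plain if/else (a restatement with hypothetical shapes, following the first hunk; reshape(-1, 1, 1) makes each per-channel vector broadcastable over (C, H, W) data):

    import numpy as np

    x = np.ones((2, 4, 4), np.float32)
    beta = np.zeros(2, np.float32)
    gamma = np.ones(2, np.float32)
    batch_std = np.ones(2, np.float32)
    batch_mean = np.zeros(2, np.float32)
    running_std = np.ones(2, np.float32)
    running_mean = np.zeros(2, np.float32)
    current_step, freeze_bn = 0, 10000

    if current_step >= freeze_bn:
        expect = (x + beta.reshape(-1, 1, 1)
                  - (gamma * running_mean / running_std).reshape(-1, 1, 1))
    else:
        expect = (x * (running_std / batch_std).reshape(-1, 1, 1)
                  + (beta - gamma * batch_mean / batch_std).reshape(-1, 1, 1))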
@@ -38,8 +38,8 @@ class Net(nn.Cell):

 def np_result(d_batch_mean, d_batch_std, x, batch_mean, batch_std):
     n = x.shape[0] * x.shape[2] * x.shape[3]
-    dx = d_batch_mean.reshape(1, -1, 1, 1) / n + d_batch_std.reshape(1, -1, 1, 1) * (
-        x - batch_mean.reshape(1, -1, 1, 1)) / batch_std.reshape(1, -1, 1, 1) / n
+    dx = (d_batch_mean.reshape(1, -1, 1, 1) / n + d_batch_std.reshape(1, -1, 1, 1) *
+          (x - batch_mean.reshape(1, -1, 1, 1)) / batch_std.reshape(1, -1, 1, 1) / n)
     return dx

@@ -86,7 +86,7 @@ def test_batchnorm_fold2():
     ms_var = Tensor(variance)
     batch_mean, batch_var, delay_mean, delay_std = net(Tensor(x), ms_mean, ms_var,
                                                        Tensor(current_step))
-    expect1, expect2, expect3, expect4, expect5, expect6 = np_result(x, mean, variance, 0.9, 1e-12)
+    expect1, expect2, expect3, _, expect5, expect6 = np_result(x, mean, variance, 0.9, 1e-12)
     assert np.allclose(batch_mean.asnumpy(), expect1, rtol=1.e-7, atol=1.e-5)
     assert np.allclose(batch_var.asnumpy(), expect2, rtol=1.e-7, atol=1.e-5)
     assert np.allclose(ms_mean.asnumpy(), expect3, rtol=1.e-7, atol=1.e-5)
@@ -108,7 +108,7 @@ def test_batchnorm_fold_freeze():
     ms_var = Tensor(variance)
     batch_mean, batch_var, delay_mean, delay_std = net(Tensor(x), ms_mean, ms_var,
                                                        Tensor(current_step))
-    expect1, expect2, expect3, expect4, expect5, expect6 = np_result(x, mean, variance, 0.9, 1e-12)
+    _, _, _, _, expect5, expect6 = np_result(x, mean, variance, 0.9, 1e-12)
     assert np.allclose(batch_mean.asnumpy(), np.zeros_like(mean), rtol=1.e-7, atol=1.e-5)
     assert np.allclose(batch_var.asnumpy(), np.ones_like(mean), rtol=1.e-7, atol=1.e-5)
     assert np.allclose(ms_mean.asnumpy(), mean, rtol=1.e-7, atol=1.e-5)
@@ -61,9 +61,6 @@ def test_train_forward():
                        [-0.0281, 0.9119, 1.3819, 1.8518],
                        [2.7918, 0.4419, -0.4981, 0.9119],
                        [1.8518, 0.9119, 2.3218, -0.9680]]]]).astype(np.float32)
-    grad = np.array([[
-        [[1, 2, 7, 1], [4, 2, 1, 3], [1, 6, 5, 2], [2, 4, 3, 2]],
-        [[9, 4, 3, 5], [1, 3, 7, 6], [5, 7, 9, 9], [1, 4, 6, 8]]]]).astype(np.float32)

     weight = np.ones(2).astype(np.float32)
     bias = np.ones(2).astype(np.float32)
@@ -16,10 +16,8 @@
 import numpy as np
 import pytest

-import mindspore.common.dtype as mstype
 import mindspore.context as context
 from mindspore.common.tensor import Tensor
-from mindspore.nn import Cell
 from mindspore.ops import operations as P

@@ -47,9 +47,9 @@ def test_cast():
     net = Net()
     output = net(x0, t0, x1, t1)
     type0 = output[0].asnumpy().dtype
-    assert (type0 == 'float16')
+    assert type0 == 'float16'
     type1 = output[1].asnumpy().dtype
-    assert (type1 == 'float32')
+    assert type1 == 'float32'


 @pytest.mark.level0
@@ -65,6 +65,6 @@ def test_cast1():
     net = Net()
     output = net(x0, t0, x1, t1)
     type0 = output[0].asnumpy().dtype
-    assert (type0 == 'float32')
+    assert type0 == 'float32'
     type1 = output[1].asnumpy().dtype
-    assert (type1 == 'float32')
+    assert type1 == 'float32'
@@ -66,13 +66,6 @@ def test_conv2d_backprop_filter():
                                  [-3, -2, -3, -16]]]]).astype(np.float32))
     conv2d_filter = Conv2dFilter()
     output = conv2d_filter(out, x, w)
-    print("================================")
-    """
-    expect output:
-    [[[[ -60, -142, -265]
-       [-104, -211, -322]
-       [-102, -144, -248]]]]
-    """
     expect = np.array([[[[-60, -142, -265],
                          [-104, -211, -322],
                          [-102, -144, -248]]]]).astype(np.float32)
@@ -20,8 +20,6 @@ import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.common.api import ms_function
-from mindspore.common.initializer import initializer
-from mindspore.common.parameter import Parameter
 from mindspore.ops import operations as P

 context.set_context(device_target='GPU')
@@ -57,16 +57,6 @@ def test_conv2d():
     conv2d = NetConv2d()
     output = conv2d(x, w)
     assert (output.asnumpy() == expect).all()
-    """
-    expect output:
-    [[[[ 45. 48. 51.]
-       [ 54. 57. 60.]
-       [ 63. 66. 69.]]
-
-      [[126. 138. 150.]
-       [162. 174. 186.]
-       [198. 210. 222.]]]]
-    """
     context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
     conv2d = NetConv2d()
     output = conv2d(x, w)
@@ -14,7 +14,6 @@
 # ============================================================================

 import numpy as np
-import os
 import pytest

 import mindspore.context as context
@@ -51,5 +50,5 @@ def test_correction_mul_grad():
     expect = [0, 0]
     expect[0] = (dout * np.reshape(batch_std / running_std, (co, 1, 1, 1)))
     expect[1] = (np.sum(dout * x, (1, 2, 3)) / running_std)
-    for i, v in enumerate(output):
-        assert (np.allclose(output[i].asnumpy(), expect[i], rtol=1.e-5, atol=1.e-5))
+    for i, _ in enumerate(output):
+        assert np.allclose(output[i].asnumpy(), expect[i], rtol=1.e-5, atol=1.e-5)
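Note: for i, v in enumerate(output) left v unused, pylint's W0612 (unused-variable); rebinding it to "_" marks it as deliberately ignored. Since only the index was needed, a later cleanup could iterate pairs directly. Illustrative:

    output = [1, 2, 3]
    expect = [1, 2, 3]
    for i, _ in enumerate(output):
        assert output[i] == expect[i]
    # equivalent without the index:
    for got, want in zip(output, expect):
        assert got == want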
@@ -50,4 +50,4 @@ def test_correction_mul():
     diff = output.asnumpy() - expect
     assert np.all(diff < error)
     assert np.all(diff > error * -1)
-    assert (output.shape() == expect.shape)
+    assert output.shape() == expect.shape
@@ -68,8 +68,8 @@ class GradData(nn.Cell):
         self.grad = GradOperation(name="get_all", get_all=True, sens_param=True)
         self.network = network

-    def construct(self, input, output_grad):
-        return self.grad(self.network)(input, output_grad)
+    def construct(self, inputs, output_grad):
+        return self.grad(self.network)(inputs, output_grad)


 class GradWeight(nn.Cell):
@@ -172,8 +172,8 @@ class Grad(nn.Cell):
         self.grad = GradOperation(name="get_all", get_all=True, sens_param=True)
         self.network = network

-    def construct(self, input, bias, dy):
-        return self.grad(self.network)(input, bias, dy)
+    def construct(self, inputs, bias, dy):
+        return self.grad(self.network)(inputs, bias, dy)


 @pytest.mark.level0
@@ -50,16 +50,16 @@ def test_equal():
     equal = NetEqual()
     output0 = equal(x0, y0)
     assert np.all(output0.asnumpy() == expect0)
-    assert (output0.shape() == expect0.shape)
+    assert output0.shape() == expect0.shape
     output1 = equal(x1, y1)
     assert np.all(output1.asnumpy() == expect1)
-    assert (output1.shape() == expect1.shape)
+    assert output1.shape() == expect1.shape

     context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
     equal = NetEqual()
     output0 = equal(x0, y0)
     assert np.all(output0.asnumpy() == expect0)
-    assert (output0.shape() == expect0.shape)
+    assert output0.shape() == expect0.shape
     output1 = equal(x1, y1)
     assert np.all(output1.asnumpy() == expect1)
-    assert (output1.shape() == expect1.shape)
+    assert output1.shape() == expect1.shape
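Note: assert is a statement, not a function, so assert (expr) is flagged as C0325 (superfluous-parens); removing the parentheses changes nothing semantically. Illustrative:

    shape = (2, 3)
    expected_shape = (2, 3)
    assert shape == expected_shape  # was: assert (shape == expected_shape)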
@@ -49,19 +49,19 @@ def test_exp():
     output0 = exp(x0)
     diff0 = output0.asnumpy() - expect0
     assert np.all(diff0 < error0)
-    assert (output0.shape() == expect0.shape)
+    assert output0.shape() == expect0.shape
     output1 = exp(x1)
     diff1 = output1.asnumpy() - expect1
     assert np.all(diff1 < error1)
-    assert (output1.shape() == expect1.shape)
+    assert output1.shape() == expect1.shape

     context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
     exp = NetExp()
     output0 = exp(x0)
     diff0 = output0.asnumpy() - expect0
     assert np.all(diff0 < error0)
-    assert (output0.shape() == expect0.shape)
+    assert output0.shape() == expect0.shape
     output1 = exp(x1)
     diff1 = output1.asnumpy() - expect1
     assert np.all(diff1 < error1)
-    assert (output1.shape() == expect1.shape)
+    assert output1.shape() == expect1.shape
@@ -19,7 +19,6 @@ import pytest
 import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
-from mindspore.ops import operations as P
 from mindspore.ops.operations import _grad_ops as G

@@ -38,11 +37,6 @@ class NetFlattenGrad(nn.Cell):
 @pytest.mark.env_onecard
 def test_flatten_grad():
     x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]).astype(np.float32))
-    """
-    expect output:
-    [ [-0.1 0.3 3.6]
-      [ 0.4 0.5 -3.2] ]
-    """
     expect = np.array([[-0.1, 0.3, 3.6],
                        [0.4, 0.5, -3.2]]).astype(np.float32)

@@ -37,11 +37,6 @@ class NetFlatten(nn.Cell):
 def test_flatten():
     x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]).astype(np.float32))
     expect = np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]).astype(np.float32)
-    """
-    expect output:
-    [[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]
-    """
-
     context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
     flatten = NetFlatten()
     output = flatten(x)
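Note: the deleted "expect output" blocks in the two hunks above are bare string literals inside a function body; they are evaluated and discarded, which pylint reports as W0105 (pointless-string-statement), and the adjacent expect arrays already document the same values. Illustrative:

    def demo():
        x = [1, 2]
        """expect output: [1, 2]"""  # W0105: string statement has no effect
        assert x == [1, 2]


    demo()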
@@ -68,7 +68,7 @@ x3 = np.array([[1, 2], [3, 4], [5.0, 88.0]]).astype(np.float32)
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
 def test_status():
-    ms_status = Net();
+    ms_status = Net()
     output1 = ms_status(Tensor(x1))
     output2 = ms_status(Tensor(x2))
     output3 = ms_status(Tensor(x3))
@@ -84,7 +84,7 @@ def test_status():
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
 def test_nan():
-    ms_isnan = Netnan();
+    ms_isnan = Netnan()
     output1 = ms_isnan(Tensor(x1))
     output2 = ms_isnan(Tensor(x2))
     output3 = ms_isnan(Tensor(x3))
@@ -100,7 +100,7 @@ def test_nan():
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
 def test_inf():
-    ms_isinf = Netinf();
+    ms_isinf = Netinf()
     output1 = ms_isinf(Tensor(x1))
     output2 = ms_isinf(Tensor(x2))
     output3 = ms_isinf(Tensor(x3))
@@ -116,7 +116,7 @@ def test_inf():
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
 def test_finite():
-    ms_isfinite = Netfinite();
+    ms_isfinite = Netfinite()
     output1 = ms_isfinite(Tensor(x1))
     output2 = ms_isfinite(Tensor(x2))
     output3 = ms_isfinite(Tensor(x3))
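Note: the four hunks above each drop one trailing semicolon, pylint's W0301 (unnecessary-semicolon); Python statements take no terminator. Illustrative:

    status = sum([1, 2, 3])  # was: status = sum([1, 2, 3]);
    assert status == 6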
@@ -913,16 +913,16 @@ class GatherNet2(nn.Cell):
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
 def test_gather2():
-    x = Tensor(np.array([[4., 5., 4., 1., 5., ],
-                         [4., 9., 5., 6., 4., ],
-                         [9., 8., 4., 3., 6., ],
-                         [0., 4., 2., 2., 8., ],
-                         [1., 8., 6., 2., 8., ],
-                         [8., 1., 9., 7., 3., ],
-                         [7., 9., 2., 5., 7., ],
-                         [9., 8., 6., 8., 5., ],
-                         [3., 7., 2., 7., 4., ],
-                         [4., 2., 8., 2., 9., ]]
+    x = Tensor(np.array([[4., 5., 4., 1., 5.,],
+                         [4., 9., 5., 6., 4.,],
+                         [9., 8., 4., 3., 6.,],
+                         [0., 4., 2., 2., 8.,],
+                         [1., 8., 6., 2., 8.,],
+                         [8., 1., 9., 7., 3.,],
+                         [7., 9., 2., 5., 7.,],
+                         [9., 8., 6., 8., 5.,],
+                         [3., 7., 2., 7., 4.,],
+                         [4., 2., 8., 2., 9.,]]
                         ).astype(np.float32))

     indices = Tensor(np.array([[4000, 1, 300000]]).astype(np.int32))
@@ -19,8 +19,6 @@ import pytest
 import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
-from mindspore.ops import composite as C
-from mindspore.ops import operations as P
 from mindspore.ops.operations import _grad_ops as G

 context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
@@ -50,10 +50,10 @@ def test_log():
     output1 = log(x1)
     diff0 = output0.asnumpy() - expect0
     assert np.all(diff0 < error0)
-    assert (output0.shape() == expect0.shape)
+    assert output0.shape() == expect0.shape
     diff1 = output1.asnumpy() - expect1
     assert np.all(diff1 < error1)
-    assert (output1.shape() == expect1.shape)
+    assert output1.shape() == expect1.shape

     context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
     log = NetLog()
@@ -61,7 +61,7 @@ def test_log():
     output1 = log(x1)
     diff0 = output0.asnumpy() - expect0
     assert np.all(diff0 < error0)
-    assert (output0.shape() == expect0.shape)
+    assert output0.shape() == expect0.shape
     diff1 = output1.asnumpy() - expect1
     assert np.all(diff1 < error1)
-    assert (output1.shape() == expect1.shape)
+    assert output1.shape() == expect1.shape
@@ -27,8 +27,8 @@ class NetAnd(Cell):
         super(NetAnd, self).__init__()
         self.logicaland = P.LogicalAnd()

-    def construct(self, x, y):
-        return self.logicaland(x, y)
+    def construct(self, input_x, input_y):
+        return self.logicaland(input_x, input_y)


 class NetOr(Cell):
@@ -36,8 +36,8 @@ class NetOr(Cell):
         super(NetOr, self).__init__()
         self.logicalor = P.LogicalOr()

-    def construct(self, x, y):
-        return self.logicalor(x, y)
+    def construct(self, input_x, input_y):
+        return self.logicalor(input_x, input_y)


 class NetNot(Cell):
@@ -45,8 +45,8 @@ class NetNot(Cell):
         super(NetNot, self).__init__()
         self.logicalnot = P.LogicalNot()

-    def construct(self, x):
-        return self.logicalnot(x)
+    def construct(self, input_x):
+        return self.logicalnot(input_x)


 x = np.array([True, False, False]).astype(np.bool)
@@ -35,8 +35,8 @@ def test_logsoftmax():
                        [-3.452001, -1.2546989, -1.4618242, -0.79552734]]).astype(np.float32)

     context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
-    LogSoftmax = P.LogSoftmax()
-    output = LogSoftmax(Tensor(x))
+    logSoftmax = P.LogSoftmax()
+    output = logSoftmax(Tensor(x))
     assert np.allclose(output.asnumpy(), expect)

@@ -134,7 +134,7 @@ def test_logsoftmaxgrad1():
                      [-0.01768187, 0.26872346, -0.5037259, -0.3376058, -0.3291146, 1.4752979, -0.25972134, 0.8869053,
                       0.25325722, -0.13946185],
                      [-0.5247209, 0.70192003, -1.0808672, 1.4858199, -1.1273282, 0.20728993, 0.38918605, 0.08162117,
-                      0.10445589, 0.3220427]], ).astype(np.float32)
+                      0.10445589, 0.3220427]],).astype(np.float32)

     context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
     net = LogSoftmax(0)
@@ -23,7 +23,6 @@ from mindspore.common.initializer import initializer
 from mindspore.common.parameter import ParameterTuple, Parameter
 from mindspore.common.tensor import Tensor
 from mindspore.ops import composite as C
-from mindspore.ops import functional as F
 from mindspore.ops import operations as P

 context.set_context(device_target='GPU')
@@ -81,7 +80,7 @@ class LstmNet(nn.Cell):
                         -2.9055e-01, -2.8129e-01, 6.0219e-01, 4.9193e-01, 3.3115e-01],
                        [-5.6894e-01, -5.0359e-01, 4.7491e-01, 5.8110e-01, -5.4921e-01,
                         -6.1343e-01, -5.8236e-02, -3.7682e-01, 4.8338e-01, -2.1551e-01]]).astype(np.float32).reshape(
-            [1, -1])
+        [1, -1])

         whh = np.array([[-0.4820, -0.2350],
                         [-0.1195, 0.0519],

@@ -205,7 +204,7 @@ class BiLstmNet(nn.Cell):
                         [0.0299, -0.6071, -0.4683, -0.3363, -0.0044, -0.0007, 0.2700, 0.0202, -0.2880, -0.6869],
                         [0.3025, -0.2461, -0.5128, 0.6327, -0.1438, -0.5100, 0.1924, 0.2023, 0.3129, 0.2271],
                         [0.3777, 0.0546, 0.4790, -0.1895, 0.3588, 0.4490, 0.6850, 0.6240, -0.2739, -0.4474]]).astype(
-            np.float32).reshape([1, -1])
+        np.float32).reshape([1, -1])

         whh = np.array([[0.6346, -0.6366],
                         [-0.0248, -0.6156],

@@ -394,7 +393,7 @@ class MultiLayerBiLstmNet(nn.Cell):
                                    5.5428e-01, 1.0429e-01, 5.1322e-01, 1.9406e-01],
                                   [3.9698e-01, -5.2101e-01, 5.1372e-01, -3.9866e-01, 1.0115e-01, -4.1290e-02,
                                    -3.0980e-01, 2.1607e-01, 4.8420e-01, -1.9267e-01]]).astype(np.float32).reshape(
-            [1, -1])
+        [1, -1])

         whh_reverse_l0 = np.array([[-0.3231, -0.3960],
                                    [-0.1625, -0.3032],

@@ -662,7 +661,7 @@ class Net(nn.Cell):
                                    [-0.4520, 0.4201, -0.2374, -0.1556, -0.4175, -0.6834, 0.3096, -0.1581, 0.0127, 0.6872],
                                    [0.1788, -0.5442, -0.3675, -0.2887, -0.3004, 0.5813, 0.1618, 0.6875, -0.4678, 0.0071],
                                    [-0.6453, -0.2528, 0.5675, -0.5154, -0.4129, -0.0214, 0.5539, 0.0343, 0.1712, 0.5644]]).astype(
-            np.float32).reshape([1, -1])
+        np.float32).reshape([1, -1])

         whh_reverse_l0 = np.array([[-0.6657, 0.6330],
                                    [-0.2290, 0.6556],

@@ -927,7 +926,7 @@ class LstmNetWithDropout(nn.Cell):
                         [0.5142, 0.0790, -0.1123, -0.2351, 0.3982, -0.6351, 0.5906, 0.3917, -0.0850, -0.5397],
                         [-0.4795, -0.6576, 0.5693, 0.0047, -0.6626, 0.1013, -0.4015, -0.4040, -0.2817, 0.4430],
                         [0.0251, -0.3035, -0.6026, 0.2693, -0.2749, 0.1501, -0.5778, 0.5570, -0.7065, -0.6196]]).astype(
-            np.float32).reshape([1, -1])
+        np.float32).reshape([1, -1])

         whh = np.array([[-0.4344, -0.2529],
                         [0.0377, 0.7046],
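Note: the five LSTM hunks above change only leading whitespace, so the old and new lines read identically here; they re-align continuation lines of long weight literals, the layout enforced by pylint's bad-continuation check (C0330). The exact indent widths shown are reconstructed. Illustrative of an aligned continuation:

    import numpy as np

    whh = np.array([[0.6346, -0.6366],
                    [-0.0248, -0.6156]])  # row aligned under the opening bracket
    assert whh.shape == (2, 2)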
@@ -53,15 +53,15 @@ def test_maximum():
     error = np.ones(shape=[1, 3]) * 1.0e-5

     context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
-    max = Net()
-    output = max(x, y)
+    max_op = Net()
+    output = max_op(x, y)
     diff = output.asnumpy() - expect
     assert np.all(diff < error)
     assert np.all(-diff < error)

     context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
-    max = Net()
-    output = max(x, y)
+    max_op_2 = Net()
+    output = max_op_2(x, y)
     diff = output.asnumpy() - expect
     assert np.all(diff < error)
     assert np.all(-diff < error)
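Note: max = Net() rebound the builtin max for the rest of the module, pylint's W0622 (redefined-builtin); renaming the instances restores the builtin. Illustrative:

    def max_op(a, b):  # was bound to the name "max", hiding the builtin
        return a if a > b else b


    assert max_op(2, 5) == 5
    assert max(2, 5) == 5  # the builtin remains available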
@@ -19,7 +19,6 @@ import pytest
 import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
-from mindspore.ops import operations as P
 from mindspore.ops.operations import _grad_ops as G

@@ -16,7 +16,6 @@
 import numpy as np
 import pytest

-import mindspore.common.dtype as mstype
 import mindspore.context as context
 from mindspore.common.tensor import Tensor
 from mindspore.nn import Cell
@@ -57,17 +57,12 @@ def test_momentum():
     train_network = TrainOneStepCell(net_with_criterion, optimizer)  # optimizer
     train_network.set_train()
     losses = []
-    for i in range(epoch):
+    for _ in range(epoch):
         data = Tensor(np.arange(0, 16).reshape(1, 1, 4, 4).astype(np.float32) * 0.01)
         label = Tensor(np.array([0]).astype(np.int32))
         loss = train_network(data, label)
         losses.append(loss)

-    """
-    expect output:
-    [[0.04132498 0.00874167 0.00874167 0.00874167 0.00874167
-      0.00874167 0.00874167 0.00874167 0.00874167 0.00874167]]
-    """
-    error = np.ones(shape=[1, 10]) * 1.0e-6
+    _ = np.ones(shape=[1, 10]) * 1.0e-6

     return losses
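Note: here the unused tolerance (previously error, W0612 unused-variable) is kept but bound to "_", and its "expect output" docstring is dropped. Assigning to "_" keeps the expression while telling the linter the result is deliberately ignored; arguably the line could be deleted outright, since it does not affect the returned losses. The pattern:

    import numpy as np

    _ = np.ones(shape=[1, 10]) * 1.0e-6  # computed, then deliberately discarded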
@@ -64,35 +64,35 @@ def test_mul():
     diff0 = output0.asnumpy() - expect0
     error0 = np.ones(shape=expect0.shape) * 1.0e-5
     assert np.all(diff0 < error0)
-    assert (output0.shape() == expect0.shape)
+    assert output0.shape() == expect0.shape

     output1 = mul(x1, y1)
     expect1 = np.multiply(x1_np, y1_np)
     diff1 = output1.asnumpy() - expect1
     error1 = np.ones(shape=expect1.shape) * 1.0e-5
     assert np.all(diff1 < error1)
-    assert (output1.shape() == expect1.shape)
+    assert output1.shape() == expect1.shape

     output2 = mul(x2, y2)
     expect2 = np.multiply(x2_np, y2_np)
     diff2 = output2.asnumpy() - expect2
     error2 = np.ones(shape=expect2.shape) * 1.0e-5
     assert np.all(diff2 < error2)
-    assert (output2.shape() == expect2.shape)
+    assert output2.shape() == expect2.shape

     output3 = mul(x3, y3)
     expect3 = np.multiply(x3_np, y3_np)
     diff3 = output3.asnumpy() - expect3
     error3 = np.ones(shape=expect3.shape) * 1.0e-5
     assert np.all(diff3 < error3)
-    assert (output3.shape() == expect3.shape)
+    assert output3.shape() == expect3.shape

     output4 = mul(x4, y4)
     expect4 = np.multiply(x4_np, y4_np)
     diff4 = output4.asnumpy() - expect4
     error4 = np.ones(shape=expect4.shape) * 1.0e-5
     assert np.all(diff4 < error4)
-    assert (output4.shape() == expect4.shape)
+    assert output4.shape() == expect4.shape

     context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
     mul = NetMul()
@@ -101,32 +101,32 @@ def test_mul():
     diff0 = output0.asnumpy() - expect0
     error0 = np.ones(shape=expect0.shape) * 1.0e-5
     assert np.all(diff0 < error0)
-    assert (output0.shape() == expect0.shape)
+    assert output0.shape() == expect0.shape

     output1 = mul(x1, y1)
     expect1 = np.multiply(x1_np, y1_np)
     diff1 = output1.asnumpy() - expect1
     error1 = np.ones(shape=expect1.shape) * 1.0e-5
     assert np.all(diff1 < error1)
-    assert (output1.shape() == expect1.shape)
+    assert output1.shape() == expect1.shape

     output2 = mul(x2, y2)
     expect2 = np.multiply(x2_np, y2_np)
     diff2 = output2.asnumpy() - expect2
     error2 = np.ones(shape=expect2.shape) * 1.0e-5
     assert np.all(diff2 < error2)
-    assert (output2.shape() == expect2.shape)
+    assert output2.shape() == expect2.shape

     output3 = mul(x3, y3)
     expect3 = np.multiply(x3_np, y3_np)
     diff3 = output3.asnumpy() - expect3
     error3 = np.ones(shape=expect3.shape) * 1.0e-5
     assert np.all(diff3 < error3)
-    assert (output3.shape() == expect3.shape)
+    assert output3.shape() == expect3.shape

     output4 = mul(x4, y4)
     expect4 = np.multiply(x4_np, y4_np)
     diff4 = output4.asnumpy() - expect4
     error4 = np.ones(shape=expect4.shape) * 1.0e-5
     assert np.all(diff4 < error4)
-    assert (output4.shape() == expect4.shape)
+    assert output4.shape() == expect4.shape
@@ -49,19 +49,19 @@ def test_neg():
     output0 = neg(x0)
     diff0 = output0.asnumpy() - expect0
     assert np.all(diff0 < error0)
-    assert (output0.shape() == expect0.shape)
+    assert output0.shape() == expect0.shape
     output1 = neg(x1)
     diff1 = output1.asnumpy() - expect1
     assert np.all(diff1 < error1)
-    assert (output1.shape() == expect1.shape)
+    assert output1.shape() == expect1.shape

     context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
     neg = NetNeg()
     output0 = neg(x0)
     diff0 = output0.asnumpy() - expect0
     assert np.all(diff0 < error0)
-    assert (output0.shape() == expect0.shape)
+    assert output0.shape() == expect0.shape
     output1 = neg(x1)
     diff1 = output1.asnumpy() - expect1
     assert np.all(diff1 < error1)
-    assert (output1.shape() == expect1.shape)
+    assert output1.shape() == expect1.shape
@@ -20,7 +20,6 @@ import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.common.api import ms_function
-from mindspore.ops import operations as P

 context.set_context(device_target='GPU')

@@ -19,9 +19,6 @@ import pytest
 import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
-from mindspore.common.api import ms_function
-from mindspore.common.initializer import initializer
-from mindspore.common.parameter import Parameter
 from mindspore.ops import operations as P

@@ -67,35 +64,35 @@ def test_real_div():
     diff0 = output0.asnumpy() - expect0
     error0 = np.ones(shape=expect0.shape) * 1.0e-5
     assert np.all(diff0 < error0)
-    assert (output0.shape() == expect0.shape)
+    assert output0.shape() == expect0.shape

     output1 = real_div(x1, y1)
     expect1 = np.divide(x1_np, y1_np)
     diff1 = output1.asnumpy() - expect1
     error1 = np.ones(shape=expect1.shape) * 1.0e-5
     assert np.all(diff1 < error1)
-    assert (output1.shape() == expect1.shape)
+    assert output1.shape() == expect1.shape

     output2 = real_div(x2, y2)
     expect2 = np.divide(x2_np, y2_np)
     diff2 = output2.asnumpy() - expect2
     error2 = np.ones(shape=expect2.shape) * 1.0e-5
     assert np.all(diff2 < error2)
-    assert (output2.shape() == expect2.shape)
+    assert output2.shape() == expect2.shape

     output3 = real_div(x3, y3)
     expect3 = np.divide(x3_np, y3_np)
     diff3 = output3.asnumpy() - expect3
     error3 = np.ones(shape=expect3.shape) * 1.0e-5
     assert np.all(diff3 < error3)
-    assert (output3.shape() == expect3.shape)
+    assert output3.shape() == expect3.shape

     output4 = real_div(x4, y4)
     expect4 = np.divide(x4_np, y4_np)
     diff4 = output4.asnumpy() - expect4
     error4 = np.ones(shape=expect4.shape) * 1.0e-5
     assert np.all(diff4 < error4)
-    assert (output4.shape() == expect4.shape)
+    assert output4.shape() == expect4.shape

     context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
     real_div = NetRealDiv()
@@ -104,32 +101,32 @@ def test_real_div():
     diff0 = output0.asnumpy() - expect0
     error0 = np.ones(shape=expect0.shape) * 1.0e-5
     assert np.all(diff0 < error0)
-    assert (output0.shape() == expect0.shape)
+    assert output0.shape() == expect0.shape

     output1 = real_div(x1, y1)
     expect1 = np.divide(x1_np, y1_np)
     diff1 = output1.asnumpy() - expect1
     error1 = np.ones(shape=expect1.shape) * 1.0e-5
     assert np.all(diff1 < error1)
-    assert (output1.shape() == expect1.shape)
+    assert output1.shape() == expect1.shape

     output2 = real_div(x2, y2)
     expect2 = np.divide(x2_np, y2_np)
     diff2 = output2.asnumpy() - expect2
     error2 = np.ones(shape=expect2.shape) * 1.0e-5
     assert np.all(diff2 < error2)
-    assert (output2.shape() == expect2.shape)
+    assert output2.shape() == expect2.shape

     output3 = real_div(x3, y3)
     expect3 = np.divide(x3_np, y3_np)
     diff3 = output3.asnumpy() - expect3
     error3 = np.ones(shape=expect3.shape) * 1.0e-5
     assert np.all(diff3 < error3)
-    assert (output3.shape() == expect3.shape)
+    assert output3.shape() == expect3.shape

     output4 = real_div(x4, y4)
     expect4 = np.divide(x4_np, y4_np)
     diff4 = output4.asnumpy() - expect4
     error4 = np.ones(shape=expect4.shape) * 1.0e-5
     assert np.all(diff4 < error4)
-    assert (output4.shape() == expect4.shape)
+    assert output4.shape() == expect4.shape
@@ -49,19 +49,19 @@ def test_Reciprocal():
     output0 = reciprocal(x0)
     diff0 = output0.asnumpy() - expect0
     assert np.all(diff0 < error0)
-    assert (output0.shape() == expect0.shape)
+    assert output0.shape() == expect0.shape
     output1 = reciprocal(x1)
     diff1 = output1.asnumpy() - expect1
     assert np.all(diff1 < error1)
-    assert (output1.shape() == expect1.shape)
+    assert output1.shape() == expect1.shape

     context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
     reciprocal = NetReciprocal()
     output0 = reciprocal(x0)
     diff0 = output0.asnumpy() - expect0
     assert np.all(diff0 < error0)
-    assert (output0.shape() == expect0.shape)
+    assert output0.shape() == expect0.shape
     output1 = reciprocal(x1)
     diff1 = output1.asnumpy() - expect1
     assert np.all(diff1 < error1)
-    assert (output1.shape() == expect1.shape)
+    assert output1.shape() == expect1.shape
@@ -128,43 +128,43 @@ def test_ReduceMax():
     diff0 = abs(output[0].asnumpy() - expect0)
     error0 = np.ones(shape=expect0.shape) * 1.0e-5
     assert np.all(diff0 < error0)
-    assert (output[0].shape() == expect0.shape)
+    assert output[0].shape() == expect0.shape

     expect1 = np.max(x1, axis=axis1, keepdims=keep_dims1)
     diff1 = abs(output[1].asnumpy() - expect1)
     error1 = np.ones(shape=expect1.shape) * 1.0e-5
     assert np.all(diff1 < error1)
-    assert (output[1].shape() == expect1.shape)
+    assert output[1].shape() == expect1.shape

     expect2 = np.max(x2, axis=axis2, keepdims=keep_dims2)
     diff2 = abs(output[2].asnumpy() - expect2)
     error2 = np.ones(shape=expect2.shape) * 1.0e-5
     assert np.all(diff2 < error2)
-    assert (output[2].shape() == expect2.shape)
+    assert output[2].shape() == expect2.shape

     expect3 = np.max(x3, axis=axis3, keepdims=keep_dims3)
     diff3 = abs(output[3].asnumpy() - expect3)
     error3 = np.ones(shape=expect3.shape) * 1.0e-5
     assert np.all(diff3 < error3)
-    assert (output[3].shape() == expect3.shape)
+    assert output[3].shape() == expect3.shape

     expect4 = np.max(x4, axis=np_axis4, keepdims=keep_dims4)
     diff4 = abs(output[4].asnumpy() - expect4)
     error4 = np.ones(shape=expect4.shape) * 1.0e-5
     assert np.all(diff4 < error4)
-    assert (output[4].shape() == expect4.shape)
+    assert output[4].shape() == expect4.shape

     expect5 = np.max(x5, axis=np_axis5, keepdims=keep_dims5)
     diff5 = abs(output[5].asnumpy() - expect5)
     error5 = np.ones(shape=expect5.shape) * 1.0e-5
     assert np.all(diff5 < error5)
-    assert (output[5].shape() == expect5.shape)
+    assert output[5].shape() == expect5.shape

     expect6 = np.max(x6, axis=axis6, keepdims=keep_dims6)
     diff6 = abs(output[6].asnumpy() - expect6)
     error6 = np.ones(shape=expect6.shape) * 1.0e-5
     assert np.all(diff6 < error6)
-    assert (output[6].shape() == expect6.shape)
+    assert output[6].shape() == expect6.shape

     expect7 = np.max(x7, axis=axis7, keepdims=keep_dims7)
     diff7 = abs(output[7].asnumpy() - expect7)
@@ -180,88 +180,88 @@ def test_ReduceMean():
     diff0 = abs(output[0].asnumpy() - expect0)
     error0 = np.ones(shape=expect0.shape) * 1.0e-5
     assert np.all(diff0 < error0)
-    assert (output[0].shape() == expect0.shape)
+    assert output[0].shape() == expect0.shape

     expect1 = np.mean(x1, axis=axis1, keepdims=keep_dims1)
     diff1 = abs(output[1].asnumpy() - expect1)
     error1 = np.ones(shape=expect1.shape) * 1.0e-5
     assert np.all(diff1 < error1)
-    assert (output[1].shape() == expect1.shape)
+    assert output[1].shape() == expect1.shape

     expect2 = np.mean(x2, axis=axis2, keepdims=keep_dims2)
     diff2 = abs(output[2].asnumpy() - expect2)
     error2 = np.ones(shape=expect2.shape) * 1.0e-5
     assert np.all(diff2 < error2)
-    assert (output[2].shape() == expect2.shape)
+    assert output[2].shape() == expect2.shape

     expect3 = np.mean(x3, axis=axis3, keepdims=keep_dims3)
     diff3 = abs(output[3].asnumpy() - expect3)
     error3 = np.ones(shape=expect3.shape) * 1.0e-5
     assert np.all(diff3 < error3)
-    assert (output[3].shape() == expect3.shape)
+    assert output[3].shape() == expect3.shape

     expect4 = np.mean(x4, axis=axis4, keepdims=keep_dims4)
     diff4 = abs(output[4].asnumpy() - expect4)
     error4 = np.ones(shape=expect4.shape) * 1.0e-5
     assert np.all(diff4 < error4)
-    assert (output[4].shape() == expect4.shape)
+    assert output[4].shape() == expect4.shape

     expect5 = np.mean(x5, axis=axis5, keepdims=keep_dims5)
     diff5 = abs(output[5].asnumpy() - expect5)
     error5 = np.ones(shape=expect5.shape) * 1.0e-5
     assert np.all(diff5 < error5)
-    assert (output[5].shape() == expect5.shape)
+    assert output[5].shape() == expect5.shape

     expect6 = np.mean(x6, axis=axis6, keepdims=keep_dims6)
     diff6 = abs(output[6].asnumpy() - expect6)
     error6 = np.ones(shape=expect6.shape) * 1.0e-5
     assert np.all(diff6 < error6)
-    assert (output[6].shape() == expect6.shape)
+    assert output[6].shape() == expect6.shape

     expect7 = np.mean(x7, axis=axis7, keepdims=keep_dims7)
     diff7 = abs(output[7].asnumpy() - expect7)
     error7 = np.ones(shape=expect7.shape) * 1.0e-5
     assert np.all(diff7 < error7)
-    assert (output[7].shape() == expect7.shape)
+    assert output[7].shape() == expect7.shape

     expect8 = np.mean(x8, axis=axis8, keepdims=keep_dims8)
|
expect8 = np.mean(x8, axis=axis8, keepdims=keep_dims8)
|
||||||
diff8 = abs(output[8].asnumpy() - expect8)
|
diff8 = abs(output[8].asnumpy() - expect8)
|
||||||
error8 = np.ones(shape=expect8.shape) * 1.0e-5
|
error8 = np.ones(shape=expect8.shape) * 1.0e-5
|
||||||
assert np.all(diff8 < error8)
|
assert np.all(diff8 < error8)
|
||||||
assert (output[8].shape() == expect8.shape)
|
assert output[8].shape() == expect8.shape
|
||||||
|
|
||||||
expect9 = np.mean(x9, axis=axis9, keepdims=keep_dims9)
|
expect9 = np.mean(x9, axis=axis9, keepdims=keep_dims9)
|
||||||
diff9 = abs(output[9].asnumpy() - expect9)
|
diff9 = abs(output[9].asnumpy() - expect9)
|
||||||
error9 = np.ones(shape=expect9.shape) * 1.0e-5
|
error9 = np.ones(shape=expect9.shape) * 1.0e-5
|
||||||
assert np.all(diff9 < error9)
|
assert np.all(diff9 < error9)
|
||||||
assert (output[9].shape() == expect9.shape)
|
assert output[9].shape() == expect9.shape
|
||||||
|
|
||||||
expect10 = np.mean(x10, axis=axis10, keepdims=keep_dims10)
|
expect10 = np.mean(x10, axis=axis10, keepdims=keep_dims10)
|
||||||
diff10 = abs(output[10].asnumpy() - expect10)
|
diff10 = abs(output[10].asnumpy() - expect10)
|
||||||
error10 = np.ones(shape=expect10.shape) * 1.0e-5
|
error10 = np.ones(shape=expect10.shape) * 1.0e-5
|
||||||
assert np.all(diff10 < error10)
|
assert np.all(diff10 < error10)
|
||||||
assert (output[10].shape() == expect10.shape)
|
assert output[10].shape() == expect10.shape
|
||||||
|
|
||||||
expect11 = np.mean(x11, axis=axis11, keepdims=keep_dims11)
|
expect11 = np.mean(x11, axis=axis11, keepdims=keep_dims11)
|
||||||
diff11 = abs(output[11].asnumpy() - expect11)
|
diff11 = abs(output[11].asnumpy() - expect11)
|
||||||
error11 = np.ones(shape=expect11.shape) * 1.0e-5
|
error11 = np.ones(shape=expect11.shape) * 1.0e-5
|
||||||
assert np.all(diff11 < error11)
|
assert np.all(diff11 < error11)
|
||||||
assert (output[11].shape() == expect11.shape)
|
assert output[11].shape() == expect11.shape
|
||||||
|
|
||||||
expect12 = np.mean(x12, axis=axis12, keepdims=keep_dims12)
|
expect12 = np.mean(x12, axis=axis12, keepdims=keep_dims12)
|
||||||
diff12 = abs(output[12].asnumpy() - expect12)
|
diff12 = abs(output[12].asnumpy() - expect12)
|
||||||
error12 = np.ones(shape=expect12.shape) * 1.0e-5
|
error12 = np.ones(shape=expect12.shape) * 1.0e-5
|
||||||
assert np.all(diff12 < error12)
|
assert np.all(diff12 < error12)
|
||||||
assert (output[12].shape() == expect12.shape)
|
assert output[12].shape() == expect12.shape
|
||||||
|
|
||||||
expect13 = np.mean(x13, axis=axis13, keepdims=keep_dims13)
|
expect13 = np.mean(x13, axis=axis13, keepdims=keep_dims13)
|
||||||
diff13 = abs(output[13].asnumpy() - expect13)
|
diff13 = abs(output[13].asnumpy() - expect13)
|
||||||
error13 = np.ones(shape=expect13.shape) * 1.0e-5
|
error13 = np.ones(shape=expect13.shape) * 1.0e-5
|
||||||
assert np.all(diff13 < error13)
|
assert np.all(diff13 < error13)
|
||||||
assert (output[13].shape() == expect13.shape)
|
assert output[13].shape() == expect13.shape
|
||||||
|
|
||||||
expect14 = np.mean(x14, axis=np_axis14, keepdims=keep_dims14)
|
expect14 = np.mean(x14, axis=np_axis14, keepdims=keep_dims14)
|
||||||
diff14 = abs(output[14].asnumpy() - expect14)
|
diff14 = abs(output[14].asnumpy() - expect14)
|
||||||
error14 = np.ones(shape=expect14.shape) * 1.0e-5
|
error14 = np.ones(shape=expect14.shape) * 1.0e-5
|
||||||
assert np.all(diff14 < error14)
|
assert np.all(diff14 < error14)
|
||||||
assert (output[14].shape() == expect14.shape)
|
assert output[14].shape() == expect14.shape
|
||||||
|
|
|
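The assertion change repeated through these hunks drops the parentheses that pylint flags as C0325 (superfluous-parens): assert is a statement, not a function, so wrapping its expression adds nothing. A minimal runnable sketch of the pattern, using plain NumPy in place of the tests' MindSpore tensors:

import numpy as np

output = np.max(np.arange(6).reshape(2, 3), axis=1, keepdims=True)
expect = np.array([[2], [5]])

# assert (np.all(output == expect))   # pylint C0325: superfluous-parens
assert np.all(output == expect)       # same check, no warning
assert output.shape == expect.shape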

@@ -19,7 +19,6 @@ import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.ops.operations import _grad_ops as G

@@ -42,9 +41,9 @@ def test_relu6_grad():
dy = Tensor(np.array([[[[1, 1, 1],
[1, 1, 1],
[1, 1, 1]]]]).astype(np.float32))
expect = np.array([[[[0, 1, 0, ],
expect = np.array([[[[0, 1, 0,],
[1, 0, 0, ],
[1, 0, 0,],
[0, 1, 0, ]]]]).astype(np.float32)
[0, 1, 0,]]]]).astype(np.float32)
error = np.ones(shape=[3, 3]) * 1.0e-6

context.set_context(mode=context.GRAPH_MODE, device_target="GPU")

@@ -38,8 +38,8 @@ def test_relu6():
x = Tensor(np.array([[[[-1, 1, 10],
[5.9, 6.1, 6],
[10, 1, -1]]]]).astype(np.float32))
expect = np.array([[[[0, 1, 6, ],
expect = np.array([[[[0, 1, 6,],
[5.9, 6, 6, ],
[5.9, 6, 6,],
[6, 1, 0.]]]]).astype(np.float32)

context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")

@@ -38,8 +38,8 @@ def test_relu():
x = Tensor(np.array([[[[-1, 1, 10],
[1, -1, 1],
[10, 1, -1]]]]).astype(np.float32))
expect = np.array([[[[0, 1, 10, ],
expect = np.array([[[[0, 1, 10,],
[1, 0, 1, ],
[1, 0, 1,],
[10, 1, 0.]]]]).astype(np.float32)

context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")

@@ -27,8 +27,8 @@ class Net(nn.Cell):
super(Net, self).__init__()
self.select = P.Select()

def construct(self, cond, x, y):
def construct(self, cond, input_x, input_y):
return self.select(cond, x, y)
return self.select(cond, input_x, input_y)


cond = np.array([[True, False], [True, False]]).astype(np.bool)
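Renaming the construct arguments away from x and y avoids pylint W0621 (redefined-outer-name), since the same test module also binds x and y at module scope. A minimal sketch of that warning, with select_where as a hypothetical stand-in for the Select cell:

import numpy as np

x = np.array([[1.2, 1.0], [1.0, 0.0]], dtype=np.float32)  # module-level name

# A parameter named `x` here would shadow the module-level `x` above and
# trigger pylint W0621 (redefined-outer-name); distinct names avoid it.
def select_where(cond, input_x, input_y):
    return np.where(cond, input_x, input_y)

mask = np.array([[True, False], [True, False]])
print(select_where(mask, x, np.zeros_like(x)))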

@@ -41,8 +41,8 @@ def test_slice():
[[4., -4., 4.]]]

context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
slice = Slice()
slice_op = Slice()
output = slice(x)
output = slice_op(x)
assert (output.asnumpy() == expect).all()
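slice is a Python builtin, so binding the network instance to that name draws pylint W0622 (redefined-builtin); slice_op leaves the builtin untouched. A minimal sketch, with a plain class standing in for the test's nn.Cell wrapper:

class Slice:
    """Stand-in for the test's Slice cell: returns a fixed sub-range."""
    def __call__(self, seq):
        return seq[1:3]

slice_op = Slice()             # `slice = Slice()` would shadow the builtin
print(slice_op([1, 2, 3, 4]))  # [2, 3]
print(slice(1, 3))             # the builtin slice object still works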

@@ -20,7 +20,6 @@ import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.ops import operations as P
from mindspore.ops.operations import _grad_ops as G

context.set_context(device_target='GPU')

@@ -19,10 +19,6 @@ import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter


class NetSoftmaxCrossEntropyWithLogits(nn.Cell):
def __init__(self):
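The import-only hunks in this commit all serve pylint W0611 (unused-import): names imported but never referenced anywhere in the module are deleted. The shape of the fix, using stdlib modules so the illustration is self-contained:

import os            # referenced below, so it stays
# import sys         # never used -> pylint W0611 (unused-import), removed

print(os.path.basename(os.getcwd()))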

@@ -116,7 +116,7 @@ def test_softmax_4d():
[-1.5992336e-01, -5.9647644e-01, 1.2957820e+00, -1.0650631e-01, 7.0879894e-01],
[4.1372257e-01, 3.6408889e-01, -6.3091749e-01, 1.0573713e+00, 1.0981073e+00],
[-1.9162457e-01, 3.6392561e-05, -1.8338780e-01, 1.7549801e+00, -9.3534666e-01]]]]).astype(
np.float32)

dy = np.array([[[[2.98213929e-01, 3.10518718e+00, -1.64306939e-01, -7.33681679e-01, 5.23136854e-02],
[-3.47142726e-01, -1.52662742e+00, 5.26977003e-01, 5.29672280e-02, -4.34386432e-01],

@@ -142,7 +142,7 @@ def test_softmax_4d():
[4.14457440e-01, -8.74118507e-01, -4.21902031e-01, 7.87168801e-01, -1.48280108e+00],
[1.42688036e+00, -2.02695489e+00, 9.26816165e-01, 9.37691629e-01, 7.85577714e-01],
[-6.59893751e-01, 1.14681525e-02, -5.79456389e-01, -1.65206456e+00, 4.37116653e-01]]]]).astype(
np.float32)

expect_x = np.array([[[[0.21919312, 0.3903627, 0.12594244, 0.07031325, 0.19418849],
[0.19778392, 0.36304963, 0.16719443, 0.1646197, 0.10735231],

@@ -19,8 +19,6 @@ import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import ms_function


class NetSparseSoftmaxCrossEntropyWithLogits(nn.Cell):
def __init__(self):

@@ -17,7 +17,6 @@ import numpy as np
import pytest

import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P

@@ -76,19 +76,19 @@ def test_Sub():
output4 = sub(x4, y4)
diff0 = output0.asnumpy() - expect0
assert np.all(diff0 < error0)
assert (output0.shape() == expect0.shape)
assert output0.shape() == expect0.shape
diff1 = output1.asnumpy() - expect1
assert np.all(diff1 < error1)
assert (output1.shape() == expect1.shape)
assert output1.shape() == expect1.shape
diff2 = output2.asnumpy() - expect2
assert np.all(diff2 < error2)
assert (output2.shape() == expect2.shape)
assert output2.shape() == expect2.shape
diff3 = output3.asnumpy() - expect3
assert np.all(diff3 < error3)
assert (output3.shape() == expect3.shape)
assert output3.shape() == expect3.shape
diff4 = output4.asnumpy() - expect4
assert np.all(diff4 < error4)
assert (output4.shape() == expect4.shape)
assert output4.shape() == expect4.shape

context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
sub = Net()

@@ -99,16 +99,16 @@ def test_Sub():
output4 = sub(x4, y4)
diff0 = output0.asnumpy() - expect0
assert np.all(diff0 < error0)
assert (output0.shape() == expect0.shape)
assert output0.shape() == expect0.shape
diff1 = output1.asnumpy() - expect1
assert np.all(diff1 < error1)
assert (output1.shape() == expect1.shape)
assert output1.shape() == expect1.shape
diff2 = output2.asnumpy() - expect2
assert np.all(diff2 < error2)
assert (output2.shape() == expect2.shape)
assert output2.shape() == expect2.shape
diff3 = output3.asnumpy() - expect3
assert np.all(diff3 < error3)
assert (output3.shape() == expect3.shape)
assert output3.shape() == expect3.shape
diff4 = output4.asnumpy() - expect4
assert np.all(diff4 < error4)
assert (output4.shape() == expect4.shape)
assert output4.shape() == expect4.shape

@@ -65,16 +65,16 @@ def test_tile():
diff0 = output[0].asnumpy() - expect0
error0 = np.ones(shape=expect0.shape) * 1.0e-5
assert np.all(diff0 < error0)
assert (output[0].shape() == expect0.shape)
assert output[0].shape() == expect0.shape

expect1 = np.tile(input_x1, mul1)
diff1 = output[1].asnumpy() - expect1
error1 = np.ones(shape=expect1.shape) * 1.0e-5
assert np.all(diff1 < error1)
assert (output[1].shape() == expect1.shape)
assert output[1].shape() == expect1.shape

expect2 = np.tile(input_x2, mul2)
diff2 = output[2].asnumpy() - expect2
error2 = np.ones(shape=expect2.shape) * 1.0e-5
assert np.all(diff2 < error2)
assert (output[2].shape() == expect2.shape)
assert output[2].shape() == expect2.shape
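These tests share one tolerance pattern: compute the reference with NumPy, bound the absolute difference by 1e-5, then compare shapes. A self-contained sketch of that pattern for the tile case, with NumPy standing in for the GPU result:

import numpy as np

input_x1 = np.array([[1.0, 2.0]], dtype=np.float32)
mul1 = (2, 2)

output = np.tile(input_x1, mul1)   # stands in for the device output
expect1 = np.tile(input_x1, mul1)  # NumPy reference

diff1 = output - expect1
error1 = np.ones(shape=expect1.shape) * 1.0e-5
assert np.all(diff1 < error1)
assert output.shape == expect1.shape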

@@ -20,9 +20,6 @@ import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common import dtype as mstype
from mindspore.common.api import ms_function
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.ops import operations as P

context.set_context(device_target='GPU')

@@ -19,9 +19,6 @@ import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.ops import operations as P

context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")

@@ -53,14 +50,14 @@ def test_ZerosLike():
diff0 = output0.asnumpy() - expect0
error0 = np.ones(shape=expect0.shape) * 1.0e-5
assert np.all(diff0 < error0)
assert (output0.shape() == expect0.shape)
assert output0.shape() == expect0.shape

output1 = zeros_like(x1)
expect1 = np.zeros_like(x1_np)
diff1 = output1.asnumpy() - expect1
error1 = np.ones(shape=expect1.shape) * 1.0e-5
assert np.all(diff1 < error1)
assert (output1.shape() == expect1.shape)
assert output1.shape() == expect1.shape

context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
zeros_like = NetZerosLike()

@@ -69,11 +66,11 @@ def test_ZerosLike():
diff0 = output0.asnumpy() - expect0
error0 = np.ones(shape=expect0.shape) * 1.0e-5
assert np.all(diff0 < error0)
assert (output0.shape() == expect0.shape)
assert output0.shape() == expect0.shape

output1 = zeros_like(x1)
expect1 = np.zeros_like(x1_np)
diff1 = output1.asnumpy() - expect1
error1 = np.ones(shape=expect1.shape) * 1.0e-5
assert np.all(diff1 < error1)
assert (output1.shape() == expect1.shape)
assert output1.shape() == expect1.shape