!23061 modify test example of probability

Merge pull request !23061 from byweng/master
i-robot 2021-09-08 07:17:06 +00:00 committed by Gitee
commit 76e8e01467
11 changed files with 34 additions and 32 deletions


@@ -32,9 +32,12 @@ class ELBO(nn.Cell):
     def construct(self, *inputs, **kwargs):
         if len(inputs) >= 2:
             x, y = inputs[0], inputs[1]
-        else:
+        elif len(inputs) >= 1:
             x = inputs[0]
             y = None
+        else:
+            x = None
+            y = None
         z, log_prob_z = self.variational(x, None, y)
         _, log_prob_x_, _, log_prob_z_ = self.generator(x, z, y)
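
The new branching means ELBO.construct now tolerates zero, one, or two positional inputs instead of assuming at least one. A minimal plain-Python sketch of just that dispatch logic (the helper name is illustrative, not part of the patch):

    def unpack_inputs(*inputs):
        """Mirror the branching above: return (x, y) for 0, 1, or 2+ inputs."""
        if len(inputs) >= 2:
            return inputs[0], inputs[1]
        if len(inputs) >= 1:
            return inputs[0], None
        return None, None

    assert unpack_inputs("img", "label") == ("img", "label")
    assert unpack_inputs("img") == ("img", None)
    assert unpack_inputs() == (None, None)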


@@ -32,7 +32,7 @@ class Net(nn.Cell):
     def construct(self, x):
         return self.det_triangle(x)
 
-@pytest.mark.level1
+@pytest.mark.level2
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
 def test_net_1D():
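
level1 and level2 are custom markers that the MindSpore CI uses as scheduling tags, not built-in pytest features. A hedged sketch of declaring and selecting such a marked test (the test name is illustrative, and marker registration in pytest.ini or conftest.py is assumed so pytest does not warn about unknown marks):

    import pytest

    @pytest.mark.level2
    @pytest.mark.platform_x86_gpu_training
    @pytest.mark.env_onecard
    def test_marker_demo():
        assert 1 + 1 == 2

    # Select only the level2 cases from the command line:
    #   pytest -m level2 test_marker_demo.py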


@@ -20,7 +20,7 @@ import mindspore.dataset as ds
 import mindspore.dataset.vision.c_transforms as CV
 import mindspore.dataset.transforms.c_transforms as C
 from mindspore.dataset.vision import Inter
-from mindspore.common import dtype as mstype
+from mindspore import dtype as mstype
 
 def create_dataset(data_path, batch_size=32, repeat_size=1,
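
The dtype alias is now imported from the package root rather than from mindspore.common. A small hedged sketch of how the alias is typically used together with the transforms import above (the TypeCast call is illustrative, not copied from the changed file):

    import mindspore.dataset.transforms.c_transforms as C
    from mindspore import dtype as mstype

    type_cast_op = C.TypeCast(mstype.int32)  # cast the label column to int32
    # dataset = dataset.map(operations=type_cast_op, input_columns="label")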


@@ -20,7 +20,7 @@ from mindspore.common.initializer import TruncatedNormal
 import mindspore.nn as nn
 from mindspore.nn import TrainOneStepCell
 from mindspore.nn.probability import bnn_layers
-from mindspore.ops import operations as P
+import mindspore.ops as ops
 from mindspore import context
 from dataset import create_dataset
@@ -71,7 +71,7 @@ class BNNLeNet5(nn.Cell):
         self.relu = nn.ReLU()
         self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
         self.flatten = nn.Flatten()
-        self.reshape = P.Reshape()
+        self.reshape = ops.Reshape()
 
     def construct(self, x):
         x = self.conv1(x)
@@ -97,7 +97,7 @@ def train_model(train_net, net, dataset):
         label = Tensor(data['label'].astype(np.int32))
         loss = train_net(train_x, label)
         output = net(train_x)
-        log_output = P.LogSoftmax(axis=1)(output)
+        log_output = ops.LogSoftmax(axis=1)(output)
         acc = np.mean(log_output.asnumpy().argmax(axis=1) == label.asnumpy())
         accs.append(acc)
         loss_sum += loss.asnumpy()
@@ -113,7 +113,7 @@ def validate_model(net, dataset):
         train_x = Tensor(data['image'].astype(np.float32))
         label = Tensor(data['label'].astype(np.int32))
         output = net(train_x)
-        log_output = P.LogSoftmax(axis=1)(output)
+        log_output = ops.LogSoftmax(axis=1)(output)
         acc = np.mean(log_output.asnumpy().argmax(axis=1) == label.asnumpy())
         accs.append(acc)
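
Across these test files the legacy alias from mindspore.ops import operations as P is replaced by the top-level mindspore.ops namespace. A hedged standalone sketch of the two operators this file touches (shapes and values are illustrative):

    import numpy as np
    import mindspore.ops as ops
    from mindspore import Tensor

    logits = Tensor(np.random.randn(2, 10).astype(np.float32))
    log_probs = ops.LogSoftmax(axis=1)(logits)   # was P.LogSoftmax(axis=1)(...)
    flat = ops.Reshape()(logits, (20,))          # was P.Reshape()(...)
    pred = log_probs.asnumpy().argmax(axis=1)    # predicted class per sample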


@@ -14,12 +14,12 @@
 # ============================================================================
 import os
-import mindspore.common.dtype as mstype
+from mindspore import dtype as mstype
 import mindspore.dataset as ds
 import mindspore.dataset.vision.c_transforms as CV
 import mindspore.nn as nn
 from mindspore import context, Tensor
-from mindspore.ops import operations as P
+import mindspore.ops as ops
 from mindspore.nn.probability.dpn import ConditionalVAE
 from mindspore.nn.probability.infer import ELBO, SVI
@@ -34,7 +34,7 @@ class Encoder(nn.Cell):
         self.fc1 = nn.Dense(1024 + num_classes, 400)
         self.relu = nn.ReLU()
         self.flatten = nn.Flatten()
-        self.concat = P.Concat(axis=1)
+        self.concat = ops.Concat(axis=1)
         self.one_hot = nn.OneHot(depth=num_classes)
 
     def construct(self, x, y):
@@ -51,7 +51,7 @@ class Decoder(nn.Cell):
         super(Decoder, self).__init__()
         self.fc2 = nn.Dense(400, 1024)
         self.sigmoid = nn.Sigmoid()
-        self.reshape = P.Reshape()
+        self.reshape = ops.Reshape()
 
     def construct(self, z):
         z = self.fc2(z)
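
The conditional encoder concatenates flattened image features with a one-hot label before the dense layer. A hedged sketch of the migrated Concat call in isolation (batch size, class count, and feature size are illustrative):

    import numpy as np
    import mindspore.nn as nn
    import mindspore.ops as ops
    from mindspore import Tensor

    x = Tensor(np.ones((4, 1024)).astype(np.float32))    # flattened 32x32 images
    y = Tensor(np.array([0, 1, 2, 3]).astype(np.int32))  # class labels
    one_hot = nn.OneHot(depth=10)
    concat = ops.Concat(axis=1)                           # was P.Concat(axis=1)
    features = concat((x, one_hot(y)))                    # shape (4, 1034)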


@@ -14,12 +14,12 @@
 # ============================================================================
 import os
-import mindspore.common.dtype as mstype
+from mindspore import dtype as mstype
 import mindspore.dataset as ds
 import mindspore.dataset.vision.c_transforms as CV
 import mindspore.nn as nn
 from mindspore import context, Tensor
-from mindspore.ops import operations as P
+import mindspore.ops as ops
 from mindspore.nn.probability.dpn import VAE
 from mindspore.nn.probability.infer import ELBO, SVI
@@ -50,7 +50,7 @@ class Decoder(nn.Cell):
         super(Decoder, self).__init__()
         self.fc1 = nn.Dense(400, 1024)
         self.sigmoid = nn.Sigmoid()
-        self.reshape = P.Reshape()
+        self.reshape = ops.Reshape()
 
     def construct(self, z):
         z = self.fc1(z)
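
For context, a hedged sketch of how the VAE, ELBO, and SVI pieces imported here are usually wired together in this kind of test; the tiny encoder and decoder are illustrative stand-ins, and the training call stays commented because it needs a real MNIST-style dataset:

    import mindspore.nn as nn
    import mindspore.ops as ops
    from mindspore.nn.probability.dpn import VAE
    from mindspore.nn.probability.infer import ELBO, SVI

    class TinyEncoder(nn.Cell):
        def __init__(self):
            super(TinyEncoder, self).__init__()
            self.flatten = nn.Flatten()
            self.fc1 = nn.Dense(1024, 400)
            self.relu = nn.ReLU()

        def construct(self, x):
            return self.relu(self.fc1(self.flatten(x)))

    class TinyDecoder(nn.Cell):
        def __init__(self):
            super(TinyDecoder, self).__init__()
            self.fc1 = nn.Dense(400, 1024)
            self.sigmoid = nn.Sigmoid()
            self.reshape = ops.Reshape()

        def construct(self, z):
            return self.reshape(self.sigmoid(self.fc1(z)), (-1, 1, 32, 32))

    vae = VAE(TinyEncoder(), TinyDecoder(), hidden_size=400, latent_size=20)
    net_loss = ELBO(latent_prior='Normal', output_prior='Bernoulli')
    optimizer = nn.Adam(params=vae.trainable_params(), learning_rate=0.001)
    vi = SVI(net_with_loss=nn.WithLossCell(vae, net_loss), optimizer=optimizer)
    # trained_vae = vi.run(train_dataset=ds_train, epochs=10)  # ds_train defined elsewhere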


@@ -21,8 +21,7 @@ import mindspore.dataset as ds
 import mindspore.dataset.vision.c_transforms as CV
 import mindspore.nn as nn
 from mindspore import context
-from mindspore.ops import operations as P
-from mindspore.ops import composite as C
+import mindspore.ops as ops
 from mindspore.nn.probability.dpn import VAE
 from mindspore.nn.probability.infer import ELBO, SVI
@@ -51,7 +50,7 @@ class Decoder(nn.Cell):
         self.fc1 = nn.Dense(400, 1024)
         self.relu = nn.ReLU()
         self.sigmoid = nn.Sigmoid()
-        self.reshape = P.Reshape()
+        self.reshape = ops.Reshape()
 
     def construct(self, z):
         z = self.fc1(z)
@@ -93,9 +92,9 @@ class VaeGan(nn.Cell):
         self.D = Discriminator()
         self.dense = nn.Dense(20, 400)
         self.vae = VAE(self.E, self.G, 400, 20)
-        self.shape = P.Shape()
-        self.normal = C.normal
-        self.to_tensor = P.ScalarToArray()
+        self.shape = ops.Shape()
+        self.normal = ops.normal
+        self.to_tensor = ops.ScalarToArray()
 
     def construct(self, x):
         recon_x, x, mu, std = self.vae(x)
@@ -111,7 +110,7 @@ class VaeGan(nn.Cell):
 class VaeGanLoss(ELBO):
     def __init__(self):
         super(VaeGanLoss, self).__init__()
-        self.zeros = P.ZerosLike()
+        self.zeros = ops.ZerosLike()
         self.mse = nn.MSELoss(reduction='sum')
 
     def construct(self, data, label):
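
The three helpers dropped from the P and C aliases are all available from mindspore.ops. A hedged sketch of how they combine when sampling latent noise for the GAN branch (shapes, values, and the seed are illustrative):

    import numpy as np
    import mindspore.ops as ops
    from mindspore import Tensor

    x = Tensor(np.zeros((8, 20)).astype(np.float32))
    shape = ops.Shape()(x)                       # static shape tuple, was P.Shape()
    mean = ops.ScalarToArray()(0.0)              # Python scalar -> 0-d Tensor, was P.ScalarToArray()
    stddev = ops.ScalarToArray()(1.0)
    noise = ops.normal(shape, mean, stddev, seed=0)  # N(0, 1) sample, was C.normal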


@@ -18,11 +18,11 @@ import mindspore.dataset.transforms.c_transforms as C
 import mindspore.dataset.vision.c_transforms as CV
 import mindspore.nn as nn
 from mindspore import context, Tensor
-from mindspore.common import dtype as mstype
+from mindspore import dtype as mstype
 from mindspore.common.initializer import TruncatedNormal
 from mindspore.dataset.vision import Inter
 from mindspore.nn.probability.toolbox.uncertainty_evaluation import UncertaintyEvaluation
-from mindspore.train.serialization import load_checkpoint, load_param_into_net
+from mindspore.train import load_checkpoint, load_param_into_net
 
 context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
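
With the serialization helpers now imported from mindspore.train, the checkpoint-restore and uncertainty-evaluation flow looks roughly like the hedged sketch below; the checkpoint path, network, and datasets are placeholders, so the calls are left commented:

    from mindspore.train import load_checkpoint, load_param_into_net
    from mindspore.nn.probability.toolbox.uncertainty_evaluation import UncertaintyEvaluation

    # network = LeNet5()                                   # defined in the test file
    # param_dict = load_checkpoint("checkpoint_lenet.ckpt")
    # load_param_into_net(network, param_dict)
    # evaluation = UncertaintyEvaluation(model=network,
    #                                    train_dataset=ds_train,
    #                                    task_type='classification',
    #                                    num_classes=10)
    # epistemic = evaluation.eval_epistemic_uncertainty(eval_images)
    # aleatoric = evaluation.eval_aleatoric_uncertainty(eval_images)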


@@ -20,7 +20,7 @@ import mindspore.dataset as ds
 import mindspore.dataset.vision.c_transforms as CV
 import mindspore.dataset.transforms.c_transforms as C
 from mindspore.dataset.vision import Inter
-from mindspore.common import dtype as mstype
+from mindspore import dtype as mstype
 
 def create_dataset(data_path, batch_size=32, repeat_size=1,
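
For completeness, a hedged sketch of the kind of MNIST pipeline a create_dataset helper with these imports typically builds; the concrete operations and parameters are illustrative, not copied from the changed file:

    import mindspore.dataset as ds
    import mindspore.dataset.vision.c_transforms as CV
    import mindspore.dataset.transforms.c_transforms as C
    from mindspore.dataset.vision import Inter
    from mindspore import dtype as mstype

    def create_dataset(data_path, batch_size=32, repeat_size=1):
        mnist_ds = ds.MnistDataset(data_path)
        resize_op = CV.Resize((32, 32), interpolation=Inter.LINEAR)
        rescale_op = CV.Rescale(1.0 / 255.0, 0.0)
        hwc2chw_op = CV.HWC2CHW()
        type_cast_op = C.TypeCast(mstype.int32)
        mnist_ds = mnist_ds.map(operations=type_cast_op, input_columns="label")
        mnist_ds = mnist_ds.map(operations=[resize_op, rescale_op, hwc2chw_op],
                                input_columns="image")
        mnist_ds = mnist_ds.batch(batch_size, drop_remainder=True)
        return mnist_ds.repeat(repeat_size)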


@@ -20,7 +20,7 @@ from mindspore.common.initializer import TruncatedNormal
 import mindspore.nn as nn
 from mindspore.nn import TrainOneStepCell, WithLossCell
 from mindspore.nn.probability import transforms, bnn_layers
-from mindspore.ops import operations as P
+import mindspore.ops as ops
 from mindspore import context
 from dataset import create_dataset
@@ -72,7 +72,7 @@ class LeNet5(nn.Cell):
         self.relu = nn.ReLU()
         self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
         self.flatten = nn.Flatten()
-        self.reshape = P.Reshape()
+        self.reshape = ops.Reshape()
 
     def construct(self, x):
         x = self.conv1(x)
@@ -98,7 +98,7 @@ def train_model(train_net, net, dataset):
         label = Tensor(data['label'].astype(np.int32))
         loss = train_net(train_x, label)
         output = net(train_x)
-        log_output = P.LogSoftmax(axis=1)(output)
+        log_output = ops.LogSoftmax(axis=1)(output)
         acc = np.mean(log_output.asnumpy().argmax(axis=1) == label.asnumpy())
         accs.append(acc)
         loss_sum += loss.asnumpy()
@@ -114,7 +114,7 @@ def validate_model(net, dataset):
         train_x = Tensor(data['image'].astype(np.float32))
         label = Tensor(data['label'].astype(np.int32))
         output = net(train_x)
-        log_output = P.LogSoftmax(axis=1)(output)
+        log_output = ops.LogSoftmax(axis=1)(output)
         acc = np.mean(log_output.asnumpy().argmax(axis=1) == label.asnumpy())
         accs.append(acc)
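
This file also imports transforms and bnn_layers, so the test presumably wraps a standard LeNet5 training cell and converts it to a Bayesian network. A hedged sketch of that whole-model conversion in the usual MindSpore pattern; the network, loss, and factor values are illustrative and left commented because they need the surrounding test setup:

    import mindspore.nn as nn
    from mindspore.nn import TrainOneStepCell, WithLossCell
    from mindspore.nn.probability import transforms

    # network = LeNet5()
    # criterion = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
    # optimizer = nn.AdamWeightDecay(params=network.trainable_params(), learning_rate=0.0001)
    # train_network = TrainOneStepCell(WithLossCell(network, criterion), optimizer)
    # bnn_transformer = transforms.TransformToBNN(train_network, dnn_factor=60000, bnn_factor=0.000001)
    # train_bnn_network = bnn_transformer.transform_to_bnn_model()  # convert every supported layer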


@@ -19,7 +19,7 @@ from mindspore.common.initializer import TruncatedNormal
 import mindspore.nn as nn
 from mindspore.nn import WithLossCell, TrainOneStepCell
 from mindspore.nn.probability import transforms
-from mindspore.ops import operations as P
+import mindspore.ops as ops
 from mindspore import context
 from dataset import create_dataset
@@ -71,7 +71,7 @@ class LeNet5(nn.Cell):
         self.relu = nn.ReLU()
         self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
         self.flatten = nn.Flatten()
-        self.reshape = P.Reshape()
+        self.reshape = ops.Reshape()
 
     def construct(self, x):
         x = self.conv1(x)
@@ -97,7 +97,7 @@ def train_model(train_net, net, dataset):
         label = Tensor(data['label'].astype(np.int32))
         loss = train_net(train_x, label)
         output = net(train_x)
-        log_output = P.LogSoftmax(axis=1)(output)
+        log_output = ops.LogSoftmax(axis=1)(output)
         acc = np.mean(log_output.asnumpy().argmax(axis=1) == label.asnumpy())
         accs.append(acc)
         loss_sum += loss.asnumpy()
@@ -113,7 +113,7 @@ def validate_model(net, dataset):
         train_x = Tensor(data['image'].astype(np.float32))
         label = Tensor(data['label'].astype(np.int32))
         output = net(train_x)
-        log_output = P.LogSoftmax(axis=1)(output)
+        log_output = ops.LogSoftmax(axis=1)(output)
         acc = np.mean(log_output.asnumpy().argmax(axis=1) == label.asnumpy())
         accs.append(acc)
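
This last file imports only transforms, which is consistent with the layer-wise variant of the conversion. A hedged sketch of converting just the Dense layers of a trained DNN into Bayesian layers (commented for the same reason as above; bnn_layers is imported here only for the example):

    import mindspore.nn as nn
    from mindspore.nn.probability import transforms, bnn_layers

    # bnn_transformer = transforms.TransformToBNN(train_network, dnn_factor=60000, bnn_factor=0.000001)
    # train_bnn_network = bnn_transformer.transform_to_bnn_layer(nn.Dense, bnn_layers.DenseReparam)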