fix the api comments

zhangxinfeng3 2020-08-21 10:19:54 +08:00
parent 11e670c54b
commit 7b0dea9184
5 changed files with 17 additions and 16 deletions

View File

@@ -22,15 +22,16 @@ from ....layer.basic import Dense, OneHot
class ConditionalVAE(Cell):
r"""
-Conditional Variational auto-encoder (CVAE).
+Conditional Variational Auto-Encoder (CVAE).
The difference with VAE is that CVAE uses labels information.
-see more details in `<http://papers.nips.cc/paper/5775-learning-structured-output-representation-using-deep-
-conditional-generative-models>`.
+see more details in `Learning Structured Output Representation using Deep Conditional Generative Models
+<http://papers.nips.cc/paper/5775-learning-structured-output-representation-using-deep-conditional-
+generative-models>`_.
Note:
When define the encoder and decoder, the shape of the encoder's output tensor and decoder's input tensor
-should be math:`(N, hidden_size)`.
+should be :math:`(N, hidden_size)`.
The latent_size should be less than or equal to the hidden_size.
Args:
@@ -42,7 +43,7 @@ class ConditionalVAE(Cell):
Inputs:
- **input_x** (Tensor) - the same shape as the input of encoder.
-- **input_y** (Tensor) - the tensor of the target data, the shape is math:`(N, 1)`.
+- **input_y** (Tensor) - the tensor of the target data, the shape is :math:`(N, 1)`.
Outputs:
- **output** (tuple) - (recon_x(Tensor), x(Tensor), mu(Tensor), std(Tensor)).
@@ -100,13 +101,13 @@ class ConditionalVAE(Cell):
Args:
sample_y (Tensor): Define the label of sample, int tensor.
generate_nums (int): The number of samples to generate.
-shape(tuple): The shape of sample, it should be math:`(generate_nums, C, H, W)` or math:`(-1, C, H, W)`.
+shape(tuple): The shape of sample, it should be (generate_nums, C, H, W) or (-1, C, H, W).
Returns:
Tensor, the generated sample.
"""
generate_nums = check_int_positive(generate_nums)
-if not isinstance(shape, tuple) or len(shape) != 4 or shape[0] != generate_nums or shape[0] != -1:
+if not isinstance(shape, tuple) or len(shape) != 4 or (shape[0] != -1 and shape[0] != generate_nums):
raise ValueError('The shape should be (generate_nums, C, H, W) or (-1, C, H, W).')
sample_z = self.normal((generate_nums, self.latent_size), self.to_tensor(0.0), self.to_tensor(1.0), seed=0)
sample_y = self.one_hot(sample_y)
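
The condition fixed in this hunk is worth spelling out: the old check `shape[0] != generate_nums or shape[0] != -1` is always true, since no integer can equal both values at once, so every call to sample raised ValueError even for valid shapes. A minimal standalone sketch of the corrected check (the helper name check_sample_shape is illustrative, not part of the API):

def check_sample_shape(shape, generate_nums):
    # Old check: `shape[0] != generate_nums or shape[0] != -1` -- one of the
    # two disjuncts is always True, so the error fired unconditionally. The
    # fixed form accepts exactly the two shapes named in the error message.
    if not isinstance(shape, tuple) or len(shape) != 4 \
            or (shape[0] != -1 and shape[0] != generate_nums):
        raise ValueError('The shape should be (generate_nums, C, H, W) or (-1, C, H, W).')

check_sample_shape((64, 1, 28, 28), 64)   # accepted: leading dim matches generate_nums
check_sample_shape((-1, 1, 28, 28), 64)   # accepted: -1 acts as a wildcard
# check_sample_shape((32, 1, 28, 28), 64) would raise ValueError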

View File

@@ -22,14 +22,14 @@ from ....layer.basic import Dense
class VAE(Cell):
r"""
-Variational auto-encoder (VAE).
+Variational Auto-Encoder (VAE).
The VAE defines a generative model, `Z` is sampled from the prior, then used to reconstruct `X` by a decoder.
-see more details in `Auto-Encoding Variational Bayes<https://arxiv.org/abs/1312.6114>`_.
+see more details in `Auto-Encoding Variational Bayes <https://arxiv.org/abs/1312.6114>`_.
Note:
When define the encoder and decoder, the shape of the encoder's output tensor and decoder's input tensor
-should be math:`(N, hidden_size)`.
+should be :math:`(N, hidden_size)`.
The latent_size should be less than or equal to the hidden_size.
Args:
@@ -88,13 +88,13 @@ class VAE(Cell):
Args:
generate_nums (int): The number of samples to generate.
-shape(tuple): The shape of sample, it should be math:`(generate_nums, C, H, W)` or math:`(-1, C, H, W)`.
+shape(tuple): The shape of sample, it should be (generate_nums, C, H, W) or (-1, C, H, W).
Returns:
Tensor, the generated sample.
"""
generate_nums = check_int_positive(generate_nums)
-if not isinstance(shape, tuple) or len(shape) != 4 or shape[0] != generate_nums or shape[0] != -1:
+if not isinstance(shape, tuple) or len(shape) != 4 or (shape[0] != -1 and shape[0] != generate_nums):
raise ValueError('The shape should be (generate_nums, C, H, W) or (-1, C, H, W).')
sample_z = self.normal((generate_nums, self.latent_size), self.to_tensor(0.0), self.to_tensor(1.0), seed=0)
sample = self._decode(sample_z)
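
The two lines above are the whole generation path: draw z from the standard-normal prior, then push it through the decoder. A framework-agnostic NumPy sketch of the same flow, where decode is a hypothetical callable standing in for the trained decoder cell:

import numpy as np

def generate_from_prior(decode, generate_nums, latent_size, seed=0):
    # z ~ N(0, I): the prior the VAE's posterior was regularized towards.
    rng = np.random.default_rng(seed)
    sample_z = rng.standard_normal((generate_nums, latent_size)).astype(np.float32)
    # The decoder maps each latent vector back to data space, e.g. (N, C, H, W).
    return decode(sample_z)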

View File

@@ -27,7 +27,7 @@ class ELBO(Cell):
the posterior distribution. It maximizes the evidence lower bound (ELBO), a lower bound on the logarithm of
the marginal probability of the observations log p(x). The ELBO is equal to the negative KL divergence up to
an additive constant.
-see more details in `Variational Inference: A Review for Statisticians<https://arxiv.org/abs/1601.00670>`_.
+see more details in `Variational Inference: A Review for Statisticians <https://arxiv.org/abs/1601.00670>`_.
Args:
latent_prior(str): The prior distribution of latent space. Default: Normal.
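
With the default Normal latent prior and a Normal posterior, the KL term inside the ELBO has a closed form. A NumPy sketch of that standard result from Auto-Encoding Variational Bayes, not code taken from this file:

import numpy as np

def gaussian_kl(mu, std):
    # KL( N(mu, std^2) || N(0, 1) ), summed over latent dimensions: the
    # regularization half of ELBO = E_q[log p(x|z)] - KL(q(z|x) || p(z)).
    return 0.5 * np.sum(std ** 2 + mu ** 2 - 1.0 - 2.0 * np.log(std), axis=-1)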

View File

@@ -28,7 +28,7 @@ class SVI:
Variational inference casts the inference problem as an optimization. Some distributions over the hidden
variables that is indexed by a set of free parameters, and then optimize the parameters to make it closest to
the posterior of interest.
-see more details in `Variational Inference: A Review for Statisticians<https://arxiv.org/abs/1601.00670>`_.
+see more details in `Variational Inference: A Review for Statisticians <https://arxiv.org/abs/1601.00670>`_.
Args:
net_with_loss(Cell): Cell with loss function.

View File

@@ -219,7 +219,7 @@ class EpistemicUncertaintyModel(Cell):
after Dense layer or Conv layer, then use dropout during train and eval time.
See more details in `Dropout as a Bayesian Approximation: Representing Model uncertainty in Deep Learning
-<https://arxiv.org/abs/1506.02142>`.
+<https://arxiv.org/abs/1506.02142>`_.
"""
def __init__(self, epi_model):
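
The MC-dropout recipe from the paper cited above reduces to: keep dropout active at evaluation time, run the model several times, and read the spread of the outputs as epistemic uncertainty. A framework-agnostic sketch, where stochastic_forward is a hypothetical callable that runs the network with dropout still enabled:

import numpy as np

def mc_dropout_predict(stochastic_forward, x, n_samples=50):
    # Each pass samples a different dropout mask, i.e. a different thinned net.
    preds = np.stack([stochastic_forward(x) for _ in range(n_samples)])
    mean = preds.mean(axis=0)        # predictive mean
    epistemic = preds.var(axis=0)    # spread across passes = model uncertainty
    return mean, epistemic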
@@ -257,7 +257,7 @@ class AleatoricUncertaintyModel(Cell):
uncertainty, the loss function should be modified in order to add variance into loss.
See more details in `What Uncertainties Do We Need in Bayesian Deep Learning for Computer Vision?
-<https://arxiv.org/abs/1703.04977>`.
+<https://arxiv.org/abs/1703.04977>`_.
"""
def __init__(self, ale_model, num_classes, task):
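
For regression, the loss modification this docstring refers to takes a standard form in the cited Kendall and Gal paper (the exact implementation here may differ): the network predicts a log-variance alongside the mean, and the loss attenuates noisy targets accordingly:

import numpy as np

def aleatoric_loss(y_true, y_pred, log_var):
    # exp(-log_var) down-weights residuals the model flags as noisy; the
    # 0.5 * log_var penalty stops it from predicting infinite noise everywhere.
    return np.mean(0.5 * np.exp(-log_var) * (y_true - y_pred) ** 2 + 0.5 * log_var)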