forked from mindspore-Ecosystem/mindspore
!5789 Add private interface specification in distribution docs
Merge pull request !5789 from XunDeng/pp_issue_branch
This commit is contained in: commit d27de69dfa
@@ -33,7 +33,7 @@ class Bernoulli(Distribution):
Note:
probs should be proper probabilities (0 < p < 1).
- Dist_spec_args is probs.
+ dist_spec_args is probs.

Examples:
>>> # To initialize a Bernoulli distribution of prob 0.5
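To make the dist_spec_args wording above concrete: probs is the only distribution-specific argument of Bernoulli, so it can either be bound when the distribution is created or supplied to each call. A minimal sketch, assuming the mindspore.nn.probability.distribution import path, the mstype alias, and the dtype keyword (all illustrative, not taken from the diff):

import mindspore.nn.probability.distribution as msd  # assumed import path
from mindspore import dtype as mstype

# probs (the dist_spec_args of Bernoulli) bound at construction time
b1 = msd.Bernoulli(0.5, dtype=mstype.int32)
# probs left unset; it must then be supplied to every method call
b2 = msd.Bernoulli(dtype=mstype.int32)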
@@ -57,32 +57,50 @@ class Bernoulli(Distribution):
>>> # All the following calls in construct are valid
>>> def construct(self, value, probs_b, probs_a):
>>>
+ >>> # Private interfaces of probability functions corresponding to public interfaces, including
+ >>> # 'prob', 'log_prob', 'cdf', 'log_cdf', 'survival_function', 'log_survival', have the form:
+ >>> # Args:
+ >>> # value (Tensor): value to be evaluated.
+ >>> # probs1 (Tensor): probability of success. Default: self.probs.
+ >>>
+ >>> # Example of prob.
>>> # Similar calls can be made to other probability functions
>>> # by replacing 'prob' with the name of the function
>>> ans = self.b1.prob(value)
>>> # Evaluate with respect to distribution b
>>> ans = self.b1.prob(value, probs_b)
- >>>
>>> # probs must be passed in during function calls
>>> ans = self.b2.prob(value, probs_a)
>>>
- >>> # Functions 'sd', 'var', 'entropy' have the same usage as 'mean'
- >>> # Will return 0.5
- >>> ans = self.b1.mean()
- >>> # Will return probs_b
- >>> ans = self.b1.mean(probs_b)
>>>
+ >>> # Functions 'sd', 'var', 'entropy' have the same args.
+ >>> # Args:
+ >>> # probs1 (Tensor): probability of success. Default: self.probs.
+ >>>
+ >>> # Example of mean. sd, var have similar usage.
+ >>> ans = self.b1.mean() # return 0.5
+ >>> ans = self.b1.mean(probs_b) # return probs_b
>>> # probs must be passed in during function calls
>>> ans = self.b2.mean(probs_a)
>>>
- >>> # Usage of 'kl_loss' and 'cross_entropy' are similar
+ >>>
+ >>> # Interfaces of 'kl_loss' and 'cross_entropy' are similar:
+ >>> # Args:
+ >>> # dist (str): name of the distribution. Only 'Bernoulli' is supported.
+ >>> # probs1_b (Tensor): probability of success of distribution b.
+ >>> # probs1_a (Tensor): probability of success of distribution a. Default: self.probs.
+ >>>
+ >>> # Example of kl_loss (cross_entropy is similar):
>>> ans = self.b1.kl_loss('Bernoulli', probs_b)
>>> ans = self.b1.kl_loss('Bernoulli', probs_b, probs_a)
- >>>
- >>> # Additional probs_a must be passed in through
+ >>> # Additional probs_a must be passed in
>>> ans = self.b2.kl_loss('Bernoulli', probs_b, probs_a)
>>>
- >>> # Sample
+ >>>
+ >>> # sample
+ >>> # Args:
+ >>> # shape (tuple): shape of the sample. Default: ()
+ >>> # probs1 (Tensor): probability of success. Default: self.probs.
>>> ans = self.b1.sample()
>>> ans = self.b1.sample((2,3))
>>> ans = self.b1.sample((2,3), probs_b)
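Taken together, the Bernoulli docstring above documents the following calling pattern. The sketch below is a hedged illustration rather than the file under review: the import path, the mstype alias, the dtype keyword, and the class name BernoulliNet are assumptions, while the method names and argument orders are the ones listed in the diff.

import mindspore.nn as nn
import mindspore.nn.probability.distribution as msd  # assumed import path
from mindspore import dtype as mstype

class BernoulliNet(nn.Cell):
    """Illustrative Cell: b1 carries its own probs, b2 expects probs on every call."""
    def __init__(self):
        super(BernoulliNet, self).__init__()
        self.b1 = msd.Bernoulli(0.5, dtype=mstype.int32)  # probs fixed at construction
        self.b2 = msd.Bernoulli(dtype=mstype.int32)       # probs supplied per call

    def construct(self, value, probs_b, probs_a):
        # prob/log_prob/cdf/log_cdf/survival_function/log_survival: value first, then optional probs1
        p_default = self.b1.prob(value)            # falls back to self.probs (0.5)
        p_other = self.b1.prob(value, probs_b)     # evaluate under probs_b
        p_required = self.b2.prob(value, probs_a)  # b2 has no default probs
        # mean/sd/var/entropy: only the optional probs1
        m_default = self.b1.mean()                 # 0.5
        m_required = self.b2.mean(probs_a)
        # kl_loss/cross_entropy: distribution name, probs of b, then optional probs of a
        kl = self.b1.kl_loss('Bernoulli', probs_b)
        # sample: shape tuple, then optional probs1
        s = self.b1.sample((2, 3), probs_b)
        return p_default, p_other, p_required, m_default, m_required, kl, s

Calling such a Cell with Tensor arguments for value, probs_b, and probs_a exercises exactly the private interfaces enumerated above.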
@@ -34,7 +34,8 @@ class Exponential(Distribution):
Note:
rate should be strictly greater than 0.
- Dist_spec_args is rate.
+ dist_spec_args is rate.
+ dtype should be float type because Exponential distributions are continuous.

Examples:
>>> # To initialize an Exponential distribution of rate 0.5
@@ -58,32 +59,50 @@ class Exponential(Distribution):
>>> # All the following calls in construct are valid
>>> def construct(self, value, rate_b, rate_a):
>>>
+ >>> # Private interfaces of probability functions corresponding to public interfaces, including
+ >>> # 'prob', 'log_prob', 'cdf', 'log_cdf', 'survival_function', 'log_survival', have the form:
+ >>> # Args:
+ >>> # value (Tensor): value to be evaluated.
+ >>> # rate (Tensor): rate of the distribution. Default: self.rate.
+ >>>
+ >>> # Example of prob.
>>> # Similar calls can be made to other probability functions
>>> # by replacing 'prob' with the name of the function
>>> ans = self.e1.prob(value)
>>> # Evaluate with respect to distribution b
>>> ans = self.e1.prob(value, rate_b)
- >>>
>>> # Rate must be passed in during function calls
>>> ans = self.e2.prob(value, rate_a)
>>>
- >>> # Functions 'sd', 'var', 'entropy' have the same usage as 'mean'
- >>> # Will return 2
- >>> ans = self.e1.mean()
- >>> # Will return 1 / rate_b
- >>> ans = self.e1.mean(rate_b)
>>>
+ >>> # Functions 'sd', 'var', 'entropy' have the same args.
+ >>> # Args:
+ >>> # rate (Tensor): rate of the distribution. Default: self.rate.
+ >>>
+ >>> # Example of mean. sd, var have similar usage.
+ >>> ans = self.e1.mean() # return 2
+ >>> ans = self.e1.mean(rate_b) # return 1 / rate_b
>>> # Rate must be passed in during function calls
>>> ans = self.e2.mean(rate_a)
>>>
- >>> # Usage of 'kl_loss' and 'cross_entropy' are similar
+ >>>
+ >>> # Interfaces of 'kl_loss' and 'cross_entropy' are similar:
+ >>> # Args:
+ >>> # dist (str): name of the distribution. Only 'Exponential' is supported.
+ >>> # rate_b (Tensor): rate of distribution b.
+ >>> # rate_a (Tensor): rate of distribution a. Default: self.rate.
+ >>>
+ >>> # Example of kl_loss (cross_entropy is similar):
>>> ans = self.e1.kl_loss('Exponential', rate_b)
>>> ans = self.e1.kl_loss('Exponential', rate_b, rate_a)
- >>>
>>> # Additional rate must be passed in
>>> ans = self.e2.kl_loss('Exponential', rate_b, rate_a)
>>>
- >>> # Sample
+ >>>
+ >>> # sample
+ >>> # Args:
+ >>> # shape (tuple): shape of the sample. Default: ()
+ >>> # probs1 (Tensor): rate of distribution. Default: self.rate.
>>> ans = self.e1.sample()
>>> ans = self.e1.sample((2,3))
>>> ans = self.e1.sample((2,3), rate_b)
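The Exponential docstring follows the same pattern with rate as its single dist_spec_args. A hedged sketch (import path, mstype alias, dtype keyword, and the name ExponentialNet are assumptions; a float dtype is chosen per the new Note):

import mindspore.nn as nn
import mindspore.nn.probability.distribution as msd  # assumed import path
from mindspore import dtype as mstype

class ExponentialNet(nn.Cell):
    """Illustrative Cell: e1 has rate fixed, e2 takes rate on each call."""
    def __init__(self):
        super(ExponentialNet, self).__init__()
        self.e1 = msd.Exponential(0.5, dtype=mstype.float32)  # continuous, so float dtype
        self.e2 = msd.Exponential(dtype=mstype.float32)

    def construct(self, value, rate_b, rate_a):
        p = self.e1.prob(value)            # uses self.rate
        p_b = self.e1.prob(value, rate_b)  # evaluate under rate_b
        m = self.e1.mean()                 # 1 / 0.5 = 2
        m_a = self.e2.mean(rate_a)         # rate is required for e2
        kl = self.e1.kl_loss('Exponential', rate_b)
        s = self.e1.sample((2, 3), rate_b)
        return p, p_b, m, m_a, kl, s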
@@ -36,7 +36,7 @@ class Geometric(Distribution):
Note:
probs should be proper probabilities (0 < p < 1).
- Dist_spec_args is probs.
+ dist_spec_args is probs.

Examples:
>>> # To initialize a Geometric distribution of prob 0.5
@@ -60,32 +60,50 @@ class Geometric(Distribution):
>>> # The following calls are valid in construct
>>> def construct(self, value, probs_b, probs_a):
>>>
+ >>> # Private interfaces of probability functions corresponding to public interfaces, including
+ >>> # 'prob', 'log_prob', 'cdf', 'log_cdf', 'survival_function', 'log_survival', have the form:
+ >>> # Args:
+ >>> # value (Tensor): value to be evaluated.
+ >>> # probs1 (Tensor): probability of success of a Bernoulli trial. Default: self.probs.
+ >>>
+ >>> # Example of prob.
>>> # Similar calls can be made to other probability functions
>>> # by replacing 'prob' with the name of the function
>>> ans = self.g1.prob(value)
>>> # Evaluate with respect to distribution b
>>> ans = self.g1.prob(value, probs_b)
- >>>
>>> # Probs must be passed in during function calls
>>> ans = self.g2.prob(value, probs_a)
>>>
- >>> # Functions 'sd', 'var', 'entropy' have the same usage as 'mean'
- >>> # Will return 1.0
- >>> ans = self.g1.mean()
- >>> # Another possible usage
- >>> ans = self.g1.mean(probs_b)
>>>
+ >>> # Functions 'sd', 'var', 'entropy' have the same args.
+ >>> # Args:
+ >>> # probs1 (Tensor): probability of success of a Bernoulli trial. Default: self.probs.
+ >>>
+ >>> # Example of mean. sd, var have similar usage.
+ >>> ans = self.g1.mean() # return 1.0
+ >>> ans = self.g1.mean(probs_b)
>>> # Probs must be passed in during function calls
>>> ans = self.g2.mean(probs_a)
>>>
- >>> # Usage of 'kl_loss' and 'cross_entropy' are similar
+ >>>
+ >>> # Interfaces of 'kl_loss' and 'cross_entropy' are similar:
+ >>> # Args:
+ >>> # dist (str): name of the distribution. Only 'Geometric' is supported.
+ >>> # probs1_b (Tensor): probability of success of a Bernoulli trial of distribution b.
+ >>> # probs1_a (Tensor): probability of success of a Bernoulli trial of distribution a. Default: self.probs.
+ >>>
+ >>> # Example of kl_loss (cross_entropy is similar):
>>> ans = self.g1.kl_loss('Geometric', probs_b)
>>> ans = self.g1.kl_loss('Geometric', probs_b, probs_a)
- >>>
>>> # Additional probs must be passed in
>>> ans = self.g2.kl_loss('Geometric', probs_b, probs_a)
>>>
- >>> # Sample
+ >>>
+ >>> # sample
+ >>> # Args:
+ >>> # shape (tuple): shape of the sample. Default: ()
+ >>> # probs1 (Tensor): probability of success of a Bernoulli trial. Default: self.probs.
>>> ans = self.g1.sample()
>>> ans = self.g1.sample((2,3))
>>> ans = self.g1.sample((2,3), probs_b)
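For Geometric, probs is the probability of success of the underlying Bernoulli trial; the kl_loss/cross_entropy interface is worth spelling out because it takes the distribution name first. A hedged sketch (import path, mstype alias, dtype keyword, and GeometricNet are assumptions):

import mindspore.nn as nn
import mindspore.nn.probability.distribution as msd  # assumed import path
from mindspore import dtype as mstype

class GeometricNet(nn.Cell):
    """Illustrative Cell: g1 has probs fixed, g2 takes probs on each call."""
    def __init__(self):
        super(GeometricNet, self).__init__()
        self.g1 = msd.Geometric(0.5, dtype=mstype.int32)
        self.g2 = msd.Geometric(dtype=mstype.int32)

    def construct(self, value, probs_b, probs_a):
        p = self.g1.prob(value)
        m = self.g1.mean()  # 1.0 when probs is 0.5
        # kl_loss/cross_entropy: distribution name, probs of b, then optional probs of a
        kl_default_a = self.g1.kl_loss('Geometric', probs_b)            # a defaults to self.probs
        kl_explicit_a = self.g1.kl_loss('Geometric', probs_b, probs_a)  # a given explicitly
        ce = self.g1.cross_entropy('Geometric', probs_b)
        kl_required_a = self.g2.kl_loss('Geometric', probs_b, probs_a)  # g2 has no default probs
        return p, m, kl_default_a, kl_explicit_a, ce, kl_required_a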
@@ -35,7 +35,8 @@ class Normal(Distribution):
Note:
Standard deviation should be greater than zero.
- Dist_spec_args are mean and sd.
+ dist_spec_args are mean and sd.
+ dtype should be float type because Normal distributions are continuous.

Examples:
>>> # To initialize a Normal distribution of mean 3.0 and standard deviation 4.0
@@ -59,32 +60,54 @@ class Normal(Distribution):
>>> # The following calls are valid in construct
>>> def construct(self, value, mean_b, sd_b, mean_a, sd_a):
>>>
+ >>> # Private interfaces of probability functions corresponding to public interfaces, including
+ >>> # 'prob', 'log_prob', 'cdf', 'log_cdf', 'survival_function', 'log_survival', have the form:
+ >>> # Args:
+ >>> # value (Tensor): value to be evaluated.
+ >>> # mean (Tensor): mean of distribution. Default: self._mean_value.
+ >>> # sd (Tensor): standard deviation of distribution. Default: self._sd_value.
+ >>>
+ >>> # Example of prob.
>>> # Similar calls can be made to other probability functions
>>> # by replacing 'prob' with the name of the function
>>> ans = self.n1.prob(value)
>>> # Evaluate with respect to distribution b
>>> ans = self.n1.prob(value, mean_b, sd_b)
- >>>
>>> # mean and sd must be passed in during function calls
>>> ans = self.n2.prob(value, mean_a, sd_a)
>>>
- >>> # Functions 'sd', 'var', 'entropy' have the same usage as 'mean'
- >>> # will return [0.0]
- >>> ans = self.n1.mean()
- >>> # will return mean_b
- >>> ans = self.n1.mean(mean_b, sd_b)
>>>
- >>> # mean and sd must be passed during function calls
+ >>> # Functions 'sd', 'var', 'entropy' have the same args.
+ >>> # Args:
+ >>> # mean (Tensor): mean of distribution. Default: self._mean_value.
+ >>> # sd (Tensor): standard deviation of distribution. Default: self._sd_value.
+ >>>
+ >>> # Example of mean. sd, var have similar usage.
+ >>> ans = self.n1.mean() # return 0.0
+ >>> ans = self.n1.mean(mean_b, sd_b) # return mean_b
+ >>> # mean and sd must be passed in during function calls
>>> ans = self.n2.mean(mean_a, sd_a)
>>>
- >>> # Usage of 'kl_loss' and 'cross_entropy' are similar
+ >>>
+ >>> # Interfaces of 'kl_loss' and 'cross_entropy' are similar:
+ >>> # Args:
+ >>> # dist (str): type of the distributions. Should be "Normal" in this case.
+ >>> # mean_b (Tensor): mean of distribution b.
+ >>> # sd_b (Tensor): standard deviation of distribution b.
+ >>> # mean_a (Tensor): mean of distribution a. Default: self._mean_value.
+ >>> # sd_a (Tensor): standard deviation of distribution a. Default: self._sd_value.
+ >>>
+ >>> # Example of kl_loss (cross_entropy is similar):
>>> ans = self.n1.kl_loss('Normal', mean_b, sd_b)
>>> ans = self.n1.kl_loss('Normal', mean_b, sd_b, mean_a, sd_a)
- >>>
- >>> # Additional mean and sd must be passed
+ >>> # Additional mean and sd must be passed in
>>> ans = self.n2.kl_loss('Normal', mean_b, sd_b, mean_a, sd_a)
>>>
- >>> # Sample
+ >>> # sample
+ >>> # Args:
+ >>> # shape (tuple): shape of the sample. Default: ()
+ >>> # mean (Tensor): mean of distribution. Default: self._mean_value.
+ >>> # sd (Tensor): standard deviation of distribution. Default: self._sd_value.
>>> ans = self.n1.sample()
>>> ans = self.n1.sample((2,3))
>>> ans = self.n1.sample((2,3), mean_b, sd_b)
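Normal has two dist_spec_args, mean and sd, so wherever a single parameter appeared above the optional tail now carries both. A hedged sketch (import path, mstype alias, dtype keyword, and NormalNet are assumptions; the 0.0/1.0 constructor values are chosen to match the 'return 0.0' comment in the diff):

import mindspore.nn as nn
import mindspore.nn.probability.distribution as msd  # assumed import path
from mindspore import dtype as mstype

class NormalNet(nn.Cell):
    """Illustrative Cell: n1 has mean/sd fixed, n2 takes them on each call."""
    def __init__(self):
        super(NormalNet, self).__init__()
        self.n1 = msd.Normal(0.0, 1.0, dtype=mstype.float32)  # mean 0.0, sd 1.0
        self.n2 = msd.Normal(dtype=mstype.float32)

    def construct(self, value, mean_b, sd_b, mean_a, sd_a):
        p = self.n1.prob(value)                  # uses self._mean_value / self._sd_value
        p_b = self.n1.prob(value, mean_b, sd_b)  # evaluate under distribution b
        m = self.n1.mean()                       # 0.0
        m_a = self.n2.mean(mean_a, sd_a)         # both args required for n2
        kl = self.n1.kl_loss('Normal', mean_b, sd_b)
        s = self.n1.sample((2, 3), mean_b, sd_b)
        return p, p_b, m, m_a, kl, s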
@@ -34,7 +34,8 @@ class Uniform(Distribution):
Note:
low should be strictly less than high.
- Dist_spec_args are high and low.
+ dist_spec_args are high and low.
+ dtype should be float type because Uniform distributions are continuous.

Examples:
>>> # To initialize a Uniform distribution of mean 3.0 and standard deviation 4.0
@@ -58,32 +59,54 @@ class Uniform(Distribution):
>>> # All the following calls in construct are valid
>>> def construct(self, value, low_b, high_b, low_a, high_a):
>>>
+ >>> # Private interfaces of probability functions corresponding to public interfaces, including
+ >>> # 'prob', 'log_prob', 'cdf', 'log_cdf', 'survival_function', 'log_survival', have the form:
+ >>> # Args:
+ >>> # value (Tensor): value to be evaluated.
+ >>> # low (Tensor): lower bound of distribution. Default: self.low.
+ >>> # high (Tensor): higher bound of distribution. Default: self.high.
+ >>>
+ >>> # Example of prob.
>>> # Similar calls can be made to other probability functions
>>> # by replacing 'prob' with the name of the function
>>> ans = self.u1.prob(value)
>>> # Evaluate with respect to distribution b
>>> ans = self.u1.prob(value, low_b, high_b)
- >>>
>>> # High and low must be passed in during function calls
>>> ans = self.u2.prob(value, low_a, high_a)
>>>
- >>> # Functions 'sd', 'var', 'entropy' have the same usage as 'mean'
- >>> # Will return 0.5
- >>> ans = self.u1.mean()
- >>> # Will return (low_b + high_b) / 2
- >>> ans = self.u1.mean(low_b, high_b)
>>>
+ >>> # Functions 'sd', 'var', 'entropy' have the same args.
+ >>> # Args:
+ >>> # low (Tensor): lower bound of distribution. Default: self.low.
+ >>> # high (Tensor): higher bound of distribution. Default: self.high.
+ >>>
+ >>> # Example of mean. sd, var have similar usage.
+ >>> ans = self.u1.mean() # return 0.5
+ >>> ans = self.u1.mean(low_b, high_b) # return (low_b + high_b) / 2
>>> # High and low must be passed in during function calls
>>> ans = self.u2.mean(low_a, high_a)
>>>
- >>> # Usage of 'kl_loss' and 'cross_entropy' are similar
+ >>> # Interfaces of 'kl_loss' and 'cross_entropy' are similar:
+ >>> # Args:
+ >>> # dist (str): type of the distributions. Should be "Uniform" in this case.
+ >>> # low_b (Tensor): lower bound of distribution b.
+ >>> # high_b (Tensor): upper bound of distribution b.
+ >>> # low_a (Tensor): lower bound of distribution a. Default: self.low.
+ >>> # high_a (Tensor): upper bound of distribution a. Default: self.high.
+ >>>
+ >>> # Example of kl_loss (cross_entropy is similar):
>>> ans = self.u1.kl_loss('Uniform', low_b, high_b)
>>> ans = self.u1.kl_loss('Uniform', low_b, high_b, low_a, high_a)
- >>>
- >>> # Additional high and low must be passed
+ >>> # Additional high and low must be passed in
>>> ans = self.u2.kl_loss('Uniform', low_b, high_b, low_a, high_a)
>>>
- >>> # Sample
+ >>>
+ >>> # sample
+ >>> # Args:
+ >>> # shape (tuple): shape of the sample. Default: ()
+ >>> # low (Tensor): lower bound of distribution. Default: self.low.
+ >>> # high (Tensor): higher bound of distribution. Default: self.high.
>>> ans = self.u1.sample()
>>> ans = self.u1.sample((2,3))
>>> ans = self.u1.sample((2,3), low_b, high_b)
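Finally, Uniform's dist_spec_args are low and high. A hedged sketch (import path, mstype alias, dtype keyword, the name UniformNet, and the 0.0/1.0 bounds are assumptions; mean() returning 0.5 then matches the diff's comment):

import mindspore.nn as nn
import mindspore.nn.probability.distribution as msd  # assumed import path
from mindspore import dtype as mstype

class UniformNet(nn.Cell):
    """Illustrative Cell: u1 has low/high fixed, u2 takes them on each call."""
    def __init__(self):
        super(UniformNet, self).__init__()
        self.u1 = msd.Uniform(0.0, 1.0, dtype=mstype.float32)  # low 0.0, high 1.0
        self.u2 = msd.Uniform(dtype=mstype.float32)

    def construct(self, value, low_b, high_b, low_a, high_a):
        p = self.u1.prob(value)                   # uses self.low / self.high
        p_b = self.u1.prob(value, low_b, high_b)  # evaluate under distribution b
        m = self.u1.mean()                        # (0.0 + 1.0) / 2 = 0.5
        m_a = self.u2.mean(low_a, high_a)         # bounds required for u2
        kl = self.u1.kl_loss('Uniform', low_b, high_b)
        # sample: shape tuple first, then the optional bounds
        s_default = self.u1.sample()              # default shape ()
        s_shaped = self.u1.sample((2, 3), low_b, high_b)
        return p, p_b, m, m_a, kl, s_default, s_shaped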