Add Gamma distribution

This commit is contained in:
peixu_ren 2020-11-15 22:14:52 -05:00
parent 0344078a9c
commit 93d4ef428f
4 changed files with 882 additions and 0 deletions


@@ -22,6 +22,7 @@ from .bernoulli import Bernoulli
from .categorical import Categorical
from .cauchy import Cauchy
from .exponential import Exponential
from .gamma import Gamma
from .geometric import Geometric
from .gumbel import Gumbel
from .logistic import Logistic
@@ -36,6 +37,7 @@ __all__ = ['Distribution',
'Categorical',
'Cauchy',
'Exponential',
'Gamma',
'Geometric',
'Gumbel',
'Logistic',


@@ -0,0 +1,338 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Gamma Distribution"""
import numpy as np
from mindspore.ops import operations as P
from mindspore.ops import composite as C
import mindspore.nn as nn
from mindspore._checkparam import Validator
from mindspore.common import dtype as mstype
from .distribution import Distribution
from ._utils.utils import check_greater_zero, check_distribution_name
from ._utils.custom_ops import log_generic
class Gamma(Distribution):
"""
Gamma distribution.
Args:
concentration (int, float, list, numpy.ndarray, Tensor, Parameter): The concentration,
also known as the alpha of the Gamma distribution.
rate (int, float, list, numpy.ndarray, Tensor, Parameter): The rate, also known as
the beta of the Gamma distribution.
seed (int): The seed used in sampling. The global seed is used if it is None. Default: None.
dtype (mindspore.dtype): The type of the event samples. Default: mstype.float32.
name (str): The name of the distribution. Default: 'Gamma'.
Note:
`concentration` and `rate` must be greater than zero.
`dist_spec_args` are `concentration` and `rate`.
`dtype` must be a float type because Gamma distributions are continuous.
Examples:
>>> # To initialize a Gamma distribution with concentration 3.0 and rate 4.0.
>>> import mindspore.nn.probability.distribution as msd
>>> g = msd.Gamma(3.0, 4.0, dtype=mstype.float32)
>>>
>>> # The following creates two independent Gamma distributions.
>>> g = msd.Gamma([3.0, 3.0], [4.0, 4.0], dtype=mstype.float32)
>>>
>>> # A Gamma distribution can be initialized without arguments.
>>> # In this case, `concentration` and `rate` must be passed in through arguments.
>>> g = msd.Gamma(dtype=mstype.float32)
>>>
>>> # To use a Gamma distribution in a network.
>>> class net(Cell):
... def __init__(self):
... super(net, self).__init__()
... self.g1 = msd.Gamma(1.0, 1.0, dtype=mstype.float32)
... self.g2 = msd.Gamma(dtype=mstype.float32)
...
... # The following calls are valid in construct.
... def construct(self, value, concentration_b, rate_b, concentration_a, rate_a):
...
... # Private interfaces of probability functions corresponding to public interfaces, including
... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments as follows.
... # Args:
... # value (Tensor): the value to be evaluated.
... # concentration (Tensor): the concentration of the distribution. Default: self._concentration.
... # rate (Tensor): the rate of the distribution. Default: self._rate.
...
... # Examples of `prob`.
... # Similar calls can be made to other probability functions
... # by replacing 'prob' with the name of the function.
... ans = self.g1.prob(value)
... # Evaluate with respect to the distribution b.
... ans = self.g1.prob(value, concentration_b, rate_b)
... # `concentration` and `rate` must be passed in during function calls
... ans = self.g2.prob(value, concentration_a, rate_a)
...
...
... # Functions `concentration`, `rate`, `mean`, `sd`, `var`, and `entropy` have the same arguments.
... # Args:
... # concentration (Tensor): the concentration of the distribution. Default: self._concentration.
... # rate (Tensor): the rate of the distribution. Default: self._rate.
...
... # Example of `concentration`; `rate`, `mean`, `sd`, `var`, and `entropy` are similar.
... ans = self.g1.concentration() # return 1.0
... ans = self.g1.concentration(concentration_b, rate_b) # return concentration_b
... # `concentration` and `rate` must be passed in during function calls.
... ans = self.g2.concentration(concentration_a, rate_a)
...
...
... # Interfaces of 'kl_loss' and 'cross_entropy' are the same:
... # Args:
... # dist (str): the type of the distributions. Only "Gamma" is supported.
... # concentration_b (Tensor): the concentration of distribution b.
... # rate_b (Tensor): the rate of distribution b.
... # concentration_a (Tensor): the concentration of distribution a. Default: self._concentration.
... # rate_a (Tensor): the rate of distribution a. Default: self._rate.
...
... # Examples of `kl_loss`. `cross_entropy` is similar.
... ans = self.g1.kl_loss('Gamma', concentration_b, rate_b)
... ans = self.g1.kl_loss('Gamma', concentration_b, rate_b, concentration_a, rate_a)
... # Additional `concentration` and `rate` must be passed in.
... ans = self.g2.kl_loss('Gamma', concentration_b, rate_b, concentration_a, rate_a)
...
...
... # Examples of `sample`.
... # Args:
... # shape (tuple): the shape of the sample. Default: ()
... # concentration (Tensor): the concentration of the distribution. Default: self._concentration.
... # rate (Tensor): the rate of the distribution. Default: self._rate.
... ans = self.g1.sample()
... ans = self.g1.sample((2,3))
... ans = self.g1.sample((2,3), concentration_b, rate_b)
... ans = self.g2.sample((2,3), concentration_a, rate_a)
"""
def __init__(self,
concentration=None,
rate=None,
seed=None,
dtype=mstype.float32,
name="Gamma"):
"""
Constructor of Gamma.
"""
param = dict(locals())
param['param_dict'] = {'concentration': concentration, 'rate': rate}
valid_dtype = mstype.float_type
Validator.check_type_name("dtype", dtype, valid_dtype, type(self).__name__)
super(Gamma, self).__init__(seed, dtype, name, param)
self._concentration = self._add_parameter(concentration, 'concentration')
self._rate = self._add_parameter(rate, 'rate')
if self._concentration is not None:
check_greater_zero(self._concentration, "concentration")
if self._rate is not None:
check_greater_zero(self._rate, "rate")
# ops needed for the class
self.log = log_generic
self.square = P.Square()
self.sqrt = P.Sqrt()
self.squeeze = P.Squeeze(0)
self.cast = P.Cast()
self.dtypeop = P.DType()
self.fill = P.Fill()
self.shape = P.Shape()
self.select = P.Select()
self.greater = P.Greater()
self.lgamma = nn.LGamma()
self.digamma = nn.DiGamma()
self.igamma = nn.IGamma()
def extend_repr(self):
if self.is_scalar_batch:
s = f'concentration = {self._concentration}, rate = {self._rate}'
else:
s = f'batch_shape = {self._broadcast_shape}'
return s
@property
def concentration(self):
"""
Return the concentration, also known as the alpha of the Gamma distribution.
"""
return self._concentration
@property
def rate(self):
"""
Return the rate, also known as the beta of the Gamma distribution.
"""
return self._rate
def _get_dist_type(self):
return "Gamma"
def _get_dist_args(self, concentration=None, rate=None):
if concentration is not None:
self.checktensor(concentration, 'concentration')
else:
concentration = self._concentration
if rate is not None:
self.checktensor(rate, 'rate')
else:
rate = self._rate
return concentration, rate
def _mean(self, concentration=None, rate=None):
"""
The mean of the distribution.
"""
concentration, rate = self._check_param_type(concentration, rate)
return concentration / rate
def _var(self, concentration=None, rate=None):
"""
The variance of the distribution.
"""
concentration, rate = self._check_param_type(concentration, rate)
return concentration / self.square(rate)
def _sd(self, concentration=None, rate=None):
"""
The standard deviation of the distribution.
"""
concentration, rate = self._check_param_type(concentration, rate)
return self.sqrt(concentration) / rate
def _mode(self, concentration=None, rate=None):
"""
The mode of the distribution.
"""
concentration, rate = self._check_param_type(concentration, rate)
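# The mode (concentration - 1) / rate is only defined for concentration > 1; otherwise NaN is returned below.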
mode = (concentration - 1.) / rate
nan = self.fill(self.dtypeop(concentration), self.shape(concentration), np.nan)
comp = self.greater(concentration, 1.)
return self.select(comp, mode, nan)
def _entropy(self, concentration=None, rate=None):
r"""
Evaluate entropy.
.. math::
H(X) = \alpha - \log(\beta) + \log(\Gamma(\alpha)) + (1 - \alpha) * \digamma(\alpha)
"""
concentration, rate = self._check_param_type(concentration, rate)
return concentration - self.log(rate) + self.lgamma(concentration) \
+ (1. - concentration) * self.digamma(concentration)
def _cross_entropy(self, dist, concentration_b, rate_b, concentration=None, rate=None):
r"""
Evaluate cross entropy between Gamma distributions.
Args:
dist (str): Type of the distributions. Should be "Gamma" in this case.
concentration_b (Tensor): concentration of distribution b.
rate_b (Tensor): rate of distribution b.
concentration_a (Tensor): concentration of distribution a. Default: self._concentration.
rate_a (Tensor): rate of distribution a. Default: self._rate.
"""
check_distribution_name(dist, 'Gamma')
return self._entropy(concentration, rate) + self._kl_loss(dist, concentration_b, rate_b, concentration, rate)
def _log_prob(self, value, concentration=None, rate=None):
r"""
Evaluate log probability.
Args:
value (Tensor): The value to be evaluated.
concentration (Tensor): The concentration of the distribution. Default: self._concentration.
rate (Tensor): The rate of the distribution. Default: self._rate.
.. math::
L(x) = (\alpha - 1) * \log(x) - \beta * x - \log(\Gamma(\alpha)) + \alpha * \log(\beta)
"""
value = self._check_value(value, 'value')
value = self.cast(value, self.dtype)
concentration, rate = self._check_param_type(concentration, rate)
unnormalized_log_prob = (concentration - 1.) * self.log(value) - rate * value
log_normalization = self.lgamma(concentration) - concentration * self.log(rate)
return unnormalized_log_prob - log_normalization
def _cdf(self, value, concentration=None, rate=None):
r"""
Evaluate the cumulative distribution function on the given value. Note that igamma returns
the regularized incomplete gamma function, which is what we want for the CDF.
Args:
value (Tensor): The value to be evaluated.
concentration (Tensor): The concentration of the distribution. Default: self._concentration.
rate (Tensor): The rate of the distribution. Default: self._rate.
.. math::
cdf(x) = \igamma(\alpha, \beta * x)
"""
value = self._check_value(value, 'value')
value = self.cast(value, self.dtype)
concentration, rate = self._check_param_type(concentration, rate)
return self.igamma(concentration, rate * value)
def _kl_loss(self, dist, concentration_b, rate_b, concentration=None, rate=None):
r"""
Evaluate Gamma-Gamma KL divergence, i.e. KL(a||b).
Args:
dist (str): The type of the distributions. Should be "Gamma" in this case.
concentration_b (Tensor): The concentration of distribution b.
rate_b (Tensor): The rate of distribution b.
concentration_a (Tensor): The concentration of distribution a. Default: self._concentration.
rate_a (Tensor): The rate of distribution a. Default: self._rate.
.. math::
KL(a||b) = (\alpha_{a} - \alpha_{b}) * \digamma(\alpha_{a}) + \log(\Gamma(\alpha_{b}))
- \log(\Gamma(\alpha_{a})) + \alpha_{b} * \log(\beta_{a}) - \alpha_{b} * \log(\beta_{b})
+ \alpha_{a} * (\frac{\beta_{b}}{\beta_{a}} - 1)
"""
check_distribution_name(dist, 'Gamma')
concentration_b = self._check_value(concentration_b, 'concentration_b')
rate_b = self._check_value(rate_b, 'rate_b')
concentration_b = self.cast(concentration_b, self.parameter_type)
rate_b = self.cast(rate_b, self.parameter_type)
concentration_a, rate_a = self._check_param_type(concentration, rate)
return (concentration_a - concentration_b) * self.digamma(concentration_a) \
+ self.lgamma(concentration_b) - self.lgamma(concentration_a) \
+ concentration_b * self.log(rate_a) - concentration_b * self.log(rate_b) \
+ concentration_a * (rate_b / rate_a - 1.)
def _sample(self, shape=(), concentration=None, rate=None):
"""
Sampling.
Args:
shape (tuple): The shape of the sample. Default: ().
concentration (Tensor): The concentration of the samples. Default: self._concentration.
rate (Tensor): The rate of the samples. Default: self._rate.
Returns:
Tensor, with the shape being shape + batch_shape.
"""
shape = self.checktuple(shape, 'shape')
concentration, rate = self._check_param_type(concentration, rate)
batch_shape = self.shape(concentration + rate)
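# Broadcasting concentration against rate gives the batch shape; returned samples have shape `shape + batch_shape`.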
origin_shape = shape + batch_shape
if origin_shape == ():
sample_shape = (1,)
else:
sample_shape = origin_shape
sample_gamma = C.gamma(sample_shape, concentration, rate, self.seed)
value = self.cast(sample_gamma, self.dtype)
if origin_shape == ():
value = self.squeeze(value)
return value
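
As a quick sanity check of the closed-form expressions documented above (log-pdf, CDF, entropy, and KL divergence), here is a minimal NumPy/SciPy sketch. It is an illustrative aside rather than part of the files in this commit; the names a, b, x, a1, b1, a2, b2 are placeholders chosen for the example.

import numpy as np
from scipy import special, stats
from scipy.integrate import quad

a, b = 3.0, 4.0                          # concentration (alpha) and rate (beta)
x = np.array([0.5, 1.0, 2.0])

# log-pdf: (alpha - 1) * log(x) - beta * x - lgamma(alpha) + alpha * log(beta)
log_pdf = (a - 1.) * np.log(x) - b * x - special.gammaln(a) + a * np.log(b)
assert np.allclose(log_pdf, stats.gamma.logpdf(x, a, scale=1. / b))

# cdf: regularized lower incomplete gamma function evaluated at (alpha, beta * x)
assert np.allclose(special.gammainc(a, b * x), stats.gamma.cdf(x, a, scale=1. / b))

# entropy: alpha - log(beta) + lgamma(alpha) + (1 - alpha) * digamma(alpha)
entropy = a - np.log(b) + special.gammaln(a) + (1. - a) * special.digamma(a)
assert np.allclose(entropy, stats.gamma.entropy(a, scale=1. / b))

# KL(a || b) between Gamma(a1, b1) and Gamma(a2, b2), mirroring _kl_loss above,
# compared against a direct numerical integration of p_a * log(p_a / p_b).
a1, b1, a2, b2 = 3.0, 4.0, 1.0, 1.0
kl = (a1 - a2) * special.digamma(a1) + special.gammaln(a2) - special.gammaln(a1) \
    + a2 * (np.log(b1) - np.log(b2)) + a1 * (b2 / b1 - 1.)
kl_num = quad(lambda t: stats.gamma.pdf(t, a1, scale=1. / b1)
              * (stats.gamma.logpdf(t, a1, scale=1. / b1) - stats.gamma.logpdf(t, a2, scale=1. / b2)),
              0., np.inf)[0]
assert np.isclose(kl, kl_num)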


@@ -0,0 +1,328 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""test cases for Gamma distribution"""
import numpy as np
from scipy import stats
from scipy import special
import mindspore.context as context
import mindspore.nn as nn
import mindspore.nn.probability.distribution as msd
from mindspore import Tensor
from mindspore import dtype
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
class Prob(nn.Cell):
"""
Test class: probability of Gamma distribution.
"""
def __init__(self):
super(Prob, self).__init__()
self.g = msd.Gamma(np.array([3.0]), np.array([1.0]), dtype=dtype.float32)
def construct(self, x_):
return self.g.prob(x_)
def test_pdf():
"""
Test pdf.
"""
gamma_benchmark = stats.gamma(np.array([3.0]))
expect_pdf = gamma_benchmark.pdf([1.0, 2.0]).astype(np.float32)
pdf = Prob()
output = pdf(Tensor([1.0, 2.0], dtype=dtype.float32))
tol = 1e-6
assert (np.abs(output.asnumpy() - expect_pdf) < tol).all()
class LogProb(nn.Cell):
"""
Test class: log probability of Gamma distribution.
"""
def __init__(self):
super(LogProb, self).__init__()
self.g = msd.Gamma(np.array([3.0]), np.array([1.0]), dtype=dtype.float32)
def construct(self, x_):
return self.g.log_prob(x_)
def test_log_likelihood():
"""
Test log_pdf.
"""
gamma_benchmark = stats.gamma(np.array([3.0]))
expect_logpdf = gamma_benchmark.logpdf([1.0, 2.0]).astype(np.float32)
logprob = LogProb()
output = logprob(Tensor([1.0, 2.0], dtype=dtype.float32))
tol = 1e-6
assert (np.abs(output.asnumpy() - expect_logpdf) < tol).all()
class KL(nn.Cell):
"""
Test class: kl_loss of Gamma distribution.
"""
def __init__(self):
super(KL, self).__init__()
self.g = msd.Gamma(np.array([3.0]), np.array([4.0]), dtype=dtype.float32)
def construct(self, x_, y_):
return self.g.kl_loss('Gamma', x_, y_)
def test_kl_loss():
"""
Test kl_loss.
"""
concentration_a = np.array([3.0]).astype(np.float32)
rate_a = np.array([4.0]).astype(np.float32)
concentration_b = np.array([1.0]).astype(np.float32)
rate_b = np.array([1.0]).astype(np.float32)
expect_kl_loss = (concentration_a - concentration_b) * special.digamma(concentration_a) \
+ special.gammaln(concentration_b) - special.gammaln(concentration_a) \
+ concentration_b * np.log(rate_a) - concentration_b * np.log(rate_b) \
+ concentration_a * (rate_b / rate_a - 1.)
kl_loss = KL()
concentration = Tensor(concentration_b, dtype=dtype.float32)
rate = Tensor(rate_b, dtype=dtype.float32)
output = kl_loss(concentration, rate)
tol = 1e-6
assert (np.abs(output.asnumpy() - expect_kl_loss) < tol).all()
class Basics(nn.Cell):
"""
Test class: mean/sd/mode of Gamma distribution.
"""
def __init__(self):
super(Basics, self).__init__()
self.g = msd.Gamma(np.array([3.0]), np.array([1.0]), dtype=dtype.float32)
def construct(self):
return self.g.mean(), self.g.sd(), self.g.mode()
def test_basics():
"""
Test mean/standard deviation/mode.
"""
basics = Basics()
mean, sd, mode = basics()
gamma_benchmark = stats.gamma(np.array([3.0]))
expect_mean = gamma_benchmark.mean().astype(np.float32)
expect_sd = gamma_benchmark.std().astype(np.float32)
expect_mode = [2.0]
tol = 1e-6
assert (np.abs(mean.asnumpy() - expect_mean) < tol).all()
assert (np.abs(mode.asnumpy() - expect_mode) < tol).all()
assert (np.abs(sd.asnumpy() - expect_sd) < tol).all()
class Sampling(nn.Cell):
"""
Test class: sample of Gamma distribution.
"""
def __init__(self, shape, seed=0):
super(Sampling, self).__init__()
self.g = msd.Gamma(np.array([3.0]), np.array([1.0]), seed=seed, dtype=dtype.float32)
self.shape = shape
def construct(self, concentration=None, rate=None):
return self.g.sample(self.shape, concentration, rate)
def test_sample():
"""
Test sample.
"""
shape = (2, 3)
seed = 10
concentration = Tensor([2.0], dtype=dtype.float32)
rate = Tensor([2.0, 2.0, 2.0], dtype=dtype.float32)
sample = Sampling(shape, seed=seed)
output = sample(concentration, rate)
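# concentration [2.0] broadcasts with rate [2.0, 2.0, 2.0] to a batch shape of (3,), so samples have shape (2, 3) + (3,) = (2, 3, 3).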
assert output.shape == (2, 3, 3)
class CDF(nn.Cell):
"""
Test class: cdf of Gamma distribution.
"""
def __init__(self):
super(CDF, self).__init__()
self.g = msd.Gamma(np.array([3.0]), np.array([1.0]), dtype=dtype.float32)
def construct(self, x_):
return self.g.cdf(x_)
def test_cdf():
"""
Test cdf.
"""
gamma_benchmark = stats.gamma(np.array([3.0]))
expect_cdf = gamma_benchmark.cdf([2.0]).astype(np.float32)
cdf = CDF()
output = cdf(Tensor([2.0], dtype=dtype.float32))
tol = 2e-5
assert (np.abs(output.asnumpy() - expect_cdf) < tol).all()
class LogCDF(nn.Cell):
"""
Test class: log_cdf of Gamma distribution.
"""
def __init__(self):
super(LogCDF, self).__init__()
self.g = msd.Gamma(np.array([3.0]), np.array([1.0]), dtype=dtype.float32)
def construct(self, x_):
return self.g.log_cdf(x_)
def test_log_cdf():
"""
Test log cdf.
"""
gamma_benchmark = stats.gamma(np.array([3.0]))
expect_logcdf = gamma_benchmark.logcdf([2.0]).astype(np.float32)
logcdf = LogCDF()
output = logcdf(Tensor([2.0], dtype=dtype.float32))
tol = 5e-5
assert (np.abs(output.asnumpy() - expect_logcdf) < tol).all()
class SF(nn.Cell):
"""
Test class: survival function of Gamma distribution.
"""
def __init__(self):
super(SF, self).__init__()
self.g = msd.Gamma(np.array([3.0]), np.array([1.0]), dtype=dtype.float32)
def construct(self, x_):
return self.g.survival_function(x_)
def test_survival():
"""
Test survival function.
"""
gamma_benchmark = stats.gamma(np.array([3.0]))
expect_survival = gamma_benchmark.sf([2.0]).astype(np.float32)
survival_function = SF()
output = survival_function(Tensor([2.0], dtype=dtype.float32))
tol = 2e-5
assert (np.abs(output.asnumpy() - expect_survival) < tol).all()
class LogSF(nn.Cell):
"""
Test class: log survival function of Gamma distribution.
"""
def __init__(self):
super(LogSF, self).__init__()
self.g = msd.Gamma(np.array([3.0]), np.array([1.0]), dtype=dtype.float32)
def construct(self, x_):
return self.g.log_survival(x_)
def test_log_survival():
"""
Test log_survival.
"""
gamma_benchmark = stats.gamma(np.array([3.0]))
expect_log_survival = gamma_benchmark.logsf([2.0]).astype(np.float32)
log_survival = LogSF()
output = log_survival(Tensor([2.0], dtype=dtype.float32))
tol = 2e-5
assert (np.abs(output.asnumpy() - expect_log_survival) < tol).all()
class EntropyH(nn.Cell):
"""
Test class: entropy of Gamma distribution.
"""
def __init__(self):
super(EntropyH, self).__init__()
self.g = msd.Gamma(np.array([3.0]), np.array([1.0]), dtype=dtype.float32)
def construct(self):
return self.g.entropy()
def test_entropy():
"""
Test entropy.
"""
gamma_benchmark = stats.gamma(np.array([3.0]))
expect_entropy = gamma_benchmark.entropy().astype(np.float32)
entropy = EntropyH()
output = entropy()
tol = 1e-6
assert (np.abs(output.asnumpy() - expect_entropy) < tol).all()
class CrossEntropy(nn.Cell):
"""
Test class: cross entropy between Gamma distributions.
"""
def __init__(self):
super(CrossEntropy, self).__init__()
self.g = msd.Gamma(np.array([3.0]), np.array([1.0]), dtype=dtype.float32)
def construct(self, x_, y_):
entropy = self.g.entropy()
kl_loss = self.g.kl_loss('Gamma', x_, y_)
h_sum_kl = entropy + kl_loss
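# Cross entropy satisfies H(a, b) = H(a) + KL(a || b), so the difference returned below should be close to zero.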
cross_entropy = self.g.cross_entropy('Gamma', x_, y_)
return h_sum_kl - cross_entropy
def test_cross_entropy():
"""
Test cross_entropy.
"""
cross_entropy = CrossEntropy()
concentration = Tensor([3.0], dtype=dtype.float32)
rate = Tensor([2.0], dtype=dtype.float32)
diff = cross_entropy(concentration, rate)
tol = 1e-6
assert (np.abs(diff.asnumpy() - np.zeros(diff.shape)) < tol).all()
class Net(nn.Cell):
"""
Test class: expand a single distribution instance to multiple graphs
by specifying its attributes.
"""
def __init__(self):
super(Net, self).__init__()
self.Gamma = msd.Gamma(np.array([3.0]), np.array([1.0]), dtype=dtype.float32)
def construct(self, x_, y_):
kl = self.Gamma.kl_loss('Gamma', x_, y_)
prob = self.Gamma.prob(kl)
return prob
def test_multiple_graphs():
"""
Test multiple graphs case.
"""
prob = Net()
concentration_a = np.array([3.0]).astype(np.float32)
rate_a = np.array([1.0]).astype(np.float32)
concentration_b = np.array([2.0]).astype(np.float32)
rate_b = np.array([1.0]).astype(np.float32)
ans = prob(Tensor(concentration_b), Tensor(rate_b))
expect_kl_loss = (concentration_a - concentration_b) * special.digamma(concentration_a) \
+ special.gammaln(concentration_b) - special.gammaln(concentration_a) \
+ concentration_b * np.log(rate_a) - concentration_b * np.log(rate_b) \
+ concentration_a * (rate_b / rate_a - 1.)
gamma_benchmark = stats.gamma(np.array([3.0]))
expect_prob = gamma_benchmark.pdf(expect_kl_loss).astype(np.float32)
tol = 1e-6
assert (np.abs(ans.asnumpy() - expect_prob) < tol).all()


@@ -0,0 +1,214 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Test nn.probability.distribution.Gamma.
"""
import numpy as np
import pytest
import mindspore.nn as nn
import mindspore.nn.probability.distribution as msd
from mindspore import dtype
from mindspore import Tensor
def test_gamma_shape_error():
"""
Invalid shapes.
"""
with pytest.raises(ValueError):
msd.Gamma([[2.], [1.]], [[2.], [3.], [4.]], dtype=dtype.float32)
def test_type():
with pytest.raises(TypeError):
msd.Gamma(0., 1., dtype=dtype.int32)
def test_name():
with pytest.raises(TypeError):
msd.Gamma(0., 1., name=1.0)
def test_seed():
with pytest.raises(TypeError):
msd.Gamma(0., 1., seed='seed')
def test_rate():
with pytest.raises(ValueError):
msd.Gamma(3., 0.)
with pytest.raises(ValueError):
msd.Gamma(3., -1.)
def test_arguments():
"""
Test argument passing during initialization.
"""
g = msd.Gamma()
assert isinstance(g, msd.Distribution)
g = msd.Gamma([3.0], [4.0], dtype=dtype.float32)
assert isinstance(g, msd.Distribution)
class GammaProb(nn.Cell):
"""
Gamma distribution: initialize with concentration/rate.
"""
def __init__(self):
super(GammaProb, self).__init__()
self.gamma = msd.Gamma([3.0, 4.0], [1.0, 1.0], dtype=dtype.float32)
def construct(self, value):
prob = self.gamma.prob(value)
log_prob = self.gamma.log_prob(value)
cdf = self.gamma.cdf(value)
log_cdf = self.gamma.log_cdf(value)
sf = self.gamma.survival_function(value)
log_sf = self.gamma.log_survival(value)
return prob + log_prob + cdf + log_cdf + sf + log_sf
def test_gamma_prob():
"""
Test probability functions: passing value through construct.
"""
net = GammaProb()
value = Tensor([0.5, 1.0], dtype=dtype.float32)
ans = net(value)
assert isinstance(ans, Tensor)
class GammaProb1(nn.Cell):
"""
Gamma distribution: initialize without concentration/rate.
"""
def __init__(self):
super(GammaProb1, self).__init__()
self.gamma = msd.Gamma()
def construct(self, value, concentration, rate):
prob = self.gamma.prob(value, concentration, rate)
log_prob = self.gamma.log_prob(value, concentration, rate)
cdf = self.gamma.cdf(value, concentration, rate)
log_cdf = self.gamma.log_cdf(value, concentration, rate)
sf = self.gamma.survival_function(value, concentration, rate)
log_sf = self.gamma.log_survival(value, concentration, rate)
return prob + log_prob + cdf + log_cdf + sf + log_sf
def test_gamma_prob1():
"""
Test probability functions: passing concentration/rate, value through construct.
"""
net = GammaProb1()
value = Tensor([0.5, 1.0], dtype=dtype.float32)
concentration = Tensor([2.0, 3.0], dtype=dtype.float32)
rate = Tensor([1.0], dtype=dtype.float32)
ans = net(value, concentration, rate)
assert isinstance(ans, Tensor)
class GammaKl(nn.Cell):
"""
Test class: kl_loss of Gamma distribution.
"""
def __init__(self):
super(GammaKl, self).__init__()
self.g1 = msd.Gamma(np.array([3.0]), np.array([4.0]), dtype=dtype.float32)
self.g2 = msd.Gamma(dtype=dtype.float32)
def construct(self, concentration_b, rate_b, concentration_a, rate_a):
kl1 = self.g1.kl_loss('Gamma', concentration_b, rate_b)
kl2 = self.g2.kl_loss('Gamma', concentration_b, rate_b, concentration_a, rate_a)
return kl1 + kl2
def test_kl():
"""
Test kl_loss.
"""
net = GammaKl()
concentration_b = Tensor(np.array([1.0]).astype(np.float32), dtype=dtype.float32)
rate_b = Tensor(np.array([1.0]).astype(np.float32), dtype=dtype.float32)
concentration_a = Tensor(np.array([2.0]).astype(np.float32), dtype=dtype.float32)
rate_a = Tensor(np.array([3.0]).astype(np.float32), dtype=dtype.float32)
ans = net(concentration_b, rate_b, concentration_a, rate_a)
assert isinstance(ans, Tensor)
class GammaCrossEntropy(nn.Cell):
"""
Test class: cross_entropy of Gamma distribution.
"""
def __init__(self):
super(GammaCrossEntropy, self).__init__()
self.g1 = msd.Gamma(np.array([3.0]), np.array([4.0]), dtype=dtype.float32)
self.g2 = msd.Gamma(dtype=dtype.float32)
def construct(self, concentration_b, rate_b, concentration_a, rate_a):
h1 = self.g1.cross_entropy('Gamma', concentration_b, rate_b)
h2 = self.g2.cross_entropy('Gamma', concentration_b, rate_b, concentration_a, rate_a)
return h1 + h2
def test_cross_entropy():
"""
Test cross entropy between Gamma distributions.
"""
net = GammaCrossEntropy()
concentration_b = Tensor(np.array([1.0]).astype(np.float32), dtype=dtype.float32)
rate_b = Tensor(np.array([1.0]).astype(np.float32), dtype=dtype.float32)
concentration_a = Tensor(np.array([2.0]).astype(np.float32), dtype=dtype.float32)
rate_a = Tensor(np.array([3.0]).astype(np.float32), dtype=dtype.float32)
ans = net(concentration_b, rate_b, concentration_a, rate_a)
assert isinstance(ans, Tensor)
class GammaBasics(nn.Cell):
"""
Test class: basic mean/sd/mode functions.
"""
def __init__(self):
super(GammaBasics, self).__init__()
self.g = msd.Gamma(np.array([3.0, 4.0]), np.array([4.0, 6.0]), dtype=dtype.float32)
def construct(self):
mean = self.g.mean()
sd = self.g.sd()
mode = self.g.mode()
return mean + sd + mode
def test_basics():
"""
Test mean/sd/mode functionality of Gamma.
"""
net = GammaBasics()
ans = net()
assert isinstance(ans, Tensor)
class GammaConstruct(nn.Cell):
"""
Gamma distribution: going through construct.
"""
def __init__(self):
super(GammaConstruct, self).__init__()
self.gamma = msd.Gamma([3.0], [4.0])
self.gamma1 = msd.Gamma()
def construct(self, value, concentration, rate):
prob = self.gamma('prob', value)
prob1 = self.gamma('prob', value, concentration, rate)
prob2 = self.gamma1('prob', value, concentration, rate)
return prob + prob1 + prob2
def test_gamma_construct():
"""
Test probability function going through construct.
"""
net = GammaConstruct()
value = Tensor([0.5, 1.0], dtype=dtype.float32)
concentration = Tensor([0.0], dtype=dtype.float32)
rate = Tensor([1.0], dtype=dtype.float32)
ans = net(value, concentration, rate)
assert isinstance(ans, Tensor)