Fix exponential and gamma logp / random link #4576

Merged (1 commit) on Apr 4, 2021
60 changes: 32 additions & 28 deletions pymc3/distributions/continuous.py
@@ -1377,49 +1377,53 @@ class Exponential(PositiveContinuous):
     @classmethod
     def dist(cls, lam, *args, **kwargs):
         lam = at.as_tensor_variable(floatX(lam))
-        # mean = 1.0 / lam
-        # median = mean * at.log(2)
-        # mode = at.zeros_like(lam)
 
-        # variance = lam ** -2
 
         assert_negative_support(lam, "lam", "Exponential")
-        return super().dist([lam], **kwargs)
-
-    def logp(value, lam):
+        # Aesara exponential op is parametrized in terms of mu (1/lam)
+        return super().dist([at.inv(lam)], **kwargs)
+
+    def logp(value, mu):
         """
         Calculate log-probability of Exponential distribution at specified value.
 
         Parameters
         ----------
         value: numeric
-            Value(s) for which log-probability is calculated. If the log probabilities for multiple
-            values are desired the values must be provided in a numpy array or aesara tensor
+            Value(s) for which log-probability is calculated. If the log
+            probabilities for multiple values are desired the values must be
+            provided in a numpy array or aesara tensor
 
         Returns
         -------
         TensorVariable
         """
-        return bound(at.log(lam) - lam * value, value >= 0, lam > 0)
+        lam = at.inv(mu)
+        return bound(
+            at.log(lam) - lam * value,
+            value >= 0,
+            lam > 0,
+        )
 
-    def logcdf(value, lam):
+    def logcdf(value, mu):
         r"""
         Compute the log of cumulative distribution function for the Exponential distribution
         at the specified value.
 
         Parameters
         ----------
         value: numeric or np.ndarray or aesara.tensor
-            Value(s) for which log CDF is calculated. If the log CDF for multiple
-            values are desired the values must be provided in a numpy array or aesara tensor.
+            Value(s) for which log CDF is calculated. If the log CDF for
+            multiple values are desired the values must be provided in a numpy
+            array or aesara tensor.
 
         Returns
         -------
         TensorVariable
         """
-        a = lam * value
+        lam = at.inv(mu)
         return bound(
-            log1mexp(a),
+            log1mexp(lam * value),
            0 <= value,
            0 <= lam,
        )
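
For reference, the rate/scale relationship the new `logp`/`logcdf` rely on (the Aesara exponential op stores the scale `mu = 1/lam`, and the densities convert back with `lam = at.inv(mu)`) can be checked outside of Aesara. A minimal NumPy/SciPy sketch, with illustrative numbers that are not part of the PR:

```python
# Standalone sketch (not part of the diff): check that the rate-based
# density used in Exponential.logp/logcdf, with lam recovered as 1/mu,
# matches SciPy's scale-parameterized exponential.
import numpy as np
from scipy import stats

lam = 2.5          # rate, as the user passes it to pm.Exponential (illustrative)
mu = 1.0 / lam     # scale, as the Aesara exponential op stores it
value = np.array([0.1, 1.0, 3.0])

# logp as in the PR: lam = 1/mu, then log(lam) - lam * value
logp = np.log(1.0 / mu) - (1.0 / mu) * value

# logcdf as in the PR: log1mexp(lam * value) == log(1 - exp(-lam * value))
logcdf = np.log1p(-np.exp(-(1.0 / mu) * value))

np.testing.assert_allclose(logp, stats.expon(scale=mu).logpdf(value))
np.testing.assert_allclose(logcdf, stats.expon(scale=mu).logcdf(value))
```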
@@ -2371,15 +2375,13 @@ def dist(cls, alpha=None, beta=None, mu=None, sigma=None, sd=None, no_assert=Fal
         alpha, beta = cls.get_alpha_beta(alpha, beta, mu, sigma)
         alpha = at.as_tensor_variable(floatX(alpha))
         beta = at.as_tensor_variable(floatX(beta))
-        # mean = alpha / beta
-        # mode = at.maximum((alpha - 1) / beta, 0)
-        # variance = alpha / beta ** 2
 
         if not no_assert:
             assert_negative_support(alpha, "alpha", "Gamma")
             assert_negative_support(beta, "beta", "Gamma")
 
-        return super().dist([alpha, at.inv(beta)], **kwargs)
+        # The Aesara `GammaRV` `Op` will invert the `beta` parameter itself
+        return super().dist([alpha, beta], **kwargs)
 
     @classmethod
     def get_alpha_beta(cls, alpha=None, beta=None, mu=None, sigma=None):
@@ -2397,45 +2399,47 @@ def get_alpha_beta(cls, alpha=None, beta=None, mu=None, sigma=None):
 
         return alpha, beta
 
-    def _distr_parameters_for_repr(self):
-        return ["alpha", "beta"]
-
-    def logp(value, alpha, beta):
+    def logp(value, alpha, inv_beta):
         """
         Calculate log-probability of Gamma distribution at specified value.
 
         Parameters
         ----------
         value: numeric
-            Value(s) for which log-probability is calculated. If the log probabilities for multiple
-            values are desired the values must be provided in a numpy array or `TensorVariable`.
+            Value(s) for which log-probability is calculated. If the log
+            probabilities for multiple values are desired the values must be
+            provided in a numpy array or `TensorVariable`.
 
         Returns
         -------
         TensorVariable
         """
+        beta = at.inv(inv_beta)
         return bound(
             -gammaln(alpha) + logpow(beta, alpha) - beta * value + logpow(value, alpha - 1),
             value >= 0,
             alpha > 0,
             beta > 0,
         )
 
-    def logcdf(value, alpha, beta):
+    def logcdf(value, alpha, inv_beta):
         """
         Compute the log of the cumulative distribution function for Gamma distribution
         at the specified value.
 
         Parameters
         ----------
         value: numeric or np.ndarray or `TensorVariable`
-            Value(s) for which log CDF is calculated. If the log CDF for multiple
-            values are desired the values must be provided in a numpy array or `TensorVariable`.
+            Value(s) for which log CDF is calculated. If the log CDF for
+            multiple values are desired the values must be provided in a numpy
+            array or `TensorVariable`.
 
         Returns
         -------
         TensorVariable
         """
+        beta = at.inv(inv_beta)
+
         # Avoid C-assertion when the gammainc function is called with invalid values (#4340)
         safe_alpha = at.switch(at.lt(alpha, 0), 0, alpha)
         safe_beta = at.switch(at.lt(beta, 0), 0, beta)
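
Similarly, after this change the Gamma random variable carries the scale `1/beta` in the graph, while `logp`/`logcdf` recover the rate with `beta = at.inv(inv_beta)`. A minimal NumPy/SciPy sketch of that equivalence (illustrative numbers, not from the PR):

```python
# Standalone sketch (not part of the diff): with the fix, the Gamma RV's
# graph inputs are (alpha, 1/beta), and logp recovers the rate as
# beta = 1/inv_beta.  Cross-check the rate-based log-density against
# SciPy's shape/scale parameterization.
import numpy as np
from scipy import special, stats

alpha, beta = 10.0, 100.0   # shape and rate, as passed to pm.Gamma (illustrative)
inv_beta = 1.0 / beta       # scale, as stored on the RV node
value = np.array([0.05, 0.1, 0.2])

rate = 1.0 / inv_beta       # mirrors `beta = at.inv(inv_beta)` in logp
logp = (
    -special.gammaln(alpha)
    + alpha * np.log(rate)          # logpow(beta, alpha)
    - rate * value
    + (alpha - 1) * np.log(value)   # logpow(value, alpha - 1)
)

np.testing.assert_allclose(logp, stats.gamma(a=alpha, scale=inv_beta).logpdf(value))
```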
59 changes: 36 additions & 23 deletions pymc3/tests/test_distributions_random.py
@@ -20,6 +20,7 @@
 import aesara
 import numpy as np
 import numpy.random as nr
+import numpy.testing as npt
 import pytest
 import scipy.stats as st
 
@@ -32,7 +33,7 @@
 from pymc3.distributions.dist_math import clipped_beta_rvs
 from pymc3.distributions.shape_utils import to_tuple
 from pymc3.exceptions import ShapeError
-from pymc3.tests.helpers import SeededTest
+from pymc3.tests.helpers import SeededTest, select_by_precision
 from pymc3.tests.test_distributions import (
     Domain,
     I,
@@ -626,13 +627,6 @@ def ref_rand(size, alpha, beta):
 
         pymc3_random(pm.Beta, {"alpha": Rplus, "beta": Rplus}, ref_rand=ref_rand)
 
-    @pytest.mark.skip(reason="This test is covered by Aesara")
-    def test_exponential(self):
-        def ref_rand(size, lam):
-            return nr.exponential(scale=1.0 / lam, size=size)
-
-        pymc3_random(pm.Exponential, {"lam": Rplus}, ref_rand=ref_rand)
-
     @pytest.mark.xfail(reason="This distribution has not been refactored for v4")
     def test_laplace(self):
         def ref_rand(size, mu, b):
@@ -680,20 +674,6 @@ def ref_rand(size, beta):
 
         pymc3_random(pm.HalfCauchy, {"beta": Rplusbig}, ref_rand=ref_rand)
 
-    @pytest.mark.skip(reason="This test is covered by Aesara")
-    def test_gamma_alpha_beta(self):
-        def ref_rand(size, alpha, beta):
-            return st.gamma.rvs(alpha, scale=1.0 / beta, size=size)
-
-        pymc3_random(pm.Gamma, {"alpha": Rplusbig, "beta": Rplusbig}, ref_rand=ref_rand)
-
-    @pytest.mark.skip(reason="This test is covered by Aesara")
-    def test_gamma_mu_sigma(self):
-        def ref_rand(size, mu, sigma):
-            return st.gamma.rvs(mu ** 2 / sigma ** 2, scale=sigma ** 2 / mu, size=size)
-
-        pymc3_random(pm.Gamma, {"mu": Rplusbig, "sigma": Rplusbig}, ref_rand=ref_rand)
-
     @pytest.mark.skip(reason="This test is covered by Aesara")
     def test_inverse_gamma(self):
         def ref_rand(size, alpha, beta):
@@ -1787,7 +1767,7 @@ def test_issue_3758(self):
 
         for var in "bcd":
             std = np.std(samples[var] - samples["a"])
-            np.testing.assert_allclose(std, 1, rtol=1e-2)
+            npt.assert_allclose(std, 1, rtol=1e-2)
 
     def test_issue_3829(self):
         with pm.Model() as model:
@@ -1884,3 +1864,36 @@ def test_with_cov_rv(self, sample_shape, dist_shape, mu_shape):
             prior = pm.sample_prior_predictive(samples=sample_shape)
 
         assert prior["mv"].shape == to_tuple(sample_shape) + dist_shape
+
+
+def test_exponential_parameterization():
+    test_lambda = floatX(10.0)
+
+    exp_pymc = pm.Exponential.dist(lam=test_lambda)
+    (rv_scale,) = exp_pymc.owner.inputs[3:]
+
+    npt.assert_almost_equal(rv_scale.eval(), 1 / test_lambda)
+
+
+def test_gamma_parameterization():
+
+    test_alpha = floatX(10.0)
+    test_beta = floatX(100.0)
+
+    gamma_pymc = pm.Gamma.dist(alpha=test_alpha, beta=test_beta)
+    rv_alpha, rv_inv_beta = gamma_pymc.owner.inputs[3:]
+
+    assert np.array_equal(rv_alpha.eval(), test_alpha)
+
+    decimal = select_by_precision(float64=6, float32=3)
+
+    npt.assert_almost_equal(rv_inv_beta.eval(), 1.0 / test_beta, decimal)
+
+    test_mu = test_alpha / test_beta
+    test_sigma = np.sqrt(test_mu / test_beta)
+
+    gamma_pymc = pm.Gamma.dist(mu=test_mu, sigma=test_sigma)
+    rv_alpha, rv_inv_beta = gamma_pymc.owner.inputs[3:]
+
+    npt.assert_almost_equal(rv_alpha.eval(), test_alpha, decimal)
+    npt.assert_almost_equal(rv_inv_beta.eval(), 1.0 / test_beta, decimal)
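
The new tests read the distribution parameters directly off the random-variable graph: `owner.inputs[3:]` skips the leading rng, size, and dtype inputs of the Aesara `RandomVariable` node, leaving the distribution parameters, which for `Gamma` should be `(alpha, 1/beta)` after this fix. The `mu`/`sigma` branch of `test_gamma_parameterization` relies on the moment-matching conversion sketched below; the `get_alpha_beta` formulas are assumed (its body is collapsed in this diff), so treat this as illustrative only.

```python
# Sketch of the mu/sigma round trip behind test_gamma_parameterization,
# assuming Gamma.get_alpha_beta does the usual moment matching
# (alpha = mu**2 / sigma**2, beta = mu / sigma**2).
import numpy as np

test_alpha, test_beta = 10.0, 100.0

test_mu = test_alpha / test_beta            # 0.1
test_sigma = np.sqrt(test_mu / test_beta)   # sqrt(0.001)

alpha = test_mu ** 2 / test_sigma ** 2      # -> 10.0
beta = test_mu / test_sigma ** 2            # -> 100.0

np.testing.assert_allclose([alpha, beta], [test_alpha, test_beta])
print(1.0 / beta)  # 0.01, the inv_beta value the test expects on the RV node
```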