Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Rename cov_func/cov to scale_func/scale for TP/MvStudentT #6068

Merged
merged 6 commits into from
Aug 27, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
35 changes: 21 additions & 14 deletions pymc/distributions/multivariate.py
Original file line number Diff line number Diff line change
Expand Up @@ -352,42 +352,49 @@ class MvStudentT(Continuous):
nu : tensor_like of float
Degrees of freedom, should be a positive scalar.
Sigma : tensor_like of float, optional
Covariance matrix. Use `cov` in new code.
Scale matrix. Use `scale` in new code.
mu : tensor_like of float, optional
Vector of means.
cov : tensor_like of float, optional
The covariance matrix.
scale : tensor_like of float, optional
The scale matrix.
tau : tensor_like of float, optional
The precision matrix.
chol : tensor_like of float, optional
The cholesky factor of the covariance matrix.
The cholesky factor of the scale matrix.
lower : bool, default=True
Whether the cholesky factor is given as a lower triangular matrix.
"""
rv_op = mv_studentt

@classmethod
def dist(cls, nu, Sigma=None, mu=None, cov=None, tau=None, chol=None, lower=True, **kwargs):
def dist(cls, nu, Sigma=None, mu=None, scale=None, tau=None, chol=None, lower=True, **kwargs):
canyon289 marked this conversation as resolved.
Show resolved Hide resolved
if kwargs.get("cov") is not None:
warnings.warn(
"Use the scale argument to specify the scale matrix. "
"cov will be removed in future versions.",
FutureWarning,
)
scale = kwargs.pop("cov")
if Sigma is not None:
if cov is not None:
raise ValueError("Specify only one of cov and Sigma")
cov = Sigma
if scale is not None:
raise ValueError("Specify only one of scale and Sigma")
scale = Sigma
nu = at.as_tensor_variable(floatX(nu))
mu = at.as_tensor_variable(floatX(mu))
cov = quaddist_matrix(cov, chol, tau, lower)
scale = quaddist_matrix(scale, chol, tau, lower)
# Aesara is stricter about the shape of mu, than PyMC used to be
mu = at.broadcast_arrays(mu, cov[..., -1])[0]
mu = at.broadcast_arrays(mu, scale[..., -1])[0]

return super().dist([nu, mu, cov], **kwargs)
return super().dist([nu, mu, scale], **kwargs)

def moment(rv, size, nu, mu, cov):
def moment(rv, size, nu, mu, scale):
moment = mu
if not rv_size_is_none(size):
moment_size = at.concatenate([size, [mu.shape[-1]]])
moment = at.full(moment_size, moment)
return moment

def logp(value, nu, mu, cov):
def logp(value, nu, mu, scale):
"""
Calculate log-probability of Multivariate Student's T distribution
at specified value.
Expand All @@ -401,7 +408,7 @@ def logp(value, nu, mu, cov):
-------
TensorVariable
"""
quaddist, logdet, ok = quaddist_parse(value, mu, cov)
quaddist, logdet, ok = quaddist_parse(value, mu, scale)
k = floatX(value.shape[-1])

norm = gammaln((nu + k) / 2.0) - gammaln(nu / 2.0) - 0.5 * k * at.log(nu * np.pi)
Expand Down
15 changes: 11 additions & 4 deletions pymc/gp/gp.py
Original file line number Diff line number Diff line change
Expand Up @@ -251,8 +251,8 @@ class TP(Latent):

Parameters
----------
cov_func : None, 2D array, or instance of Covariance
The covariance function. Defaults to zero.
scale_func : None, 2D array, or instance of Covariance
The scale function. Defaults to zero.
mean_func : None, instance of Mean
The mean function. Defaults to zero.
nu : float
Expand All @@ -264,11 +264,18 @@ class TP(Latent):
Processes as Alternatives to Gaussian Processes. arXiv preprint arXiv:1402.4306.
"""

def __init__(self, *, mean_func=Zero(), cov_func=Constant(0.0), nu=None):
def __init__(self, *, mean_func=Zero(), scale_func=Constant(0.0), cov_func=None, nu=None):
if nu is None:
raise ValueError("Student's T process requires a degrees of freedom parameter, 'nu'")
if cov_func is not None:
warnings.warn(
"Use the scale_func argument to specify the scale function. "
"cov_func will be removed in future versions.",
FutureWarning,
)
scale_func = cov_func
self.nu = nu
super().__init__(mean_func=mean_func, cov_func=cov_func)
super().__init__(mean_func=mean_func, cov_func=scale_func)

def __add__(self, other):
raise TypeError("Student's T processes aren't additive")
Expand Down
16 changes: 8 additions & 8 deletions pymc/tests/test_gp.py
Original file line number Diff line number Diff line change
Expand Up @@ -1068,8 +1068,8 @@ def setup_method(self):

def testTPvsLatent(self):
with pm.Model() as model:
cov_func = pm.gp.cov.ExpQuad(3, [0.1, 0.2, 0.3])
tp = pm.gp.TP(cov_func=cov_func, nu=self.nu)
scale_func = pm.gp.cov.ExpQuad(3, [0.1, 0.2, 0.3])
tp = pm.gp.TP(scale_func=scale_func, nu=self.nu)
f = tp.prior("f", self.X, reparameterize=False)
p = tp.conditional("p", self.Xnew)
assert tuple(f.shape.eval()) == (self.X.shape[0],)
Expand All @@ -1079,22 +1079,22 @@ def testTPvsLatent(self):

def testTPvsLatentReparameterized(self):
with pm.Model() as model:
cov_func = pm.gp.cov.ExpQuad(3, [0.1, 0.2, 0.3])
tp = pm.gp.TP(cov_func=cov_func, nu=self.nu)
scale_func = pm.gp.cov.ExpQuad(3, [0.1, 0.2, 0.3])
tp = pm.gp.TP(scale_func=scale_func, nu=self.nu)
f = tp.prior("f", self.X, reparameterize=True)
p = tp.conditional("p", self.Xnew)
assert tuple(f.shape.eval()) == (self.X.shape[0],)
assert tuple(p.shape.eval()) == (self.Xnew.shape[0],)
chol = np.linalg.cholesky(cov_func(self.X).eval())
chol = np.linalg.cholesky(scale_func(self.X).eval())
f_rotated = np.linalg.solve(chol, self.y)
tp_logp = model.compile_logp()({"f_rotated_": f_rotated, "p": self.pnew})
npt.assert_allclose(self.gp_latent_logp, tp_logp, atol=0, rtol=1e-2)

def testAdditiveTPRaises(self):
with pm.Model() as model:
cov_func = pm.gp.cov.ExpQuad(3, [0.1, 0.2, 0.3])
gp1 = pm.gp.TP(cov_func=cov_func, nu=10)
gp2 = pm.gp.TP(cov_func=cov_func, nu=10)
scale_func = pm.gp.cov.ExpQuad(3, [0.1, 0.2, 0.3])
gp1 = pm.gp.TP(scale_func=scale_func, nu=10)
gp2 = pm.gp.TP(scale_func=scale_func, nu=10)
with pytest.raises(Exception) as e_info:
gp1 + gp2

Expand Down