Derive logprob for hyperbolic and error transformations #6664

Merged

merged 15 commits into from Apr 28, 2023
Changes from 13 commits
155 changes: 137 additions & 18 deletions pymc/logprob/transforms.py
@@ -42,28 +42,50 @@
import numpy as np
import pytensor.tensor as pt

from pytensor import scan
from pytensor.gradient import DisconnectedType, jacobian
from pytensor.graph.basic import Apply, Node, Variable
from pytensor.graph.features import AlreadyThere, Feature
from pytensor.graph.fg import FunctionGraph
from pytensor.graph.op import Op
from pytensor.graph.replace import clone_replace
from pytensor.graph.rewriting.basic import GraphRewriter, in2out, node_rewriter
from pytensor.scalar import Abs, Add, Exp, Log, Mul, Pow, Sqr, Sqrt
from pytensor.scalar import (
Abs,
Add,
Cosh,
Erf,
Erfc,
Erfcx,
Exp,
Log,
Mul,
Pow,
Sinh,
Sqr,
Sqrt,
Tanh,
)
from pytensor.scan.op import Scan
from pytensor.tensor.exceptions import NotScalarConstantError
from pytensor.tensor.math import (
abs,
add,
cosh,
erf,
erfc,
erfcx,
exp,
log,
mul,
neg,
pow,
reciprocal,
sinh,
sqr,
sqrt,
sub,
tanh,
true_div,
)
from pytensor.tensor.rewriting.basic import (
@@ -122,6 +144,8 @@ def remove_TransformedVariables(fgraph, node):


class RVTransform(abc.ABC):
ndim_supp = None

@abc.abstractmethod
def forward(self, value: TensorVariable, *inputs: Variable) -> TensorVariable:
"""Apply the transformation."""
@@ -135,12 +159,16 @@ def backward(

def log_jac_det(self, value: TensorVariable, *inputs) -> TensorVariable:
"""Construct the log of the absolute value of the Jacobian determinant."""
# jac = pt.reshape(
# gradient(pt.sum(self.backward(value, *inputs)), [value]), value.shape
# )
# return pt.log(pt.abs(jac))
phi_inv = self.backward(value, *inputs)
return pt.log(pt.abs(pt.nlinalg.det(pt.atleast_2d(jacobian(phi_inv, [value])[0]))))
if self.ndim_supp not in (0, 1):
raise NotImplementedError(
f"RVTransform default log_jac_det only implemented for ndim_supp in (0, 1), got {self.ndim_supp=}"
)
if self.ndim_supp == 0:
jac = pt.reshape(pt.grad(pt.sum(self.backward(value, *inputs)), [value]), value.shape)
return pt.log(pt.abs(jac))
else:
phi_inv = self.backward(value, *inputs)
return pt.log(pt.abs(pt.nlinalg.det(pt.atleast_2d(jacobian(phi_inv, [value])[0]))))
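As a sanity check on the ndim_supp == 0 branch, here is a minimal standalone sketch (not part of the diff) of the same recipe, the log-abs-derivative of the inverse, for an exp forward whose backward is log, where the result should equal -log(value):

import numpy as np
import pytensor
import pytensor.tensor as pt

value = pt.vector("value")
backward = pt.log(value)  # inverse of the exp transform
# Gradient of the summed inverse, as in the ndim_supp == 0 branch above
jac = pt.grad(pt.sum(backward), value)
log_jac_det = pt.log(pt.abs(jac))

f = pytensor.function([value], log_jac_det)
test_value = np.array([0.5, 2.0], dtype=value.dtype)
np.testing.assert_allclose(f(test_value), -np.log(test_value), rtol=1e-6)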


@node_rewriter(tracks=None)
@@ -340,7 +368,7 @@ def apply(self, fgraph: FunctionGraph):
class MeasurableTransform(MeasurableElemwise):
"""A placeholder used to specify a log-likelihood for a transformed measurable variable"""

valid_scalar_types = (Exp, Log, Add, Mul, Pow, Abs)
valid_scalar_types = (Exp, Log, Add, Mul, Pow, Abs, Sinh, Cosh, Tanh, Erf, Erfc, Erfcx)

# Cannot use `transform` as name because it would clash with the property added by
# the `TransformValuesRewrite`
@@ -391,7 +419,7 @@ def measurable_transform_logprob(op: MeasurableTransform, values, *inputs, **kwargs):
jacobian = jacobian.sum(axis=tuple(range(-ndim_supp, 0)))

# The jacobian is used to ensure a value in the supported domain was provided
return pt.switch(pt.isnan(jacobian), -np.inf, input_logprob + jacobian)
return pt.switch(pt.isnan(input_logprob + jacobian), -np.inf, input_logprob + jacobian)
Member

Can we revert this change?

Contributor Author

Potentially, let me check. The reason I did that was that it meant we return -np.inf consistently when input_logprob = nan, which is the case for some of the transforms.

Contributor Author

Okay, it seems that isn't the case anymore and the tests pass with the reverted change.
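
(For reference, a small NumPy sketch of the two guards under discussion, using hypothetical values rather than anything from the test suite:)

import numpy as np

input_logprob, jacobian = np.nan, 0.5  # e.g. a nan coming from the base logprob
old = np.where(np.isnan(jacobian), -np.inf, input_logprob + jacobian)  # guard on jacobian only
new = np.where(np.isnan(input_logprob + jacobian), -np.inf, input_logprob + jacobian)  # guard on the sum
print(old, new)  # nan -inf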



@_logcdf.register(MeasurableTransform)
@@ -540,7 +568,7 @@ def measurable_sub_to_neg(fgraph, node):
return [pt.add(minuend, pt.neg(subtrahend))]


@node_rewriter([exp, log, add, mul, pow, abs])
@node_rewriter([exp, log, add, mul, pow, abs, sinh, cosh, tanh, erf, erfc, erfcx])
def find_measurable_transforms(fgraph: FunctionGraph, node: Node) -> Optional[List[Node]]:
"""Find measurable transformations from Elemwise operators."""

@@ -596,13 +624,20 @@ def find_measurable_transforms(fgraph: FunctionGraph, node: Node) -> Optional[List[Node]]:
measurable_input_idx = 0
transform_inputs: Tuple[TensorVariable, ...] = (measurable_input,)
transform: RVTransform
if isinstance(scalar_op, Exp):
transform = ExpTransform()
elif isinstance(scalar_op, Log):
transform = LogTransform()
elif isinstance(scalar_op, Abs):
transform = AbsTransform()
elif isinstance(scalar_op, Pow):

transform_dict = {
Exp: ExpTransform(),
Log: LogTransform(),
Abs: AbsTransform(),
Sinh: SinhTransform(),
Cosh: CoshTransform(),
Tanh: TanhTransform(),
Erf: ErfTransform(),
Erfc: ErfcTransform(),
Erfcx: ErfcxTransform(),
}
transform = transform_dict.get(type(scalar_op), None)
if isinstance(scalar_op, Pow):
# We only allow for the base to be measurable
if measurable_input_idx != 0:
return None
transform = LocTransform(
transform_args_fn=lambda *inputs: inputs[-1],
)
else:
elif transform is None:
transform_inputs = (measurable_input, pt.mul(*other_inputs))
transform = ScaleTransform(
transform_args_fn=lambda *inputs: inputs[-1],
@@ -682,6 +717,90 @@ def find_measurable_transforms(fgraph: FunctionGraph, node: Node) -> Optional[List[Node]]:
)


class SinhTransform(RVTransform):
name = "sinh"
ndim_supp = 0

def forward(self, value, *inputs):
return pt.sinh(value)

def backward(self, value, *inputs):
return pt.arcsinh(value)


class CoshTransform(RVTransform):
name = "cosh"
ndim_supp = 0

def forward(self, value, *inputs):
return pt.cosh(value)

def backward(self, value, *inputs):
return pt.arccosh(value)

# def log_jac_det(self, value, *inputs):
# return pt.log(pt.sinh(value))


class TanhTransform(RVTransform):
name = "tanh"
ndim_supp = 0

def forward(self, value, *inputs):
return pt.tanh(value)

def backward(self, value, *inputs):
return pt.arctanh(value)
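
A quick numeric check, sketched here rather than taken from the test suite below: with backward = arctanh, the inherited log_jac_det should match the closed form log|d arctanh(v)/dv| = -log(1 - v**2).

import numpy as np
import pytensor
import pytensor.tensor as pt

value = pt.vector("value")
f = pytensor.function([value], TanhTransform().log_jac_det(value))

v = np.array([-0.5, 0.0, 0.9], dtype=value.dtype)
np.testing.assert_allclose(f(v), -np.log1p(-v**2), rtol=1e-6)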


class ErfTransform(RVTransform):
name = "erf"
ndim_supp = 0

def forward(self, value, *inputs):
return pt.erf(value)

def backward(self, value, *inputs):
return pt.erfinv(value)


class ErfcTransform(RVTransform):
name = "erfc"
ndim_supp = 0

def forward(self, value, *inputs):
return pt.erfc(value)

def backward(self, value, *inputs):
return pt.erfcinv(value)


class ErfcxTransform(RVTransform):
name = "erfcx"
ndim_supp = 0

def forward(self, value, *inputs):
return pt.erfcx(value)

def backward(self, value, *inputs):
# computes the inverse of erfcx, this was adapted from
# https://tinyurl.com/4mxfd3cz
x = pt.switch(value <= 1, 1.0 / (value * pt.sqrt(np.pi)), -pt.sqrt(pt.log(value)))

def calc_delta_x(value, prior_result):
return prior_result - (pt.erfcx(prior_result) - value) / (
2 * prior_result * pt.erfcx(prior_result) - 2 / pt.sqrt(np.pi)
)

result, updates = scan(
fn=calc_delta_x,
outputs_info=pt.ones_like(x),
non_sequences=value,
n_steps=10,
)
return result[-1]
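
For intuition, the scan above runs ten Newton steps on erfcx(x) - value = 0, using erfcx'(x) = 2*x*erfcx(x) - 2/sqrt(pi). A NumPy/SciPy sketch of the same update, assuming scipy is available and starting the iteration from the switch-based guess:

import numpy as np
from scipy.special import erfcx

def erfcx_inv(value, n_steps=10):
    value = np.asarray(value, dtype=float)
    # Initial guess, mirroring the pt.switch above; silence the unused branch
    with np.errstate(invalid="ignore", divide="ignore"):
        x = np.where(value <= 1, 1.0 / (value * np.sqrt(np.pi)), -np.sqrt(np.log(value)))
    for _ in range(n_steps):
        # Newton step with erfcx'(x) = 2*x*erfcx(x) - 2/sqrt(pi)
        x -= (erfcx(x) - value) / (2 * x * erfcx(x) - 2 / np.sqrt(np.pi))
    return x

xs = np.array([-0.3, 0.1, 1.5])
np.testing.assert_allclose(erfcx_inv(erfcx(xs)), xs, rtol=1e-5)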


class LocTransform(RVTransform):
name = "loc"

2 changes: 1 addition & 1 deletion tests/distributions/test_transform.py
@@ -112,7 +112,7 @@ def check_jacobian_det(
)

for yval in domain.vals:
close_to(actual_ljd(yval), computed_ljd(yval), tol)
np.testing.assert_allclose(actual_ljd(yval), computed_ljd(yval), rtol=tol)


def test_simplex():
89 changes: 84 additions & 5 deletions tests/logprob/test_transforms.py
@@ -49,16 +49,22 @@

from pymc.distributions.transforms import _default_transform, log, logodds
from pymc.logprob.abstract import MeasurableVariable, _get_measurable_outputs, _logprob
from pymc.logprob.basic import factorized_joint_logprob
from pymc.logprob.basic import factorized_joint_logprob, logp
from pymc.logprob.transforms import (
ChainedTransform,
CoshTransform,
ErfcTransform,
ErfcxTransform,
ErfTransform,
ExpTransform,
IntervalTransform,
LocTransform,
LogOddsTransform,
LogTransform,
RVTransform,
ScaleTransform,
SinhTransform,
TanhTransform,
TransformValuesMapping,
TransformValuesRewrite,
transformed_variable,
@@ -327,6 +333,7 @@ def test_fallback_log_jac_det(ndim):

class SquareTransform(RVTransform):
name = "square"
ndim_supp = ndim

def forward(self, value, *inputs):
return pt.power(value, 2)
@@ -336,13 +343,31 @@ def backward(self, value, *inputs):

square_tr = SquareTransform()

value = pt.TensorType("float64", (None,) * ndim)("value")
value = pt.vector("value")
value_tr = square_tr.forward(value)
log_jac_det = square_tr.log_jac_det(value_tr)

test_value = np.full((2,) * ndim, 3)
expected_log_jac_det = -np.log(6) * test_value.size
assert np.isclose(log_jac_det.eval({value: test_value}), expected_log_jac_det)
test_value = np.r_[3, 4]
expected_log_jac_det = -np.log(2 * test_value)
if ndim == 1:
expected_log_jac_det = expected_log_jac_det.sum()
np.testing.assert_array_equal(log_jac_det.eval({value: test_value}), expected_log_jac_det)


@pytest.mark.parametrize("ndim", (None, 2))
def test_fallback_log_jac_det_undefined_ndim(ndim):
class SquareTransform(RVTransform):
name = "square"
ndim_supp = ndim

def forward(self, value, *inputs):
return pt.power(value, 2)

def backward(self, value, *inputs):
return pt.sqrt(value)

with pytest.raises(NotImplementedError, match=r"only implemented for ndim_supp in \(0, 1\)"):
SquareTransform().log_jac_det(0)


def test_hierarchical_uniform_transform():
@@ -989,3 +1014,57 @@ def test_multivariate_transform(shift, scale):
scale_mat @ cov @ scale_mat.T,
),
)


@pytest.mark.parametrize(
"pt_transform, transform",
[
(pt.erf, ErfTransform()),
(pt.erfc, ErfcTransform()),
(pt.erfcx, ErfcxTransform()),
(pt.sinh, SinhTransform()),
(pt.cosh, CoshTransform()),
(pt.tanh, TanhTransform()),
],
)
def test_erf_logp(pt_transform, transform):
base_rv = pt.random.normal(
0.5, 1, name="base_rv"
) # Something not centered around 0 is usually better
rv = pt_transform(base_rv)

vv = rv.clone()
rv_logp = logp(rv, vv)

expected_logp = logp(base_rv, transform.backward(vv)) + transform.log_jac_det(vv)

vv_test = np.array(0.25) # Arbitrary test value
np.testing.assert_almost_equal(
rv_logp.eval({vv: vv_test}), np.nan_to_num(expected_logp.eval({vv: vv_test}), nan=-np.inf)
)
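
(A note on the nan_to_num above: for CoshTransform the test value 0.25 lies outside cosh's range [1, inf), so the analytic expectation hits arccosh(0.25) = nan, while the derived logp is expected to return -inf there. A tiny illustration:)

import numpy as np

with np.errstate(invalid="ignore"):
    bad = np.arccosh(0.25)  # nan: 0.25 is below cosh's minimum of 1
print(np.nan_to_num(bad, nan=-np.inf))  # -inf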


from pymc.testing import Rplusbig, Vector
from tests.distributions.test_transform import check_jacobian_det


@pytest.mark.parametrize(
"transform",
[
ErfTransform(),
ErfcTransform(),
ErfcxTransform(),
SinhTransform(),
CoshTransform(),
TanhTransform(),
],
)
def test_check_jac_det(transform):
check_jacobian_det(
transform,
Vector(Rplusbig, 2),
pt.dvector,
[0.1, 0.1],
elemwise=True,
rv_var=pt.random.normal(0.5, 1, name="base_rv"),
)
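
To close, a hedged end-to-end sketch of what this PR enables, mirroring test_erf_logp above: the logp of a hyperbolic transform of a base variable can now be derived directly.

import numpy as np
import pytensor.tensor as pt

from pymc.logprob.basic import logp

base_rv = pt.random.normal(0.5, 1, name="base_rv")
rv = pt.tanh(base_rv)  # rewritten into a MeasurableTransform

vv = rv.clone()
rv_logp = logp(rv, vv)
print(rv_logp.eval({vv: np.array(0.25)}))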