diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 268072dd021..4f9f740bf59 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -26,8 +26,8 @@ repos:
         - types-filelock
         - types-setuptools
         - arviz
-        - aesara==2.7.9
-        - aeppl==0.0.34
+        - aesara==2.8.2
+        - aeppl==0.0.35
       always_run: true
       require_serial: true
       pass_filenames: false
diff --git a/conda-envs/environment-dev.yml b/conda-envs/environment-dev.yml
index 4a2a5161ae8..012a9b02d9e 100644
--- a/conda-envs/environment-dev.yml
+++ b/conda-envs/environment-dev.yml
@@ -5,8 +5,8 @@ channels:
 - defaults
 dependencies:
 # Base dependencies
-- aeppl=0.0.34
-- aesara=2.7.9
+- aeppl=0.0.35
+- aesara=2.8.2
 - arviz>=0.12.0
 - blas
 - cachetools>=4.2.1
diff --git a/conda-envs/environment-test.yml b/conda-envs/environment-test.yml
index e4fba70d9a6..b6ca7d53c9f 100644
--- a/conda-envs/environment-test.yml
+++ b/conda-envs/environment-test.yml
@@ -5,8 +5,8 @@ channels:
 - defaults
 dependencies:
 # Base dependencies
-- aeppl=0.0.34
-- aesara=2.7.9
+- aeppl=0.0.35
+- aesara=2.8.2
 - arviz>=0.12.0
 - blas
 - cachetools>=4.2.1
diff --git a/conda-envs/windows-environment-dev.yml b/conda-envs/windows-environment-dev.yml
index f46e6e83e98..5b4bc84e03b 100644
--- a/conda-envs/windows-environment-dev.yml
+++ b/conda-envs/windows-environment-dev.yml
@@ -5,8 +5,8 @@ channels:
 - defaults
 dependencies:
 # Base dependencies (see install guide for Windows)
-- aeppl=0.0.34
-- aesara=2.7.9
+- aeppl=0.0.35
+- aesara=2.8.2
 - arviz>=0.12.0
 - blas
 - cachetools>=4.2.1
diff --git a/conda-envs/windows-environment-test.yml b/conda-envs/windows-environment-test.yml
index 1f3432ffd07..6e9ec1437d6 100644
--- a/conda-envs/windows-environment-test.yml
+++ b/conda-envs/windows-environment-test.yml
@@ -5,8 +5,8 @@ channels:
 - defaults
 dependencies:
 # Base dependencies (see install guide for Windows)
-- aeppl=0.0.34
-- aesara=2.7.9
+- aeppl=0.0.35
+- aesara=2.8.2
 - arviz>=0.12.0
 - blas
 - cachetools>=4.2.1
diff --git a/pymc/aesaraf.py b/pymc/aesaraf.py
index 7deed9fe837..711031e2cae 100644
--- a/pymc/aesaraf.py
+++ b/pymc/aesaraf.py
@@ -37,7 +37,7 @@
 from aesara import config, scalar
 from aesara.compile.mode import Mode, get_mode
 from aesara.gradient import grad
-from aesara.graph import local_optimizer
+from aesara.graph import node_rewriter
 from aesara.graph.basic import (
     Apply,
     Constant,
@@ -875,7 +875,7 @@ def largest_common_dtype(tensors):
     return np.stack([np.ones((), dtype=dtype) for dtype in dtypes]).dtype
 
 
-@local_optimizer(tracks=[CheckParameterValue])
+@node_rewriter(tracks=[CheckParameterValue])
 def local_remove_check_parameter(fgraph, node):
     """Rewrite that removes Aeppl's CheckParameterValue
 
@@ -885,7 +885,7 @@ def local_remove_check_parameter(fgraph, node):
         return [node.inputs[0]]
 
 
-@local_optimizer(tracks=[CheckParameterValue])
+@node_rewriter(tracks=[CheckParameterValue])
 def local_check_parameter_to_ninf_switch(fgraph, node):
     if isinstance(node.op, CheckParameterValue):
         logp_expr, *logp_conds = node.inputs
diff --git a/pymc/distributions/continuous.py b/pymc/distributions/continuous.py
index 0dce8607865..8f95c1f287d 100644
--- a/pymc/distributions/continuous.py
+++ b/pymc/distributions/continuous.py
@@ -35,7 +35,6 @@
 from aesara.tensor.math import tanh
 from aesara.tensor.random.basic import (
     BetaRV,
-    WeibullRV,
     cauchy,
     chisquare,
     exponential,
@@ -1464,7 +1463,7 @@ def dist(cls, lam, *args, **kwargs):
         lam = at.as_tensor_variable(floatX(lam))
 
         # Aesara exponential op is parametrized in terms of mu (1/lam)
-        return super().dist([at.inv(lam)], **kwargs)
+        return super().dist([at.reciprocal(lam)], **kwargs)
 
     def moment(rv, size, mu):
         if not rv_size_is_none(size):
@@ -1487,7 +1486,7 @@ def logcdf(value, mu):
         -------
         TensorVariable
         """
-        lam = at.inv(mu)
+        lam = at.reciprocal(mu)
         res = at.switch(
             at.lt(value, 0),
             -np.inf,
@@ -2313,7 +2312,7 @@ def logcdf(value, alpha, inv_beta):
         -------
         TensorVariable
         """
-        beta = at.inv(inv_beta)
+        beta = at.reciprocal(inv_beta)
         res = at.switch(
             at.lt(value, 0),
             -np.inf,
@@ -2518,8 +2517,15 @@ def logcdf(value, nu):
 
 
 # TODO: Remove this once logp for multiplication is working!
-class WeibullBetaRV(WeibullRV):
+class WeibullBetaRV(RandomVariable):
+    name = "weibull"
+    ndim_supp = 0
     ndims_params = [0, 0]
+    dtype = "floatX"
+    _print_name = ("Weibull", "\\operatorname{Weibull}")
+
+    def __call__(self, alpha, beta, size=None, **kwargs):
+        return super().__call__(alpha, beta, size=size, **kwargs)
 
     @classmethod
     def rng_fn(cls, rng, alpha, beta, size) -> np.ndarray:
@@ -2615,6 +2621,16 @@ def logcdf(value, alpha, beta):
 
         return check_parameters(res, 0 < alpha, 0 < beta, msg="alpha > 0, beta > 0")
 
+    def logp(value, alpha, beta):
+        res = (
+            at.log(alpha)
+            - at.log(beta)
+            + (alpha - 1.0) * at.log(value / beta)
+            - at.pow(value / beta, alpha)
+        )
+        res = at.switch(at.ge(value, 0.0), res, -np.inf)
+        return check_parameters(res, 0 < alpha, 0 < beta, msg="alpha > 0, beta > 0")
+
 
 class HalfStudentTRV(RandomVariable):
     name = "halfstudentt"
diff --git a/pymc/distributions/dist_math.py b/pymc/distributions/dist_math.py
index 2d3a8f5dc11..5f91b5bb830 100644
--- a/pymc/distributions/dist_math.py
+++ b/pymc/distributions/dist_math.py
@@ -157,7 +157,7 @@ def sigma2rho(sigma):
     """
     `sigma -> rho` Aesara converter
     :math:`mu + sigma*e = mu + log(1+exp(rho))*e`"""
-    return at.log(at.exp(at.abs_(sigma)) - 1.0)
+    return at.log(at.exp(at.abs(sigma)) - 1.0)
 
 
 def rho2sigma(rho):
@@ -213,7 +213,7 @@ def log_normal(x, mean, **kwargs):
     else:
         std = tau ** (-1)
     std += f(eps)
-    return f(c) - at.log(at.abs_(std)) - (x - mean) ** 2 / (2.0 * std**2)
+    return f(c) - at.log(at.abs(std)) - (x - mean) ** 2 / (2.0 * std**2)
 
 
 def MvNormalLogp():
diff --git a/pymc/distributions/logprob.py b/pymc/distributions/logprob.py
index 088b76e2ae7..588450560b2 100644
--- a/pymc/distributions/logprob.py
+++ b/pymc/distributions/logprob.py
@@ -24,7 +24,7 @@
 from aeppl.abstract import assign_custom_measurable_outputs
 from aeppl.logprob import logcdf as logcdf_aeppl
 from aeppl.logprob import logprob as logp_aeppl
-from aeppl.transforms import TransformValuesOpt
+from aeppl.transforms import TransformValuesRewrite
 from aesara.graph.basic import graph_inputs, io_toposort
 from aesara.tensor.random.op import RandomVariable
 from aesara.tensor.subtensor import (
@@ -231,7 +231,7 @@ def joint_logp(
         if original_value_var is not None and hasattr(original_value_var.tag, "transform"):
             transform_map[value_var] = original_value_var.tag.transform
 
-    transform_opt = TransformValuesOpt(transform_map)
+    transform_opt = TransformValuesRewrite(transform_map)
     temp_logp_var_dict = factorized_joint_logprob(
         tmp_rvs_to_values,
         extra_rewrites=transform_opt,
diff --git a/pymc/distributions/multivariate.py b/pymc/distributions/multivariate.py
index 99568861bf1..4ad26a06235 100644
--- a/pymc/distributions/multivariate.py
+++ b/pymc/distributions/multivariate.py
@@ -539,7 +539,7 @@ def moment(rv, size, n, p):
         n = at.shape_padright(n)
         mode = at.round(n * p)
         diff = n - at.sum(mode, axis=-1, keepdims=True)
-        inc_bool_arr = at.abs_(diff) > 0
+        inc_bool_arr = at.abs(diff) > 0
         mode = at.inc_subtensor(mode[inc_bool_arr.nonzero()], diff[inc_bool_arr.nonzero()])
         if not rv_size_is_none(size):
             output_size = at.concatenate([size, [p.shape[-1]]])
diff --git a/pymc/distributions/simulator.py b/pymc/distributions/simulator.py
index 7f152b7d9a2..a1c8d5d2a33 100644
--- a/pymc/distributions/simulator.py
+++ b/pymc/distributions/simulator.py
@@ -272,7 +272,7 @@ def gaussian(epsilon, obs_data, sim_data):
 
 def laplace(epsilon, obs_data, sim_data):
     """Laplace kernel."""
-    return -at.abs_((obs_data - sim_data) / epsilon)
+    return -at.abs((obs_data - sim_data) / epsilon)
 
 
 class KullbackLeibler:
diff --git a/pymc/distributions/timeseries.py b/pymc/distributions/timeseries.py
index 347ed90d0fe..31e85914258 100644
--- a/pymc/distributions/timeseries.py
+++ b/pymc/distributions/timeseries.py
@@ -23,7 +23,7 @@
 from aeppl.logprob import _logprob
 from aesara import scan
 from aesara.compile.builders import OpFromGraph
-from aesara.graph import FunctionGraph, optimize_graph
+from aesara.graph import FunctionGraph, rewrite_graph
 from aesara.graph.basic import Node
 from aesara.raise_op import Assert
 from aesara.tensor import TensorVariable
@@ -495,7 +495,7 @@ def _get_ar_order(cls, rhos: TensorVariable, ar_order: Optional[int], constant:
             features=[ShapeFeature()],
             clone=True,
         )
-        (folded_shape,) = optimize_graph(shape_fg, custom_opt=topo_constant_folding).outputs
+        (folded_shape,) = rewrite_graph(shape_fg, custom_opt=topo_constant_folding).outputs
         folded_shape = getattr(folded_shape, "data", None)
         if folded_shape is None:
             raise ValueError(
diff --git a/pymc/gp/cov.py b/pymc/gp/cov.py
index 41919a0dfb4..2894e7cd9e2 100644
--- a/pymc/gp/cov.py
+++ b/pymc/gp/cov.py
@@ -347,7 +347,7 @@ def dist(self, X, Xs):
             Xs = at.transpose(X)
         else:
             Xs = at.transpose(Xs)
-        return at.abs_((X - Xs + self.c) % (self.c * 2) - self.c)
+        return at.abs((X - Xs + self.c) % (self.c * 2) - self.c)
 
     def weinland(self, t):
         return (1 + self.tau * t / self.c) * at.clip(1 - t / self.c, 0, np.inf) ** self.tau
diff --git a/pymc/math.py b/pymc/math.py
index de5e5a723ec..4acd7359c6d 100644
--- a/pymc/math.py
+++ b/pymc/math.py
@@ -30,7 +30,7 @@
 
 # pylint: disable=unused-import
 from aesara.tensor import (
-    abs_,
+    abs,
     and_,
     ceil,
     clip,
@@ -90,7 +90,7 @@
 # pylint: enable=unused-import
 
 __all__ = [
-    "abs_",
+    "abs",
     "and_",
     "ceil",
     "clip",
diff --git a/pymc/sampling_jax.py b/pymc/sampling_jax.py
index b1e906b93f0..b368411fc85 100644
--- a/pymc/sampling_jax.py
+++ b/pymc/sampling_jax.py
@@ -93,7 +93,7 @@ def get_jaxified_graph(
             if not (hasattr(fgraph, "destroyers") and fgraph.has_destroyers([input]))
         )
     )
-    mode.JAX.optimizer.optimize(fgraph)
+    mode.JAX.optimizer.rewrite(fgraph)
 
     # We now jaxify the optimized fgraph
     return jax_funcify(fgraph)
diff --git a/pymc/tests/test_transforms.py b/pymc/tests/test_transforms.py
index d939d0acfb9..226495cebbc 100644
--- a/pymc/tests/test_transforms.py
+++ b/pymc/tests/test_transforms.py
@@ -100,7 +100,7 @@ def check_jacobian_det(
     if not elemwise:
         jac = at.log(at.nlinalg.det(jacobian(x, [y])))
     else:
-        jac = at.log(at.abs_(at.diag(jacobian(x, [y]))))
+        jac = at.log(at.abs(at.diag(jacobian(x, [y]))))
 
     # ljd = log jacobian det
     actual_ljd = aesara.function([y], jac)
diff --git a/requirements-dev.txt b/requirements-dev.txt
index ec7de780a0a..7ca036b4bd5 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -1,8 +1,8 @@
 # This file is auto-generated by scripts/generate_pip_deps_from_conda.py, do not modify.
 # See that file for comments about the need/usage of each dependency.
-aeppl==0.0.34
-aesara==2.7.9
+aeppl==0.0.35
+aesara==2.8.2
 arviz>=0.12.0
 cachetools>=4.2.1
 cloudpickle
diff --git a/requirements.txt b/requirements.txt
index ae6acdbe96d..9e01f1d5fc9 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,5 @@
-aeppl==0.0.34
-aesara==2.7.9
+aeppl==0.0.35
+aesara==2.8.2
 arviz>=0.12.0
 cachetools>=4.2.1
 cloudpickle
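
Apart from the version pins and the WeibullBetaRV/logp changes, every source change above is a mechanical rename tracking Aesara 2.8's rewrite-oriented API and aeppl 0.0.35: local_optimizer -> node_rewriter, optimize_graph -> rewrite_graph, mode.JAX.optimizer.optimize -> mode.JAX.optimizer.rewrite, at.abs_ -> at.abs, at.inv -> at.reciprocal, and TransformValuesOpt -> TransformValuesRewrite. A minimal sketch of the new spellings, assuming aesara>=2.8.2 is installed; the noop_rewrite function and the variables below are illustrative, not part of this diff:

import aesara.tensor as at
from aesara.graph import node_rewriter  # was: from aesara.graph import local_optimizer


@node_rewriter(tracks=None)  # was: @local_optimizer(...); tracks=None matches any node
def noop_rewrite(fgraph, node):
    # Returning None tells the rewriter to leave the node unchanged.
    return None


x = at.vector("x")
y = at.abs(x) + at.reciprocal(x)  # was: at.abs_(x) + at.inv(x)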