From fa43a1a48de42c8d15204578102070677100ea09 Mon Sep 17 00:00:00 2001
From: Michael Osthege
Date: Thu, 8 Apr 2021 15:47:25 +0200
Subject: [PATCH] Implement shape/dims/size API

Dims with Ellipsis are not yet implemented.
Some tests were refactored because size is now implemented more consistently.
---
 pymc3/distributions/distribution.py      | 114 +++++++++++++++++++++--
 pymc3/tests/test_data_container.py       |  34 +++++--
 pymc3/tests/test_distributions_random.py |   7 +-
 pymc3/tests/test_sampling.py             |  30 +++---
 pymc3/tests/test_shape_handling.py       |  98 +++++++++++++++++++
 pymc3/tests/test_transforms.py           |  92 +++++++++---------
 6 files changed, 293 insertions(+), 82 deletions(-)

diff --git a/pymc3/distributions/distribution.py b/pymc3/distributions/distribution.py
index e3f58937180..7648cd20cbc 100644
--- a/pymc3/distributions/distribution.py
+++ b/pymc3/distributions/distribution.py
@@ -13,6 +13,7 @@
 #   limitations under the License.
 import contextvars
 import inspect
+import logging
 import multiprocessing
 import sys
 import types
@@ -20,7 +21,7 @@
 
 from abc import ABCMeta
 from copy import copy
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, Sequence, Union
 
 import dill
 
@@ -35,6 +36,8 @@
 import aesara.graph.basic
 import aesara.tensor as at
 
+from pymc3.aesaraf import change_rv_size
+from pymc3.exceptions import ShapeError
 from pymc3.util import UNSET, get_repr_for_variable
 from pymc3.vartypes import string_types
 
@@ -46,6 +49,8 @@
     "NoDistribution",
 ]
 
+_log = logging.getLogger(__file__)
+
 vectorized_ppc = contextvars.ContextVar(
     "vectorized_ppc", default=None
 )  # type: contextvars.ContextVar[Optional[Callable]]
@@ -122,6 +127,19 @@ def logcdf(op, var, rvs_to_values, *dist_params, **kwargs):
         return new_cls
 
 
+def _valid_ellipsis_position(
+    items: Union[
+        None,
+        Sequence[Union[str, type(Ellipsis)]],
+        Sequence[Union[int, type(Ellipsis)]],
+    ]
+):
+    if items is not None and Ellipsis in items:
+        if any(i == Ellipsis for i in items[:-1]):
+            return False
+    return True
+
+
 class Distribution(metaclass=DistributionMeta):
     """Statistical distribution"""
 
@@ -141,26 +159,102 @@ def __new__(cls, name, *args, **kwargs):
                 "for a standalone distribution."
             )
 
-        rng = kwargs.pop("rng", None)
+        if not isinstance(name, string_types):
+            raise TypeError(f"Name needs to be a string but got: {name}")
+
+        # Pop out PyMC3-related kwargs so only the distribution kwargs remain
+        rng = kwargs.pop("rng", None)
         if rng is None:
             rng = model.default_rng
 
-        if not isinstance(name, string_types):
-            raise TypeError(f"Name needs to be a string but got: {name}")
-
         data = kwargs.pop("observed", None)
-
         total_size = kwargs.pop("total_size", None)
+        testval = kwargs.pop("testval", None)
+        transform = kwargs.pop("transform", UNSET)
+        shape = kwargs.pop("shape", None)
         dims = kwargs.pop("dims", None)
+        size = kwargs.pop("size", None)
+
+        # Raise on unsupported parametrization
+        if shape is not None and dims is not None:
+            raise ValueError(f"Passing both `shape` ({shape}) and `dims` ({dims}) is not supported!")
+        if dims is not None and size is not None:
+            raise ValueError(f"Passing both `dims` ({dims}) and `size` ({size}) is not supported!")
+        if shape is not None and size is not None:
+            raise ValueError(f"Passing both `shape` ({shape}) and `size` ({size}) is not supported!")
+
+        # Warn about discouraged parametrization
+        if shape is not None and not isinstance(shape, (list, tuple)):
+            warnings.warn("The `shape` parameter should be a list or tuple.", UserWarning)
+            shape = (shape,)
+        if dims is not None and not isinstance(dims, (list, tuple)):
+            warnings.warn("The `dims` parameter should be a list or tuple.", UserWarning)
+            dims = (dims,)
+        if size is not None and not isinstance(size, (list, tuple)):
+            warnings.warn("The `size` parameter should be a list or tuple.", UserWarning)
+            size = (size,)
+
+        if size is not None and Ellipsis in size:
+            raise ValueError(f"The `size` parameter cannot contain an Ellipsis. Actual: {size}")
+        if not _valid_ellipsis_position(shape):
+            raise ValueError(
+                f"Ellipsis in `shape` may only appear in the last position. Actual: {shape}"
+            )
+        if not _valid_ellipsis_position(dims):
+            raise ValueError(
+                f"Ellipsis in `dims` may only appear in the last position. Actual: {dims}"
+            )
 
-        if "shape" in kwargs:
-            raise DeprecationWarning("The `shape` keyword is deprecated; use `size`.")
+        # Create the RV without specifying size or testval.
+        # The size will be expanded later (if necessary) and only then the testval fits.
+        rv_native = cls.dist(*args, rng=rng, testval=None, size=None, **kwargs)
+        implied_ndim = rv_native.ndim
+        implied_dims = ...  # TODO: infer dimension names from Variables!
+
+        if shape is None and dims is None and size is None:
+            size = ()
+        elif size is not None:
+            # User already specified how to expand the RV
+            pass
+        elif shape is not None:
+            # Infer size from shape
+            if Ellipsis in shape:
+                size = tuple(shape[:-1])
+            else:
+                size = tuple(shape[: len(shape) - implied_ndim])
+                if size:
+                    warnings.warn(
+                        f"The specified shape {shape} has {len(shape)} dimensions. "
+                        f"This is more than the {implied_ndim} dimensions implied by the distribution parameters. "
" + f"To replicate the RV beyond its implied dimensionality use `size={size}` instead.", + UserWarning, + ) + elif dims is not None: + # Infer size from dims (and coords) + if Ellipsis in dims: + raise NotImplementedError("Ellipsis-based dims inference is not implemented.") + dims = tuple(dims[:-1]) + implied_dims + dimshape = tuple(len(model.coords[dname]) for dname in dims) + size = tuple(dimshape[: len(dimshape) - implied_ndim]) + else: + raise Exception("This should have been unreachable code.") - transform = kwargs.pop("transform", UNSET) + if size: + rv_out = change_rv_size(rv_var=rv_native, new_size=size, expand=True) + else: + rv_out = rv_native - rv_out = cls.dist(*args, rng=rng, **kwargs) + if rv_out.ndim != len(size) + implied_ndim: + raise ShapeError( + "Created RV has incorrect dimensionality.", + actual=rv_out.ndim, + expected=len(size) + implied_ndim, + ) + + if testval is not None: + # Assigning the testval earlier causes trouble because the RV may not be created with the final shape already. + rv_out.tag.test_value = testval return model.register_rv(rv_out, name, data, total_size, dims=dims, transform=transform) diff --git a/pymc3/tests/test_data_container.py b/pymc3/tests/test_data_container.py index 88a1432d488..3d357d46338 100644 --- a/pymc3/tests/test_data_container.py +++ b/pymc3/tests/test_data_container.py @@ -158,22 +158,42 @@ def test_shared_data_as_rv_input(self): """ with pm.Model() as m: x = pm.Data("x", [1.0, 2.0, 3.0]) - _ = pm.Normal("y", mu=x, size=3) - trace = pm.sample( - chains=1, return_inferencedata=False, compute_convergence_checks=False + assert x.eval().shape == (3,) + y = pm.Normal("y", mu=x, size=2) + assert y.eval().shape == (2, 3) + idata = pm.sample( + chains=1, + tune=500, + draws=550, + return_inferencedata=True, + compute_convergence_checks=False, ) + samples = idata.posterior["y"] + assert samples.shape == (1, 550, 2, 3) np.testing.assert_allclose(np.array([1.0, 2.0, 3.0]), x.get_value(), atol=1e-1) - np.testing.assert_allclose(np.array([1.0, 2.0, 3.0]), trace["y"].mean(0), atol=1e-1) + np.testing.assert_allclose( + np.array([1.0, 2.0, 3.0]), samples.mean(("chain", "draw", "y_dim_0")), atol=1e-1 + ) with m: pm.set_data({"x": np.array([2.0, 4.0, 6.0])}) - trace = pm.sample( - chains=1, return_inferencedata=False, compute_convergence_checks=False + assert x.eval().shape == (3,) + assert y.eval().shape == (2, 3) + idata = pm.sample( + chains=1, + tune=500, + draws=620, + return_inferencedata=True, + compute_convergence_checks=False, ) + samples = idata.posterior["y"] + assert samples.shape == (1, 620, 2, 3) np.testing.assert_allclose(np.array([2.0, 4.0, 6.0]), x.get_value(), atol=1e-1) - np.testing.assert_allclose(np.array([2.0, 4.0, 6.0]), trace["y"].mean(0), atol=1e-1) + np.testing.assert_allclose( + np.array([2.0, 4.0, 6.0]), samples.mean(("chain", "draw", "y_dim_0")), atol=1e-1 + ) def test_shared_scalar_as_rv_input(self): # See https://github.com/pymc-devs/pymc3/issues/3139 diff --git a/pymc3/tests/test_distributions_random.py b/pymc3/tests/test_distributions_random.py index 3d677460a01..602417eb6dc 100644 --- a/pymc3/tests/test_distributions_random.py +++ b/pymc3/tests/test_distributions_random.py @@ -174,12 +174,7 @@ def get_random_variable(self, shape, with_vector_params=False, name=None): # in the test case parametrization "None" means "no specified (default)" return self.distribution(name, transform=None, **params) else: - ndim_supp = self.distribution.rv_op.ndim_supp - if ndim_supp == 0: - size = shape - else: - size = 
-                    size = shape[:-ndim_supp]
-                return self.distribution(name, size=size, transform=None, **params)
+                return self.distribution(name, shape=shape, transform=None, **params)
         except TypeError:
             if np.sum(np.atleast_1d(shape)) == 0:
                 pytest.skip("Timeseries must have positive shape")
diff --git a/pymc3/tests/test_sampling.py b/pymc3/tests/test_sampling.py
index 86d89424f4d..01e3b104475 100644
--- a/pymc3/tests/test_sampling.py
+++ b/pymc3/tests/test_sampling.py
@@ -213,7 +213,7 @@ def test_return_inferencedata(self, monkeypatch):
                 return_inferencedata=True,
                 discard_tuned_samples=True,
                 idata_kwargs={"prior": prior},
-                random_seed=-1
+                random_seed=-1,
             )
         assert "prior" in result
         assert isinstance(result, InferenceData)
@@ -380,11 +380,11 @@ def test_shared_named(self):
                 "theta0",
                 mu=np.atleast_2d(0),
                 tau=np.atleast_2d(1e20),
-                size=(1, 1),
+                shape=(1, 1),
                 testval=np.atleast_2d(0),
             )
             theta = pm.Normal(
-                "theta", mu=at.dot(G_var, theta0), tau=np.atleast_2d(1e20), size=(1, 1)
+                "theta", mu=at.dot(G_var, theta0), tau=np.atleast_2d(1e20), shape=(1, 1)
             )
             res = theta.eval()
             assert np.isclose(res, 0.0)
@@ -396,11 +396,11 @@ def test_shared_unnamed(self):
                 "theta0",
                 mu=np.atleast_2d(0),
                 tau=np.atleast_2d(1e20),
-                size=(1, 1),
+                shape=(1, 1),
                 testval=np.atleast_2d(0),
             )
             theta = pm.Normal(
-                "theta", mu=at.dot(G_var, theta0), tau=np.atleast_2d(1e20), size=(1, 1)
+                "theta", mu=at.dot(G_var, theta0), tau=np.atleast_2d(1e20), shape=(1, 1)
             )
             res = theta.eval()
             assert np.isclose(res, 0.0)
@@ -412,11 +412,11 @@ def test_constant_named(self):
                 "theta0",
                 mu=np.atleast_2d(0),
                 tau=np.atleast_2d(1e20),
-                size=(1, 1),
+                shape=(1, 1),
                 testval=np.atleast_2d(0),
             )
             theta = pm.Normal(
-                "theta", mu=at.dot(G_var, theta0), tau=np.atleast_2d(1e20), size=(1, 1)
+                "theta", mu=at.dot(G_var, theta0), tau=np.atleast_2d(1e20), shape=(1, 1)
             )
 
             res = theta.eval()
@@ -931,14 +931,14 @@ def test_ignores_observed(self):
         npt.assert_array_almost_equal(prior["positive_mu"], np.abs(prior["mu"]), decimal=4)
 
     def test_respects_shape(self):
-        for shape in (2, (2,), (10, 2), (10, 10)):
+        for shape in ((2,), (10, 2), (10, 10)):
             with pm.Model():
-                mu = pm.Gamma("mu", 3, 1, size=1)
-                goals = pm.Poisson("goals", mu, size=shape)
+                mu = pm.Gamma("mu", 3, 1)
+                assert mu.eval().shape == ()
+                goals = pm.Poisson("goals", mu, shape=shape)
+                assert goals.eval().shape == shape, f"Current shape setting: {shape}"
                 trace1 = pm.sample_prior_predictive(10, var_names=["mu", "mu", "goals"])
                 trace2 = pm.sample_prior_predictive(10, var_names=["mu", "goals"])
-            if shape == 2:  # want to test shape as an int
-                shape = (2,)
             assert trace1["goals"].shape == (10,) + shape
             assert trace2["goals"].shape == (10,) + shape
 
@@ -966,14 +966,14 @@ def test_multivariate2(self):
 
     def test_layers(self):
         with pm.Model() as model:
-            a = pm.Uniform("a", lower=0, upper=1, size=10)
-            b = pm.Binomial("b", n=1, p=a, size=10)
+            a = pm.Uniform("a", lower=0, upper=1, size=5)
+            b = pm.Binomial("b", n=1, p=a, size=7)
 
         model.default_rng.get_value(borrow=True).seed(232093)
 
         b_sampler = aesara.function([], b)
         avg = np.stack([b_sampler() for i in range(10000)]).mean(0)
-        npt.assert_array_almost_equal(avg, 0.5 * np.ones((10,)), decimal=2)
+        npt.assert_array_almost_equal(avg, 0.5 * np.ones((7, 5)), decimal=2)
 
     def test_transformed(self):
         n = 18
diff --git a/pymc3/tests/test_shape_handling.py b/pymc3/tests/test_shape_handling.py
index 37c06193226..be4a741f1b1 100644
--- a/pymc3/tests/test_shape_handling.py
+++ b/pymc3/tests/test_shape_handling.py
@@ -11,7 +11,9 @@
 #   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 #   See the License for the specific language governing permissions and
 #   limitations under the License.
+import logging
 
+import aesara
 import numpy as np
 import pytest
 
@@ -28,6 +30,8 @@
     to_tuple,
 )
 
+_log = logging.getLogger(__file__)
+
 test_shapes = [
     (tuple(), (1,), (4,), (5, 4)),
     (tuple(), (1,), (7,), (5, 4)),
@@ -219,3 +223,97 @@ def test_sample_generate_values(fixture_model, fixture_sizes):
     prior = pm.sample_prior_predictive(samples=fixture_sizes)
     for rv in RVs:
         assert prior[rv.name].shape == size + tuple(rv.distribution.shape)
+
+
+class TestShapeDimsSize:
+    @pytest.mark.parametrize("param_shape", [(), (3,)])
+    @pytest.mark.parametrize("batch_shape", [(), (3,)])
+    @pytest.mark.parametrize(
+        "parametrization",
+        [
+            "implicit",
+            "shape",
+            "shape...",
+            "dims",
+            # not implemented: "dims...",
+            "size",
+        ],
+    )
+    def test_param_and_batch_shape_combos(
+        self, param_shape: tuple, batch_shape: tuple, parametrization: str
+    ):
+        coords = {}
+        param_dims = []
+        batch_dims = []
+
+        # Create coordinates corresponding to the parameter shape
+        for d in param_shape:
+            dname = f"param_dim_{d}"
+            coords[dname] = [f"c_{i}" for i in range(d)]
+            param_dims.append(dname)
+        assert len(param_dims) == len(param_shape)
+        # Create coordinates corresponding to the batch shape
+        for d in batch_shape:
+            dname = f"batch_dim_{d}"
+            coords[dname] = [f"c_{i}" for i in range(d)]
+            batch_dims.append(dname)
+        assert len(batch_dims) == len(batch_shape)
+
+        with pm.Model(coords=coords) as pmodel:
+            mu = aesara.shared(np.random.normal(size=param_shape))
+
+            if parametrization == "implicit":
+                rv = pm.Normal("rv", mu=mu)
+                assert rv.eval().shape == param_shape
+            else:
+                if parametrization == "shape":
+                    rv = pm.Normal("rv", mu=mu, shape=batch_shape + param_shape)
+                    assert rv.eval().shape == batch_shape + param_shape
+                elif parametrization == "shape...":
+                    rv = pm.Normal("rv", mu=mu, shape=(*batch_shape, ...))
+                    assert rv.eval().shape == batch_shape + param_shape
+                elif parametrization == "dims":
+                    rv = pm.Normal("rv", mu=mu, dims=batch_dims + param_dims)
+                    assert rv.eval().shape == batch_shape + param_shape
+                elif parametrization == "dims...":
+                    rv = pm.Normal("rv", mu=mu, dims=(*batch_dims, ...))
+                    assert rv.eval().shape == batch_shape + param_shape
+                elif parametrization == "size":
+                    rv = pm.Normal("rv", mu=mu, size=batch_shape)
+                    assert rv.eval().shape == batch_shape + param_shape
+                else:
+                    raise NotImplementedError("Invalid test case parametrization.")
+        pass
+
+    def test_discouraged_flavors(self):
+        with pm.Model(coords=dict(town=["Greifswald", "Madrid"])):
+            with pytest.warns(UserWarning, match="should be a list or tuple"):
+                assert pm.Normal("n1", shape=5).eval().shape == (5,)
+            with pytest.warns(UserWarning, match="should be a list or tuple"):
+                assert pm.Normal("n2", dims="town").eval().shape == (2,)
+            with pytest.warns(UserWarning, match="should be a list or tuple"):
+                assert pm.Normal("n3", size=7).eval().shape == (7,)
+
+            # Replication beyond implied dimensionality should be done with `size`.
+            with pytest.warns(UserWarning, match="use `size="):
+                assert pm.Normal("n4", mu=2, shape=(9,)).eval().shape == (9,)
+        pass
+
+    def test_invalid_flavors(self):
+        with pm.Model(coords=dict(town=["Greifswald", "Madrid"])):
+            # redundant parametrizations
+            with pytest.raises(ValueError, match="Passing both"):
+                pm.Normal("n", shape=(2,), dims=("town",))
+            with pytest.raises(ValueError, match="Passing both"):
+                pm.Normal("n", dims=("town",), size=(2,))
+            with pytest.raises(ValueError, match="Passing both"):
+                pm.Normal("n", shape=(3,), size=(3,))
+
+            # invalid ellipsis positions
+            mu = aesara.shared([1, 2, 3])
+            with pytest.raises(ValueError, match="may only appear in the last position"):
+                pm.Normal("n", mu=mu, shape=(3, ..., 2))
+            with pytest.raises(ValueError, match="may only appear in the last position"):
+                pm.Normal("n", mu=mu, dims=(..., "town"))
+            with pytest.raises(ValueError, match="cannot contain"):
+                pm.Normal("n", mu=mu, size=(3, ...))
diff --git a/pymc3/tests/test_transforms.py b/pymc3/tests/test_transforms.py
index fd32d8b9b65..8839294631f 100644
--- a/pymc3/tests/test_transforms.py
+++ b/pymc3/tests/test_transforms.py
@@ -274,11 +274,11 @@ def test_chain_jacob_det():
 
 class TestElementWiseLogp(SeededTest):
-    def build_model(self, distfam, params, size, transform, testval=None):
+    def build_model(self, distfam, params, *, size=None, shape=None, transform=None, testval=None):
         if testval is not None:
             testval = pm.floatX(testval)
         with pm.Model() as m:
-            distfam("x", size=size, transform=transform, testval=testval, **params)
+            distfam("x", size=size, shape=shape, transform=transform, testval=testval, **params)
         return m
 
     def check_transform_elementwise_logp(self, model):
@@ -317,43 +317,45 @@ def check_vectortransform_elementwise_logp(self, model, vect_opt=0):
             close_to(a, b, np.abs(0.5 * (a + b) * tol))
 
     @pytest.mark.parametrize(
-        "sd,size",
+        "sd,shape",
         [
             (2.5, 2),
             (5.0, (2, 3)),
             (np.ones(3) * 10.0, (4, 3)),
         ],
     )
-    def test_half_normal(self, sd, size):
-        model = self.build_model(pm.HalfNormal, {"sd": sd}, size=size, transform=tr.log)
+    def test_half_normal(self, sd, shape):
+        model = self.build_model(pm.HalfNormal, {"sd": sd}, shape=shape, transform=tr.log)
         self.check_transform_elementwise_logp(model)
 
-    @pytest.mark.parametrize("lam,size", [(2.5, 2), (5.0, (2, 3)), (np.ones(3), (4, 3))])
-    def test_exponential(self, lam, size):
-        model = self.build_model(pm.Exponential, {"lam": lam}, size=size, transform=tr.log)
+    @pytest.mark.parametrize("lam,shape", [(2.5, 2), (5.0, (2, 3)), (np.ones(3), (4, 3))])
+    def test_exponential(self, lam, shape):
+        model = self.build_model(pm.Exponential, {"lam": lam}, shape=shape, transform=tr.log)
         self.check_transform_elementwise_logp(model)
 
     @pytest.mark.parametrize(
-        "a,b,size",
+        "a,b,shape",
         [
             (1.0, 1.0, 2),
             (0.5, 0.5, (2, 3)),
             (np.ones(3), np.ones(3), (4, 3)),
         ],
    )
-    def test_beta(self, a, b, size):
-        model = self.build_model(pm.Beta, {"alpha": a, "beta": b}, size=size, transform=tr.logodds)
+    def test_beta(self, a, b, shape):
+        model = self.build_model(
+            pm.Beta, {"alpha": a, "beta": b}, shape=shape, transform=tr.logodds
+        )
         self.check_transform_elementwise_logp(model)
 
     @pytest.mark.parametrize(
-        "lower,upper,size",
+        "lower,upper,shape",
         [
             (0.0, 1.0, 2),
             (0.5, 5.5, (2, 3)),
             (pm.floatX(np.zeros(3)), pm.floatX(np.ones(3)), (4, 3)),
         ],
     )
-    def test_uniform(self, lower, upper, size):
+    def test_uniform(self, lower, upper, shape):
         def transform_params(rv_var):
             _, _, _, lower, upper = rv_var.owner.inputs
             lower = at.as_tensor_variable(lower) if lower is not None else None
@@ -362,25 +364,25 @@ def transform_params(rv_var):
         interval = tr.Interval(transform_params)
 
         model = self.build_model(
-            pm.Uniform, {"lower": lower, "upper": upper}, size=size, transform=interval
+            pm.Uniform, {"lower": lower, "upper": upper}, shape=shape, transform=interval
         )
         self.check_transform_elementwise_logp(model)
 
     @pytest.mark.parametrize(
-        "mu,kappa,size", [(0.0, 1.0, 2), (-0.5, 5.5, (2, 3)), (np.zeros(3), np.ones(3), (4, 3))]
+        "mu,kappa,shape", [(0.0, 1.0, 2), (-0.5, 5.5, (2, 3)), (np.zeros(3), np.ones(3), (4, 3))]
     )
     @pytest.mark.xfail(reason="Distribution not refactored yet")
-    def test_vonmises(self, mu, kappa, size):
+    def test_vonmises(self, mu, kappa, shape):
         model = self.build_model(
-            pm.VonMises, {"mu": mu, "kappa": kappa}, size=size, transform=tr.circular
+            pm.VonMises, {"mu": mu, "kappa": kappa}, shape=shape, transform=tr.circular
         )
         self.check_transform_elementwise_logp(model)
 
     @pytest.mark.parametrize(
-        "a,size", [(np.ones(2), None), (np.ones((2, 3)) * 0.5, None), (np.ones(3), (4,))]
+        "a,shape", [(np.ones(2), None), (np.ones((2, 3)) * 0.5, None), (np.ones(3), (4,))]
     )
-    def test_dirichlet(self, a, size):
-        model = self.build_model(pm.Dirichlet, {"a": a}, size=size, transform=tr.stick_breaking)
+    def test_dirichlet(self, a, shape):
+        model = self.build_model(pm.Dirichlet, {"a": a}, shape=shape, transform=tr.stick_breaking)
         self.check_vectortransform_elementwise_logp(model, vect_opt=1)
 
     def test_normal_ordered(self):
@@ -394,59 +396,59 @@ def test_normal_ordered(self):
         self.check_vectortransform_elementwise_logp(model, vect_opt=0)
 
     @pytest.mark.parametrize(
-        "sd,size",
+        "sd,shape",
         [
             (2.5, (2,)),
             (np.ones(3), (4, 3)),
         ],
     )
     @pytest.mark.xfail(condition=(aesara.config.floatX == "float32"), reason="Fails on float32")
-    def test_half_normal_ordered(self, sd, size):
-        testval = np.sort(np.abs(np.random.randn(*size)))
+    def test_half_normal_ordered(self, sd, shape):
+        testval = np.sort(np.abs(np.random.randn(*shape)))
         model = self.build_model(
             pm.HalfNormal,
             {"sd": sd},
-            size=size,
+            shape=shape,
             testval=testval,
             transform=tr.Chain([tr.log, tr.ordered]),
         )
         self.check_vectortransform_elementwise_logp(model, vect_opt=0)
 
-    @pytest.mark.parametrize("lam,size", [(2.5, (2,)), (np.ones(3), (4, 3))])
-    def test_exponential_ordered(self, lam, size):
-        testval = np.sort(np.abs(np.random.randn(*size)))
+    @pytest.mark.parametrize("lam,shape", [(2.5, (2,)), (np.ones(3), (4, 3))])
+    def test_exponential_ordered(self, lam, shape):
+        testval = np.sort(np.abs(np.random.randn(*shape)))
         model = self.build_model(
             pm.Exponential,
             {"lam": lam},
-            size=size,
+            shape=shape,
             testval=testval,
             transform=tr.Chain([tr.log, tr.ordered]),
         )
         self.check_vectortransform_elementwise_logp(model, vect_opt=0)
 
     @pytest.mark.parametrize(
-        "a,b,size",
+        "a,b,shape",
         [
             (1.0, 1.0, (2,)),
             (np.ones(3), np.ones(3), (4, 3)),
         ],
     )
-    def test_beta_ordered(self, a, b, size):
-        testval = np.sort(np.abs(np.random.rand(*size)))
+    def test_beta_ordered(self, a, b, shape):
+        testval = np.sort(np.abs(np.random.rand(*shape)))
         model = self.build_model(
             pm.Beta,
             {"alpha": a, "beta": b},
-            size=size,
+            shape=shape,
             testval=testval,
             transform=tr.Chain([tr.logodds, tr.ordered]),
         )
         self.check_vectortransform_elementwise_logp(model, vect_opt=0)
 
     @pytest.mark.parametrize(
-        "lower,upper,size",
+        "lower,upper,shape",
         [(0.0, 1.0, (2,)), (pm.floatX(np.zeros(3)), pm.floatX(np.ones(3)), (4, 3))],
     )
-    def test_uniform_ordered(self, lower, upper, size):
+    def test_uniform_ordered(self, lower, upper, shape):
         def transform_params(rv_var):
             _, _, _, lower, upper = rv_var.owner.inputs
             lower = at.as_tensor_variable(lower) if lower is not None else None
@@ -455,43 +457,45 @@ def transform_params(rv_var):
 
         interval = tr.Interval(transform_params)
 
-        testval = np.sort(np.abs(np.random.rand(*size)))
+        testval = np.sort(np.abs(np.random.rand(*shape)))
         model = self.build_model(
             pm.Uniform,
             {"lower": lower, "upper": upper},
-            size=size,
+            shape=shape,
             testval=testval,
             transform=tr.Chain([interval, tr.ordered]),
         )
         self.check_vectortransform_elementwise_logp(model, vect_opt=1)
 
-    @pytest.mark.parametrize("mu,kappa,size", [(0.0, 1.0, (2,)), (np.zeros(3), np.ones(3), (4, 3))])
+    @pytest.mark.parametrize(
+        "mu,kappa,shape", [(0.0, 1.0, (2,)), (np.zeros(3), np.ones(3), (4, 3))]
+    )
     @pytest.mark.xfail(reason="Distribution not refactored yet")
-    def test_vonmises_ordered(self, mu, kappa, size):
-        testval = np.sort(np.abs(np.random.rand(*size)))
+    def test_vonmises_ordered(self, mu, kappa, shape):
+        testval = np.sort(np.abs(np.random.rand(*shape)))
         model = self.build_model(
             pm.VonMises,
             {"mu": mu, "kappa": kappa},
-            size=size,
+            shape=shape,
             testval=testval,
             transform=tr.Chain([tr.circular, tr.ordered]),
         )
         self.check_vectortransform_elementwise_logp(model, vect_opt=0)
 
     @pytest.mark.parametrize(
-        "lower,upper,size,transform",
+        "lower,upper,shape,transform",
         [
             (0.0, 1.0, (2,), tr.stick_breaking),
             (0.5, 5.5, (2, 3), tr.stick_breaking),
             (np.zeros(3), np.ones(3), (4, 3), tr.Chain([tr.sum_to_1, tr.logodds])),
         ],
     )
-    def test_uniform_other(self, lower, upper, size, transform):
-        testval = np.ones(size) / size[-1]
+    def test_uniform_other(self, lower, upper, shape, transform):
+        testval = np.ones(shape) / shape[-1]
         model = self.build_model(
             pm.Uniform,
             {"lower": lower, "upper": upper},
-            size=size,
+            shape=shape,
             testval=testval,
             transform=transform,
         )
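
Usage sketch (illustrative only, not part of the diff): the snippet below distills the shape/dims/size semantics exercised by the TestShapeDimsSize cases above; the model and variable names are hypothetical.

    import numpy as np
    import pymc3 as pm

    with pm.Model(coords={"town": ["Greifswald", "Madrid"]}):
        mu = np.array([1.0, 2.0, 3.0])  # parameters imply one dimension of length 3

        # `shape` spells out all dimensions; without an Ellipsis this emits a
        # UserWarning suggesting `size=(2,)` for replication beyond implied dims.
        a = pm.Normal("a", mu=mu, shape=(2, 3))
        # A trailing Ellipsis stands in for the dimensions implied by the parameters.
        b = pm.Normal("b", mu=mu, shape=(2, ...))
        # `size` only ever prepends batch dimensions.
        c = pm.Normal("c", mu=mu, size=(2,))
        # `dims` resolves dimension lengths through the model coords.
        d = pm.Normal("d", dims=("town",))

        assert a.eval().shape == (2, 3)
        assert b.eval().shape == (2, 3)
        assert c.eval().shape == (2, 3)
        assert d.eval().shape == (2,)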