diff --git a/pymc3/distributions/bart.py b/pymc3/distributions/bart.py
index 49148445558..69c89fea1fa 100644
--- a/pymc3/distributions/bart.py
+++ b/pymc3/distributions/bart.py
@@ -27,7 +27,7 @@ def __init__(self, X, Y, m=200, alpha=0.25, split_prior=None, *args, **kwargs):
         self.X, self.Y, self.missing_data = self.preprocess_XY(X, Y)
 
-        super().__init__(shape=X.shape[0], dtype="float64", testval=0, *args, **kwargs)
+        super().__init__(shape=X.shape[0], dtype="float64", initval=0, *args, **kwargs)
 
         if self.X.ndim != 2:
             raise ValueError("The design matrix X must have two dimensions")
diff --git a/pymc3/distributions/bound.py b/pymc3/distributions/bound.py
index bc0e168f382..bbb19d5065f 100644
--- a/pymc3/distributions/bound.py
+++ b/pymc3/distributions/bound.py
@@ -42,7 +42,7 @@ def __init__(self, distribution, lower, upper, default, *args, **kwargs):
         super().__init__(
             shape=self._wrapped.shape,
             dtype=self._wrapped.dtype,
-            testval=self._wrapped.testval,
+            initval=self._wrapped.initval,
             defaults=defaults,
             transform=self._wrapped.transform,
         )
@@ -252,15 +252,15 @@ class Bound:
        with pm.Model():
            NegativeNormal = pm.Bound(pm.Normal, upper=0.0)
-           par1 = NegativeNormal('par`', mu=0.0, sigma=1.0, testval=-0.5)
+           par1 = NegativeNormal('par`', mu=0.0, sigma=1.0, initval=-0.5)
 
            # you can use the Bound object multiple times to
            # create multiple bounded random variables
-           par1_1 = NegativeNormal('par1_1', mu=-1.0, sigma=1.0, testval=-1.5)
+           par1_1 = NegativeNormal('par1_1', mu=-1.0, sigma=1.0, initval=-1.5)
 
            # you can also define a Bound implicitly, while applying
            # it to a random variable
            par2 = pm.Bound(pm.Normal, lower=-1.0, upper=1.0)(
-               'par2', mu=0.0, sigma=1.0, testval=1.0)
+               'par2', mu=0.0, sigma=1.0, initval=1.0)
    """
 
    def __init__(self, distribution, lower=None, upper=None):
diff --git a/pymc3/distributions/distribution.py b/pymc3/distributions/distribution.py
index 781fe19699f..1c6867696b3 100644
--- a/pymc3/distributions/distribution.py
+++ b/pymc3/distributions/distribution.py
@@ -265,14 +265,14 @@ def __init__(
         self,
         shape,
         dtype,
-        testval=None,
+        initval=None,
         defaults=(),
         parent_dist=None,
         *args,
         **kwargs,
     ):
         super().__init__(
-            shape=shape, dtype=dtype, testval=testval, defaults=defaults, *args, **kwargs
+            shape=shape, dtype=dtype, initval=initval, defaults=defaults, *args, **kwargs
         )
         self.parent_dist = parent_dist
@@ -330,7 +330,7 @@ def __init__(
         logp,
         shape=(),
         dtype=None,
-        testval=0,
+        initval=0,
         random=None,
         wrap_random_with_dist_shape=True,
         check_shape_in_random=True,
@@ -351,8 +351,8 @@ def __init__(
             a value here.
         dtype: None, str (Optional)
             The dtype of the distribution.
-        testval: number or array (Optional)
-            The ``testval`` of the RV's tensor that follow the ``DensityDist``
+        initval: number or array (Optional)
+            The ``initval`` of the RV's tensor that follows the ``DensityDist``
             distribution.
         args, kwargs: (Optional)
             These are passed to the parent class' ``__init__``.
@@ -388,7 +388,7 @@ def __init__(
         """
         if dtype is None:
             dtype = aesara.config.floatX
-        super().__init__(shape, dtype, testval, *args, **kwargs)
+        super().__init__(shape, dtype, initval, *args, **kwargs)
         self.logp = logp
         if type(self.logp) == types.MethodType:
             if PLATFORM != "linux":
diff --git a/pymc3/distributions/mixture.py b/pymc3/distributions/mixture.py
index 3d82436f7d7..a462f81e2db 100644
--- a/pymc3/distributions/mixture.py
+++ b/pymc3/distributions/mixture.py
@@ -609,7 +609,7 @@ class NormalMixture(Mixture):
                 10,
                 shape=n_components,
                 transform=pm.transforms.ordered,
-                testval=[1, 2, 3],
+                initval=[1, 2, 3],
             )
             σ = pm.HalfNormal("σ", 10, shape=n_components)
             weights = pm.Dirichlet("w", np.ones(n_components))
@@ -684,7 +684,7 @@ def __init__(self, w, comp_dists, mixture_axis=-1, *args, **kwargs):
         self.mixture_axis = mixture_axis
         kwargs.setdefault("dtype", self.comp_dists.dtype)
 
-        # Compute the mode so we don't always have to pass a testval
+        # Compute the mode so we don't always have to pass an initval
         defaults = kwargs.pop("defaults", [])
         event_shape = self.comp_dists.shape[mixture_axis + 1 :]
         _w = at.shape_padleft(
diff --git a/pymc3/distributions/multivariate.py b/pymc3/distributions/multivariate.py
index 4eb6b01817a..7cea4a90e2d 100644
--- a/pymc3/distributions/multivariate.py
+++ b/pymc3/distributions/multivariate.py
@@ -840,7 +840,7 @@ def logp(self, X):
         )
 
 
-def WishartBartlett(name, S, nu, is_cholesky=False, return_cholesky=False, testval=None):
+def WishartBartlett(name, S, nu, is_cholesky=False, return_cholesky=False, initval=None):
     R"""
     Bartlett decomposition of the Wishart distribution.
     As the Wishart distribution requires the matrix to be symmetric positive semi-definite
@@ -875,7 +875,7 @@ def WishartBartlett(name, S, nu, is_cholesky=False, return_cholesky=False, testv
         Input matrix S is already Cholesky decomposed as S.T * S
     return_cholesky: bool (default=False)
         Only return the Cholesky decomposed matrix.
-    testval: ndarray
+    initval: ndarray
         p x p positive definite matrix used to initialize
 
     Notes
@@ -894,21 +894,21 @@ def WishartBartlett(name, S, nu, is_cholesky=False, return_cholesky=False, testv
     n_diag = len(diag_idx[0])
     n_tril = len(tril_idx[0])
-    if testval is not None:
+    if initval is not None:
         # Inverse transform
-        testval = np.dot(np.dot(np.linalg.inv(L), testval), np.linalg.inv(L.T))
-        testval = linalg.cholesky(testval, lower=True)
-        diag_testval = testval[diag_idx] ** 2
-        tril_testval = testval[tril_idx]
+        initval = np.dot(np.dot(np.linalg.inv(L), initval), np.linalg.inv(L.T))
+        initval = linalg.cholesky(initval, lower=True)
+        diag_testval = initval[diag_idx] ** 2
+        tril_testval = initval[tril_idx]
     else:
         diag_testval = None
         tril_testval = None
 
     c = at.sqrt(
-        ChiSquared("%s_c" % name, nu - np.arange(2, 2 + n_diag), shape=n_diag, testval=diag_testval)
+        ChiSquared("%s_c" % name, nu - np.arange(2, 2 + n_diag), shape=n_diag, initval=diag_testval)
     )
     pm._log.info("Added new variable %s_c to model diagonal of Wishart." % name)
-    z = Normal("%s_z" % name, 0.0, 1.0, shape=n_tril, testval=tril_testval)
+    z = Normal("%s_z" % name, 0.0, 1.0, shape=n_tril, initval=tril_testval)
     pm._log.info("Added new variable %s_z to model off-diagonals of Wishart." % name)
 
     # Construct A matrix
     A = at.zeros(S.shape, dtype=np.float32)
diff --git a/pymc3/tests/models.py b/pymc3/tests/models.py
index 0289386e548..78324e72c7c 100644
--- a/pymc3/tests/models.py
+++ b/pymc3/tests/models.py
@@ -30,7 +30,7 @@ def simple_model():
     mu = -2.1
     tau = 1.3
     with Model() as model:
-        Normal("x", mu, tau=tau, size=2, testval=floatX_array([0.1, 0.1]))
+        Normal("x", mu, tau=tau, size=2, initval=floatX_array([0.1, 0.1]))
 
     return model.initial_point, model, (mu, tau ** -0.5)
 
@@ -39,7 +39,7 @@ def simple_categorical():
     p = floatX_array([0.1, 0.2, 0.3, 0.4])
     v = floatX_array([0.0, 1.0, 2.0, 3.0])
     with Model() as model:
-        Categorical("x", p, size=3, testval=[1, 2, 3])
+        Categorical("x", p, size=3, initval=[1, 2, 3])
 
     mu = np.dot(p, v)
     var = np.dot(p, (v - mu) ** 2)
@@ -50,7 +50,7 @@ def multidimensional_model():
     mu = -2.1
     tau = 1.3
     with Model() as model:
-        Normal("x", mu, tau=tau, size=(3, 2), testval=0.1 * np.ones((3, 2)))
+        Normal("x", mu, tau=tau, size=(3, 2), initval=0.1 * np.ones((3, 2)))
 
     return model.initial_point, model, (mu, tau ** -0.5)
 
@@ -81,7 +81,7 @@ def simple_2model():
     tau = 1.3
     p = 0.4
     with Model() as model:
-        x = pm.Normal("x", mu, tau=tau, testval=0.1)
+        x = pm.Normal("x", mu, tau=tau, initval=0.1)
         pm.Deterministic("logx", at.log(x))
         pm.Bernoulli("y", p)
     return model.initial_point, model
@@ -91,7 +91,7 @@ def simple_2model_continuous():
     mu = -2.1
     tau = 1.3
     with Model() as model:
-        x = pm.Normal("x", mu, tau=tau, testval=0.1)
+        x = pm.Normal("x", mu, tau=tau, initval=0.1)
         pm.Deterministic("logx", at.log(x))
         pm.Beta("y", alpha=1, beta=1, size=2)
     return model.initial_point, model
@@ -106,7 +106,7 @@ def mv_simple():
             "x",
             at.constant(mu),
             tau=at.constant(tau),
-            testval=floatX_array([0.1, 1.0, 0.8]),
+            initval=floatX_array([0.1, 1.0, 0.8]),
         )
     H = tau
     C = np.linalg.inv(H)
@@ -122,7 +122,7 @@ def mv_simple_coarse():
             "x",
             at.constant(mu),
             tau=at.constant(tau),
-            testval=floatX_array([0.1, 1.0, 0.8]),
+            initval=floatX_array([0.1, 1.0, 0.8]),
         )
     H = tau
     C = np.linalg.inv(H)
@@ -138,7 +138,7 @@ def mv_simple_very_coarse():
             "x",
             at.constant(mu),
             tau=at.constant(tau),
-            testval=floatX_array([0.1, 1.0, 0.8]),
+            initval=floatX_array([0.1, 1.0, 0.8]),
         )
     H = tau
     C = np.linalg.inv(H)
@@ -150,7 +150,7 @@ def mv_simple_discrete():
     n = 5
     p = floatX_array([0.15, 0.85])
     with pm.Model() as model:
-        pm.Multinomial("x", n, at.constant(p), testval=np.array([1, 4]))
+        pm.Multinomial("x", n, at.constant(p), initval=np.array([1, 4]))
         mu = n * p
         # covariance matrix
         C = np.zeros((d, d))
diff --git a/pymc3/tests/test_distributions_timeseries.py b/pymc3/tests/test_distributions_timeseries.py
index 4f55d902144..961644e6d46 100644
--- a/pymc3/tests/test_distributions_timeseries.py
+++ b/pymc3/tests/test_distributions_timeseries.py
@@ -68,13 +68,13 @@ def test_AR_nd():
     beta_tp = np.random.randn(p, n)
     y_tp = np.random.randn(T, n)
     with Model() as t0:
-        beta = Normal("beta", 0.0, 1.0, shape=(p, n), testval=beta_tp)
-        AR("y", beta, sigma=1.0, shape=(T, n), testval=y_tp)
+        beta = Normal("beta", 0.0, 1.0, shape=(p, n), initval=beta_tp)
+        AR("y", beta, sigma=1.0, shape=(T, n), initval=y_tp)
 
     with Model() as t1:
-        beta = Normal("beta", 0.0, 1.0, shape=(p, n), testval=beta_tp)
+        beta = Normal("beta", 0.0, 1.0, shape=(p, n), initval=beta_tp)
         for i in range(n):
-            AR("y_%d" % i, beta[:, i], sigma=1.0, shape=T, testval=y_tp[:, i])
+            AR("y_%d" % i, beta[:, i], sigma=1.0, shape=T, initval=y_tp[:, i])
 
     np.testing.assert_allclose(t0.logp(t0.initial_point), t1.logp(t1.initial_point))
@@ -150,7 +150,7 @@ def test_linear():
     # build model
     with Model() as model:
         lamh = Flat("lamh")
-        xh = EulerMaruyama("xh", dt, sde, (lamh,), shape=N + 1, testval=x)
+        xh = EulerMaruyama("xh", dt, sde, (lamh,), shape=N + 1, initval=x)
         Normal("zh", mu=xh, sigma=sig2, observed=z)
     # invert
     with model:
diff --git a/pymc3/tests/test_model.py b/pymc3/tests/test_model.py
index e13c9cf4ac9..2ec08c7a72f 100644
--- a/pymc3/tests/test_model.py
+++ b/pymc3/tests/test_model.py
@@ -57,7 +57,7 @@ def __init__(self, mean=0, sigma=1, name="", model=None):
         super().__init__(name, model)
         self.register_rv(Normal.dist(mu=mean, sigma=sigma), "v1")
         Normal("v2", mu=mean, sigma=sigma)
-        Normal("v3", mu=mean, sigma=Normal("sd", mu=10, sigma=1, testval=1.0))
+        Normal("v3", mu=mean, sigma=Normal("sd", mu=10, sigma=1, initval=1.0))
         Deterministic("v3_sq", self.v3 ** 2)
         Potential("p1", at.constant(1))
@@ -462,7 +462,7 @@ def test_make_obs_var():
     fake_model = pm.Model()
     with fake_model:
         fake_distribution = pm.Normal.dist(mu=0, sigma=1)
-        # Create the testval attribute simply for the sake of model testing
+        # Create the initval attribute simply for the sake of model testing
        fake_distribution.name = input_name
 
    # Check function behavior using the various inputs
diff --git a/pymc3/tests/test_sampling.py b/pymc3/tests/test_sampling.py
index 8756138c15d..9b0a39602a8 100644
--- a/pymc3/tests/test_sampling.py
+++ b/pymc3/tests/test_sampling.py
@@ -387,7 +387,7 @@ def test_shared_named(self):
                 mu=np.atleast_2d(0),
                 tau=np.atleast_2d(1e20),
                 size=(1, 1),
-                testval=np.atleast_2d(0),
+                initval=np.atleast_2d(0),
             )
             theta = pm.Normal(
                 "theta", mu=at.dot(G_var, theta0), tau=np.atleast_2d(1e20), size=(1, 1)
             )
@@ -403,7 +403,7 @@ def test_shared_unnamed(self):
                 mu=np.atleast_2d(0),
                 tau=np.atleast_2d(1e20),
                 size=(1, 1),
-                testval=np.atleast_2d(0),
+                initval=np.atleast_2d(0),
             )
             theta = pm.Normal(
                 "theta", mu=at.dot(G_var, theta0), tau=np.atleast_2d(1e20), size=(1, 1)
             )
@@ -419,7 +419,7 @@ def test_constant_named(self):
                 mu=np.atleast_2d(0),
                 tau=np.atleast_2d(1e20),
                 size=(1, 1),
-                testval=np.atleast_2d(0),
+                initval=np.atleast_2d(0),
             )
             theta = pm.Normal(
                 "theta", mu=at.dot(G_var, theta0), tau=np.atleast_2d(1e20), size=(1, 1)
             )
@@ -688,10 +688,10 @@ def test_deterministic_of_observed_modified_interface(self):
         meas_in_1 = pm.aesaraf.floatX(2 + 4 * rng.randn(100))
         meas_in_2 = pm.aesaraf.floatX(5 + 4 * rng.randn(100))
         with pm.Model(rng_seeder=rng) as model:
-            mu_in_1 = pm.Normal("mu_in_1", 0, 1, testval=0)
-            sigma_in_1 = pm.HalfNormal("sd_in_1", 1, testval=1)
-            mu_in_2 = pm.Normal("mu_in_2", 0, 1, testval=0)
-            sigma_in_2 = pm.HalfNormal("sd__in_2", 1, testval=1)
+            mu_in_1 = pm.Normal("mu_in_1", 0, 1, initval=0)
+            sigma_in_1 = pm.HalfNormal("sd_in_1", 1, initval=1)
+            mu_in_2 = pm.Normal("mu_in_2", 0, 1, initval=0)
+            sigma_in_2 = pm.HalfNormal("sd__in_2", 1, initval=1)
 
             in_1 = pm.Normal("in_1", mu_in_1, sigma_in_1, observed=meas_in_1)
             in_2 = pm.Normal("in_2", mu_in_2, sigma_in_2, observed=meas_in_2)
@@ -882,7 +882,7 @@ def _mocked_init_nuts(*args, **kwargs):
 
 
 @pytest.mark.parametrize(
-    "testval, jitter_max_retries, expectation",
+    "initval, jitter_max_retries, expectation",
     [
         (0, 0, pytest.raises(SamplingError)),
         (0, 1, pytest.raises(SamplingError)),
@@ -891,9 +891,9 @@ def _mocked_init_nuts(*args, **kwargs):
         (0, 2, does_not_raise()),
         (1, 0, does_not_raise()),
     ],
 )
-def test_init_jitter(testval, jitter_max_retries, expectation):
+def test_init_jitter(initval, jitter_max_retries, expectation):
     with pm.Model() as m:
-        pm.HalfNormal("x", transform=None, testval=testval)
+        pm.HalfNormal("x", transform=None, initval=initval)
 
     with expectation:
         # Starting value is negative (invalid) when np.random.rand returns 0 (jitter = -1)
diff --git a/pymc3/tests/test_step.py b/pymc3/tests/test_step.py
index 7f4796755cc..1daf0e1c574 100644
--- a/pymc3/tests/test_step.py
+++ b/pymc3/tests/test_step.py
@@ -964,7 +964,7 @@ def test_multiple_samplers(self, caplog):
 
     def test_bad_init_nonparallel(self):
         with Model():
-            HalfNormal("a", sigma=1, testval=-1, transform=None)
+            HalfNormal("a", sigma=1, initval=-1, transform=None)
             with pytest.raises(SamplingError) as error:
                 sample(init=None, chains=1, random_seed=1)
             error.match("Initial evaluation")
@@ -972,17 +972,17 @@ def test_bad_init_nonparallel(self):
     @pytest.mark.skipif(sys.version_info < (3, 6), reason="requires python3.6 or higher")
     def test_bad_init_parallel(self):
         with Model():
-            HalfNormal("a", sigma=1, testval=-1, transform=None)
+            HalfNormal("a", sigma=1, initval=-1, transform=None)
             with pytest.raises(SamplingError) as error:
                 sample(init=None, cores=2, random_seed=1)
             error.match("Initial evaluation")
 
     def test_linalg(self, caplog):
         with Model():
-            a = Normal("a", size=2, testval=floatX(np.zeros(2)))
+            a = Normal("a", size=2, initval=floatX(np.zeros(2)))
             a = at.switch(a > 0, np.inf, a)
             b = at.slinalg.solve(floatX(np.eye(2)), a)
-            Normal("c", mu=b, size=2, testval=floatX(np.r_[0.0, 0.0]))
+            Normal("c", mu=b, size=2, initval=floatX(np.r_[0.0, 0.0]))
             caplog.clear()
             trace = sample(20, init=None, tune=5, chains=2)
             warns = [msg.msg for msg in caplog.records]
diff --git a/pymc3/tests/test_transforms.py b/pymc3/tests/test_transforms.py
index 2c0265aabe0..280471a09e1 100644
--- a/pymc3/tests/test_transforms.py
+++ b/pymc3/tests/test_transforms.py
@@ -227,7 +227,7 @@ def test_interval_near_boundary():
     x0 = np.nextafter(ub, lb)
 
     with pm.Model() as model:
-        pm.Uniform("x", testval=x0, lower=lb, upper=ub)
+        pm.Uniform("x", initval=x0, lower=lb, upper=ub)
 
     log_prob = model.point_logps()
     np.testing.assert_allclose(log_prob, np.array([-52.68]))
@@ -274,11 +274,11 @@ def test_chain_jacob_det():
 
 
 class TestElementWiseLogp(SeededTest):
-    def build_model(self, distfam, params, size, transform, testval=None):
-        if testval is not None:
-            testval = pm.floatX(testval)
+    def build_model(self, distfam, params, size, transform, initval=None):
+        if initval is not None:
+            initval = pm.floatX(initval)
         with pm.Model() as m:
-            distfam("x", size=size, transform=transform, testval=testval, **params)
+            distfam("x", size=size, transform=transform, initval=initval, **params)
         return m
 
     def check_transform_elementwise_logp(self, model):
@@ -408,7 +408,7 @@ def test_normal_ordered(self):
             pm.Normal,
             {"mu": 0.0, "sd": 1.0},
             size=3,
-            testval=np.asarray([-1.0, 1.0, 4.0]),
+            initval=np.asarray([-1.0, 1.0, 4.0]),
             transform=tr.ordered,
         )
         self.check_vectortransform_elementwise_logp(model, vect_opt=0)
@@ -422,24 +422,24 @@ def test_normal_ordered(self):
     )
     @pytest.mark.xfail(condition=(aesara.config.floatX == "float32"), reason="Fails on float32")
     def test_half_normal_ordered(self, sd, size):
-        testval = np.sort(np.abs(np.random.randn(*size)))
+        initval = np.sort(np.abs(np.random.randn(*size)))
         model = self.build_model(
             pm.HalfNormal,
             {"sd": sd},
             size=size,
-            testval=testval,
+            initval=initval,
             transform=tr.Chain([tr.log, tr.ordered]),
         )
         self.check_vectortransform_elementwise_logp(model, vect_opt=0)
 
     @pytest.mark.parametrize("lam,size", [(2.5, (2,)), (np.ones(3), (4, 3))])
     def test_exponential_ordered(self, lam, size):
-        testval = np.sort(np.abs(np.random.randn(*size)))
+        initval = np.sort(np.abs(np.random.randn(*size)))
         model = self.build_model(
             pm.Exponential,
             {"lam": lam},
             size=size,
-            testval=testval,
+            initval=initval,
             transform=tr.Chain([tr.log, tr.ordered]),
         )
         self.check_vectortransform_elementwise_logp(model, vect_opt=0)
@@ -452,12 +452,12 @@ def test_exponential_ordered(self, lam, size):
         ],
     )
     def test_beta_ordered(self, a, b, size):
-        testval = np.sort(np.abs(np.random.rand(*size)))
+        initval = np.sort(np.abs(np.random.rand(*size)))
         model = self.build_model(
             pm.Beta,
             {"alpha": a, "beta": b},
             size=size,
-            testval=testval,
+            initval=initval,
             transform=tr.Chain([tr.logodds, tr.ordered]),
         )
         self.check_vectortransform_elementwise_logp(model, vect_opt=0)
@@ -475,24 +475,24 @@ def transform_params(rv_var):
 
         interval = tr.Interval(transform_params)
 
-        testval = np.sort(np.abs(np.random.rand(*size)))
+        initval = np.sort(np.abs(np.random.rand(*size)))
         model = self.build_model(
             pm.Uniform,
             {"lower": lower, "upper": upper},
             size=size,
-            testval=testval,
+            initval=initval,
             transform=tr.Chain([interval, tr.ordered]),
         )
         self.check_vectortransform_elementwise_logp(model, vect_opt=1)
 
     @pytest.mark.parametrize("mu,kappa,size", [(0.0, 1.0, (2,)), (np.zeros(3), np.ones(3), (4, 3))])
     def test_vonmises_ordered(self, mu, kappa, size):
-        testval = np.sort(np.abs(np.random.rand(*size)))
+        initval = np.sort(np.abs(np.random.rand(*size)))
         model = self.build_model(
             pm.VonMises,
             {"mu": mu, "kappa": kappa},
             size=size,
-            testval=testval,
+            initval=initval,
             transform=tr.Chain([tr.circular, tr.ordered]),
         )
         self.check_vectortransform_elementwise_logp(model, vect_opt=0)
@@ -506,12 +506,12 @@ def test_vonmises_ordered(self, mu, kappa, size):
         ],
     )
     def test_uniform_other(self, lower, upper, size, transform):
-        testval = np.ones(size) / size[-1]
+        initval = np.ones(size) / size[-1]
         model = self.build_model(
             pm.Uniform,
             {"lower": lower, "upper": upper},
             size=size,
-            testval=testval,
+            initval=initval,
             transform=transform,
         )
         self.check_vectortransform_elementwise_logp(model, vect_opt=1)
@@ -524,9 +524,9 @@ def test_uniform_other(self, lower, upper, size, transform):
         ],
     )
     def test_mvnormal_ordered(self, mu, cov, size, shape):
-        testval = np.sort(np.random.randn(*shape))
+        initval = np.sort(np.random.randn(*shape))
         model = self.build_model(
-            pm.MvNormal, {"mu": mu, "cov": cov}, size=size, testval=testval, transform=tr.ordered
+            pm.MvNormal, {"mu": mu, "cov": cov}, size=size, initval=initval, transform=tr.ordered
         )
         self.check_vectortransform_elementwise_logp(model, vect_opt=1)
diff --git a/pymc3/tests/test_types.py b/pymc3/tests/test_types.py
index c38c04edf8c..7bfd260664c 100644
--- a/pymc3/tests/test_types.py
+++ b/pymc3/tests/test_types.py
@@ -37,7 +37,7 @@ def teardown_method(self):
     @aesara.config.change_flags({"floatX": "float64", "warn_float64": "ignore"})
     def test_float64(self):
         with Model() as model:
-            x = Normal("x", testval=np.array(1.0, dtype="float64"))
+            x = Normal("x", initval=np.array(1.0, dtype="float64"))
             obs = Normal("obs", mu=x, sigma=1.0, observed=np.random.randn(5))
 
         assert x.dtype == "float64"
@@ -50,7 +50,7 @@ def test_float64(self):
     @aesara.config.change_flags({"floatX": "float32", "warn_float64": "warn"})
     def test_float32(self):
         with Model() as model:
-            x = Normal("x", testval=np.array(1.0, dtype="float32"))
+            x = Normal("x", initval=np.array(1.0, dtype="float32"))
             obs = Normal("obs", mu=x, sigma=1.0, observed=np.random.randn(5).astype("float32"))
 
         assert x.dtype == "float32"
@@ -65,11 +65,11 @@ def test_float64_MLDA(self):
         data = np.random.randn(5)
 
         with Model() as coarse_model:
-            x = Normal("x", testval=np.array(1.0, dtype="float64"))
+            x = Normal("x", initval=np.array(1.0, dtype="float64"))
             obs = Normal("obs", mu=x, sigma=1.0, observed=data + 0.5)
 
         with Model() as model:
-            x = Normal("x", testval=np.array(1.0, dtype="float64"))
+            x = Normal("x", initval=np.array(1.0, dtype="float64"))
             obs = Normal("obs", mu=x, sigma=1.0, observed=data)
 
         assert x.dtype == "float64"
@@ -83,11 +83,11 @@ def test_float32_MLDA(self):
         data = np.random.randn(5).astype("float32")
 
         with Model() as coarse_model:
-            x = Normal("x", testval=np.array(1.0, dtype="float32"))
+            x = Normal("x", initval=np.array(1.0, dtype="float32"))
             obs = Normal("obs", mu=x, sigma=1.0, observed=data + 0.5)
 
         with Model() as model:
-            x = Normal("x", testval=np.array(1.0, dtype="float32"))
+            x = Normal("x", initval=np.array(1.0, dtype="float32"))
             obs = Normal("obs", mu=x, sigma=1.0, observed=data)
 
         assert x.dtype == "float32"
diff --git a/pymc3/tests/test_variational_inference.py b/pymc3/tests/test_variational_inference.py
index b083e578701..a4a470dfe08 100644
--- a/pymc3/tests/test_variational_inference.py
+++ b/pymc3/tests/test_variational_inference.py
@@ -655,7 +655,7 @@ def simple_model_data(use_minibatch):
 def simple_model(simple_model_data):
     with pm.Model() as model:
         mu_ = pm.Normal(
-            "mu", mu=simple_model_data["mu0"], sigma=simple_model_data["sigma0"], testval=0
+            "mu", mu=simple_model_data["mu0"], sigma=simple_model_data["sigma0"], initval=0
         )
         pm.Normal(
             "x",