Replace uses of testval with initval
brandonwillard committed Jun 1, 2021
1 parent 83b4957 commit 186b4f5
Showing 13 changed files with 79 additions and 79 deletions.
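
The rename is mechanical: every keyword argument and docstring reference spelled testval becomes initval, with no change in meaning. As a rough before/after sketch of what the rename means for user code (illustrative model, assuming the development branch this commit targets):

    import pymc3 as pm

    with pm.Model():
        # before this commit: pm.Normal("x", mu=0.0, sigma=1.0, testval=0.5)
        # after it, the starting value for sampling is spelled initval:
        x = pm.Normal("x", mu=0.0, sigma=1.0, initval=0.5)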

pymc3/distributions/bart.py (2 changes: 1 addition & 1 deletion)

@@ -27,7 +27,7 @@ def __init__(self, X, Y, m=200, alpha=0.25, split_prior=None, *args, **kwargs):

         self.X, self.Y, self.missing_data = self.preprocess_XY(X, Y)
 
-        super().__init__(shape=X.shape[0], dtype="float64", testval=0, *args, **kwargs)
+        super().__init__(shape=X.shape[0], dtype="float64", initval=0, *args, **kwargs)
 
         if self.X.ndim != 2:
             raise ValueError("The design matrix X must have two dimensions")

pymc3/distributions/bound.py (8 changes: 4 additions & 4 deletions)

@@ -42,7 +42,7 @@ def __init__(self, distribution, lower, upper, default, *args, **kwargs):
         super().__init__(
             shape=self._wrapped.shape,
             dtype=self._wrapped.dtype,
-            testval=self._wrapped.testval,
+            initval=self._wrapped.initval,
             defaults=defaults,
             transform=self._wrapped.transform,
         )
@@ -252,15 +252,15 @@ class Bound:
         with pm.Model():
             NegativeNormal = pm.Bound(pm.Normal, upper=0.0)
-            par1 = NegativeNormal('par1', mu=0.0, sigma=1.0, testval=-0.5)
+            par1 = NegativeNormal('par1', mu=0.0, sigma=1.0, initval=-0.5)
             # you can use the Bound object multiple times to
             # create multiple bounded random variables
-            par1_1 = NegativeNormal('par1_1', mu=-1.0, sigma=1.0, testval=-1.5)
+            par1_1 = NegativeNormal('par1_1', mu=-1.0, sigma=1.0, initval=-1.5)
             # you can also define a Bound implicitly, while applying
             # it to a random variable
             par2 = pm.Bound(pm.Normal, lower=-1.0, upper=1.0)(
-                'par2', mu=0.0, sigma=1.0, testval=1.0)
+                'par2', mu=0.0, sigma=1.0, initval=1.0)
     """
 
     def __init__(self, distribution, lower=None, upper=None):
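
For illustration, the updated Bound docstring pattern as a self-contained sketch (same names as the docstring above; the starting value is chosen inside the bound so the initial point is valid):

    import pymc3 as pm

    with pm.Model():
        NegativeNormal = pm.Bound(pm.Normal, upper=0.0)
        # initval must respect the bound, hence a negative starting value
        par1 = NegativeNormal("par1", mu=0.0, sigma=1.0, initval=-0.5)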

pymc3/distributions/distribution.py (12 changes: 6 additions & 6 deletions)

@@ -265,14 +265,14 @@ def __init__(
         self,
         shape,
         dtype,
-        testval=None,
+        initval=None,
         defaults=(),
         parent_dist=None,
         *args,
         **kwargs,
     ):
         super().__init__(
-            shape=shape, dtype=dtype, testval=testval, defaults=defaults, *args, **kwargs
+            shape=shape, dtype=dtype, initval=initval, defaults=defaults, *args, **kwargs
         )
         self.parent_dist = parent_dist

@@ -330,7 +330,7 @@ def __init__(
         logp,
         shape=(),
         dtype=None,
-        testval=0,
+        initval=0,
         random=None,
         wrap_random_with_dist_shape=True,
         check_shape_in_random=True,
@@ -351,8 +351,8 @@ def __init__(
             a value here.
         dtype: None, str (Optional)
             The dtype of the distribution.
-        testval: number or array (Optional)
-            The ``testval`` of the RV's tensor that follow the ``DensityDist``
+        initval: number or array (Optional)
+            The ``initval`` of the RV's tensor that follows the ``DensityDist``
             distribution.
         args, kwargs: (Optional)
             These are passed to the parent class' ``__init__``.
@@ -388,7 +388,7 @@ def __init__(
         """
         if dtype is None:
             dtype = aesara.config.floatX
-        super().__init__(shape, dtype, testval, *args, **kwargs)
+        super().__init__(shape, dtype, initval, *args, **kwargs)
         self.logp = logp
         if type(self.logp) == types.MethodType:
             if PLATFORM != "linux":
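
A minimal sketch of the initval parameter documented above on DensityDist (the log-density below is a hypothetical standard normal up to its normalizing constant; keyword semantics assumed to be exactly those in the docstring):

    import pymc3 as pm

    def normal_logp(value):
        # standard-normal log-density, up to the additive constant
        return -0.5 * value ** 2

    with pm.Model():
        x = pm.DensityDist("x", normal_logp, initval=0.1)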

pymc3/distributions/mixture.py (4 changes: 2 additions & 2 deletions)

@@ -609,7 +609,7 @@ class NormalMixture(Mixture):
                 10,
                 shape=n_components,
                 transform=pm.transforms.ordered,
-                testval=[1, 2, 3],
+                initval=[1, 2, 3],
             )
             σ = pm.HalfNormal("σ", 10, shape=n_components)
             weights = pm.Dirichlet("w", np.ones(n_components))
@@ -684,7 +684,7 @@ def __init__(self, w, comp_dists, mixture_axis=-1, *args, **kwargs):
         self.mixture_axis = mixture_axis
         kwargs.setdefault("dtype", self.comp_dists.dtype)
 
-        # Compute the mode so we don't always have to pass a testval
+        # Compute the mode so we don't always have to pass an initval
         defaults = kwargs.pop("defaults", [])
         event_shape = self.comp_dists.shape[mixture_axis + 1 :]
         _w = at.shape_padleft(
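
The NormalMixture docstring hunk shows the case where an explicit starting value is genuinely required: under pm.transforms.ordered the default starting point (all component means equal) cannot be mapped to the unconstrained space, so a strictly increasing initval is supplied. The same pattern as a sketch (ASCII names substituted for the docstring's μ and σ; assuming this development branch):

    import numpy as np
    import pymc3 as pm

    n_components = 3

    with pm.Model():
        # ordered needs a strictly increasing starting point
        mu = pm.Normal(
            "mu",
            0,
            10,
            shape=n_components,
            transform=pm.transforms.ordered,
            initval=[1, 2, 3],
        )
        sigma = pm.HalfNormal("sigma", 10, shape=n_components)
        weights = pm.Dirichlet("w", np.ones(n_components))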

pymc3/distributions/multivariate.py (18 changes: 9 additions & 9 deletions)

@@ -840,7 +840,7 @@ def logp(self, X):
         )
 
 
-def WishartBartlett(name, S, nu, is_cholesky=False, return_cholesky=False, testval=None):
+def WishartBartlett(name, S, nu, is_cholesky=False, return_cholesky=False, initval=None):
     R"""
     Bartlett decomposition of the Wishart distribution. As the Wishart
     distribution requires the matrix to be symmetric positive semi-definite
@@ -875,7 +875,7 @@ def WishartBartlett(name, S, nu, is_cholesky=False, return_cholesky=False, testval=None):
         Input matrix S is already Cholesky decomposed as S.T * S
     return_cholesky: bool (default=False)
         Only return the Cholesky decomposed matrix.
-    testval: ndarray
+    initval: ndarray
         p x p positive definite matrix used to initialize
 
     Notes
@@ -894,21 +894,21 @@ def WishartBartlett(name, S, nu, is_cholesky=False, return_cholesky=False, testval=None):
     n_diag = len(diag_idx[0])
     n_tril = len(tril_idx[0])
 
-    if testval is not None:
+    if initval is not None:
         # Inverse transform
-        testval = np.dot(np.dot(np.linalg.inv(L), testval), np.linalg.inv(L.T))
-        testval = linalg.cholesky(testval, lower=True)
-        diag_testval = testval[diag_idx] ** 2
-        tril_testval = testval[tril_idx]
+        initval = np.dot(np.dot(np.linalg.inv(L), initval), np.linalg.inv(L.T))
+        initval = linalg.cholesky(initval, lower=True)
+        diag_testval = initval[diag_idx] ** 2
+        tril_testval = initval[tril_idx]
     else:
         diag_testval = None
         tril_testval = None
 
     c = at.sqrt(
-        ChiSquared("%s_c" % name, nu - np.arange(2, 2 + n_diag), shape=n_diag, testval=diag_testval)
+        ChiSquared("%s_c" % name, nu - np.arange(2, 2 + n_diag), shape=n_diag, initval=diag_testval)
     )
     pm._log.info("Added new variable %s_c to model diagonal of Wishart." % name)
-    z = Normal("%s_z" % name, 0.0, 1.0, shape=n_tril, testval=tril_testval)
+    z = Normal("%s_z" % name, 0.0, 1.0, shape=n_tril, initval=tril_testval)
     pm._log.info("Added new variable %s_z to model off-diagonals of Wishart." % name)
     # Construct A matrix
     A = at.zeros(S.shape, dtype=np.float32)
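
As the updated docstring says, initval here is a p x p positive-definite matrix that is inverse-transformed through the Bartlett decomposition into starting values for the auxiliary _c and _z variables. A sketch (the identity scale and starting matrices are illustrative assumptions):

    import numpy as np
    import pymc3 as pm

    S = np.eye(3)  # prior scale matrix (illustrative)
    nu = 5         # degrees of freedom

    with pm.Model():
        V = pm.WishartBartlett("V", S, nu, initval=np.eye(3))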

pymc3/tests/models.py (18 changes: 9 additions & 9 deletions)

@@ -30,7 +30,7 @@ def simple_model():
     mu = -2.1
     tau = 1.3
     with Model() as model:
-        Normal("x", mu, tau=tau, size=2, testval=floatX_array([0.1, 0.1]))
+        Normal("x", mu, tau=tau, size=2, initval=floatX_array([0.1, 0.1]))
 
     return model.initial_point, model, (mu, tau ** -0.5)

@@ -39,7 +39,7 @@ def simple_categorical():
     p = floatX_array([0.1, 0.2, 0.3, 0.4])
     v = floatX_array([0.0, 1.0, 2.0, 3.0])
     with Model() as model:
-        Categorical("x", p, size=3, testval=[1, 2, 3])
+        Categorical("x", p, size=3, initval=[1, 2, 3])
 
     mu = np.dot(p, v)
     var = np.dot(p, (v - mu) ** 2)
@@ -50,7 +50,7 @@ def multidimensional_model():
     mu = -2.1
     tau = 1.3
     with Model() as model:
-        Normal("x", mu, tau=tau, size=(3, 2), testval=0.1 * np.ones((3, 2)))
+        Normal("x", mu, tau=tau, size=(3, 2), initval=0.1 * np.ones((3, 2)))
 
     return model.initial_point, model, (mu, tau ** -0.5)

@@ -81,7 +81,7 @@ def simple_2model():
     tau = 1.3
     p = 0.4
     with Model() as model:
-        x = pm.Normal("x", mu, tau=tau, testval=0.1)
+        x = pm.Normal("x", mu, tau=tau, initval=0.1)
         pm.Deterministic("logx", at.log(x))
         pm.Bernoulli("y", p)
     return model.initial_point, model
@@ -91,7 +91,7 @@ def simple_2model_continuous():
     mu = -2.1
     tau = 1.3
     with Model() as model:
-        x = pm.Normal("x", mu, tau=tau, testval=0.1)
+        x = pm.Normal("x", mu, tau=tau, initval=0.1)
         pm.Deterministic("logx", at.log(x))
         pm.Beta("y", alpha=1, beta=1, size=2)
     return model.initial_point, model
@@ -106,7 +106,7 @@ def mv_simple():
             "x",
             at.constant(mu),
             tau=at.constant(tau),
-            testval=floatX_array([0.1, 1.0, 0.8]),
+            initval=floatX_array([0.1, 1.0, 0.8]),
         )
     H = tau
     C = np.linalg.inv(H)
@@ -122,7 +122,7 @@ def mv_simple_coarse():
             "x",
             at.constant(mu),
             tau=at.constant(tau),
-            testval=floatX_array([0.1, 1.0, 0.8]),
+            initval=floatX_array([0.1, 1.0, 0.8]),
         )
     H = tau
     C = np.linalg.inv(H)
@@ -138,7 +138,7 @@ def mv_simple_very_coarse():
             "x",
             at.constant(mu),
             tau=at.constant(tau),
-            testval=floatX_array([0.1, 1.0, 0.8]),
+            initval=floatX_array([0.1, 1.0, 0.8]),
         )
     H = tau
     C = np.linalg.inv(H)
@@ -150,7 +150,7 @@ def mv_simple_discrete():
     n = 5
     p = floatX_array([0.15, 0.85])
     with pm.Model() as model:
-        pm.Multinomial("x", n, at.constant(p), testval=np.array([1, 4]))
+        pm.Multinomial("x", n, at.constant(p), initval=np.array([1, 4]))
     mu = n * p
     # covariance matrix
     C = np.zeros((d, d))
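
These fixtures all return model.initial_point, which is where a supplied initval ultimately surfaces. A sketch of that relationship (assuming initial_point behaves as a property here, as the fixtures' return statements suggest):

    import numpy as np
    import pymc3 as pm

    with pm.Model() as model:
        pm.Normal("x", -2.1, tau=1.3, size=2, initval=np.array([0.1, 0.1]))

    # the starting point reflects the supplied initval
    print(model.initial_point)  # expected: {'x': array([0.1, 0.1])}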

pymc3/tests/test_distributions_timeseries.py (10 changes: 5 additions & 5 deletions)

@@ -68,13 +68,13 @@ def test_AR_nd():
     beta_tp = np.random.randn(p, n)
     y_tp = np.random.randn(T, n)
     with Model() as t0:
-        beta = Normal("beta", 0.0, 1.0, shape=(p, n), testval=beta_tp)
-        AR("y", beta, sigma=1.0, shape=(T, n), testval=y_tp)
+        beta = Normal("beta", 0.0, 1.0, shape=(p, n), initval=beta_tp)
+        AR("y", beta, sigma=1.0, shape=(T, n), initval=y_tp)
 
     with Model() as t1:
-        beta = Normal("beta", 0.0, 1.0, shape=(p, n), testval=beta_tp)
+        beta = Normal("beta", 0.0, 1.0, shape=(p, n), initval=beta_tp)
         for i in range(n):
-            AR("y_%d" % i, beta[:, i], sigma=1.0, shape=T, testval=y_tp[:, i])
+            AR("y_%d" % i, beta[:, i], sigma=1.0, shape=T, initval=y_tp[:, i])
 
     np.testing.assert_allclose(t0.logp(t0.initial_point), t1.logp(t1.initial_point))

@@ -150,7 +150,7 @@ def test_linear():
     # build model
     with Model() as model:
         lamh = Flat("lamh")
-        xh = EulerMaruyama("xh", dt, sde, (lamh,), shape=N + 1, testval=x)
+        xh = EulerMaruyama("xh", dt, sde, (lamh,), shape=N + 1, initval=x)
         Normal("zh", mu=xh, sigma=sig2, observed=z)
     # invert
     with model:
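
test_AR_nd uses initval to pin both models to the same starting series, so their log-probabilities can be compared point for point. A reduced single-series sketch of the same idea (shapes are illustrative):

    import numpy as np
    import pymc3 as pm

    T, p = 50, 2  # series length and AR order (illustrative)

    with pm.Model():
        beta = pm.Normal("beta", 0.0, 1.0, shape=p, initval=np.zeros(p))
        # initval for the series must match its shape (T,)
        pm.AR("y", beta, sigma=1.0, shape=T, initval=np.zeros(T))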

pymc3/tests/test_model.py (4 changes: 2 additions & 2 deletions)

@@ -57,7 +57,7 @@ def __init__(self, mean=0, sigma=1, name="", model=None):
         super().__init__(name, model)
         self.register_rv(Normal.dist(mu=mean, sigma=sigma), "v1")
         Normal("v2", mu=mean, sigma=sigma)
-        Normal("v3", mu=mean, sigma=Normal("sd", mu=10, sigma=1, testval=1.0))
+        Normal("v3", mu=mean, sigma=Normal("sd", mu=10, sigma=1, initval=1.0))
         Deterministic("v3_sq", self.v3 ** 2)
         Potential("p1", at.constant(1))

@@ -462,7 +462,7 @@ def test_make_obs_var():
    fake_model = pm.Model()
    with fake_model:
        fake_distribution = pm.Normal.dist(mu=0, sigma=1)
-        # Create the testval attribute simply for the sake of model testing
+        # Create the initval attribute simply for the sake of model testing
        fake_distribution.name = input_name
 
    # Check function behavior using the various inputs
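
The first hunk above renames the starting value on a nested variable. The pattern in isolation, as a sketch of the test's model (a scale parameter that is itself a random variable, carrying its own initval):

    import pymc3 as pm

    with pm.Model():
        # the nested "sd" variable gets its own starting value
        sd = pm.Normal("sd", mu=10, sigma=1, initval=1.0)
        v3 = pm.Normal("v3", mu=0.0, sigma=sd)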

pymc3/tests/test_sampling.py (20 changes: 10 additions & 10 deletions)

@@ -387,7 +387,7 @@ def test_shared_named(self):
                 mu=np.atleast_2d(0),
                 tau=np.atleast_2d(1e20),
                 size=(1, 1),
-                testval=np.atleast_2d(0),
+                initval=np.atleast_2d(0),
             )
             theta = pm.Normal(
                 "theta", mu=at.dot(G_var, theta0), tau=np.atleast_2d(1e20), size=(1, 1)
@@ -403,7 +403,7 @@ def test_shared_unnamed(self):
                 mu=np.atleast_2d(0),
                 tau=np.atleast_2d(1e20),
                 size=(1, 1),
-                testval=np.atleast_2d(0),
+                initval=np.atleast_2d(0),
             )
             theta = pm.Normal(
                 "theta", mu=at.dot(G_var, theta0), tau=np.atleast_2d(1e20), size=(1, 1)
@@ -419,7 +419,7 @@ def test_constant_named(self):
                 mu=np.atleast_2d(0),
                 tau=np.atleast_2d(1e20),
                 size=(1, 1),
-                testval=np.atleast_2d(0),
+                initval=np.atleast_2d(0),
             )
             theta = pm.Normal(
                 "theta", mu=at.dot(G_var, theta0), tau=np.atleast_2d(1e20), size=(1, 1)
@@ -688,10 +688,10 @@ def test_deterministic_of_observed_modified_interface(self):
         meas_in_1 = pm.aesaraf.floatX(2 + 4 * rng.randn(100))
         meas_in_2 = pm.aesaraf.floatX(5 + 4 * rng.randn(100))
         with pm.Model(rng_seeder=rng) as model:
-            mu_in_1 = pm.Normal("mu_in_1", 0, 1, testval=0)
-            sigma_in_1 = pm.HalfNormal("sd_in_1", 1, testval=1)
-            mu_in_2 = pm.Normal("mu_in_2", 0, 1, testval=0)
-            sigma_in_2 = pm.HalfNormal("sd__in_2", 1, testval=1)
+            mu_in_1 = pm.Normal("mu_in_1", 0, 1, initval=0)
+            sigma_in_1 = pm.HalfNormal("sd_in_1", 1, initval=1)
+            mu_in_2 = pm.Normal("mu_in_2", 0, 1, initval=0)
+            sigma_in_2 = pm.HalfNormal("sd__in_2", 1, initval=1)
 
             in_1 = pm.Normal("in_1", mu_in_1, sigma_in_1, observed=meas_in_1)
             in_2 = pm.Normal("in_2", mu_in_2, sigma_in_2, observed=meas_in_2)
@@ -882,7 +882,7 @@ def _mocked_init_nuts(*args, **kwargs):


 @pytest.mark.parametrize(
-    "testval, jitter_max_retries, expectation",
+    "initval, jitter_max_retries, expectation",
     [
         (0, 0, pytest.raises(SamplingError)),
         (0, 1, pytest.raises(SamplingError)),
@@ -891,9 +891,9 @@
         (1, 0, does_not_raise()),
     ],
 )
-def test_init_jitter(testval, jitter_max_retries, expectation):
+def test_init_jitter(initval, jitter_max_retries, expectation):
     with pm.Model() as m:
-        pm.HalfNormal("x", transform=None, testval=testval)
+        pm.HalfNormal("x", transform=None, initval=initval)
 
     with expectation:
         # Starting value is negative (invalid) when np.random.rand returns 0 (jitter = -1)
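
test_init_jitter pins down how initval interacts with jittered initialization: the sampler perturbs the starting point with a uniform draw in [-1, 1] and, if the perturbed point is invalid, retries a fresh jitter up to jitter_max_retries times. A sketch of the user-facing knob being exercised (the jitter_max_retries keyword is taken from the parametrization above; behaviour assumed from this development branch):

    import pymc3 as pm

    with pm.Model():
        # initval=1 keeps even a worst-case jitter of -1 inside the closure
        # of the HalfNormal support, so initialization can succeed
        pm.HalfNormal("x", transform=None, initval=1)
        idata = pm.sample(jitter_max_retries=10)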

pymc3/tests/test_step.py (8 changes: 4 additions & 4 deletions)

@@ -964,25 +964,25 @@ def test_multiple_samplers(self, caplog):

     def test_bad_init_nonparallel(self):
         with Model():
-            HalfNormal("a", sigma=1, testval=-1, transform=None)
+            HalfNormal("a", sigma=1, initval=-1, transform=None)
             with pytest.raises(SamplingError) as error:
                 sample(init=None, chains=1, random_seed=1)
             error.match("Initial evaluation")
 
     @pytest.mark.skipif(sys.version_info < (3, 6), reason="requires python3.6 or higher")
     def test_bad_init_parallel(self):
         with Model():
-            HalfNormal("a", sigma=1, testval=-1, transform=None)
+            HalfNormal("a", sigma=1, initval=-1, transform=None)
             with pytest.raises(SamplingError) as error:
                 sample(init=None, cores=2, random_seed=1)
             error.match("Initial evaluation")
 
     def test_linalg(self, caplog):
         with Model():
-            a = Normal("a", size=2, testval=floatX(np.zeros(2)))
+            a = Normal("a", size=2, initval=floatX(np.zeros(2)))
             a = at.switch(a > 0, np.inf, a)
             b = at.slinalg.solve(floatX(np.eye(2)), a)
-            Normal("c", mu=b, size=2, testval=floatX(np.r_[0.0, 0.0]))
+            Normal("c", mu=b, size=2, initval=floatX(np.r_[0.0, 0.0]))
             caplog.clear()
             trace = sample(20, init=None, tune=5, chains=2)
             warns = [msg.msg for msg in caplog.records]
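
The two test_bad_init_* tests above capture the failure mode most directly tied to this rename: a starting value outside the distribution's support (negative, with no transform to rescue it) surfaces at sampling time as an "Initial evaluation" SamplingError. A sketch of that behaviour:

    import pymc3 as pm
    from pymc3.exceptions import SamplingError

    with pm.Model():
        pm.HalfNormal("a", sigma=1, initval=-1, transform=None)
        try:
            pm.sample(init=None, chains=1, random_seed=1)
        except SamplingError as err:
            # the message begins with "Initial evaluation", as the tests assert
            print(err)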

[Diffs for the remaining 3 of the 13 changed files did not load and are not shown.]
