-
-
Notifications
You must be signed in to change notification settings - Fork 2.1k
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
Showing
2 changed files
with
42 additions
and
7 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -2781,7 +2781,7 @@ def test_car_logp(size): | |
scipy_logp = scipy.stats.multivariate_normal.logpdf(xs, mu, cov) | ||
|
||
car_dist = CAR.dist(mu, W, alpha, tau, size=size) | ||
#xs = np.broadcast_to(xs, size + mu.shape) | ||
# xs = np.broadcast_to(xs, size + mu.shape) | ||
car_logp = logpt(car_dist, xs).eval() | ||
|
||
# Check to make sure that the CAR and MVN log PDFs are equivalent | ||
|
@@ -2792,6 +2792,37 @@ def test_car_logp(size): | |
assert np.allclose(delta_logp - delta_logp[0], 0.0) | ||
|
||
|
||
@pytest.mark.parametrize("size", [(100,), (100, 2)], ids=str) | ||
def test_car_rng_fn(size): | ||
delta = 0.05 # limit for KS p-value | ||
    n_fails = 10  # Allow the KS test to fail up to this many times before giving up | ||
|
||
W = np.array( | ||
[[0.0, 1.0, 1.0, 0.0], [1.0, 0.0, 0.0, 1.0], [1.0, 0.0, 0.0, 1.0], [0.0, 1.0, 1.0, 0.0]] | ||
) | ||
|
||
tau = 2 | ||
alpha = 0.5 | ||
mu = np.array([1, 1, 1, 1]) | ||
|
||
D = W.sum(axis=0) | ||
prec = tau * (np.diag(D) - alpha * W) | ||
cov = np.linalg.inv(prec) | ||
|
||
p, f = delta, n_fails | ||
while p <= delta and f > 0: | ||
with Model(): | ||
car = pm.CAR("car", mu, W, alpha, tau, size=size) | ||
mn = pm.MvNormal("mn", mu, cov, size=size) | ||
check = pm.sample_prior_predictive(100) | ||
car_smp, mn_smp = check["car"], check["mn"] | ||
_, p = scipy.stats.ks_2samp( | ||
np.atleast_1d(car_smp).flatten(), np.atleast_1d(mn_smp).flatten() | ||
This comment has been minimized.
Sorry, something went wrong.
This comment has been minimized.
Sorry, something went wrong.
ckrapu
Contributor
|
||
) | ||
f -= 1 | ||
assert p > delta | ||
|
||
|
||
class TestBugfixes: | ||
@pytest.mark.parametrize( | ||
"dist_cls,kwargs", [(MvNormal, dict(mu=0)), (MvStudentT, dict(mu=0, nu=2))] | ||
|
If I am understanding this code correctly, this application of the KS test statistic function from scipy isn't appropriate here. Ideally, `ks_2samp` is used to test for equality in distribution between scalar random variables, but we have vector-valued random variables here. For example, if we compared a CAR distribution with unit diagonal variance (but nontrivial off-diagonal terms) against a multivariate Gaussian with an identity covariance matrix, this test in its current form would — given enough data — tell you that they are equal in distribution, even though they are clearly different: flattening the samples discards all information about the correlation structure between components. Perhaps you could try selecting multiple specific elements of the CAR vector and applying the KS statistic to those elements' marginal distributions one at a time. I am still not sure whether that is fully sufficient, but it would be an improvement over the current test.