diff --git a/RELEASE-NOTES.md b/RELEASE-NOTES.md
index 1fe060bef1e..a7a7796da26 100644
--- a/RELEASE-NOTES.md
+++ b/RELEASE-NOTES.md
@@ -50,7 +50,8 @@
 - `nuts_kwargs` and `step_kwargs` have been deprecated in favor of using the standard `kwargs` to pass optional step method arguments.
 - `SGFS` and `CSG` have been removed (Fix for [#3353](https://github.com/pymc-devs/pymc3/issues/3353)). They have been moved to [pymc3-experimental](https://github.com/pymc-devs/pymc3-experimental).
-- References to `live_plot` and corresponding notebooks have been removed.
+- References to `live_plot` and corresponding notebooks have been removed.
+- Function `approx_hessian` was removed because `numdifftools` is no longer compatible with current `scipy`. The function was already optional (only available to users who installed `numdifftools` separately) and was not hit on any common code paths. [#3485](https://github.com/pymc-devs/pymc3/pull/3485).
 
 ## PyMC3 3.6 (Dec 21 2018)
 
diff --git a/pymc3/tests/test_distributions.py b/pymc3/tests/test_distributions.py
index 56d5af6a870..04025ee9962 100644
--- a/pymc3/tests/test_distributions.py
+++ b/pymc3/tests/test_distributions.py
@@ -3,8 +3,8 @@
 
 from .helpers import SeededTest, select_by_precision
 from ..vartypes import continuous_types
-from ..model import Model, Point, Potential, Deterministic
-from ..blocking import DictToVarBijection, DictToArrayBijection, ArrayOrdering
+from ..model import Model, Point, Deterministic
+from ..blocking import DictToVarBijection
 from ..distributions import (
     DensityDist, Categorical, Multinomial, VonMises, Dirichlet,
     MvStudentT, MvNormal, MatrixNormal, ZeroInflatedPoisson,
@@ -471,37 +471,9 @@ def check_int_to_1(self, model, value, domain, paramdomains):
             area = integrate_nd(pdfx, domain, value.dshape, value.dtype)
             assert_almost_equal(area, 1, err_msg=str(pt))
 
-    def check_dlogp(self, model, value, domain, paramdomains):
-        try:
-            from numdifftools import Gradient
-        except ImportError:
-            return
-        if not model.cont_vars:
-            return
-
-        domains = paramdomains.copy()
-        domains['value'] = domain
-        bij = DictToArrayBijection(
-            ArrayOrdering(model.cont_vars), model.test_point)
-        dlogp = bij.mapf(model.fastdlogp(model.cont_vars))
-        logp = bij.mapf(model.fastlogp)
-
-        def wrapped_logp(x):
-            try:
-                return logp(x)
-            except:
-                return np.nan
-
-        ndlogp = Gradient(wrapped_logp)
-        for pt in product(domains, n_samples=100):
-            pt = Point(pt, model=model)
-            pt = bij.map(pt)
-            decimals = select_by_precision(float64=6, float32=4)
-            assert_almost_equal(dlogp(pt), ndlogp(pt), decimal=decimals, err_msg=str(pt))
-
     def checkd(self, distfam, valuedomain, vardomains, checks=None, extra_args=None):
         if checks is None:
-            checks = (self.check_int_to_1, self.check_dlogp)
+            checks = (self.check_int_to_1, )
 
         if extra_args is None:
             extra_args = {}
@@ -940,7 +912,8 @@ def test_wishart(self, n):
         # This check compares the autodiff gradient to the numdiff gradient.
         # However, due to the strict constraints of the wishart,
         # it is impossible to numerically determine the gradient as a small
-        # pertubation breaks the symmetry. Thus disabling.
+        # perturbation breaks the symmetry. Thus disabling. Also, numdifftools was
+        # removed in June 2019, so an alternative would be needed.
         #
         # self.checkd(Wishart, PdMatrix(n), {'n': Domain([2, 3, 4, 2000]), 'V': PdMatrix(n)},
         #             checks=[self.check_dlogp])
@@ -1120,12 +1093,6 @@ def logp(x):
             return -log(2 * .5) - abs(x - .5) / .5
         self.checkd(DensityDist, R, {}, extra_args={'logp': logp})
 
-    def test_addpotential(self):
-        with Model() as model:
-            value = Normal('value', 1, 1)
-            Potential('value_squared', -value ** 2)
-        self.check_dlogp(model, value, R, {})
-
     def test_get_tau_sigma(self):
         sigma = np.array([2])
         assert_almost_equal(continuous.get_tau_sigma(sigma=sigma), [1. / sigma**2, sigma])
diff --git a/pymc3/tuning/__init__.py b/pymc3/tuning/__init__.py
index 69b0ac4ebf4..63df3a82427 100644
--- a/pymc3/tuning/__init__.py
+++ b/pymc3/tuning/__init__.py
@@ -1,2 +1,2 @@
 from .starting import find_MAP
-from .scaling import approx_hessian, find_hessian, trace_cov, guess_scaling
+from .scaling import find_hessian, trace_cov, guess_scaling
diff --git a/pymc3/tuning/scaling.py b/pymc3/tuning/scaling.py
index 68ba236cd47..5a5b580a4e7 100644
--- a/pymc3/tuning/scaling.py
+++ b/pymc3/tuning/scaling.py
@@ -4,41 +4,7 @@
 from ..theanof import hessian_diag, inputvars
 from ..blocking import DictToArrayBijection, ArrayOrdering
 
-__all__ = ['approx_hessian', 'find_hessian', 'trace_cov', 'guess_scaling']
-
-
-def approx_hessian(point, vars=None, model=None):
-    """
-    Returns an approximation of the Hessian at the current chain location.
-
-    Parameters
-    ----------
-    model : Model (optional if in `with` context)
-    point : dict
-    vars : list
-        Variables for which Hessian is to be calculated.
-    """
-    from numdifftools import Jacobian
-
-    model = modelcontext(model)
-    if vars is None:
-        vars = model.cont_vars
-    vars = inputvars(vars)
-
-    point = Point(point, model=model)
-
-    bij = DictToArrayBijection(ArrayOrdering(vars), point)
-    dlogp = bij.mapf(model.fastdlogp(vars))
-
-    def grad_logp(point):
-        return np.nan_to_num(dlogp(point))
-
-    '''
-    Find the jacobian of the gradient function at the current position
-    this should be the Hessian; invert it to find the approximate
-    covariance matrix.
-    '''
-    return -Jacobian(grad_logp)(bij.map(point))
+__all__ = ['find_hessian', 'trace_cov', 'guess_scaling']
 
 
 def fixed_hessian(point, vars=None, model=None):
diff --git a/requirements-dev.txt b/requirements-dev.txt
index a4a3d7451df..80883464d0c 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -7,7 +7,6 @@ Keras>=2.0.8
 nbsphinx>=0.2.13
 nose>=1.3.7
 nose-parameterized==0.6.0
-numdifftools>=0.9.20
 numpy>=1.13.0
 numpydoc==0.7.0
 pycodestyle>=2.3.1
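
For anyone who relied on `approx_hessian`, the deleted implementation was a thin wrapper that ran `numdifftools.Jacobian` over the model gradient, so an equivalent helper can live in user code instead of PyMC3. Below is a minimal sketch, not part of this diff, assuming you install a `numdifftools` release compatible with your `scipy` and that the PyMC3 internals used by the deleted code (`DictToArrayBijection`, `ArrayOrdering`, `Model.fastdlogp`) are still available in your version:

```python
# Hypothetical user-side replacement for the removed approx_hessian.
# It mirrors the deleted pymc3.tuning.scaling.approx_hessian and relies on
# PyMC3 3.7-era internals plus a numdifftools build compatible with scipy.
import numpy as np
from numdifftools import Jacobian

from pymc3.blocking import ArrayOrdering, DictToArrayBijection
from pymc3.model import Point, modelcontext
from pymc3.theanof import inputvars


def approx_hessian(point, vars=None, model=None):
    """Numerically approximate the Hessian of -logp at `point`."""
    model = modelcontext(model)
    if vars is None:
        vars = model.cont_vars
    vars = inputvars(vars)

    point = Point(point, model=model)

    # Map dict-like points to and from a flat parameter vector.
    bij = DictToArrayBijection(ArrayOrdering(vars), point)
    dlogp = bij.mapf(model.fastdlogp(vars))

    def grad_logp(x):
        # Replace NaNs so the finite-difference scheme stays well defined.
        return np.nan_to_num(dlogp(x))

    # The Jacobian of the gradient is the Hessian of logp; the sign flip
    # matches the removed helper, which returned the Hessian of -logp.
    return -Jacobian(grad_logp)(bij.map(point))
```

Used inside a model context (for example `with model: H = approx_hessian(model.test_point)`), this reproduces what the removed helper returned; `find_hessian`, `trace_cov`, and `guess_scaling` remain exported from `pymc3.tuning` and do not depend on `numdifftools`.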