diff --git a/deerlab/classes.py b/deerlab/classes.py
index 20917bd6a..6119cb512 100644
--- a/deerlab/classes.py
+++ b/deerlab/classes.py
@@ -2,7 +2,7 @@
 # Copyright(c) 2019-2020: Luis Fabregas, Stefan Stoll and other contributors.
 
 import numpy as np
-import numdifftools as nd
+from deerlab.utils import fdJacobian
 from scipy.stats import norm
 from scipy.signal import fftconvolve
 import copy
@@ -322,8 +322,8 @@ def propagate(self,model,lbm=[],ubm=[]):
             raise IndexError ('The 2nd and 3rd input arguments must have the same number of elements as the model output.')
 
         # Get jacobian of model to be propagated with respect to parameters
-        J = np.reshape(nd.Jacobian(model)(parfit),(-1,parfit.size))
-
+        J = fdJacobian(model,parfit)
+
         # Clip at boundaries
         modelfit = np.maximum(modelfit,lbm)
         modelfit = np.minimum(modelfit,ubm)
diff --git a/deerlab/fitmultimodel.py b/deerlab/fitmultimodel.py
index ac643ab24..20429a0cc 100644
--- a/deerlab/fitmultimodel.py
+++ b/deerlab/fitmultimodel.py
@@ -5,11 +5,10 @@
 
 import copy
 import numpy as np
-import numdifftools as nd
 import matplotlib.pyplot as plt
 from types import FunctionType
 import deerlab as dl
-from deerlab.utils import hccm, goodness_of_fit
+from deerlab.utils import hccm, goodness_of_fit, fdJacobian
 from deerlab.classes import FitResult
 
 def fitmultimodel(V, Kmodel, r, model, maxModels, method='aic', lb=None, ub=None, lbK=None, ubK=None,
@@ -368,10 +367,10 @@ def spread_within_box(lb,ub,Nmodels):
         res = weights*(Vfit - V)
 
         # Compute the Jacobian
-        Jnonlin = np.reshape(nd.Jacobian(lambda p: weights*(Knonlin(p)@plin))(pnonlin),(-1,pnonlin.size))
+        Jnonlin = fdJacobian(lambda p: weights*(Knonlin(p)@plin),pnonlin)
         Jlin = weights[:,np.newaxis]*Knonlin(pnonlin)
         J = np.concatenate((Jnonlin, Jlin),1)
-
+
         # Estimate the heteroscedasticity-consistent covariance matrix
         covmatrix = hccm(J,res,'HC1')
 
diff --git a/deerlab/fitparamodel.py b/deerlab/fitparamodel.py
index 78ef5e997..f21628a9e 100644
--- a/deerlab/fitparamodel.py
+++ b/deerlab/fitparamodel.py
@@ -4,8 +4,7 @@
 # Copyright(c) 2019-2020: Luis Fabregas, Stefan Stoll and other contributors.
 
 import numpy as np
-import numdifftools as nd
-from deerlab.utils import isempty, multistarts, hccm, parse_multidatasets, goodness_of_fit
+from deerlab.utils import isempty, multistarts, hccm, parse_multidatasets, goodness_of_fit, fdJacobian
 from deerlab.classes import UncertQuant, FitResult
 import matplotlib.pyplot as plt
 from scipy.optimize import least_squares
@@ -34,7 +33,7 @@ def fitparamodel(V, model, par0=[],lb=[],ub=[], weights = 1,
     :ref:`FitResult` with the following fields defined:
     param : ndarray
         Fitted model parameters
-    paramuq : :ref:`UncertQuant`
+    uncertainty : :ref:`UncertQuant`
         Covariance-based uncertainty quantification of the fitted parameters.
     scale : float int or list of float int
         Amplitude scale(s) of the dipolar signal(s).
@@ -216,8 +215,8 @@ def lsqresiduals(p):
     # of the negative log-likelihood
     with warnings.catch_warnings():
         warnings.simplefilter('ignore')
-        J = np.reshape(nd.Jacobian(lsqresiduals)(parfit),(-1,parfit.size))
-
+        J = fdJacobian(lsqresiduals,parfit)
+
     # Estimate the heteroscedasticity-consistent covariance matrix
     if isempty(covmatrix):
         # Use estimated data covariance matrix
diff --git a/deerlab/fitsignal.py b/deerlab/fitsignal.py
index 6f833ebf0..a629990c1 100644
--- a/deerlab/fitsignal.py
+++ b/deerlab/fitsignal.py
@@ -4,7 +4,6 @@
 # Copyright(c) 2019-2020: Luis Fabregas, Stefan Stoll and other contributors.
 
 import numpy as np
-import numdifftools as nd
 import types
 import copy
 import inspect
@@ -13,7 +12,7 @@
 from deerlab.classes import UncertQuant, FitResult
 from deerlab.bg_models import bg_hom3d
 from deerlab.ex_models import ex_4pdeer
-from deerlab.utils import isempty, goodness_of_fit
+from deerlab.utils import isempty, goodness_of_fit, fdJacobian
 
 def fitsignal(Vexp, t, r, dd_model='P', bg_model=bg_hom3d, ex_model=ex_4pdeer,
               par0=[None,None,None], lb=[None,None,None], ub=[None,None,None], verbose= False,
@@ -404,7 +403,7 @@ def splituq(full_uq,scales=1):
                 Vfit_uq.append( UncertQuant('covariance',Vfit[jj],Vcovmat,[],[]))
             elif includeForeground:
                 # Parametric signal with parameter-free distribution
-                Jnonlin = np.reshape(nd.Jacobian(lambda par: multiPathwayModel(par[paramidx])[0][jj]@Pfit)(parfit_),(-1,parfit_.size))
+                Jnonlin = fdJacobian(lambda par: multiPathwayModel(par[paramidx])[0][jj]@Pfit,parfit_)
                 J = np.concatenate((Jnonlin, Kfit[jj]),1)
                 Vcovmat = J@covmat@J.T
                 Vfit_uq.append( UncertQuant('covariance',Vfit[jj],Vcovmat,[],[]))
diff --git a/deerlab/snlls.py b/deerlab/snlls.py
index 514f1be63..a39a5f221 100644
--- a/deerlab/snlls.py
+++ b/deerlab/snlls.py
@@ -5,14 +5,13 @@
 
 import copy
 import numpy as np
-import numdifftools as nd
 from scipy.optimize import least_squares, lsq_linear
 import matplotlib.pyplot as plt
 from numpy.linalg import solve
 
 # Import DeerLab depencies
 import deerlab as dl
-from deerlab.utils import goodness_of_fit, hccm, isempty
+from deerlab.utils import goodness_of_fit, hccm, isempty, fdJacobian
 from deerlab.nnls import cvxnnls, fnnls, nnlsbpp
 from deerlab.classes import UncertQuant, FitResult
 
@@ -365,7 +364,8 @@ def ResidualsFcn(p):
 
         # Compute the Jacobian for the linear and non-linear parameters
         fcn = lambda p: Amodel(p)@linfit
-        Jnonlin = np.reshape(nd.Jacobian(fcn)(nonlinfit),(-1,nonlinfit.size))
+        Jnonlin = fdJacobian(fcn,nonlinfit)
+
         Jlin = Afit
         J = np.concatenate((Jnonlin, Jlin),1)
 
diff --git a/deerlab/utils/utils.py b/deerlab/utils/utils.py
index c6ec65ffe..88aad3cb8 100644
--- a/deerlab/utils/utils.py
+++ b/deerlab/utils/utils.py
@@ -382,8 +382,8 @@ def csd(Q1,Q2):
     return U,V,Z,C,S
 #===============================================================================
 
-def diagf(X):
 #===============================================================================
+def diagf(X):
     """
     Diagonal force
 
@@ -393,9 +393,8 @@ def diagf(X):
     return X
 #===============================================================================
 
-
-def diagp(Y,X,k):
 #===============================================================================
+def diagp(Y,X,k):
     """
     DIAGP  Diagonal positive.
     Y,X = diagp(Y,X,k) scales the columns of Y and the rows of X by
@@ -410,6 +409,29 @@ def diagp(Y,X,k):
     return Y,X
 #===============================================================================
 
+#===============================================================================
+def fdJacobian(fcn, param):
+    """
+    Finite difference Jacobian
+    Estimates the Jacobian matrix of a function ``fcn`` at the point defined by ``param``.
+
+    """
+    param = np.atleast_1d(param)
+    # Step size
+    h = 1e-5
+    # Central value
+    f0 = fcn(param)
+    f0 = np.atleast_1d(f0)
+    # Loop over parameters
+    J = np.zeros((len(f0),len(param)))
+    for i in range(len(param)):
+        rise = np.copy(param)
+        rise[i] = rise[i] + h
+        # Finite difference derivative of i-th parameter
+        J[:,i] = (fcn(rise) - f0)/h
+    return J
+#===============================================================================
+
 #===============================================================================
 def movmean(x, N):
     """
diff --git a/docsrc/source/firstscript.rst b/docsrc/source/firstscript.rst
index 78c3b4cc7..cfe52b3b1 100644
--- a/docsrc/source/firstscript.rst
+++ b/docsrc/source/firstscript.rst
@@ -32,6 +32,9 @@ which in matrix form reads
 
 .. math:: \boldsymbol{V} = \boldsymbol{K}\boldsymbol{P}
 
+Simulating a distance distribution
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
 To simulate a dipolar signal you need first to define a distance distribution. This distribution can be the result of a fit,
 come from molecular dynamics simulations, etc. or you can assume a certain shape for the distribution and use one of the parametric models
 available in DeerLab. For example, to simulate a Gaussian distance distribution use the ``dd_gauss`` model function
@@ -43,6 +46,10 @@ available in DeerLab. For example, to simulate a Gaussian distance distribution
     sigma = 0.3 # nm
     P = dl.dd_gauss(r,[rmean, sigma]) # single-Gaussian distance distribution
 
+
+Simulating a dipolar background
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
 Next, we calculate the background decay function due to a homogeneus 3D distribution of spins:
 
 .. code-block:: python
diff --git a/docsrc/source/installation.rst b/docsrc/source/installation.rst
index 340c07dcb..a17590112 100644
--- a/docsrc/source/installation.rst
+++ b/docsrc/source/installation.rst
@@ -39,7 +39,6 @@ DeerLab will install the following packages:
     * `cvxopt `_ - Free software package for convex optimization
     * `numpy `_ - Base N-dimensional array package
     * `scipy `_ - Fundamental library for scientific computing
-    * `numdifftools `_ - Tools to solve automatic numerical differentiation problems in one or more variables.
     * `joblib `_ - Library lightweight pipelining and parallelization.
 
 The installed numerical packages (numpy, scipy, cvxopt) are linked against different BLAS libraries depending on the OS:
@@ -66,12 +65,12 @@ Installing from source
 
 To install DeerLab from the source, first it must be downloaded or cloned from the `DeerLab repository `_.
 DeerLab and its dependencies can be installed by running the following command on a terminal window to install DeerLab as a static Python package::
 
-    python setup.py install
+    python -m setup.py install
 
 For developers, in order to install DeerLab but be able to frequently edit the code without having to re-install the package every time use the command::
 
-    python setup.py develop
+    python -m setup.py develop
 
 any further changes made to the source code will then immediate effect.
\ No newline at end of file
diff --git a/setup.py b/setup.py
index 1657e9d98..dd51dfb36 100644
--- a/setup.py
+++ b/setup.py
@@ -23,7 +23,6 @@ def install_dependencies():
     subprocess.run(['pip','install','scipy'],check=True)
     subprocess.run(['pip','install','cvxopt'],check=True)
-    subprocess.run(['pip','install','numdifftools'],check=True)
     subprocess.run(['pip','install','tqdm'],check=True)
     subprocess.run(['pip','install','joblib'],check=True)
 #-----------------------------------------------------------------------
diff --git a/test/test_fitsignal.py b/test/test_fitsignal.py
index 2c5a0e3cf..05f2c0de2 100644
--- a/test/test_fitsignal.py
+++ b/test/test_fitsignal.py
@@ -34,7 +34,6 @@ def test_4pdeer():
 
     assert_experiment_model(ex_4pdeer)
 # ======================================================================
-test_4pdeer()
 
 def test_5pdeer():
 # ======================================================================
@@ -464,4 +463,4 @@ def test_global_scale_4pdeer():
     fit = fitsignal([V1,V2],[t1,t2],r,'P',bg_exp,ex_4pdeer,uqanalysis=False)
 
     assert max(abs(np.asarray(scales)/np.asarray(fit.scale) - 1)) < 1e-2
-# ======================================================================
\ No newline at end of file
+# ======================================================================
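
Note on the new utility: the patch replaces the numdifftools dependency with a small forward-difference
Jacobian, fdJacobian(fcn, param), added to deerlab/utils/utils.py. The sketch below is illustrative only
and not part of the patch; it compares the forward-difference result against the analytic Jacobian of a
toy two-parameter model, and the toy model and tolerance are assumptions made for this example:

    import numpy as np
    from deerlab.utils import fdJacobian

    def model(p):
        # Toy two-parameter model: f(p) = [p0*p1, p0**2, sin(p1)]
        return np.array([p[0]*p[1], p[0]**2, np.sin(p[1])])

    def analytic_jacobian(p):
        # Exact Jacobian of the toy model, for comparison
        return np.array([[p[1],   p[0]],
                         [2*p[0], 0.0],
                         [0.0,    np.cos(p[1])]])

    p = np.array([1.5, 0.3])
    J_fd = fdJacobian(model, p)   # shape (3, 2): rows = outputs, columns = parameters
    J_an = analytic_jacobian(p)

    # With the fixed step h = 1e-5 the forward difference is first-order accurate,
    # so agreement to roughly 1e-4 is expected for a smooth, well-scaled model.
    assert np.allclose(J_fd, J_an, atol=1e-4)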
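
Several of the changed call sites use the Jacobian for covariance-based uncertainty propagation, as in
the `Vcovmat = J@covmat@J.T` step of fitsignal.py: compute the Jacobian of a model with respect to the
fitted parameters and propagate a parameter covariance matrix to the modelled quantity. A minimal sketch
of that propagation step, assuming a made-up exponential model, fitted parameters and covariance matrix
purely for illustration:

    import numpy as np
    from deerlab.utils import fdJacobian

    def signal_model(p):
        # Hypothetical model: exponential decay evaluated on a fixed time axis
        t = np.linspace(0, 3, 50)
        return p[0]*np.exp(-p[1]*t)

    parfit = np.array([1.0, 0.4])        # assumed fitted parameters
    covmat = np.array([[1e-4, 0.0],      # assumed parameter covariance matrix
                       [0.0,  4e-4]])

    J = fdJacobian(signal_model, parfit) # shape (50, 2)
    Vcovmat = J @ covmat @ J.T           # covariance of the modelled signal
    Vstd = np.sqrt(np.diag(Vcovmat))     # pointwise standard deviation of the modelled signal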