From e53f9c2b5514e882617f844acb0b62186bb98bab Mon Sep 17 00:00:00 2001
From: Luis Fabregas
Date: Wed, 31 Mar 2021 08:34:17 +0200
Subject: [PATCH 1/2] snlls: enable scale preconditioning to avoid scaling
 issues in uncertainty quantification (#132)

---
 deerlab/snlls.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/deerlab/snlls.py b/deerlab/snlls.py
index 2dc0d1e79..ff496199e 100644
--- a/deerlab/snlls.py
+++ b/deerlab/snlls.py
@@ -205,7 +205,7 @@ def snlls(y, Amodel, par0, lb=None, ub=None, lbl=None, ubl=None, nnlsSolver='cvx
     par0 = np.atleast_1d(par0)
 
     # Parse multiple datsets and non-linear operators into a single concatenated vector/matrix
-    y, Amodel, weights, subsets = dl.utils.parse_multidatasets(y, Amodel, weights, precondition=False)
+    y, Amodel, weights, subsets, prescales = dl.utils.parse_multidatasets(y, Amodel, weights, precondition=True)
 
     # Get info on the problem parameters and non-linear operator
     A0 = Amodel(par0)
@@ -213,7 +213,7 @@ def snlls(y, Amodel, par0, lb=None, ub=None, lbl=None, ubl=None, nnlsSolver='cvx
     Nlin = np.shape(A0)[1]
     linfit = np.zeros(Nlin)
     scales = [1 for _ in subsets]
-    prescales= [1 for _ in subsets]
+
     # Determine whether to use regularization penalty
     illConditioned = np.linalg.cond(A0) > 10
     if reg == 'auto':
@@ -258,7 +258,6 @@ def snlls(y, Amodel, par0, lb=None, ub=None, lbl=None, ubl=None, nnlsSolver='cvx
     # Check for non-negativity constraints on the linear solution
     nonNegativeOnly = (np.all(lbl == 0)) and (np.all(np.isinf(ubl)))
-
     # Use an arbitrary axis
     ax = np.arange(1, Nlin+1)
 
     if includeRegularization :
@@ -432,7 +431,7 @@ def ResidualsFcn(p):
 
         # Jacobian (linear part)
         Jlin = np.zeros((len(res),len(linfit)))
-        Jlin[:len(y),:] = Amodel(nonlinfit)
+        Jlin[:len(y),:] = scales_vec[:,np.newaxis]*Amodel(nonlinfit)
         if includeRegularization:
             Jlin[len(res)-Nlin:,:] = reg_penalty(regtype, alpha, L, linfit, huberparam, Nnonlin)[1]
 
@@ -534,7 +533,8 @@ def reg_penalty(regtype, alpha, L, x, eta, Nnonlin):
 def _plot(subsets,y,yfit,show):
 # ===========================================================================================
     nSignals = len(subsets)
-    fig,axs = plt.subplots(nSignals+1,figsize=[7,3*nSignals])
+    fig,axs = plt.subplots(nSignals,figsize=[7,3*nSignals])
+    axs = np.atleast_1d(axs)
     for i in range(nSignals):
         subset = subsets[i]
         # Plot the experimental signal and fit
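In short, the first patch makes snlls precondition the signal scale before the separable least-squares solve and folds the fitted scales back into the linear Jacobian, so the covariance-based confidence intervals no longer depend on the absolute amplitude of the data. The sketch below illustrates that preconditioning idea on a plain linear least-squares problem; it is only an illustration under simplifying assumptions (unit-variance noise, no regularization), not DeerLab code, and the helper name fit_with_prescaling is hypothetical.

# Illustrative sketch of scale preconditioning (assumptions: plain linear
# least squares, unit-variance noise); not part of the patch.
import numpy as np

def fit_with_prescaling(y, A):
    # Precondition: normalize the signal amplitude before solving
    prescale = np.max(np.abs(y))
    x_norm, *_ = np.linalg.lstsq(A, y/prescale, rcond=None)
    # Covariance of the normalized solution (unit-variance noise assumed)
    cov_norm = np.linalg.pinv(A.T @ A)
    # Undo the preconditioning on both the solution and its covariance
    return prescale*x_norm, prescale**2*cov_norm

# The confidence intervals then scale linearly with the signal amplitude,
# so the relative uncertainty is the same for y and 1e9*y -- the property
# the new test below checks for snlls.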
scaling" + + # Prepare test data + r = np.linspace(1,8,80) + t = np.linspace(0,4,200) + lam = 0.25 + K = dipolarkernel(t,r,lam) + parin = [3.5, 0.4, 0.6, 4.5, 0.5, 0.4] + P = dd_gauss2(r,parin) + V = K@P + # Non-linear parameters + nlpar0 = 0.2 + lb = 0 + ub = 1 + # Linear parameters: non-negativity + lbl = np.zeros(len(r)) + V0_1 = 1 + V0_2 = 1e9 + + # Separable LSQ fit + fit1 = snlls(V*V0_1,lambda lam: dipolarkernel(t,r,lam),nlpar0,lb,ub,lbl) + fit2 = snlls(V*V0_2,lambda lam: dipolarkernel(t,r,lam),nlpar0,lb,ub,lbl) + + assert np.max(abs(fit1.linUncert.ci(95)/V0_1 - fit2.linUncert.ci(95)/V0_2)) < 0.05 +#============================================================