From a18529e985a067fe0512e7b4b16cca51809baa8f Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Thu, 21 Oct 2021 14:50:48 +0100 Subject: [PATCH 01/88] add strongly convex case, and simple denoising test --- .../cil/optimisation/algorithms/PDHG.py | 110 +++++++++++++++++- 1 file changed, 104 insertions(+), 6 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py index 2387ba4817..0beb70ae84 100644 --- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py @@ -17,7 +17,7 @@ from cil.optimisation.algorithms import Algorithm import warnings - +import numpy class PDHG(Algorithm): @@ -78,9 +78,9 @@ def __init__(self, f=None, g=None, operator=None, tau=None, sigma=1.,initial=Non self._use_axpby = use_axpby if f is not None and operator is not None and g is not None: - self.set_up(f=f, g=g, operator=operator, tau=tau, sigma=sigma, initial=initial) + self.set_up(f=f, g=g, operator=operator, tau=tau, sigma=sigma, initial=initial, **kwargs) - def set_up(self, f, g, operator, tau=None, sigma=1., initial=None): + def set_up(self, f, g, operator, tau=None, sigma=1., initial=None, **kwargs): '''initialisation of the algorithm :param operator: a Linear Operator @@ -117,9 +117,17 @@ def set_up(self, f, g, operator, tau=None, sigma=1., initial=None): self.x = self.x_old.copy() self.x_tmp = self.operator.domain_geometry().allocate(0) self.y = self.operator.range_geometry().allocate(0) - self.y_tmp = self.operator.range_geometry().allocate(0) - # relaxation parameter - self.theta = 1 + self.y_tmp = self.operator.range_geometry().allocate(0) + + # relaxation parameter, default value is 1.0 + self.theta = kwargs.get('theta',1.0) + + # Strongly convex case g + self.gamma_g = kwargs.get('gamma_g', None) + + # Strongly convex case f + self.gamma_fconj = kwargs.get('gamma_fconj', None) + self.configured = True print("{} configured".format(self.__class__.__name__, )) @@ -168,6 +176,19 @@ def update(self): #update_previous_solution() called after update by base class #i.e current solution is now in x_old, previous solution is now in x + + # Update sigma and tau based on the strong convexity of G + if self.gamma_g is not None: + self.theta = float(1 / numpy.sqrt(1 + 2 * self.gamma_g * self.tau)) + self.tau *= self.theta + self.sigma /= self.theta + + # Update sigma and tau based on the strong convexity of F + # Following operations are reversed due to symmetry, sigma --> tau, tau -->sigma + if self.gamma_fconj is not None: + self.theta = float(1 / numpy.sqrt(1 + 2 * self.gamma_f * self.sigma)) + self.sigma *= self.theta + self.tau /= self.theta def update_objective(self): @@ -197,3 +218,80 @@ def dual_objective(self): @property def primal_dual_gap(self): return [x[2] for x in self.loss] + + +if __name__ == "__main__": + + # Import libraries + from cil.utilities import dataexample, noise + from cil.optimisation.operators import GradientOperator + from cil.optimisation.functions import MixedL21Norm, L2NormSquared + from cil.utilities.display import show2D + from cil.io import NEXUSDataWriter, NEXUSDataReader + import pickle + import matplotlib.pyplot as plt + import os + + print("Denoising Case : Default sigma/tau vs Strongly convex") + # Load data + data = dataexample.CAMERA.get(size=(256,256)) + + # Add gaussian noise + noisy_data = noise.gaussian(data, seed = 10, var = 0.02) + + ig = noisy_data.geometry + + alpha = 1. 
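+    # The demo solves the TV denoising problem
+    #   min_x  alpha * ||grad(x)||_{2,1} + ||x - b||_{2}^{2}
+    # G = L2NormSquared(b=noisy_data) is 2-strongly convex, so the gamma_g
+    # acceleration added above applies; F = alpha * MixedL21Norm() is not
+    # strongly convex, hence gamma_fconj is not used in this example.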
+ K = GradientOperator(ig) + F = alpha * MixedL21Norm() + G = L2NormSquared(b=noisy_data) + + normK = K.norm() + sigma = 1./normK + tau = 1./normK + + # standard pdhg + pdhg = PDHG(f = F, g = G, operator = K, + update_objective_interval=1, + max_iteration=2000, sigma=sigma, tau=tau) + pdhg.run(verbose=0) + + # pdhg = {} + # pdhg['primal'] = pdhg.objective + # pdhg['dual'] = pdhg.dual_objective + # pdhg['pdgap'] = pdhg.primal_dual_gap + + # with open(os.getcwd() + 'pdhg_noaccel_info.pkl','wb') as f: + # pickle.dump(pdhg_noaccel_info, f) + + # PDHG with G strongly convex acceleration + pdhg_sc = PDHG(f = F, g = G, operator = K, + update_objective_interval=1, max_iteration=2000, + gamma_g = 1, sigma=sigma, tau=tau) + pdhg_sc.run(verbose=0) + + # Load pdhg_noaccel_info + # pdhg_noaccel_info = pickle.load( open( os.getcwd() + 'pdhg_noaccel_info.pkl', "rb" ) ) + + plt.figure() + plt.loglog(pdhg_sc.objective, label="Strongly Convex (g)") + plt.loglog(pdhg.objective, label="No accelerate") + plt.legend() + plt.title("Primal") + plt.show() + + plt.figure() + plt.loglog(pdhg_sc.primal_dual_gap, label="Strongly Convex (g)") + plt.loglog(pdhg.primal_dual_gap, label="No accelerate") + plt.legend() + plt.title("PrimalDual gap") + plt.show() + +# reader_pdhg = NEXUSDataReader(file_name = os.getcwd() + "pdhg_noaccel" + ".nxs") +# pdhg_accel_solution = reader_pdhg.load_data() + + show2D([pdhg_sc.solution, + pdhg.solution, + (pdhg_sc.solution - pdhg.solution).abs()], + num_cols=1, origin="upper") + From e581dfe661c18f041922691afd59f14e6c248ee6 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Thu, 21 Oct 2021 15:10:03 +0100 Subject: [PATCH 02/88] add strongly convex case, and simple denoising test --- Wrappers/Python/cil/optimisation/algorithms/PDHG.py | 1 - 1 file changed, 1 deletion(-) diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py index 0beb70ae84..f251082f8f 100644 --- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py @@ -294,4 +294,3 @@ def primal_dual_gap(self): pdhg.solution, (pdhg_sc.solution - pdhg.solution).abs()], num_cols=1, origin="upper") - From 7c8cdd35e17d5882b9cbd99ec0b97c95e5451e42 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Fri, 22 Oct 2021 08:57:37 +0100 Subject: [PATCH 03/88] merge master --- CHANGELOG.md | 1 + Wrappers/Python/cil/framework/framework.py | 12 +- .../ccpi_regularisation/functions/__init__.py | 3 +- .../functions/regularisers.py | 284 ++++++++++++------ .../Python/test/test_PluginsRegularisation.py | 235 +++++++++++++-- Wrappers/Python/test/test_functions.py | 57 +--- 6 files changed, 407 insertions(+), 185 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b83c5f43b2..7f7102b335 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,5 @@ * 21.2.1 + - CCPi Regularisation plugin is refactored, only FGP_TV, FGP_dTV, TGV and TNV are exposed. Docstrings and functionality unit tests are added. Tests of the functions are meant to be in the CCPi-Regularisation toolkit itself. 
- Add dtype for ImageGeometry, AcquisitionGeometry, VectorGeometry, BlockGeometry - Fix GradientOperator to handle pseudo 2D CIL geometries - Created Reconstructor base class for simpler use of CIL methods diff --git a/Wrappers/Python/cil/framework/framework.py b/Wrappers/Python/cil/framework/framework.py index 9841ba8a8c..eb6c93bf0f 100644 --- a/Wrappers/Python/cil/framework/framework.py +++ b/Wrappers/Python/cil/framework/framework.py @@ -2401,11 +2401,6 @@ def log(self, *args, **kwargs): '''Applies log pixel-wise to the DataContainer''' return self.pixel_wise_unary(numpy.log, *args, **kwargs) - #def __abs__(self): - # operation = FM.OPERATION.ABS - # return self.callFieldMath(operation, None, self.mask, self.maskOnValue) - # __abs__ - ## reductions def sum(self, *args, **kwargs): return self.as_array().sum(*args, **kwargs) @@ -3085,7 +3080,12 @@ def get_order_for_engine(engine, geometry): if isinstance(geometry, AcquisitionGeometry): dim_order = DataOrder.TIGRE_AG_LABELS else: - dim_order = DataOrder.TIGRE_IG_LABELS + dim_order = DataOrder.TIGRE_IG_LABELS + elif engine == 'cil': + if isinstance(geometry, AcquisitionGeometry): + dim_order = DataOrder.CIL_AG_LABELS + else: + dim_order = DataOrder.CIL_IG_LABELS else: raise ValueError("Unknown engine expected 'tigre' or 'astra' got {}".format(engine)) diff --git a/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/__init__.py b/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/__init__.py index aea6902e7d..1b27aef58e 100644 --- a/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/__init__.py +++ b/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/__init__.py @@ -15,5 +15,4 @@ # See the License for the specific language governing permissions and # limitations under the License. -from .regularisers import FGP_TV, ROF_TV, TGV, LLT_ROF, FGP_dTV,\ - SB_TV, TNV +from .regularisers import FGP_TV, TGV, FGP_dTV, TNV diff --git a/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py b/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py index 9940c95766..b0d39f6222 100644 --- a/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py +++ b/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py @@ -25,21 +25,36 @@ "Minimal version is 20.04") -from cil.framework import DataContainer +from cil.framework import DataOrder from cil.optimisation.functions import Function import numpy as np import warnings +from numbers import Number class RegulariserFunction(Function): def proximal(self, x, tau, out=None): + '''Generic proximal method for a RegulariserFunction + + :param x: image to be regularised + :type x: an ImageData + :param tau: + :type tau: Number + :param out: a placeholder for the result + :type out: same as x: ImageData + + If the ImageData contains complex data, rather than the default float32, the regularisation + is run indipendently on the real and imaginary part. 
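+
+        Note that tau scales the regularisation strength: the wrappers compute
+        the proximal of tau * f by passing the scaled parameter alpha * tau to
+        the underlying toolkit call.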
+
+        '''
+        
+        self.check_input(x)
         arr = x.as_array()
         if arr.dtype in [np.complex, np.complex64]:
             # do real and imag part indep
             in_arr = np.asarray(arr.real, dtype=np.float32, order='C')
-            res, info = self.proximal_numpy(in_arr, tau, out)
+            res, info = self.proximal_numpy(in_arr, tau)
             arr.real = res[:]
             in_arr = np.asarray(arr.imag, dtype=np.float32, order='C')
-            res, info = self.proximal_numpy(in_arr, tau, out)
+            res, info = self.proximal_numpy(in_arr, tau)
             arr.imag = res[:]
             self.info = info
             if out is not None:
                 out.fill(arr)
             else:
                 out = x.copy()
                 out.fill(arr)
                 return out
         else:
             arr = np.asarray(x.as_array(), dtype=np.float32, order='C')
-            res, info = self.proximal_numpy(arr, tau, out)
+            res, info = self.proximal_numpy(arr, tau)
             self.info = info
             if out is not None:
                 out.fill(res)
             else:
                 out = x.copy()
                 out.fill(res)
                 return out
-    def proximal_numpy(self, xarr, tau, out=None):
+    def proximal_numpy(self, xarr, tau):
         raise NotImplementedError('Please implement proximal_numpy')

+    def check_input(self, input):
+        pass
+
 class TV_Base(RegulariserFunction):
     def __call__(self,x):
         in_arr = np.asarray(x.as_array(), dtype=np.float32, order='C')
@@ -70,28 +88,25 @@ def __call__(self,x):
     def convex_conjugate(self,x):
         return 0.0

-class ROF_TV(TV_Base):
-    def __init__(self,lambdaReg,iterationsTV,tolerance,time_marchstep,device):
-        # set parameters
-        self.alpha = lambdaReg
-        self.max_iteration = iterationsTV
-        self.time_marchstep = time_marchstep
-        self.device = device # string for 'cpu' or 'gpu'
-        self.tolerance = tolerance
-
-    def proximal_numpy(self, in_arr, tau, out = None):
-        res , info = regularisers.ROF_TV(in_arr,
-                        self.alpha,
-                        self.max_iteration,
-                        self.time_marchstep,
-                        self.tolerance,
-                        self.device)
-
-        return res, info

 class FGP_TV(TV_Base):
-    def __init__(self, alpha=1, max_iteration=100, tolerance=1e-6, isotropic=True, nonnegativity=True, printing=False, device='cpu'):
+    def __init__(self, alpha=1, max_iteration=100, tolerance=0, isotropic=True, nonnegativity=True, device='cpu'):
+        '''Creator of FGP_TV Function
+
+        :param alpha: regularisation parameter
+        :type alpha: number, default 1
+        :param isotropic: Whether it uses the L2 (isotropic) or L1 (anisotropic) norm
+        :type isotropic: boolean, default True
+        :param nonnegativity: Whether to add the non-negativity constraint
+        :type nonnegativity: boolean, default True
+        :param max_iteration: max number of sub iterations. The algorithm will iterate up to this number of iterations or up to when the tolerance has been reached
+        :type max_iteration: integer, default 100
+        :param tolerance: tolerance on the difference between successive iterates; if reached, the algorithm stops earlier than max_iteration. If set to 0 only the max_iteration will be used as stop criterion.
+ :type tolerance: float, default 0 + :param device: determines if the code runs on CPU or GPU + :type device: string, default 'cpu', can be 'gpu' if GPU is installed + ''' if isotropic == True: self.methodTV = 0 else: @@ -108,40 +123,83 @@ def __init__(self, alpha=1, max_iteration=100, tolerance=1e-6, isotropic=True, n self.nonnegativity = nonnegativity self.device = device # string for 'cpu' or 'gpu' - def proximal_numpy(self, in_arr, tau, out = None): + def proximal_numpy(self, in_arr, tau): res , info = regularisers.FGP_TV(\ in_arr,\ - self.alpha*tau,\ + self.alpha * tau,\ self.max_iteration,\ self.tolerance,\ self.methodTV,\ self.nonnegativity,\ self.device) return res, info + + def __rmul__(self, scalar): + '''Define the multiplication with a scalar + + this changes the regularisation parameter in the plugin''' + if not isinstance (scalar, Number): + raise NotImplemented + else: + self.alpha *= scalar + return self + def check_input(self, input): + if input.geometry.length > 3: + raise ValueError('{} cannot work on more than 3D. Got {}'.format(self.__class__.__name__, input.geometry.length)) class TGV(RegulariserFunction): - def __init__(self, regularisation_parameter, alpha1, alpha2, iter_TGV, LipshitzConstant, torelance, device ): - self.regularisation_parameter = regularisation_parameter - self.alpha1 = alpha1 - self.alpha2 = alpha2 - self.iter_TGV = iter_TGV - self.LipshitzConstant = LipshitzConstant - self.torelance = torelance + def __init__(self, alpha=1, gamma=1, max_iteration=100, tolerance=0, device='cpu' , **kwargs): + '''Creator of Total Generalised Variation Function + + :param alpha: regularisation parameter + :type alpha: number, default 1 + :param gamma: ratio of TGV terms + :type gamma: number, default 1, can range between 1 and 2 + :param max_iteration: max number of sub iterations. The algorithm will iterate up to this number of iteration or up to when the tolerance has been reached + :type max_iteration: integer, default 100 + :param tolerance: minimum difference between previous iteration of the algorithm that determines the stop of the iteration earlier than max_iteration. If set to 0 only the max_iteration will be used as stop criterion. + :type tolerance: float, default 0 + :param device: determines if the code runs on CPU or GPU + :type device: string, default 'cpu', can be 'gpu' if GPU is installed + + ''' + + self.alpha = alpha + self.gamma = gamma + self.max_iteration = max_iteration + self.tolerance = tolerance self.device = device + + if kwargs.get('iter_TGV', None) is not None: + # raise ValueError('iter_TGV parameter has been superseded by num_iter. Use that instead.') + self.num_iter = kwargs.get('iter_TGV') def __call__(self,x): warnings.warn("{}: the __call__ method is not implemented. Returning NaN.".format(self.__class__.__name__)) return np.nan + @property + def gamma(self): + return self.__gamma + @gamma.setter + def gamma(self, value): + if value <= 2 and value >= 1: + self.__gamma = value + @property + def alpha2(self): + return self.alpha1 * self.gamma + @property + def alpha1(self): + return 1. 
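+    # NB: proximal_numpy below passes alpha * tau to the toolkit as the overall
+    # regularisation weight, with the TGV terms balanced as alpha1 = 1 and
+    # alpha2 = alpha1 * gamma, so gamma only sets the first/second order ratio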
- def proximal_numpy(self, in_arr, tau, out = None): + def proximal_numpy(self, in_arr, tau): res , info = regularisers.TGV(in_arr, - self.regularisation_parameter, + self.alpha * tau, self.alpha1, self.alpha2, - self.iter_TGV, + self.max_iteration, self.LipshitzConstant, - self.torelance, + self.tolerance, self.device) # info: return number of iteration and reached tolerance @@ -152,46 +210,51 @@ def proximal_numpy(self, in_arr, tau, out = None): def convex_conjugate(self, x): warnings.warn("{}: the convex_conjugate method is not implemented. Returning NaN.".format(self.__class__.__name__)) return np.nan - -class LLT_ROF(RegulariserFunction): - - def __init__(self, regularisation_parameterROF, - regularisation_parameterLLT, - iter_LLT_ROF, time_marching_parameter, torelance, device ): - self.regularisation_parameterROF = regularisation_parameterROF - self.regularisation_parameterLLT = regularisation_parameterLLT - self.iter_LLT_ROF = iter_LLT_ROF - self.time_marching_parameter = time_marching_parameter - self.torelance = torelance - self.device = device + def __rmul__(self, scalar): + '''Define the multiplication with a scalar - def __call__(self,x): - warnings.warn("{}: the __call__ method is not implemented. Returning NaN.".format(self.__class__.__name__)) - return np.nan - - def proximal_numpy(self, in_arr, tau, out = None): - res , info = regularisers.LLT_ROF(in_arr, - self.regularisation_parameterROF, - self.regularisation_parameterLLT, - self.iter_LLT_ROF, - self.time_marching_parameter, - self.torelance, - self.device) - - # info: return number of iteration and reached tolerance - # https://github.com/vais-ral/CCPi-Regularisation-Toolkit/blob/master/src/Core/regularisers_CPU/TGV_core.c#L168 - # Stopping Criteria || u^k - u^(k-1) ||_{2} / || u^{k} ||_{2} - - return res, info + this changes the regularisation parameter in the plugin''' + if not isinstance (scalar, Number): + raise NotImplemented + else: + self.alpha *= scalar + return self - def convex_conjugate(self, x): - warnings.warn("{}: the convex_conjugate method is not implemented. Returning NaN.".format(self.__class__.__name__)) - return np.nan + # f = TGV() + # f = alpha * f + + def check_input(self, input): + if len(input.dimension_labels) == 2: + self.LipshitzConstant = 12 + elif len(input.dimension_labels) == 3: + self.LipshitzConstant = 16 # Vaggelis to confirm + else: + raise ValueError('{} cannot work on more than 3D. Got {}'.format(self.__class__.__name__, input.geometry.length)) + class FGP_dTV(RegulariserFunction): + '''Creator of FGP_dTV Function + + :param reference: reference image + :type reference: ImageData + :param alpha: regularisation parameter + :type alpha: number, default 1 + :param max_iteration: max number of sub iterations. The algorithm will iterate up to this number of iteration or up to when the tolerance has been reached + :type max_iteration: integer, default 100 + :param tolerance: minimum difference between previous iteration of the algorithm that determines the stop of the iteration earlier than max_iteration. If set to 0 only the max_iteration will be used as stop criterion. 
+ :type tolerance: float, default 0 + :param eta: smoothing constant to calculate gradient of the reference + :type eta: number, default 0.01 + :param isotropic: Whether it uses L2 (isotropic) or L1 (anisotropic) norm + :type isotropic: boolean, default True, can range between 1 and 2 + :param nonnegativity: Whether to add the non-negativity constraint + :type nonnegativity: boolean, default True + :param device: determines if the code runs on CPU or GPU + :type device: string, default 'cpu', can be 'gpu' if GPU is installed + ''' def __init__(self, reference, alpha=1, max_iteration=100, - tolerance=1e-6, eta=0.01, isotropic=True, nonnegativity=True, device='cpu'): + tolerance=0, eta=0.01, isotropic=True, nonnegativity=True, device='cpu'): if isotropic == True: self.methodTV = 0 @@ -214,11 +277,11 @@ def __call__(self,x): warnings.warn("{}: the __call__ method is not implemented. Returning NaN.".format(self.__class__.__name__)) return np.nan - def proximal_numpy(self, in_arr, tau, out = None): + def proximal_numpy(self, in_arr, tau): res , info = regularisers.FGP_dTV(\ in_arr,\ self.reference,\ - self.alpha*tau,\ + self.alpha * tau,\ self.max_iteration,\ self.tolerance,\ self.eta,\ @@ -231,47 +294,68 @@ def convex_conjugate(self, x): warnings.warn("{}: the convex_conjugate method is not implemented. Returning NaN.".format(self.__class__.__name__)) return np.nan -class SB_TV(TV_Base): - def __init__(self,lambdaReg,iterationsTV,tolerance,methodTV,printing,device): - # set parameters - self.alpha = lambdaReg - self.max_iteration = iterationsTV - self.tolerance = tolerance - self.methodTV = methodTV - self.printing = printing - self.device = device # string for 'cpu' or 'gpu' - - def proximal_numpy(self, in_arr, tau, out = None): - res , info = regularisers.SB_TV(in_arr, - self.alpha*tau, - self.max_iteration, - self.tolerance, - self.methodTV, - self.device) + def __rmul__(self, scalar): + '''Define the multiplication with a scalar - return res, info + this changes the regularisation parameter in the plugin''' + if not isinstance (scalar, Number): + raise NotImplemented + else: + self.alpha *= scalar + return self + + def check_input(self, input): + if input.geometry.length > 3: + raise ValueError('{} cannot work on more than 3D. Got {}'.format(self.__class__.__name__, input.geometry.length)) class TNV(RegulariserFunction): - def __init__(self,regularisation_parameter,iterationsTNV,tolerance): - + def __init__(self,alpha=1, max_iteration=100, tolerance=0): + '''Creator of TNV Function + + :param alpha: regularisation parameter + :type alpha: number, default 1 + :param max_iteration: max number of sub iterations. The algorithm will iterate up to this number of iteration or up to when the tolerance has been reached + :type max_iteration: integer, default 100 + :param tolerance: minimum difference between previous iteration of the algorithm that determines the stop of the iteration earlier than max_iteration. If set to 0 only the max_iteration will be used as stop criterion. + :type tolerance: float, default 0 + ''' # set parameters - self.regularisation_parameter = regularisation_parameter - self.iterationsTNV = iterationsTNV + self.alpha = alpha + self.max_iteration = max_iteration self.tolerance = tolerance def __call__(self,x): warnings.warn("{}: the __call__ method is not implemented. 
Returning NaN.".format(self.__class__.__name__)) return np.nan - def proximal_numpy(self, in_arr, tau, out = None): + def proximal_numpy(self, in_arr, tau): + if in_arr.ndim != 3: + # https://github.com/vais-ral/CCPi-Regularisation-Toolkit/blob/413c6001003c6f1272aeb43152654baaf0c8a423/src/Python/src/cpu_regularisers.pyx#L584-L588 + raise ValueError('Only 3D data is supported. Passed data has {} dimensions'.format(in_arr.ndim)) res = regularisers.TNV(in_arr, - self.regularisation_parameter, - self.iterationsTNV, + self.alpha * tau, + self.max_iteration, self.tolerance) - return res, [] def convex_conjugate(self, x): warnings.warn("{}: the convex_conjugate method is not implemented. Returning NaN.".format(self.__class__.__name__)) - return np.nan \ No newline at end of file + return np.nan + + def __rmul__(self, scalar): + '''Define the multiplication with a scalar + + this changes the regularisation parameter in the plugin''' + if not isinstance (scalar, Number): + raise NotImplemented + else: + self.alpha *= scalar + return self + + def check_input(self, input): + '''TNV requires 2D+channel data with the first dimension as the channel dimension''' + DataOrder.check_order_for_engine('cil', input.geometry) + if ( input.geometry.channels == 1 ) or ( not input.geometry.length == 3) : + raise ValueError('TNV requires 2D+channel data. Got {}'.format(input.geometry.dimension_labels)) + \ No newline at end of file diff --git a/Wrappers/Python/test/test_PluginsRegularisation.py b/Wrappers/Python/test/test_PluginsRegularisation.py index 8950af7f10..372ea5f676 100644 --- a/Wrappers/Python/test/test_PluginsRegularisation.py +++ b/Wrappers/Python/test/test_PluginsRegularisation.py @@ -40,10 +40,11 @@ # "Minimal version is 20.04") has_regularisation_toolkit = False print ("has_regularisation_toolkit", has_regularisation_toolkit) +TNV_fixed = False class TestPlugin(unittest.TestCase): def setUp(self): - print ("test plugins") + # print ("test plugins") pass def tearDown(self): pass @@ -55,13 +56,7 @@ def test_import_FGP_TV(self): except ModuleNotFoundError as ie: print (ie) assert False - @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") - def test_import_ROF_TV(self): - try: - from cil.plugins.ccpi_regularisation.functions import ROF_TV - assert True - except ModuleNotFoundError as ie: - assert False + @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") def test_import_TGV(self): try: @@ -69,13 +64,7 @@ def test_import_TGV(self): assert True except ModuleNotFoundError as ie: assert False - @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") - def test_import_LLT_ROF(self): - try: - from cil.plugins.ccpi_regularisation.functions import LLT_ROF - assert True - except ModuleNotFoundError as ie: - assert False + @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") def test_import_FGP_dTV(self): try: @@ -83,13 +72,7 @@ def test_import_FGP_dTV(self): assert True except ModuleNotFoundError as ie: assert False - @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") - def test_import_SB_TV(self): - try: - from cil.plugins.ccpi_regularisation.functions import SB_TV - assert True - except ModuleNotFoundError as ie: - assert False + @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not 
installed") def test_import_TNV(self): try: @@ -97,6 +80,7 @@ def test_import_TNV(self): assert True except ModuleNotFoundError as ie: assert False + @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") def test_FGP_TV_complex(self): data = dataexample.CAMERA.get(size=(256,256)) @@ -108,4 +92,209 @@ def test_FGP_TV_complex(self): reg = FGP_TV() out = reg.proximal(data, 1) outarr = out.as_array() - np.testing.assert_almost_equal(outarr.imag, outarr.real) \ No newline at end of file + np.testing.assert_almost_equal(outarr.imag, outarr.real) + + def rmul_test(self, f): + + alpha = f.alpha + scalar = 2.123 + af = scalar*f + + assert (id(af) == id(f)) + assert af.alpha == scalar * alpha + + @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") + def test_FGP_TV_rmul(self): + from cil.plugins.ccpi_regularisation.functions import FGP_TV + f = FGP_TV() + + self.rmul_test(f) + + @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") + def test_FGP_TGV_rmul(self): + from cil.plugins.ccpi_regularisation.functions import FGP_TGV + f = FGP_TGV() + + self.rmul_test(f) + + @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") + def test_FGP_TGV_rmul(self): + from cil.plugins.ccpi_regularisation.functions import TNV + f = TNV() + + self.rmul_test(f) + @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") + def test_FGP_dTV_rmul(self): + from cil.plugins.ccpi_regularisation.functions import FGP_dTV + data = dataexample.CAMERA.get(size=(256,256)) + f = FGP_dTV(data) + + self.rmul_test(f) + + + @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") + def test_functionality_FGP_TV(self): + + data = dataexample.CAMERA.get(size=(256,256)) + datarr = data.as_array() + from cil.plugins.ccpi_regularisation.functions import FGP_TV + from ccpi.filters import regularisers + + tau = 1. + fcil = FGP_TV() + outcil = fcil.proximal(data, tau=tau) + # use CIL defaults + outrgl, info = regularisers.FGP_TV(datarr, fcil.alpha*tau, fcil.max_iteration, fcil.tolerance, 0, 1, 'cpu' ) + np.testing.assert_almost_equal(outrgl, outcil.as_array()) + + @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") + def test_functionality_TGV(self): + + data = dataexample.CAMERA.get(size=(256,256)) + datarr = data.as_array() + from cil.plugins.ccpi_regularisation.functions import TGV + from ccpi.filters import regularisers + + tau = 1. + fcil = TGV() + outcil = fcil.proximal(data, tau=tau) + # use CIL defaults + outrgl, info = regularisers.TGV(datarr, fcil.alpha*tau, 1,1, fcil.max_iteration, 12, fcil.tolerance, 'cpu' ) + + np.testing.assert_almost_equal(outrgl, outcil.as_array()) + + @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") + def test_functionality_FGP_dTV(self): + + data = dataexample.CAMERA.get(size=(256,256)) + datarr = data.as_array() + ref = data*0.3 + from cil.plugins.ccpi_regularisation.functions import FGP_dTV + from ccpi.filters import regularisers + + tau = 1. 
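+        # the wrapper output is expected to match a direct toolkit call that
+        # repeats the CIL defaults: eta=0.01, isotropic TV (0), nonnegativity (1)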
+ fcil = FGP_dTV(ref) + outcil = fcil.proximal(data, tau=tau) + # use CIL defaults + outrgl, info = regularisers.FGP_dTV(datarr, ref.as_array(), fcil.alpha*tau, fcil.max_iteration, fcil.tolerance, 0.01, 0, 1, 'cpu' ) + np.testing.assert_almost_equal(outrgl, outcil.as_array()) + + @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") + def test_functionality_TNV(self): + + # fake a 2D+channel image + d = dataexample.SYNCHROTRON_PARALLEL_BEAM_DATA.get() + ig = ImageGeometry(160, 135, channels=91) + data = ig.allocate(None) + data.fill(d) + del d + + datarr = data.as_array() + from cil.plugins.ccpi_regularisation.functions import TNV + from ccpi.filters import regularisers + + tau = 1. + + # CIL defaults + outrgl = regularisers.TNV(datarr, 1, 100, 1e-6 ) + + fcil = TNV() + outcil = fcil.proximal(data, tau=tau) + np.testing.assert_almost_equal(outrgl, outcil.as_array()) + + @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") + def test_TNV_raise_on_2D(self): + + # data = dataexample.SYNCHROTRON_PARALLEL_BEAM_DATA.get() + data = dataexample.CAMERA.get(size=(256,256)) + datarr = data.as_array() + from cil.plugins.ccpi_regularisation.functions import TNV + + tau = 1. + + fcil = TNV() + try: + outcil = fcil.proximal(data, tau=tau) + assert False + except ValueError: + assert True + @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") + def test_TNV_raise_on_3D_nochannel(self): + + # data = dataexample.SYNCHROTRON_PARALLEL_BEAM_DATA.get() + data = dataexample.CAMERA.get(size=(256,256)) + datarr = data.as_array() + from cil.plugins.ccpi_regularisation.functions import TNV + + tau = 1. + + fcil = TNV() + try: + outcil = fcil.proximal(data, tau=tau) + assert False + except ValueError: + assert True + @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") + def test_TNV_raise_on_4D(self): + + from cil.plugins.ccpi_regularisation.functions import TNV + + data = ImageGeometry(3,4,5,channels=5).allocate(1) + + tau = 1. + + fcil = TNV() + try: + outcil = fcil.proximal(data, tau=tau) + assert False + except ValueError: + assert True + + @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") + def test_FGP_TV_raise_on_4D_data(self): + + from cil.plugins.ccpi_regularisation.functions import FGP_TV + + tau = 1. + fcil = FGP_TV() + data = ImageGeometry(3,4,5,channels=10).allocate(0) + + + try: + outcil = fcil.proximal(data, tau=tau) + assert False + except ValueError: + assert True + + @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") + def test_TGV_raise_on_4D_data(self): + + from cil.plugins.ccpi_regularisation.functions import TGV + + tau = 1. + fcil = TGV() + data = ImageGeometry(3,4,5,channels=10).allocate(0) + + + try: + outcil = fcil.proximal(data, tau=tau) + assert False + except ValueError: + assert True + @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") + def test_FGP_dTV_raise_on_4D_data(self): + + from cil.plugins.ccpi_regularisation.functions import FGP_dTV + + tau = 1. 
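+        # check_input should reject geometries with more than 3 dimensions,
+        # so proximal is expected to raise a ValueError on this 4D input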
+ + data = ImageGeometry(3,4,5,channels=10).allocate(0) + ref = data * 2 + + fcil = FGP_dTV(ref) + + try: + outcil = fcil.proximal(data, tau=tau) + assert False + except ValueError: + assert True diff --git a/Wrappers/Python/test/test_functions.py b/Wrappers/Python/test/test_functions.py index 328604b0f6..a061426a16 100644 --- a/Wrappers/Python/test/test_functions.py +++ b/Wrappers/Python/test/test_functions.py @@ -988,10 +988,8 @@ def test_compare_regularisation_toolkit(self): r_tolerance = 1e-9 r_iso = True r_nonneg = True - r_printing = 0 - # g_CCPI_reg_toolkit = alpha * FGP_TV(1., r_iterations, r_tolerance, r_iso, r_nonneg, r_printing, 'cpu') g_CCPI_reg_toolkit = alpha * FGP_TV(max_iteration=r_iterations, tolerance=r_tolerance, - isotropic=r_iso, nonnegativity=r_nonneg, printing=r_printing, device='cpu') + isotropic=r_iso, nonnegativity=r_nonneg, device='cpu') t2 = timer() res2 = g_CCPI_reg_toolkit.proximal(noisy_data, 1.) @@ -1021,10 +1019,8 @@ def test_compare_regularisation_toolkit(self): r_tolerance = 1e-9 r_iso = True r_nonneg = True - r_printing = 0 - # g_CCPI_reg_toolkit = alpha * FGP_TV(1., r_iterations, r_tolerance, r_iso, r_nonneg, r_printing, 'cpu') g_CCPI_reg_toolkit = alpha * FGP_TV(max_iteration=r_iterations, tolerance=r_tolerance, - isotropic=r_iso, nonnegativity=r_nonneg, printing=r_printing, device='cpu') + isotropic=r_iso, nonnegativity=r_nonneg, device='cpu') t2 = timer() res2 = g_CCPI_reg_toolkit.proximal(noisy_data, 1.) @@ -1073,9 +1069,8 @@ def test_compare_regularisation_toolkit_tomophantom(self): r_tolerance = 1e-9 r_iso = True r_nonneg = True - r_printing = 0 g_CCPI_reg_toolkit = alpha * FGP_TV(max_iteration=r_iterations, tolerance=r_tolerance, - isotropic=r_iso, nonnegativity=r_nonneg, printing=r_printing, device='cpu') + isotropic=r_iso, nonnegativity=r_nonneg, device='cpu') t2 = timer() @@ -1085,52 +1080,6 @@ def test_compare_regularisation_toolkit_tomophantom(self): np.testing.assert_allclose(res1.as_array(), res2.as_array(), atol=7.5e-2) - # the following were in the unit tests but didn't assert anything - # # CIL_FGP_TV no tolerance - # g_CIL.tolerance = None - # t0 = timer() - # res1 = g_CIL.proximal(noisy_data, 1.) - # t1 = timer() - # # print(t1-t0) - - # ################################################################### - # ################################################################### - # ################################################################### - # ################################################################### - - # data = dataexample.PEPPERS.get(size=(256, 256)) - # ig = data.geometry - # ag = ig - - # noisy_data = noise.gaussian(data, seed=10) - - # alpha = 0.1 - # iters = 1000 - - # # CIL_FGP_TV no tolerance - # g_CIL = alpha * TotalVariation(iters, tolerance=None) - # t0 = timer() - # res1 = g_CIL.proximal(noisy_data, 1.) - # t1 = timer() - # # print(t1-t0) - - # # CCPi Regularisation toolkit high tolerance - - # r_alpha = alpha - # r_iterations = iters - # r_tolerance = 1e-9 - # r_iso = True - # r_nonneg = True - # r_printing = 0 - # g_CCPI_reg_toolkit = alpha * FGP_TV(max_iteration=r_iterations, tolerance=r_tolerance, - # isotropic=r_iso, nonnegativity=r_nonneg, printing=r_printing, device='cpu') - - - # t2 = timer() - # res2 = g_CCPI_reg_toolkit.proximal(noisy_data, 1.) 
- # t3 = timer() - # # print (t3-t2) - class TestKullbackLeiblerNumba(unittest.TestCase): def setUp(self): From df9ff72228d28e8110588581312e844fa9ae9748 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Fri, 22 Oct 2021 11:22:46 +0100 Subject: [PATCH 04/88] PDHG fix strongly --- .../cil/optimisation/algorithms/PDHG.py | 122 ++++++------------ 1 file changed, 38 insertions(+), 84 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py index f251082f8f..7ef47cda3a 100644 --- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py @@ -21,13 +21,42 @@ class PDHG(Algorithm): - r'''Primal Dual Hybrid Gradient - - Problem: - - .. math:: + + r"""Primal Dual Hybrid Gradient Algorithm - \min_{x} f(Kx) + g(x) + Solves the following problem: + + :math: $\omega$ + + .. math:: X(e^{j\omega } ) = x(n)e^{ - j\omega n} + + .. math:: \min_{x} f(Kx) + g(x) + + + Parameters + ---------- + + x : + primal variable + + + f: convex function with a "simple" `proximal_conjugate` method + + + g: convex function with a "simple" `proximal` method + + + operator: Linear operator with `direct` and `adjoint` properties + + + tau: + + + sigma: + + + + :param operator: Linear Operator = K :param f: Convex function with "simple" proximal of its conjugate. @@ -52,7 +81,8 @@ class PDHG(Algorithm): (b) E. Esser, X. Zhang and T. F. Chan (2010), "A general framework for a class of first order primal–dual algorithms for convex optimization in imaging science", SIAM J. Imaging Sci. 3, 1015–1046. - ''' + + """ def __init__(self, f=None, g=None, operator=None, tau=None, sigma=1.,initial=None, use_axpby=True, **kwargs): '''PDHG algorithm creator @@ -217,80 +247,4 @@ def dual_objective(self): @property def primal_dual_gap(self): - return [x[2] for x in self.loss] - - -if __name__ == "__main__": - - # Import libraries - from cil.utilities import dataexample, noise - from cil.optimisation.operators import GradientOperator - from cil.optimisation.functions import MixedL21Norm, L2NormSquared - from cil.utilities.display import show2D - from cil.io import NEXUSDataWriter, NEXUSDataReader - import pickle - import matplotlib.pyplot as plt - import os - - print("Denoising Case : Default sigma/tau vs Strongly convex") - # Load data - data = dataexample.CAMERA.get(size=(256,256)) - - # Add gaussian noise - noisy_data = noise.gaussian(data, seed = 10, var = 0.02) - - ig = noisy_data.geometry - - alpha = 1. 
- K = GradientOperator(ig) - F = alpha * MixedL21Norm() - G = L2NormSquared(b=noisy_data) - - normK = K.norm() - sigma = 1./normK - tau = 1./normK - - # standard pdhg - pdhg = PDHG(f = F, g = G, operator = K, - update_objective_interval=1, - max_iteration=2000, sigma=sigma, tau=tau) - pdhg.run(verbose=0) - - # pdhg = {} - # pdhg['primal'] = pdhg.objective - # pdhg['dual'] = pdhg.dual_objective - # pdhg['pdgap'] = pdhg.primal_dual_gap - - # with open(os.getcwd() + 'pdhg_noaccel_info.pkl','wb') as f: - # pickle.dump(pdhg_noaccel_info, f) - - # PDHG with G strongly convex acceleration - pdhg_sc = PDHG(f = F, g = G, operator = K, - update_objective_interval=1, max_iteration=2000, - gamma_g = 1, sigma=sigma, tau=tau) - pdhg_sc.run(verbose=0) - - # Load pdhg_noaccel_info - # pdhg_noaccel_info = pickle.load( open( os.getcwd() + 'pdhg_noaccel_info.pkl', "rb" ) ) - - plt.figure() - plt.loglog(pdhg_sc.objective, label="Strongly Convex (g)") - plt.loglog(pdhg.objective, label="No accelerate") - plt.legend() - plt.title("Primal") - plt.show() - - plt.figure() - plt.loglog(pdhg_sc.primal_dual_gap, label="Strongly Convex (g)") - plt.loglog(pdhg.primal_dual_gap, label="No accelerate") - plt.legend() - plt.title("PrimalDual gap") - plt.show() - -# reader_pdhg = NEXUSDataReader(file_name = os.getcwd() + "pdhg_noaccel" + ".nxs") -# pdhg_accel_solution = reader_pdhg.load_data() - - show2D([pdhg_sc.solution, - pdhg.solution, - (pdhg_sc.solution - pdhg.solution).abs()], - num_cols=1, origin="upper") + return [x[2] for x in self.loss] \ No newline at end of file From 1e216e49839608cdf721a52c955fd928d3c13c89 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Fri, 22 Oct 2021 13:03:52 +0100 Subject: [PATCH 05/88] trying with docs --- .../Python/cil/optimisation/algorithms/PDHG.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py index 7ef47cda3a..a5af48a807 100644 --- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py @@ -26,18 +26,16 @@ class PDHG(Algorithm): Solves the following problem: - :math: $\omega$ + .. math:: \min_{x} f(Kx) + g(x) - .. math:: X(e^{j\omega } ) = x(n)e^{ - j\omega n} - - .. math:: \min_{x} f(Kx) + g(x) - - Parameters - ---------- - - x : - primal variable + ---------- + x : `Ima` + Primal variable for the primal problem. + y : array + Dual variable for the dual problem. + f : function + Dual variable for the dual problem. f: convex function with a "simple" `proximal_conjugate` method From 5f82fab46ff24ff1e1ce9adb783128ec2666b391 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Tue, 26 Oct 2021 16:11:25 +0100 Subject: [PATCH 06/88] revert docs --- .../cil/optimisation/algorithms/PDHG.py | 38 +++---------------- 1 file changed, 6 insertions(+), 32 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py index a5af48a807..92f8bfab87 100644 --- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py @@ -22,39 +22,13 @@ class PDHG(Algorithm): - r"""Primal Dual Hybrid Gradient Algorithm + r"""Primal Dual Hybrid Gradient - Solves the following problem: - - .. math:: \min_{x} f(Kx) + g(x) - - Parameters - ---------- - x : `Ima` - Primal variable for the primal problem. - y : array - Dual variable for the dual problem. 
f : function
+        Dual variable for the dual problem.

From 5f82fab46ff24ff1e1ce9adb783128ec2666b391 Mon Sep 17 00:00:00 2001
From: epapoutsellis
Date: Tue, 26 Oct 2021 16:11:25 +0100
Subject: [PATCH 06/88] revert docs

---
 .../cil/optimisation/algorithms/PDHG.py       | 38 +++----------------
 1 file changed, 6 insertions(+), 32 deletions(-)

diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
index a5af48a807..92f8bfab87 100644
--- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
+++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
@@ -22,39 +22,13 @@ class PDHG(Algorithm):
 
-    r"""Primal Dual Hybrid Gradient Algorithm
+    r"""Primal Dual Hybrid Gradient
 
-    Solves the following problem:
-
-    .. math:: \min_{x} f(Kx) + g(x)
-
-    Parameters
-    ----------
-    x : `Ima`
-        Primal variable for the primal problem.
-    y : array
-        Dual variable for the dual problem.
-    f : function
-        Dual variable for the dual problem.
-
-
-    f: convex function with a "simple" `proximal_conjugate` method
-
-
-    g: convex function with a "simple" `proximal` method
-
-
-    operator: Linear operator with `direct` and `adjoint` properties
-
-
-    tau:
-
-
-    sigma: 
-
-
-
+    Problem:
+    
+    .. math::
+    
+        \min_{x} f(Kx) + g(x)
 
     :param operator: Linear Operator = K
     :param f: Convex function with "simple" proximal of its conjugate. 

From 16137b81508019cdf721a52c955fd928d3c13c89 Mon Sep 17 00:00:00 2001
From: epapoutsellis
Date: Tue, 26 Oct 2021 17:31:20 +0100
Subject: [PATCH 07/88] fix fconj case

---
 Wrappers/Python/cil/optimisation/algorithms/PDHG.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
index 92f8bfab87..8ae27e30d2 100644
--- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
+++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
@@ -188,7 +188,7 @@ def update(self):
         # Update sigma and tau based on the strong convexity of F
         # Following operations are reversed due to symmetry, sigma --> tau, tau -->sigma
         if self.gamma_fconj is not None:
-            self.theta = float(1 / numpy.sqrt(1 + 2 * self.gamma_f * self.sigma))
+            self.theta = float(1 / numpy.sqrt(1 + 2 * self.gamma_fconj * self.sigma))
             self.sigma *= self.theta
             self.tau /= self.theta

From 781cbde94cf5828b74b1197eb66e8a68f5bf6fef Mon Sep 17 00:00:00 2001
From: epapoutsellis
Date: Thu, 28 Oct 2021 12:38:40 +0100
Subject: [PATCH 08/88] add info in doc, add update_step_sizes method

---
 .../cil/optimisation/algorithms/PDHG.py       | 26 +++++++++++++++----
 1 file changed, 21 insertions(+), 5 deletions(-)

diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
index 8ae27e30d2..4d699bbbf9 100644
--- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
+++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
@@ -17,7 +17,7 @@
 
 from cil.optimisation.algorithms import Algorithm
 import warnings
-import numpy
+import numpy as np
 
 class PDHG(Algorithm):
@@ -35,6 +35,12 @@ class PDHG(Algorithm):
     :param g: Convex function with "simple" proximal 
     :param tau: Step size parameter for Primal problem
     :param sigma: Step size parameter for Dual problem
+
+    If the function g is strongly convex, you can use `gamma_g` to accelerate PDHG. 
+    If the convex conjugate of f is strongly convex, you can use `gamma_fconj` to accelerate PDHG. 
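+    Only one of the two can be used at a time: passing both `gamma_g` and
+    `gamma_fconj` is not implemented and raises an error in `update_step_sizes`.
+
+    A minimal usage sketch, assuming G = L2NormSquared(b=noisy_data), which is
+    2-strongly convex, and F, K as in the denoising example above:
+
+    >>> pdhg = PDHG(f=F, g=G, operator=K, max_iteration=2000, gamma_g=2.0)
+    >>> pdhg.run(verbose=0)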
+        
+    :param gamma_g: Strongly convex constant for the function g
+    :param gamma_fconj: Strongly convex constant for the convex conjugate of the function f
 
     Remark: Convergence is guaranteed provided that

@@ -176,21 +182,31 @@ def update(self):
 
         self.g.proximal(self.x_tmp, self.tau, out=self.x)
 
-        #update_previous_solution() called after update by base class
-        #i.e current solution is now in x_old, previous solution is now in x
+        # update the step sizes for special cases
+        self.update_step_sizes()
+
+
+    def update_step_sizes(self):
+
+        #update_previous_solution() called after update by base class
+        #i.e current solution is now in x_old, previous solution is now in x
 
         # Update sigma and tau based on the strong convexity of G
         if self.gamma_g is not None:
-            self.theta = float(1 / numpy.sqrt(1 + 2 * self.gamma_g * self.tau))
+            self.theta = float(1 / np.sqrt(1 + 2 * self.gamma_g * self.tau))
             self.tau *= self.theta
-            self.sigma /= self.theta
+            self.sigma /= self.theta 
 
         # Update sigma and tau based on the strong convexity of F
         # Following operations are reversed due to symmetry, sigma --> tau, tau -->sigma
         if self.gamma_fconj is not None:
-            self.theta = float(1 / numpy.sqrt(1 + 2 * self.gamma_fconj * self.sigma))
+            self.theta = float(1 / np.sqrt(1 + 2 * self.gamma_fconj * self.sigma))
             self.sigma *= self.theta
-            self.tau /= self.theta
+            self.tau /= self.theta 
+
+        if self.gamma_g is not None and self.gamma_fconj is not None:
+            raise NotImplemented("This case is not implemented")
 
     def update_objective(self):

From 0de28d581af80e790e55dc382a9783cf5b901763 Mon Sep 17 00:00:00 2001
From: epapoutsellis
Date: Thu, 28 Oct 2021 13:38:31 +0100
Subject: [PATCH 09/88] remove test

---
 .../Python/cil/optimisation/algorithms/PDHG.py | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
index 4d699bbbf9..f275610321 100644
--- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
+++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
@@ -182,30 +182,30 @@ def update(self):
 
         self.g.proximal(self.x_tmp, self.tau, out=self.x)
 
+        #update_previous_solution() called after update by base class
+        #i.e current solution is now in x_old, previous solution is now in x
+
         # update the step sizes for special cases
         self.update_step_sizes()
 
 
     def update_step_sizes(self):
-
-        #update_previous_solution() called after update by base class
-        #i.e current solution is now in x_old, previous solution is now in x
 
         # Update sigma and tau based on the strong convexity of G
         if self.gamma_g is not None:
-            self.theta = float(1 / np.sqrt(1 + 2 * self.gamma_g * self.tau))
+            self.theta = 1.0/ np.sqrt(1 + 2 * self.gamma_g * self.tau)
             self.tau *= self.theta
-            self.sigma /= self.theta 
+            self.sigma /= self.theta
 
         # Update sigma and tau based on the strong convexity of F
        # Following operations are reversed due to symmetry, sigma --> tau, tau -->sigma
         if self.gamma_fconj is not None:
-            self.theta = float(1 / np.sqrt(1 + 2 * self.gamma_fconj * self.sigma))
+            self.theta = 1.0 / np.sqrt(1 + 2 * self.gamma_fconj * self.sigma)
             self.sigma *= self.theta
-            self.tau /= self.theta 
+            self.tau /= self.theta
 
-        if self.gamma_g is not None and self.gamma_fconj is not None:
-            raise NotImplemented("This case is not implemented")
+        if self.gamma_g is not None and self.gamma_fconj is not None:
+            raise NotImplementedError("This case is not implemented")
 
     def update_objective(self):
@@ -235,4 +235,5 @@ def dual_objective(self):
 
     @property
     def primal_dual_gap(self):
-        return [x[2] for x in self.loss]
\ No newline at end of file
+        return [x[2] for x in self.loss]
+

From 9e56299a68109bb799dac9be7a5bc4b2efc640ee Mon Sep 17 00:00:00 2001
From: epapoutsellis
Date: Thu, 28 Oct 2021 13:38:54 +0100
Subject: [PATCH 10/88] add strongly convex tests
for PDHG --- Wrappers/Python/test/test_algorithms.py | 35 +++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/Wrappers/Python/test/test_algorithms.py b/Wrappers/Python/test/test_algorithms.py index 26e6bd2f66..eb9fcda891 100755 --- a/Wrappers/Python/test/test_algorithms.py +++ b/Wrappers/Python/test/test_algorithms.py @@ -395,6 +395,41 @@ def setup(data, dnoise): print ("RMSE", rmse) self.assertLess(rmse, 2e-4) + def test_PDHG_strongly_convex(self): + + ig = ImageGeometry(3,3) + data = ig.allocate('random') + + f = L2NormSquared(b=data) + g = L2NormSquared() + operator = IdentityOperator(ig) + + sigma = 1.0 + tau = 1.0 + + pdhg = PDHG(f=f, g=g, operator=operator, max_iteration=10) + pdhg.run(verbose=0) + self.assertEqual(pdhg.sigma, sigma) + self.assertEqual(pdhg.tau, sigma) + + pdhg = PDHG(f=f, g=g, operator=operator, max_iteration=10, gamma_g=0.5) + pdhg.run(verbose=0) + self.assertNotEqual(pdhg.sigma, sigma) + self.assertNotEqual(pdhg.tau, tau) + + pdhg = PDHG(f=f, g=g, operator=operator, max_iteration=10, gamma_fconj=0.5) + pdhg.run(verbose=0) + self.assertNotEqual(pdhg.sigma, sigma) + self.assertNotEqual(pdhg.tau, tau) + + try: + pdhg = PDHG(f=f, g=g, operator=operator, max_iteration=10, gamma_g = 0.5, gamma_fconj=0.5) + pdhg.run(verbose=0) + except NotImplementedError as err: + print(err) + + + def test_FISTA_Denoising(self): if debug_print: print ("FISTA Denoising Poisson Noise Tikhonov") From e075d5dc239b994468ec9a54d6d4abbdcdc28bc9 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Tue, 2 Nov 2021 08:50:08 +0000 Subject: [PATCH 11/88] add gamma, gamm_conj to base function class --- Wrappers/Python/cil/optimisation/functions/Function.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/Wrappers/Python/cil/optimisation/functions/Function.py b/Wrappers/Python/cil/optimisation/functions/Function.py index e308adb764..db60c5df6f 100644 --- a/Wrappers/Python/cil/optimisation/functions/Function.py +++ b/Wrappers/Python/cil/optimisation/functions/Function.py @@ -33,9 +33,15 @@ class Function(object): """ - def __init__(self, L = None): + def __init__(self, L = None, gamma = None, gamma_conj = None): # overrides the type check to allow None as initial value self._L = L + + # Strongly convexity constant value is None, by default + self._gamma = gamma + + # Strongly convexity constant for the conjugate of the function is None, by default + self._gamma_conj= gamma_conj def __call__(self,x): From fd954f2f5c01feab01892cc36715a653f07617f2 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Tue, 2 Nov 2021 08:51:24 +0000 Subject: [PATCH 12/88] add gamma, gamm_conj properties for base Function --- .../cil/optimisation/functions/Function.py | 28 +++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/Wrappers/Python/cil/optimisation/functions/Function.py b/Wrappers/Python/cil/optimisation/functions/Function.py index db60c5df6f..8f103d81cd 100644 --- a/Wrappers/Python/cil/optimisation/functions/Function.py +++ b/Wrappers/Python/cil/optimisation/functions/Function.py @@ -171,6 +171,34 @@ def L(self, value): self._L = value else: raise TypeError('The Lipschitz constant is a real positive number') + + @property + def gamma(self): + '''Strongly convex constant of a function.''' + + return self._gamma + + @gamma.setter + def gamma(self, value): + '''Setter for strongly convex constant for a function ''' + if isinstance(value, (Number,)) and value >= 0: + self._gamma = value + else: + raise TypeError('The strongly convex constant is a real positive 
number') + + @property + def gamma_conj(self): + '''Strongly convex constant of the convex conjugate of a function. + ''' + return self._gamma_conj + + @gamma_conj.setter + def gamma_conj(self, value): + '''Setter for Strongly convex constant for the convex conjugate of a function ''' + if isinstance(value, (Number,)) and value >= 0: + self._gamma_conj = value + else: + raise TypeError('The strongly convex constant is a real positive number') class SumFunction(Function): From a1c13d7f84cf346792922d7be5ee6bd692497656 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Tue, 2 Nov 2021 08:54:13 +0000 Subject: [PATCH 13/88] add gamma, gamm_conj properties for SumFunction --- .../cil/optimisation/functions/Function.py | 56 ++++++++++++++++++- 1 file changed, 54 insertions(+), 2 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/functions/Function.py b/Wrappers/Python/cil/optimisation/functions/Function.py index 8f103d81cd..b882687d47 100644 --- a/Wrappers/Python/cil/optimisation/functions/Function.py +++ b/Wrappers/Python/cil/optimisation/functions/Function.py @@ -181,7 +181,7 @@ def gamma(self): @gamma.setter def gamma(self, value): '''Setter for strongly convex constant for a function ''' - if isinstance(value, (Number,)) and value >= 0: + if isinstance(value, (Number,)) and value > 0: self._gamma = value else: raise TypeError('The strongly convex constant is a real positive number') @@ -195,7 +195,7 @@ def gamma_conj(self): @gamma_conj.setter def gamma_conj(self, value): '''Setter for Strongly convex constant for the convex conjugate of a function ''' - if isinstance(value, (Number,)) and value >= 0: + if isinstance(value, (Number,)) and value > 0: self._gamma_conj = value else: raise TypeError('The strongly convex constant is a real positive number') @@ -227,6 +227,58 @@ def L(self, value): # call base class setter super(SumFunction, self.__class__).L.fset(self, value ) + @property + def gamma(self): + '''Strongly convex constant for the sum of two functions. If both are strongly convex + with constants a, b then the SumFunction is strongly convex with a+b. + + If one is strongly convex with constant a and the other is convex then the SumFunction is strongly convex + with constant a. + + In the following, we assume that we always deal with convex functions. + ''' + + # TODO better to use a .convex member = False by default + if self.function1.gamma is not None and self.function2.gamma is not None: + self._gamma = self.function1.gamma + self.function2.gamma + elif self.function1.gamma is None and self.function2.gamma is not None: + self._gamma = self.function1.gamma + elif self.function2.gamma is None and self.function1.gamma is not None: + self._gamma = self.function1.gamma + else: + self._gamma = None + return self._gamma + @gamma.setter + def gamma(self, value): + # call base class setter + super(SumFunction, self.__class__).gamma.fset(self, value ) + + @property + def gamma_conj(self): + '''Strongly convex constant for the sum of two functions. If both are strongly convex + with constants a, b then the SumFunction is strongly convex with a+b. + If one is strongly convex (a) and the other is convex then the SumFunction is strongly convex + with constant a. + + In the following, we assume that we always deal with convex functions. 
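+
+        Note that this additive rule mirrors the one used for `gamma`; strictly
+        speaking the conjugate of a sum is an infimal convolution of the two
+        conjugates, so this value is a convention rather than a sharp constant.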
+ ''' + + # TODO better to use a .convex member = False by default + if self.function1.gamma_conj is not None and self.function2.gamma_conj is not None: + self._gamma_conj = self.function1.gamma_conj + self.function2.gamma_conj + elif self.function1.gamma_conj is None and self.function2.gamma_conj is not None: + self._gamma_conj= self.function1.gamma_conj + elif self.function2.gamma_conj is None and self.function1.gamma_conj is not None: + self._gamma_conj = self.function1.gamma_conj + else: + self._gamma_conj = None + return self._gamma_conj + @gamma_conj.setter + def gamma_conj(self, value): + # call base class setter + super(SumFunction, self.__class__).gamma_conj.fset(self, value ) + + def __call__(self,x): r"""Returns the value of the sum of functions :math:`F_{1}` and :math:`F_{2}` at x From ab25c283d5cad07a056bb75f16f9403b1093efec Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Tue, 2 Nov 2021 09:25:15 +0000 Subject: [PATCH 14/88] add gamma, gamm_conj properties for Function + salar --- .../cil/optimisation/functions/Function.py | 28 ++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/Wrappers/Python/cil/optimisation/functions/Function.py b/Wrappers/Python/cil/optimisation/functions/Function.py index b882687d47..6ce275d256 100644 --- a/Wrappers/Python/cil/optimisation/functions/Function.py +++ b/Wrappers/Python/cil/optimisation/functions/Function.py @@ -338,7 +338,7 @@ def L(self): @L.setter def L(self, value): # call base class setter - super(ScaledFunction, self.__class__).L.fset(self, value ) + super(ScaledFunction, self.__class__).L.fset(self, value ) @property def scalar(self): @@ -476,6 +476,32 @@ def L(self, value): # call base class setter super(SumScalarFunction, self.__class__).L.fset(self, value ) + @property + def gamma(self): + if self._gamma is None: + if self.function.gamma is not None: + self._gamma = self.function.gamma + else: + self._gamma = None + return self._gamma + @gamma.setter + def gamma(self, value): + # call base class setter + super(SumScalarFunction, self.__class__).gamma.fset(self, value ) + + @property + def gamma_conj(self): + if self._gamma_conj is None: + if self.function.gamma_conj is not None: + self._gamma_conj = self.function.gamma_conj + else: + self._gamma_conj = None + return self._gamma_conj + @gamma_conj.setter + def gamma_conj(self, value): + # call base class setter + super(SumScalarFunction, self.__class__).gamma_conj.fset(self, value ) + class ConstantFunction(Function): From 032690c810baa969987dd91a08232a837bf83b60 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Tue, 2 Nov 2021 11:56:08 +0000 Subject: [PATCH 15/88] add gamma, gamma_conj ScaledFunction --- .../cil/optimisation/functions/Function.py | 164 ++++++++++++++++-- 1 file changed, 154 insertions(+), 10 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/functions/Function.py b/Wrappers/Python/cil/optimisation/functions/Function.py index 6ce275d256..95c92f5975 100644 --- a/Wrappers/Python/cil/optimisation/functions/Function.py +++ b/Wrappers/Python/cil/optimisation/functions/Function.py @@ -184,7 +184,7 @@ def gamma(self, value): if isinstance(value, (Number,)) and value > 0: self._gamma = value else: - raise TypeError('The strongly convex constant is a real positive number') + raise TypeError('The strongly convex constant is strictly positive number') @property def gamma_conj(self): @@ -198,7 +198,7 @@ def gamma_conj(self, value): if isinstance(value, (Number,)) and value > 0: self._gamma_conj = value else: - raise TypeError('The 
strongly convex constant is a real positive number') + raise TypeError('The strongly convex constant is strictly positive number') class SumFunction(Function): @@ -242,7 +242,7 @@ def gamma(self): if self.function1.gamma is not None and self.function2.gamma is not None: self._gamma = self.function1.gamma + self.function2.gamma elif self.function1.gamma is None and self.function2.gamma is not None: - self._gamma = self.function1.gamma + self._gamma = self.function2.gamma elif self.function2.gamma is None and self.function1.gamma is not None: self._gamma = self.function1.gamma else: @@ -338,7 +338,39 @@ def L(self): @L.setter def L(self, value): # call base class setter - super(ScaledFunction, self.__class__).L.fset(self, value ) + super(ScaledFunction, self.__class__).L.fset(self, value ) + + @property + def gamma(self): + if self._gamma is None: + if self.function.gamma is not None: + if self.scalar <= 0: + self._gamma = None + else: + self._gamma = self.scalar * self.function.gamma + else: + self._gamma = None + return self._gamma + @gamma.setter + def gamma(self, value): + # call base class setter + super(ScaledFunction, self.__class__).gamma.fset(self, value ) + + @property + def gamma_conj(self): + if self._gamma_conj is None: + if self.function.gamma_conj is not None: + if self.scalar <= 0: + self._gamma_conj= None + else: + self._gamma_conj = self.scalar * self.function.gamma_conj + else: + self._gamma_conj = None + return self._gamma_conj + @gamma_conj.setter + def gamma_conj(self, value): + # call base class setter + super(ScaledFunction, self.__class__).gamma_conj.fset(self, value ) @property def scalar(self): @@ -441,7 +473,8 @@ class SumScalarFunction(SumFunction): def __init__(self, function, constant): - super(SumScalarFunction, self).__init__(function, ConstantFunction(constant)) + super(SumScalarFunction, self).__init__(function, + ConstantFunction(constant)) self.constant = constant self.function = function @@ -607,11 +640,10 @@ class TranslateFunction(Function): """ def __init__(self, function, center): - try: - L = function.L - except NotImplementedError as nie: - L = None - super(TranslateFunction, self).__init__(L = L) + + super(TranslateFunction, self).__init__(L = function.L, + gamma = function.gamma, + gamma_conj= function.gamma_conj) self.function = function self.center = center @@ -697,3 +729,115 @@ def convex_conjugate(self, x): """ return self.function.convex_conjugate(x) + self.center.dot(x) + + +if __name__ == "__main__": + + # F is not strongly convex, cc of F is not strongle convex + f = Function() + print(f.gamma) + print(f.gamma_conj) + + # F is strongly convex, cc of F is not strongle convex + f1 = Function() + f1.gamma = 2.0 + print(f1.gamma) + print(f1.gamma_conj) + + # F is not strongly convex, cc of F is strongle convex + f2 = Function() + f2.gamma_conj = 2.0 + print(f2.gamma) + print(f2.gamma_conj) + + # Both Functions are strongly convex + f3 = Function() + f3.gamma = 2.0 + + f4 = Function() + f4.gamma = 2.0 + + h1 = f3 + f4 + print(h1.gamma) + print(h1.gamma_conj) + + # Both cc of Functions are strongly convex + f3 = Function() + f3.gamma_conj = 2.0 + + f4 = Function() + f4.gamma_conj = 2.0 + + h1 = f3 + f4 + print(h1.gamma) + print(h1.gamma_conj) + + # first fucntion is convex , other strongly convex + f3 = Function() + f3.gamma = 2.0 + + f4 = Function() + + h1 = f3 + f4 + print(h1.gamma) + print(h1.gamma_conj) + + # first fucntion is strongly convex , other convex + f3 = Function() + + f4 = Function() + f4.gamma = 2.0 + + h1 = f3 + f4 + 
print(h1.gamma) + print(h1.gamma_conj) + + g = Function() + g.gamma = 10. + g.gamma_conj = 3.0 + h = g + 3 + print(h.gamma) + print(h.gamma_conj) + + from cil.framework import ImageGeometry + ig = ImageGeometry(3,3) + x = ig.allocate('random') + + g = Function() + f = TranslateFunction(function=g, center=x) + print(f.gamma) + print(f.gamma_conj) + + g.gamma = 1.0 + f = TranslateFunction(function=g, center=x) + print(f.gamma) + print(f.gamma_conj) + + g.gamma = 1.0 + g.gamma_conj = 10.0 + f = TranslateFunction(function=g, center=x) + print(f.gamma) + print(f.gamma_conj) + + + f = Function() + g = 3 * f + print(g.gamma) + print(g.gamma_conj) + + f = Function() + f.gamma = 3 + f.gamma_conj = 4 + g = 3 * f + print(g.gamma) + print(g.gamma_conj) + + f = Function() + f.gamma = 3 + f.gamma_conj = 4 + g = -3 * f + print(g.gamma) + print(g.gamma_conj) + + + From 93e101b54d4dff2408eb5820fd5523485924b92a Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Wed, 3 Nov 2021 08:03:48 +0000 Subject: [PATCH 16/88] add tests --- .../cil/optimisation/functions/Function.py | 133 +++-------------- Wrappers/Python/test/test_functions.py | 134 +++++++++++++++++- 2 files changed, 147 insertions(+), 120 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/functions/Function.py b/Wrappers/Python/cil/optimisation/functions/Function.py index 95c92f5975..023654962b 100644 --- a/Wrappers/Python/cil/optimisation/functions/Function.py +++ b/Wrappers/Python/cil/optimisation/functions/Function.py @@ -26,7 +26,11 @@ class Function(object): :param L: Lipschitz constant of the gradient of the function F(x), when it is differentiable. :type L: number, positive, default None - :param domain: The domain of the function. + :param gamma: Strongly convex constant of the function F(x) + :type gamma: number, strictly positive, default None + :param gamma_conj: Strongly convex constant of the convex conjugate function of F(x) + :type gamma_conj: number, strictly positive, default None + Lipschitz of the gradient of the function; it is a positive real number, such that |f'(x) - f'(y)| <= L ||x-y||, assuming f: IG --> R @@ -34,7 +38,7 @@ class Function(object): def __init__(self, L = None, gamma = None, gamma_conj = None): - # overrides the type check to allow None as initial value + # Lipschitz constant for the gradient of f, default is None self._L = L # Strongly convexity constant value is None, by default @@ -195,8 +199,14 @@ def gamma_conj(self): @gamma_conj.setter def gamma_conj(self, value): '''Setter for Strongly convex constant for the convex conjugate of a function ''' + if isinstance(value, (Number,)) and value > 0: - self._gamma_conj = value + if self.L is not None: + if self._gamma_conj != 1.0/self.L: + raise ValueError('If a function is convex, and its gradient Lipschitz with constant L, then\ + the conjugate of f is 1/L strongly convex. 
[Hiriart-Urruty, Lemarechal, Theorem 4.2.2]') + else: + self._gamma_conj = value else: raise TypeError('The strongly convex constant is strictly positive number') @@ -267,7 +277,7 @@ def gamma_conj(self): if self.function1.gamma_conj is not None and self.function2.gamma_conj is not None: self._gamma_conj = self.function1.gamma_conj + self.function2.gamma_conj elif self.function1.gamma_conj is None and self.function2.gamma_conj is not None: - self._gamma_conj= self.function1.gamma_conj + self._gamma_conj= self.function2.gamma_conj elif self.function2.gamma_conj is None and self.function1.gamma_conj is not None: self._gamma_conj = self.function1.gamma_conj else: @@ -642,8 +652,8 @@ class TranslateFunction(Function): def __init__(self, function, center): super(TranslateFunction, self).__init__(L = function.L, - gamma = function.gamma, - gamma_conj= function.gamma_conj) + gamma = function.gamma, + gamma_conj = function.gamma_conj) self.function = function self.center = center @@ -730,114 +740,3 @@ def convex_conjugate(self, x): return self.function.convex_conjugate(x) + self.center.dot(x) - -if __name__ == "__main__": - - # F is not strongly convex, cc of F is not strongle convex - f = Function() - print(f.gamma) - print(f.gamma_conj) - - # F is strongly convex, cc of F is not strongle convex - f1 = Function() - f1.gamma = 2.0 - print(f1.gamma) - print(f1.gamma_conj) - - # F is not strongly convex, cc of F is strongle convex - f2 = Function() - f2.gamma_conj = 2.0 - print(f2.gamma) - print(f2.gamma_conj) - - # Both Functions are strongly convex - f3 = Function() - f3.gamma = 2.0 - - f4 = Function() - f4.gamma = 2.0 - - h1 = f3 + f4 - print(h1.gamma) - print(h1.gamma_conj) - - # Both cc of Functions are strongly convex - f3 = Function() - f3.gamma_conj = 2.0 - - f4 = Function() - f4.gamma_conj = 2.0 - - h1 = f3 + f4 - print(h1.gamma) - print(h1.gamma_conj) - - # first fucntion is convex , other strongly convex - f3 = Function() - f3.gamma = 2.0 - - f4 = Function() - - h1 = f3 + f4 - print(h1.gamma) - print(h1.gamma_conj) - - # first fucntion is strongly convex , other convex - f3 = Function() - - f4 = Function() - f4.gamma = 2.0 - - h1 = f3 + f4 - print(h1.gamma) - print(h1.gamma_conj) - - g = Function() - g.gamma = 10. 
- g.gamma_conj = 3.0 - h = g + 3 - print(h.gamma) - print(h.gamma_conj) - - from cil.framework import ImageGeometry - ig = ImageGeometry(3,3) - x = ig.allocate('random') - - g = Function() - f = TranslateFunction(function=g, center=x) - print(f.gamma) - print(f.gamma_conj) - - g.gamma = 1.0 - f = TranslateFunction(function=g, center=x) - print(f.gamma) - print(f.gamma_conj) - - g.gamma = 1.0 - g.gamma_conj = 10.0 - f = TranslateFunction(function=g, center=x) - print(f.gamma) - print(f.gamma_conj) - - - f = Function() - g = 3 * f - print(g.gamma) - print(g.gamma_conj) - - f = Function() - f.gamma = 3 - f.gamma_conj = 4 - g = 3 * f - print(g.gamma) - print(g.gamma_conj) - - f = Function() - f.gamma = 3 - f.gamma_conj = 4 - g = -3 * f - print(g.gamma) - print(g.gamma_conj) - - - diff --git a/Wrappers/Python/test/test_functions.py b/Wrappers/Python/test/test_functions.py index a061426a16..e44f71ff94 100644 --- a/Wrappers/Python/test/test_functions.py +++ b/Wrappers/Python/test/test_functions.py @@ -140,20 +140,148 @@ def test_Function(self): d = ag.allocate(ImageGeometry.RANDOM) alpha = 0.5 + # scaled function g = alpha * L2NormSquared(b=noisy_data) # Compare call of g a2 = alpha*(d - noisy_data).power(2).sum() - #print(a2, g(d)) + self.assertEqual(a2, g(d)) # Compare convex conjugate of g a3 = 0.5 * d.squared_norm() + d.dot(noisy_data) self.assertAlmostEqual(a3, g.convex_conjugate(d), places=7) - #print( a3, g.convex_conjugate(d)) - #test proximal conjugate + def test_Function_strongly_convexity(self): + + # F is not strongly convex, cc of F is not strongle convex + f = Function() + self.assertIsNone(f.gamma) + self.assertIsNone(f.gamma_conj) + + # change value + f.gamma = 1.0 + f.gamma_conj = 2.0 + self.assertIsNotNone(f.gamma) + self.assertIsNotNone(f.gamma_conj) + + # change to a negative value + try: + f.gamma = -1.0 + except TypeError as err: + print(err) + + # change to a negative value + try: + f.gamma_conj = -1.0 + except TypeError as err: + print(err) + + + def test_SumFunction_strongly_convexity(self): + + # Both functions not stronlgy convex + f = Function() + g = Function() + h = f + g + self.assertIsNone(h.gamma) + self.assertIsNone(h.gamma_conj) + + # First is convex, second is strongly convex: Function itself + f = Function() + g = Function() + g.gamma = 1.0 + h = f + g + self.assertEquals(h.gamma, g.gamma) + + # First is strongly convex, second is convex: Function itself + f = Function() + f.gamma = 2.0 + g = Function() + h = f + g + self.assertEquals(h.gamma, f.gamma) + + # Both strongly convex: Function itself + f = Function() + f.gamma = 2.0 + g = Function() + g.gamma = 4.0 + h = f + g + self.assertEquals(h.gamma, f.gamma + g.gamma) + + # First is convex, second is strongly convex: Convex conjugate of the Function + f = Function() + g = Function() + g.gamma_conj = 1.0 + h = f + g + self.assertEquals(h.gamma_conj, g.gamma_conj) + + # First is strongly convex, second is convex: Convex conjugate of the Function + f = Function() + f.gamma_conj = 2.0 + g = Function() + h = f + g + self.assertEquals(h.gamma_conj, f.gamma_conj) + + # Both strongly convex: Convex conjugate of the Function + f = Function() + f.gamma_conj = 2.0 + g = Function() + g.gamma_conj = 4.0 + h = f + g + self.assertEquals(h.gamma_conj, f.gamma_conj + g.gamma_conj) + + def test_ScaledFunction_strongly_convexity(self): + + # Scaled Function, gamma and gamma_conj default values are None + scalar = 3 + f = Function() + g = scalar * f + self.assertIsNone(g.gamma) + self.assertIsNone(g.gamma_conj) + + # Check 
strongly convexity + f.gamma = 3.0 + f.gamma_conj = 2.0 + g = scalar * f + self.assertEquals(g.gamma, scalar*f.gamma) + self.assertIsNone(g.gamma_conj, scalar*f.gamma_conj) + + # Check is scalar is Negative, hence ScaledFunction is not convex + f.gamma = 3.0 + f.gamma_conj = 2.0 + g = -3.0 * f + self.assertIsNone(g.gamma) + self.assertIsNone(g.gamma_conj) + + def test_SumFunction_scalar_strongly_convexity(self): + + # gamma and gamma_conj default values are None + f = Function() + g = f + 3 + self.assertIsNone(g.gamma) + self.assertIsNone(g.gamma_conj) + + # gamma and gamma_conj + f.gamma = 3.0 + f.gamma_conj = 2.0 + g = f + 3 + self.assertEquals(g.gamma, f.gamma) + self.assertIsNone(g.gamma_conj, f.gamma_conj) + + + + + + + + + + + + + def test_L2NormSquared(self): From 84520dfe1c193882b93cfc88aa6f9f1a8339763c Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Wed, 3 Nov 2021 11:53:50 +0000 Subject: [PATCH 17/88] add gamma, gamm_conj values to L2NormSquared --- Wrappers/Python/cil/optimisation/functions/L2NormSquared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Wrappers/Python/cil/optimisation/functions/L2NormSquared.py b/Wrappers/Python/cil/optimisation/functions/L2NormSquared.py index f65e928cc0..7c07bc3c90 100644 --- a/Wrappers/Python/cil/optimisation/functions/L2NormSquared.py +++ b/Wrappers/Python/cil/optimisation/functions/L2NormSquared.py @@ -49,7 +49,7 @@ def __init__(self, **kwargs): :param b: translation of the function :type b: :code:`DataContainer`, optional ''' - super(L2NormSquared, self).__init__(L = 2) + super(L2NormSquared, self).__init__(L = 2, gamma=2, gamma_conj=1/2) self.b = kwargs.get('b',None) From 7c50e798a739f9da2597887857dd7099b0b3c72b Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Wed, 3 Nov 2021 12:45:17 +0000 Subject: [PATCH 18/88] add TODOs --- .../cil/optimisation/functions/Function.py | 25 +++++++++++-------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/functions/Function.py b/Wrappers/Python/cil/optimisation/functions/Function.py index 023654962b..53edd1317f 100644 --- a/Wrappers/Python/cil/optimisation/functions/Function.py +++ b/Wrappers/Python/cil/optimisation/functions/Function.py @@ -15,7 +15,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import warnings from numbers import Number import numpy as np @@ -32,19 +31,23 @@ class Function(object): :type gamma_conj: number, strictly positive, default None - Lipschitz of the gradient of the function; it is a positive real number, such that |f'(x) - f'(y)| <= L ||x-y||, assuming f: IG --> R + # TODO Add definition of Lipschitz + # Lipschitz of the gradient of the function; it is a positive real number, such that |f'(x) - f'(y)| <= L ||x-y||, assuming f: IG --> R + + # TODO Add definition of Strongly convex constant """ def __init__(self, L = None, gamma = None, gamma_conj = None): - # Lipschitz constant for the gradient of f, default is None + + # Lipschitz constant for the gradient of f self._L = L - # Strongly convexity constant value is None, by default + # Strongly convexity constant for the Function self._gamma = gamma - # Strongly convexity constant for the conjugate of the function is None, by default + # Strongly convexity constant for the conjugate of the Function self._gamma_conj= gamma_conj def __call__(self,x): @@ -163,11 +166,10 @@ def centered_at(self, center): @property def L(self): - '''Lipschitz of the gradient of function f. 
- - L is positive real number, such that |f'(x) - f'(y)| <= L ||x-y||, assuming f: IG --> R''' + '''Lipschitz constant of the gradient of function f.''' + return self._L - # return self._L + @L.setter def L(self, value): '''Setter for Lipschitz constant''' @@ -203,8 +205,8 @@ def gamma_conj(self, value): if isinstance(value, (Number,)) and value > 0: if self.L is not None: if self._gamma_conj != 1.0/self.L: - raise ValueError('If a function is convex, and its gradient Lipschitz with constant L, then\ - the conjugate of f is 1/L strongly convex. [Hiriart-Urruty, Lemarechal, Theorem 4.2.2]') + raise ValueError('If Function is convex, and its gradient Lipschitz with constant L, then\ + the conjugate of the Function is 1/L strongly convex. [Hiriart-Urruty, Lemarechal, Theorem 4.2.2]') else: self._gamma_conj = value else: @@ -355,6 +357,7 @@ def gamma(self): if self._gamma is None: if self.function.gamma is not None: if self.scalar <= 0: + # If it strongly convex and multiply with a number <=0, it is not strongl convex self._gamma = None else: self._gamma = self.scalar * self.function.gamma From 4793ef28d153f77b997cb0baf00f6480575c2f74 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Wed, 3 Nov 2021 14:32:16 +0000 Subject: [PATCH 19/88] fix tests --- .../cil/optimisation/functions/Function.py | 11 +++++++---- Wrappers/Python/test/test_functions.py | 17 ++--------------- 2 files changed, 9 insertions(+), 19 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/functions/Function.py b/Wrappers/Python/cil/optimisation/functions/Function.py index 53edd1317f..bb28b8151a 100644 --- a/Wrappers/Python/cil/optimisation/functions/Function.py +++ b/Wrappers/Python/cil/optimisation/functions/Function.py @@ -203,12 +203,13 @@ def gamma_conj(self, value): '''Setter for Strongly convex constant for the convex conjugate of a function ''' if isinstance(value, (Number,)) and value > 0: + + self._gamma_conj = value + if self.L is not None: - if self._gamma_conj != 1.0/self.L: + if value != 1.0/self.L: raise ValueError('If Function is convex, and its gradient Lipschitz with constant L, then\ - the conjugate of the Function is 1/L strongly convex. [Hiriart-Urruty, Lemarechal, Theorem 4.2.2]') - else: - self._gamma_conj = value + the conjugate of the Function is 1/L strongly convex. 
[Hiriart-Urruty, Lemarechal, Theorem 4.2.2]') else: raise TypeError('The strongly convex constant is strictly positive number') @@ -743,3 +744,5 @@ def convex_conjugate(self, x): return self.function.convex_conjugate(x) + self.center.dot(x) + + diff --git a/Wrappers/Python/test/test_functions.py b/Wrappers/Python/test/test_functions.py index e44f71ff94..dd8c8fab14 100644 --- a/Wrappers/Python/test/test_functions.py +++ b/Wrappers/Python/test/test_functions.py @@ -246,7 +246,7 @@ def test_ScaledFunction_strongly_convexity(self): f.gamma_conj = 2.0 g = scalar * f self.assertEquals(g.gamma, scalar*f.gamma) - self.assertIsNone(g.gamma_conj, scalar*f.gamma_conj) + self.assertEquals(g.gamma_conj, scalar*f.gamma_conj) # Check is scalar is Negative, hence ScaledFunction is not convex f.gamma = 3.0 @@ -268,20 +268,7 @@ def test_SumFunction_scalar_strongly_convexity(self): f.gamma_conj = 2.0 g = f + 3 self.assertEquals(g.gamma, f.gamma) - self.assertIsNone(g.gamma_conj, f.gamma_conj) - - - - - - - - - - - - - + self.assertEquals(g.gamma_conj, f.gamma_conj) def test_L2NormSquared(self): From 04d17f54cd4998806660a08396f14458a0d1995e Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Thu, 4 Nov 2021 20:14:02 +0000 Subject: [PATCH 20/88] wip add gamma conj in BlockFunction --- .../optimisation/functions/BlockFunction.py | 49 +++++++++++++++++++ .../cil/optimisation/functions/Function.py | 2 +- 2 files changed, 50 insertions(+), 1 deletion(-) diff --git a/Wrappers/Python/cil/optimisation/functions/BlockFunction.py b/Wrappers/Python/cil/optimisation/functions/BlockFunction.py index 8caa7a83e0..218c558f08 100644 --- a/Wrappers/Python/cil/optimisation/functions/BlockFunction.py +++ b/Wrappers/Python/cil/optimisation/functions/BlockFunction.py @@ -45,6 +45,12 @@ def __init__(self, *functions): super(BlockFunction, self).__init__() self.functions = functions self.length = len(self.functions) + + tmp_gamma_conj = sum(filter(None, [func.gamma_conj for func in functions])) + if tmp_gamma_conj == 0: + self.gamma_conj = None + else: + self.gamma_conj = tmp_gamma_conj @property def L(self): @@ -57,6 +63,31 @@ def L(self): tmp_L = None break return tmp_L + + # @property + # def gamma_conj(self): + # sum_gamma_conj = 0 + # for func in self.functions: + # if func.gamma_conj is not None: + # sum_gamma_conj += func.gamma_conj + # else: + # pass + # if sum_gamma_conj==0: + # return None + # else: + # return sum_gamma_conj + + @gamma_conj.setter + def gamma_conj(self, value): + # call base class setter + + if self.gamma_conj==0: + if value is not None: + raise ValueError("No strongly convex functions in this direct sum. `gamma_conj` should be None. {} is passed".format(value)) + if value!=self.gamma_conj: + raise ValueError("The strongly convex constant of a direct sum of strongly convex or convex functions should agree with the sum of the strongly convex constants of the strongly convex functions. {} is passed. 
{} is needed".format(value, self.gamma_conj)) + + super(BlockFunction, self.__class__).gamma_conj.fset(self, value ) def __call__(self, x): @@ -205,3 +236,21 @@ def __rmul__(self, other): +if __name__ == "__main__": + + from cil.framework import ImageGeometry + from cil.optimisation.functions import L1Norm, L2NormSquared + + f = BlockFunction(L2NormSquared()) + + # print(f.gamma_conj) + # f.gamma_conj = None + + f = BlockFunction(L1Norm(), L1Norm()) + + print(f.gamma_conj) + # f.gamma_conj = None + + + + diff --git a/Wrappers/Python/cil/optimisation/functions/Function.py b/Wrappers/Python/cil/optimisation/functions/Function.py index bb28b8151a..ca9af82016 100644 --- a/Wrappers/Python/cil/optimisation/functions/Function.py +++ b/Wrappers/Python/cil/optimisation/functions/Function.py @@ -209,7 +209,7 @@ def gamma_conj(self, value): if self.L is not None: if value != 1.0/self.L: raise ValueError('If Function is convex, and its gradient Lipschitz with constant L, then\ - the conjugate of the Function is 1/L strongly convex. [Hiriart-Urruty, Lemarechal, Theorem 4.2.2]') + the conjugate of the Function is 1/L strongly convex. [Hiriart-Urruty, Lemarechal, Theorem 4.2.2]') else: raise TypeError('The strongly convex constant is strictly positive number') From caa1e14cf1fbd86063abaf61b5d9dd24df767d61 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Fri, 5 Nov 2021 15:45:04 +0000 Subject: [PATCH 21/88] going back to the original implementation --- .../cil/optimisation/functions/Function.py | 195 +----------------- 1 file changed, 5 insertions(+), 190 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/functions/Function.py b/Wrappers/Python/cil/optimisation/functions/Function.py index ca9af82016..a035d98899 100644 --- a/Wrappers/Python/cil/optimisation/functions/Function.py +++ b/Wrappers/Python/cil/optimisation/functions/Function.py @@ -24,31 +24,18 @@ class Function(object): """ Abstract class representing a function :param L: Lipschitz constant of the gradient of the function F(x), when it is differentiable. 
- :type L: number, positive, default None - :param gamma: Strongly convex constant of the function F(x) - :type gamma: number, strictly positive, default None - :param gamma_conj: Strongly convex constant of the convex conjugate function of F(x) - :type gamma_conj: number, strictly positive, default None - + :type L: number, positive, default None # TODO Add definition of Lipschitz # Lipschitz of the gradient of the function; it is a positive real number, such that |f'(x) - f'(y)| <= L ||x-y||, assuming f: IG --> R - # TODO Add definition of Strongly convex constant - """ - def __init__(self, L = None, gamma = None, gamma_conj = None): - - # Lipschitz constant for the gradient of f - self._L = L - - # Strongly convexity constant for the Function - self._gamma = gamma + def __init__(self, L = None): - # Strongly convexity constant for the conjugate of the Function - self._gamma_conj= gamma_conj + # Lipschitz constant for the gradient of the Function + self._L = L def __call__(self,x): @@ -113,8 +100,6 @@ def proximal_conjugate(self, x, tau, out = None): if out is None: return val - - # Algebra for Function Class # Add functions @@ -178,40 +163,6 @@ def L(self, value): else: raise TypeError('The Lipschitz constant is a real positive number') - @property - def gamma(self): - '''Strongly convex constant of a function.''' - - return self._gamma - - @gamma.setter - def gamma(self, value): - '''Setter for strongly convex constant for a function ''' - if isinstance(value, (Number,)) and value > 0: - self._gamma = value - else: - raise TypeError('The strongly convex constant is strictly positive number') - - @property - def gamma_conj(self): - '''Strongly convex constant of the convex conjugate of a function. - ''' - return self._gamma_conj - - @gamma_conj.setter - def gamma_conj(self, value): - '''Setter for Strongly convex constant for the convex conjugate of a function ''' - - if isinstance(value, (Number,)) and value > 0: - - self._gamma_conj = value - - if self.L is not None: - if value != 1.0/self.L: - raise ValueError('If Function is convex, and its gradient Lipschitz with constant L, then\ - the conjugate of the Function is 1/L strongly convex. [Hiriart-Urruty, Lemarechal, Theorem 4.2.2]') - else: - raise TypeError('The strongly convex constant is strictly positive number') class SumFunction(Function): @@ -240,58 +191,6 @@ def L(self, value): # call base class setter super(SumFunction, self.__class__).L.fset(self, value ) - @property - def gamma(self): - '''Strongly convex constant for the sum of two functions. If both are strongly convex - with constants a, b then the SumFunction is strongly convex with a+b. - - If one is strongly convex with constant a and the other is convex then the SumFunction is strongly convex - with constant a. - - In the following, we assume that we always deal with convex functions. 
- ''' - - # TODO better to use a .convex member = False by default - if self.function1.gamma is not None and self.function2.gamma is not None: - self._gamma = self.function1.gamma + self.function2.gamma - elif self.function1.gamma is None and self.function2.gamma is not None: - self._gamma = self.function2.gamma - elif self.function2.gamma is None and self.function1.gamma is not None: - self._gamma = self.function1.gamma - else: - self._gamma = None - return self._gamma - @gamma.setter - def gamma(self, value): - # call base class setter - super(SumFunction, self.__class__).gamma.fset(self, value ) - - @property - def gamma_conj(self): - '''Strongly convex constant for the sum of two functions. If both are strongly convex - with constants a, b then the SumFunction is strongly convex with a+b. - If one is strongly convex (a) and the other is convex then the SumFunction is strongly convex - with constant a. - - In the following, we assume that we always deal with convex functions. - ''' - - # TODO better to use a .convex member = False by default - if self.function1.gamma_conj is not None and self.function2.gamma_conj is not None: - self._gamma_conj = self.function1.gamma_conj + self.function2.gamma_conj - elif self.function1.gamma_conj is None and self.function2.gamma_conj is not None: - self._gamma_conj= self.function2.gamma_conj - elif self.function2.gamma_conj is None and self.function1.gamma_conj is not None: - self._gamma_conj = self.function1.gamma_conj - else: - self._gamma_conj = None - return self._gamma_conj - @gamma_conj.setter - def gamma_conj(self, value): - # call base class setter - super(SumFunction, self.__class__).gamma_conj.fset(self, value ) - - def __call__(self,x): r"""Returns the value of the sum of functions :math:`F_{1}` and :math:`F_{2}` at x @@ -353,39 +252,6 @@ def L(self, value): # call base class setter super(ScaledFunction, self.__class__).L.fset(self, value ) - @property - def gamma(self): - if self._gamma is None: - if self.function.gamma is not None: - if self.scalar <= 0: - # If it strongly convex and multiply with a number <=0, it is not strongl convex - self._gamma = None - else: - self._gamma = self.scalar * self.function.gamma - else: - self._gamma = None - return self._gamma - @gamma.setter - def gamma(self, value): - # call base class setter - super(ScaledFunction, self.__class__).gamma.fset(self, value ) - - @property - def gamma_conj(self): - if self._gamma_conj is None: - if self.function.gamma_conj is not None: - if self.scalar <= 0: - self._gamma_conj= None - else: - self._gamma_conj = self.scalar * self.function.gamma_conj - else: - self._gamma_conj = None - return self._gamma_conj - @gamma_conj.setter - def gamma_conj(self, value): - # call base class setter - super(ScaledFunction, self.__class__).gamma_conj.fset(self, value ) - @property def scalar(self): return self._scalar @@ -446,29 +312,6 @@ def proximal(self, x, tau, out=None): return self.function.proximal(x, tau*self.scalar, out=out) - def proximal_conjugate(self, x, tau, out = None): - r"""This returns the proximal operator for the function at x, tau - """ - try: - tmp = x - x.divide(tau, out = tmp) - except TypeError: - tmp = x.divide(tau, dtype=np.float32) - - if out is None: - val = self.function.proximal(tmp, self.scalar/tau ) - else: - self.function.proximal(tmp, self.scalar/tau, out = out) - val = out - - if id(tmp) == id(x): - x.multiply(tau, out = x) - - val.axpby(-tau, 1.0, x, out=val) - - if out is None: - return val - class SumScalarFunction(SumFunction): """ 
SumScalarFunction represents the sum a function with a scalar. @@ -523,32 +366,6 @@ def L(self, value): # call base class setter super(SumScalarFunction, self.__class__).L.fset(self, value ) - @property - def gamma(self): - if self._gamma is None: - if self.function.gamma is not None: - self._gamma = self.function.gamma - else: - self._gamma = None - return self._gamma - @gamma.setter - def gamma(self, value): - # call base class setter - super(SumScalarFunction, self.__class__).gamma.fset(self, value ) - - @property - def gamma_conj(self): - if self._gamma_conj is None: - if self.function.gamma_conj is not None: - self._gamma_conj = self.function.gamma_conj - else: - self._gamma_conj = None - return self._gamma_conj - @gamma_conj.setter - def gamma_conj(self, value): - # call base class setter - super(SumScalarFunction, self.__class__).gamma_conj.fset(self, value ) - class ConstantFunction(Function): @@ -655,9 +472,7 @@ class TranslateFunction(Function): def __init__(self, function, center): - super(TranslateFunction, self).__init__(L = function.L, - gamma = function.gamma, - gamma_conj = function.gamma_conj) + super(TranslateFunction, self).__init__(L = function.L) self.function = function self.center = center From 3782c405efa6409e5238b144f1a415fdd4757bbe Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Fri, 5 Nov 2021 15:47:05 +0000 Subject: [PATCH 22/88] remove strongly convex from BlockFunction, comment proximal_conjugate --- .../optimisation/functions/BlockFunction.py | 81 ++++++------------- 1 file changed, 25 insertions(+), 56 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/functions/BlockFunction.py b/Wrappers/Python/cil/optimisation/functions/BlockFunction.py index 218c558f08..6781e9efb9 100644 --- a/Wrappers/Python/cil/optimisation/functions/BlockFunction.py +++ b/Wrappers/Python/cil/optimisation/functions/BlockFunction.py @@ -45,12 +45,6 @@ def __init__(self, *functions): super(BlockFunction, self).__init__() self.functions = functions self.length = len(self.functions) - - tmp_gamma_conj = sum(filter(None, [func.gamma_conj for func in functions])) - if tmp_gamma_conj == 0: - self.gamma_conj = None - else: - self.gamma_conj = tmp_gamma_conj @property def L(self): @@ -63,32 +57,7 @@ def L(self): tmp_L = None break return tmp_L - - # @property - # def gamma_conj(self): - # sum_gamma_conj = 0 - # for func in self.functions: - # if func.gamma_conj is not None: - # sum_gamma_conj += func.gamma_conj - # else: - # pass - # if sum_gamma_conj==0: - # return None - # else: - # return sum_gamma_conj - - @gamma_conj.setter - def gamma_conj(self, value): - # call base class setter - - if self.gamma_conj==0: - if value is not None: - raise ValueError("No strongly convex functions in this direct sum. `gamma_conj` should be None. {} is passed".format(value)) - if value!=self.gamma_conj: - raise ValueError("The strongly convex constant of a direct sum of strongly convex or convex functions should agree with the sum of the strongly convex constants of the strongly convex functions. {} is passed. 
{} is needed".format(value, self.gamma_conj)) - - super(BlockFunction, self.__class__).gamma_conj.fset(self, value ) - + def __call__(self, x): r""" Returns the value of the BlockFunction :math:`F` @@ -186,39 +155,39 @@ def gradient(self, x, out=None): return BlockDataContainer(*out) - def proximal_conjugate(self, x, tau, out = None): + # def proximal_conjugate(self, x, tau, out = None): - r"""Proximal operator of the convex conjugate of BlockFunction at x: + # r"""Proximal operator of the convex conjugate of BlockFunction at x: - .. math:: \mathrm{prox}_{\tau F^{*}}(x) = (\mathrm{prox}_{\tau f^{*}_{i}}(x^{*}_{i}))_{i=1}^{m} + # .. math:: \mathrm{prox}_{\tau F^{*}}(x) = (\mathrm{prox}_{\tau f^{*}_{i}}(x^{*}_{i}))_{i=1}^{m} - Parameter: + # Parameter: - x : BlockDataContainer and must have as many rows as self.length - """ + # x : BlockDataContainer and must have as many rows as self.length + # """ - if self.length != x.shape[0]: - raise ValueError('BlockFunction and BlockDataContainer have incompatible size') + # if self.length != x.shape[0]: + # raise ValueError('BlockFunction and BlockDataContainer have incompatible size') - if out is not None: - if isinstance(tau, Number): - for i in range(self.length): - self.functions[i].proximal_conjugate(x.get_item(i), tau, out=out.get_item(i)) - else: - for i in range(self.length): - self.functions[i].proximal_conjugate(x.get_item(i), tau.get_item(i),out=out.get_item(i)) + # if out is not None: + # if isinstance(tau, Number): + # for i in range(self.length): + # self.functions[i].proximal_conjugate(x.get_item(i), tau, out=out.get_item(i)) + # else: + # for i in range(self.length): + # self.functions[i].proximal_conjugate(x.get_item(i), tau.get_item(i),out=out.get_item(i)) - else: + # else: - out = [None]*self.length - if isinstance(tau, Number): - for i in range(self.length): - out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau) - else: - for i in range(self.length): - out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau.get_item(i)) + # out = [None]*self.length + # if isinstance(tau, Number): + # for i in range(self.length): + # out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau) + # else: + # for i in range(self.length): + # out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau.get_item(i)) - return BlockDataContainer(*out) + # return BlockDataContainer(*out) def __getitem__(self, row): return self.functions[row] From c1c2fa32743f5b1ff18db93911d8b157e21dde4a Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Fri, 5 Nov 2021 15:51:34 +0000 Subject: [PATCH 23/88] remove strongly convex L2NormSquared --- Wrappers/Python/cil/optimisation/functions/L2NormSquared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Wrappers/Python/cil/optimisation/functions/L2NormSquared.py b/Wrappers/Python/cil/optimisation/functions/L2NormSquared.py index 7c07bc3c90..f65e928cc0 100644 --- a/Wrappers/Python/cil/optimisation/functions/L2NormSquared.py +++ b/Wrappers/Python/cil/optimisation/functions/L2NormSquared.py @@ -49,7 +49,7 @@ def __init__(self, **kwargs): :param b: translation of the function :type b: :code:`DataContainer`, optional ''' - super(L2NormSquared, self).__init__(L = 2, gamma=2, gamma_conj=1/2) + super(L2NormSquared, self).__init__(L = 2) self.b = kwargs.get('b',None) From 8a1d8c6175d75da062d4d44bb313d85bc8f3bbc6 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Fri, 5 Nov 2021 15:53:17 +0000 Subject: [PATCH 24/88] remove strongly convex tests --- 
Wrappers/Python/test/test_functions.py | 118 ------------------------- 1 file changed, 118 deletions(-) diff --git a/Wrappers/Python/test/test_functions.py b/Wrappers/Python/test/test_functions.py index dd8c8fab14..82d8329d07 100644 --- a/Wrappers/Python/test/test_functions.py +++ b/Wrappers/Python/test/test_functions.py @@ -152,124 +152,6 @@ def test_Function(self): # Compare convex conjugate of g a3 = 0.5 * d.squared_norm() + d.dot(noisy_data) self.assertAlmostEqual(a3, g.convex_conjugate(d), places=7) - - def test_Function_strongly_convexity(self): - - # F is not strongly convex, cc of F is not strongle convex - f = Function() - self.assertIsNone(f.gamma) - self.assertIsNone(f.gamma_conj) - - # change value - f.gamma = 1.0 - f.gamma_conj = 2.0 - self.assertIsNotNone(f.gamma) - self.assertIsNotNone(f.gamma_conj) - - # change to a negative value - try: - f.gamma = -1.0 - except TypeError as err: - print(err) - - # change to a negative value - try: - f.gamma_conj = -1.0 - except TypeError as err: - print(err) - - - def test_SumFunction_strongly_convexity(self): - - # Both functions not stronlgy convex - f = Function() - g = Function() - h = f + g - self.assertIsNone(h.gamma) - self.assertIsNone(h.gamma_conj) - - # First is convex, second is strongly convex: Function itself - f = Function() - g = Function() - g.gamma = 1.0 - h = f + g - self.assertEquals(h.gamma, g.gamma) - - # First is strongly convex, second is convex: Function itself - f = Function() - f.gamma = 2.0 - g = Function() - h = f + g - self.assertEquals(h.gamma, f.gamma) - - # Both strongly convex: Function itself - f = Function() - f.gamma = 2.0 - g = Function() - g.gamma = 4.0 - h = f + g - self.assertEquals(h.gamma, f.gamma + g.gamma) - - # First is convex, second is strongly convex: Convex conjugate of the Function - f = Function() - g = Function() - g.gamma_conj = 1.0 - h = f + g - self.assertEquals(h.gamma_conj, g.gamma_conj) - - # First is strongly convex, second is convex: Convex conjugate of the Function - f = Function() - f.gamma_conj = 2.0 - g = Function() - h = f + g - self.assertEquals(h.gamma_conj, f.gamma_conj) - - # Both strongly convex: Convex conjugate of the Function - f = Function() - f.gamma_conj = 2.0 - g = Function() - g.gamma_conj = 4.0 - h = f + g - self.assertEquals(h.gamma_conj, f.gamma_conj + g.gamma_conj) - - def test_ScaledFunction_strongly_convexity(self): - - # Scaled Function, gamma and gamma_conj default values are None - scalar = 3 - f = Function() - g = scalar * f - self.assertIsNone(g.gamma) - self.assertIsNone(g.gamma_conj) - - # Check strongly convexity - f.gamma = 3.0 - f.gamma_conj = 2.0 - g = scalar * f - self.assertEquals(g.gamma, scalar*f.gamma) - self.assertEquals(g.gamma_conj, scalar*f.gamma_conj) - - # Check is scalar is Negative, hence ScaledFunction is not convex - f.gamma = 3.0 - f.gamma_conj = 2.0 - g = -3.0 * f - self.assertIsNone(g.gamma) - self.assertIsNone(g.gamma_conj) - - def test_SumFunction_scalar_strongly_convexity(self): - - # gamma and gamma_conj default values are None - f = Function() - g = f + 3 - self.assertIsNone(g.gamma) - self.assertIsNone(g.gamma_conj) - - # gamma and gamma_conj - f.gamma = 3.0 - f.gamma_conj = 2.0 - g = f + 3 - self.assertEquals(g.gamma, f.gamma) - self.assertEquals(g.gamma_conj, f.gamma_conj) - def test_L2NormSquared(self): # TESTS for L2 and scalar * L2 From 8ac85c555e2879a99b6a2a7b09935311e68986d0 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Fri, 5 Nov 2021 15:58:36 +0000 Subject: [PATCH 25/88] remove main code from BlockFunction 
--- .../optimisation/functions/BlockFunction.py | 68 +++++++------------ 1 file changed, 25 insertions(+), 43 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/functions/BlockFunction.py b/Wrappers/Python/cil/optimisation/functions/BlockFunction.py index 6781e9efb9..78f9a02394 100644 --- a/Wrappers/Python/cil/optimisation/functions/BlockFunction.py +++ b/Wrappers/Python/cil/optimisation/functions/BlockFunction.py @@ -155,39 +155,39 @@ def gradient(self, x, out=None): return BlockDataContainer(*out) - # def proximal_conjugate(self, x, tau, out = None): + def proximal_conjugate(self, x, tau, out = None): - # r"""Proximal operator of the convex conjugate of BlockFunction at x: + r"""Proximal operator of the convex conjugate of BlockFunction at x: - # .. math:: \mathrm{prox}_{\tau F^{*}}(x) = (\mathrm{prox}_{\tau f^{*}_{i}}(x^{*}_{i}))_{i=1}^{m} + .. math:: \mathrm{prox}_{\tau F^{*}}(x) = (\mathrm{prox}_{\tau f^{*}_{i}}(x^{*}_{i}))_{i=1}^{m} - # Parameter: + Parameter: - # x : BlockDataContainer and must have as many rows as self.length - # """ + x : BlockDataContainer and must have as many rows as self.length + """ - # if self.length != x.shape[0]: - # raise ValueError('BlockFunction and BlockDataContainer have incompatible size') + if self.length != x.shape[0]: + raise ValueError('BlockFunction and BlockDataContainer have incompatible size') - # if out is not None: - # if isinstance(tau, Number): - # for i in range(self.length): - # self.functions[i].proximal_conjugate(x.get_item(i), tau, out=out.get_item(i)) - # else: - # for i in range(self.length): - # self.functions[i].proximal_conjugate(x.get_item(i), tau.get_item(i),out=out.get_item(i)) + if out is not None: + if isinstance(tau, Number): + for i in range(self.length): + self.functions[i].proximal_conjugate(x.get_item(i), tau, out=out.get_item(i)) + else: + for i in range(self.length): + self.functions[i].proximal_conjugate(x.get_item(i), tau.get_item(i),out=out.get_item(i)) - # else: + else: - # out = [None]*self.length - # if isinstance(tau, Number): - # for i in range(self.length): - # out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau) - # else: - # for i in range(self.length): - # out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau.get_item(i)) + out = [None]*self.length + if isinstance(tau, Number): + for i in range(self.length): + out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau) + else: + for i in range(self.length): + out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau.get_item(i)) - # return BlockDataContainer(*out) + return BlockDataContainer(*out) def __getitem__(self, row): return self.functions[row] @@ -204,22 +204,4 @@ def __rmul__(self, other): - -if __name__ == "__main__": - - from cil.framework import ImageGeometry - from cil.optimisation.functions import L1Norm, L2NormSquared - - f = BlockFunction(L2NormSquared()) - - # print(f.gamma_conj) - # f.gamma_conj = None - - f = BlockFunction(L1Norm(), L1Norm()) - - print(f.gamma_conj) - # f.gamma_conj = None - - - - + \ No newline at end of file From 8e93ba0d618a2856a248ab4b5bbbfca2a91835db Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Fri, 5 Nov 2021 16:12:19 +0000 Subject: [PATCH 26/88] go back to old PDHG implementation, improve docs --- .../Python/cil/optimisation/algorithms/PDHG.py | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py index f275610321..34a1c7a828 
100644 --- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py @@ -69,10 +69,13 @@ def __init__(self, f=None, g=None, operator=None, tau=None, sigma=1.,initial=Non :param operator: a Linear Operator :param f: Convex function with "simple" proximal of its conjugate. - :param g: Convex function with "simple" proximal + :param g: Convex function with "simple" proximal . :param tau: Step size parameter for Primal problem :param sigma: Step size parameter for Dual problem :param initial: Initial guess ( Default initial = 0) + :param gamma_g: Strongly convex constant for the function g. + :param gamma_fconj: Strongly convex constant for the convex conjugate of the function f. + ''' super(PDHG, self).__init__(**kwargs) if kwargs.get('x_init', None) is not None: @@ -130,13 +133,21 @@ def set_up(self, f, g, operator, tau=None, sigma=1., initial=None, **kwargs): # relaxation parameter, default value is 1.0 self.theta = kwargs.get('theta',1.0) - # Strongly convex case g + # Primal Acceleration: Function g is strongly convex self.gamma_g = kwargs.get('gamma_g', None) - # Strongly convex case f + # Dual Acceleration : Convex conjugate of f is strongly convex self.gamma_fconj = kwargs.get('gamma_fconj', None) + + if self.gamma_g is not None: + warnings.warn("Primal Acceleration of PDHG: The function f is assumed to be strongly convex \ + with parameter `gamma_g`. Need to be sure that gamma_g = {} is the correct strongly convex constant for g".format(self.gamma_g)) + if self.gamma_fconj is not None: + warnings.warn("Dual Acceleration of PDHG: The convex conjugate of function f is assumed to be strongly convex \ + with parameter `gamma_fconj`. Need to be sure that gamma_fconj = {} is the correct strongly convex constant".format(self.gamma_fconj)) + self.configured = True print("{} configured".format(self.__class__.__name__, )) From 6f75a520f08e411a8e572dbcbd86fba5d6452418 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Fri, 5 Nov 2021 16:26:35 +0000 Subject: [PATCH 27/88] add try/except rules for gamma values --- .../cil/optimisation/algorithms/PDHG.py | 22 ++++++++++++------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py index 34a1c7a828..5df79046d4 100644 --- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py @@ -111,14 +111,9 @@ def set_up(self, f, g, operator, tau=None, sigma=1., initial=None, **kwargs): self.g = g self.operator = operator - self.tau = tau - self.sigma = sigma - - if self.tau is None: - # Compute operator Norm - normK = self.operator.norm() - # Primal & dual stepsizes - self.tau = 1 / (self.sigma * normK ** 2) + normK = self.operator.norm() + self.tau = 1./normK + self.sigma = 1./normK if initial is None: self.x_old = self.operator.domain_geometry().allocate(0) @@ -139,6 +134,17 @@ def set_up(self, f, g, operator, tau=None, sigma=1., initial=None, **kwargs): # Dual Acceleration : Convex conjugate of f is strongly convex self.gamma_fconj = kwargs.get('gamma_fconj', None) + try: + self.gamma_g = self.g.gamma + except AttributeError: + pass + + try: + self.gamma_fconj = self.f.conjugate.gamma + except AttributeError: + pass + + if self.gamma_g is not None: warnings.warn("Primal Acceleration of PDHG: The function f is assumed to be strongly convex \ with parameter `gamma_g`. 
Need to be sure that gamma_g = {} is the correct strongly convex constant for g".format(self.gamma_g)) From 5e1c871b0fae5a50459a57fbb52201ce2e8525e5 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Fri, 5 Nov 2021 16:38:30 +0000 Subject: [PATCH 28/88] ../Wrappers/Python/cil/optimisation/algorithms/PDHG.py --- .../Python/cil/optimisation/algorithms/PDHG.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py index 5df79046d4..f5fc93c806 100644 --- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py @@ -35,12 +35,20 @@ class PDHG(Algorithm): :param g: Convex function with "simple" proximal :param tau: Step size parameter for Primal problem :param sigma: Step size parameter for Dual problem + :param gamma_g: Strongly convex constant for the function g. It can be used to accelerate PDHG: Primal Acceleration. + :type gamma_g: Default is None + :param gamma_g: Strongly convex constant for the convex conjugate of the function f. It can be used to accelerate PDHG: Dual Acceleration. + :type gamma_fconj: Default is None - If the function g is strongly convex, you can use `gamma_g` to accelerate PDHG. - If the convex conjugate of f is strongly convex, you can use `gamma_fconj` to accelerate PDHG. + Note: A function f is strongly convex with parameter :math: $\gamma$ iff the function + + .. math:: + + f(x) - \frac{\gamma}{2}\|x\|^{2} + + is convex. - :param gamma_g: Strongly convex constant for the function - :param gamma_fconj: Strongly convex constant for the function + For more information, see https://en.wikipedia.org/wiki/Convex_function#Strongly_convex_functions Remark: Convergence is guaranted provided that From 675df445fccd61297f6d8434bab1240d76c0ac5e Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Fri, 5 Nov 2021 17:01:43 +0000 Subject: [PATCH 29/88] udpate docs --- Wrappers/Python/cil/optimisation/algorithms/PDHG.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py index f5fc93c806..aaa7add3ea 100644 --- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py @@ -152,14 +152,11 @@ def set_up(self, f, g, operator, tau=None, sigma=1., initial=None, **kwargs): except AttributeError: pass - if self.gamma_g is not None: - warnings.warn("Primal Acceleration of PDHG: The function f is assumed to be strongly convex \ - with parameter `gamma_g`. Need to be sure that gamma_g = {} is the correct strongly convex constant for g".format(self.gamma_g)) + warnings.warn("Primal Acceleration of PDHG: The function g is assumed to be strongly convex with parameter `gamma_g`. Need to be sure that gamma_g = {} is the correct strongly convex constant for g. ".format(self.gamma_g)) if self.gamma_fconj is not None: - warnings.warn("Dual Acceleration of PDHG: The convex conjugate of function f is assumed to be strongly convex \ - with parameter `gamma_fconj`. Need to be sure that gamma_fconj = {} is the correct strongly convex constant".format(self.gamma_fconj)) + warnings.warn("Dual Acceleration of PDHG: The convex conjugate of function f is assumed to be strongly convex with parameter `gamma_fconj`. 
Need to be sure that gamma_fconj = {} is the correct strongly convex constant".format(self.gamma_fconj)) self.configured = True From d63ca7c163be6d9890a54c376484d94c0794bb79 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Fri, 5 Nov 2021 17:08:37 +0000 Subject: [PATCH 30/88] remove repeated docs --- Wrappers/Python/cil/optimisation/algorithms/PDHG.py | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py index aaa7add3ea..e2a9684502 100644 --- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py @@ -29,16 +29,9 @@ class PDHG(Algorithm): .. math:: \min_{x} f(Kx) + g(x) - - :param operator: Linear Operator = K - :param f: Convex function with "simple" proximal of its conjugate. - :param g: Convex function with "simple" proximal - :param tau: Step size parameter for Primal problem - :param sigma: Step size parameter for Dual problem - :param gamma_g: Strongly convex constant for the function g. It can be used to accelerate PDHG: Primal Acceleration. - :type gamma_g: Default is None - :param gamma_g: Strongly convex constant for the convex conjugate of the function f. It can be used to accelerate PDHG: Dual Acceleration. - :type gamma_fconj: Default is None + + #TODO Add general information about PDHG, e.g, proximal, proximal conjugate, saddle point + #TODO Mention acceleration cases. Note: A function f is strongly convex with parameter :math: $\gamma$ iff the function From c7586e520f51d51e518087dd1570d51631e2f4f9 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Mon, 8 Nov 2021 14:31:23 +0000 Subject: [PATCH 31/88] improve PDHG docs --- .../cil/optimisation/algorithms/PDHG.py | 116 +++++++++++++----- 1 file changed, 87 insertions(+), 29 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py index e2a9684502..0b89cb2e14 100644 --- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py @@ -22,44 +22,102 @@ class PDHG(Algorithm): - r"""Primal Dual Hybrid Gradient - - Problem: - - .. math:: - - \min_{x} f(Kx) + g(x) + r"""Primal Dual Hybrid Gradient (PDHG) algorithm, see :cite:`CP2010`. - #TODO Add general information about PDHG, e.g, proximal, proximal conjugate, saddle point - #TODO Mention acceleration cases. + A first-order primal-dual algorithm for convex optimization problems with known saddle-point structure with applications in imaging. - Note: A function f is strongly convex with parameter :math: $\gamma$ iff the function - - .. math:: - - f(x) - \frac{\gamma}{2}\|x\|^{2} + The general problem considered in the PDHG algorithm is the generic saddle-point problem + + .. math:: \min_{x\in X}\max_{y\in Y} \langle Kx, y \rangle + g(x) - f^{*}(x) + + where :math:`f` and :math:`g` are convex functions with "simple" proximal operators. - is convex. + :math:`X` and :math:`Y` are two two finite-dimensional vector spaces with an inner product and representing the domain of :math:`g` and :math:`f^{*}`, the convex conjugate of :math:`f`, respectively. - For more information, see https://en.wikipedia.org/wiki/Convex_function#Strongly_convex_functions - - Remark: Convergence is guaranted provided that - - .. math:: + The operator :math:`K` is a continuous linear operator with operator norm defined as + + .. 
math:: \|K\| = \max\{ \|Kx\| : x\in X, \|x\|\leq1\} + + + The saddle point problem is decomposed into the primal problem: + + .. math:: \min_{x\in X} f(Kx) + g(x), + + and its corresponding dual problem + + .. math:: \max_{y\in Y} - g^{*}(-K^{*}y) - f^{*}(y). + + The PDHG algorithm consists of three steps: + + a) gradient ascent step for the dual problem, + b) gradient descent step for the primal problem and + c) an over-relaxation of the primal variable. + + + Notes + ----- + + - Convergence is guaranteed if the operator norm :math:`\|K\|`, \the dual step size :math:`\sigma` and the primal step size :math:`\tau`, satisfy the following inequality: + + .. math:: - \tau \sigma \|K\|^{2} <1 + \tau \sigma \|L\|^2 < 1 + + - By default, the step sizes :math:`\sigma` and :math:`\tau` are: + + .. math:: + + \sigma = \frac{1}{\|K\|}, \tau = \frac{1}{\|K\|} + + - PDHG algorithm can be accelerated if the functions :math:`f^{*}` and/or :math:`g` are strongly convex. + A function :math:`f` is strongly convex with constant :math:`\gamma>0` if + + .. math:: + + f(x) - \frac{\gamma}{2}\|x\|^{2} + + is convex. - Reference: - - - (a) A. Chambolle and T. Pock (2011), "A first-order primal–dual algorithm for convex - problems with applications to imaging", J. Math. Imaging Vision 40, 120–145. + For instance the :math:`\frac{1}{2}\|x\|^{2}_{2}` is :math:`\gamma` strongly convex\ + for :math:`\gamma\in(-\infty,1]`. We say it is 1-strongly convex because it is the largest constant for which \ + :math:`f - \frac{1}{2}\|\cdot\|^{2}` is convex. + + The :math:`\|\cdot\|_{1}` norm is not strongly convex. + + For more information, see https://en.wikipedia.org/wiki/Convex_function#Strongly_convex_functions + + Example + ------- + Least Squares minimisation with PDHG. + + .. math:: \min_{u\in X} \|A u - g\|^{2} + + >>> operator = A + >>> f = L2NormSquared(b = g) + >>> g = ZeroFunction() + >>> pdhg = PDHG(f = f, g = g, operator = operator) + >>> pdhg.run(10) + + Example + ------- + Total variation denoising with with PDHG. + + .. math:: \min_{x\in X} \|u - g\|^{2} + \alpha\|\nabla u\|_{2,1} + + >>> ig = g.geometry + >>> operator = GradientOperator(ig) + >>> f = MixedL21Norm() + >>> g = L2NormSquared(b=g) + >>> pdhg = PDHG(f = f, g = g, operator = operator) + >>> pdhg.run(10) + + References + ---------- + .. [1] A. Chambolle and T. Pock (2011), "A first-order primal–dual algorithm for convex problems with applications to imaging", J. Math. Imaging Vision 40, 120–145. - (b) E. Esser, X. Zhang and T. F. Chan (2010), "A general framework for a class of first - order primal–dual algorithms for convex optimization in imaging science", - SIAM J. Imaging Sci. 3, 1015–1046. + .. [2] E. Esser, X. Zhang and T. F. Chan (2010), "A general framework for a class of first order primal–dual algorithms for convex optimization in imaging science", SIAM J. Imaging Sci. 3, 1015–1046. 
""" From b1d3cd66364f914b9285fdb383fd648179fdb0c9 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Mon, 8 Nov 2021 14:31:54 +0000 Subject: [PATCH 32/88] add sphinx bib requirements --- docs/requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/requirements.txt b/docs/requirements.txt index 2b824f1e35..6424a8320a 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,4 +1,5 @@ sphinx_rtd_theme +sphinxcontrib-bibtex numpy scipy matplotlib >=3.3.0,<=3.4.2 # temporary fix due to issue with installing matplotlib version 3.4.3 From 22afecf105dccae564199796244c411e13d32b42 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Mon, 8 Nov 2021 14:32:24 +0000 Subject: [PATCH 33/88] add napoleon extension --- docs/source/conf.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/docs/source/conf.py b/docs/source/conf.py index 53e3c0e9eb..d496c41f80 100755 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -41,12 +41,14 @@ # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ + 'sphinx.ext.napoleon', 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.mathjax', 'sphinx.ext.viewcode', + 'sphinxcontrib.bibtex', ] # Add any paths that contain templates here, relative to this directory. @@ -187,3 +189,8 @@ # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True + + +bibtex_bibfiles = ['refs.bib'] +bibtex_encoding = 'latin' +bibtex_default_style = 'unsrt' From 2f728aca0a293e79751ac3223b43745eea94fa09 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Mon, 8 Nov 2021 14:33:01 +0000 Subject: [PATCH 34/88] add refs bib file --- docs/source/refs.bib | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 docs/source/refs.bib diff --git a/docs/source/refs.bib b/docs/source/refs.bib new file mode 100644 index 0000000000..2c1ef886a7 --- /dev/null +++ b/docs/source/refs.bib @@ -0,0 +1,12 @@ +@article{CP2010, + doi = {10.1007/s10851-010-0251-1}, + url = {https://doi.org/10.1007/s10851-010-0251-1}, + year = {2010}, + month = dec, + publisher = {Springer Science and Business Media {LLC}}, + volume = {40}, + number = {1}, + pages = {120--145}, + author = {Antonin Chambolle and Thomas Pock}, + title = {A First-Order Primal-Dual Algorithm for Convex Problems with~Applications to Imaging} +} \ No newline at end of file From a42bc2998c3e837433bf89e39be501dd097b43b5 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Mon, 8 Nov 2021 14:54:49 +0000 Subject: [PATCH 35/88] add refs --- .../cil/optimisation/algorithms/PDHG.py | 13 +++-- docs/source/conf.py | 5 +- docs/source/refs.bib | 50 ++++++++++++++----- 3 files changed, 48 insertions(+), 20 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py index 0b89cb2e14..69855e25a5 100644 --- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py @@ -22,7 +22,7 @@ class PDHG(Algorithm): - r"""Primal Dual Hybrid Gradient (PDHG) algorithm, see :cite:`CP2010`. + r"""Primal Dual Hybrid Gradient (PDHG) algorithm, see :cite:`CP2011`, :cite:`EZXC2010`. A first-order primal-dual algorithm for convex optimization problems with known saddle-point structure with applications in imaging. 
@@ -110,14 +110,13 @@ class PDHG(Algorithm):
     >>> f = MixedL21Norm()
     >>> g = L2NormSquared(b=g)
     >>> pdhg = PDHG(f = f, g = g, operator = operator)
-    >>> pdhg.run(10)
-
+    >>> pdhg.run(10)
+
     References
     ----------
-
-    .. [1] A. Chambolle and T. Pock (2011), "A first-order primal–dual algorithm for convex problems with applications to imaging", J. Math. Imaging Vision 40, 120–145.
-
-    .. [2] E. Esser, X. Zhang and T. F. Chan (2010), "A general framework for a class of first order primal–dual algorithms for convex optimization in imaging science", SIAM J. Imaging Sci. 3, 1015–1046.
+
+    .. bibliography::
+
     """

diff --git a/docs/source/conf.py b/docs/source/conf.py
index d496c41f80..bdac1dbfe7 100755
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -193,4 +193,7 @@

 bibtex_bibfiles = ['refs.bib']
 bibtex_encoding = 'latin'
-bibtex_default_style = 'unsrt'
+bibtex_reference_style = 'label'
+bibtex_default_style = 'plain'
+
+
diff --git a/docs/source/refs.bib b/docs/source/refs.bib
index 2c1ef886a7..252a445a9c 100644
--- a/docs/source/refs.bib
+++ b/docs/source/refs.bib
@@ -1,12 +1,38 @@
-@article{CP2010,
-  doi = {10.1007/s10851-010-0251-1},
-  url = {https://doi.org/10.1007/s10851-010-0251-1},
-  year = {2010},
-  month = dec,
-  publisher = {Springer Science and Business Media {LLC}},
-  volume = {40},
-  number = {1},
-  pages = {120--145},
-  author = {Antonin Chambolle and Thomas Pock},
-  title = {A First-Order Primal-Dual Algorithm for Convex Problems with~Applications to Imaging}
-} \ No newline at end of file
+@Article{CP2011,
+author={Chambolle, Antonin
+and Pock, Thomas},
+title={A First-Order Primal-Dual Algorithm for Convex Problems with Applications to Imaging},
+journal={Journal of Mathematical Imaging and Vision},
+year={2011},
+month={May},
+day={01},
+volume={40},
+number={1},
+pages={120-145},
+abstract={In this paper we study a first-order primal-dual algorithm for non-smooth convex optimization problems with known saddle-point structure. We prove convergence to a saddle-point with rate O(1/N) in finite dimensions for the complete class of problems. We further show accelerations of the proposed algorithm to yield improved rates on problems with some degree of smoothness. In particular we show that we can achieve O(1/N^2) convergence on problems, where the primal or the dual objective is uniformly convex, and we can show linear convergence, i.e. O($\omega^N$) for some $\omega\in(0,1)$, on smooth problems.
The wide applicability of the proposed algorithm is demonstrated on several imaging problems such as image denoising, image deconvolution, image inpainting, motion estimation and multi-label image segmentation.},
+issn={1573-7683},
+doi={10.1007/s10851-010-0251-1},
+url={https://doi.org/10.1007/s10851-010-0251-1}
+}
+
+
+@article{EZXC2010,
+author = {Esser, Ernie and Zhang, Xiaoqun and Chan, Tony F.},
+title = {A General Framework for a Class of First Order Primal-Dual Algorithms for Convex Optimization in Imaging Science},
+journal = {SIAM Journal on Imaging Sciences},
+volume = {3},
+number = {4},
+pages = {1015-1046},
+year = {2010},
+doi = {10.1137/09076934X},
+url = {https://doi.org/10.1137/09076934X}
+}

From 8479f24f1f48b311e0dfa84daa0ebbd1b2d82f36 Mon Sep 17 00:00:00 2001
From: epapoutsellis
Date: Tue, 9 Nov 2021 13:46:33 +0000
Subject: [PATCH 36/88] improve PDHG docs

---
 .../cil/optimisation/algorithms/PDHG.py | 216 +++++++++++++-----
 1 file changed, 153 insertions(+), 63 deletions(-)

diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
index 69855e25a5..51478b4443 100644
--- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
+++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
@@ -24,6 +24,61 @@ class PDHG(Algorithm):

     r"""Primal Dual Hybrid Gradient (PDHG) algorithm, see :cite:`CP2011`, :cite:`EZXC2010`.

+    Parameters
+    ----------
+    f : Function
+        A convex function with a "simple" proximal method of its conjugate.
+    g : Function
+        A convex function with a "simple" proximal.
+    operator : LinearOperator
+        A Linear Operator.
+    sigma : :obj:`float`, optional, default=1
+        Step size for the dual problem.
+    tau : :obj:`float`, optional, default=None
+        Step size for the primal problem.
+    initial : DataContainer, optional, default=None
+        Initial point for the PDHG algorithm.
+    use_axpby : :obj:`bool`, optional, default=True
+        Computes a*x + b*y in C.
+    gamma_g : :obj:`float`, optional, default=None
+        Strongly convex constant if the function g is strongly convex. Allows primal acceleration of the PDHG algorithm.
+    gamma_fconj : :obj:`float`, optional, default=None
+        Strongly convex constant if the convex conjugate of f is strongly convex. Allows dual acceleration of the PDHG algorithm.
+
+
+    **kwargs:
+        Keyword arguments used from the base class :class:`Algorithm`.
+
+        max_iteration : :obj:`int`, optional, default=0
+            Maximum number of iterations of the PDHG.
+        update_objective_interval : :obj:`int`, optional, default=1
+            Evaluates objectives, e.g., primal/dual/primal-dual gap every ``update_objective_interval``.
+
+    Example
+    -------
+    Total variation denoising with PDHG.
+
+    .. math:: \min_{u\in X} \|u - b\|^{2} + \alpha\|\nabla u\|_{2,1}
+
+    >>> data = dataexample.CAMERA.get()
+    >>> noisy_data = noise.gaussian(data, seed = 10, var = 0.02)
+    >>> ig = data.geometry
+    >>> operator = GradientOperator(ig)
+    >>> f = MixedL21Norm()
+    >>> g = L2NormSquared(b=noisy_data)
+    >>> pdhg = PDHG(f = f, g = g, operator = operator, max_iteration = 10)
+    >>> pdhg.run(10)
+    >>> solution = pdhg.solution
+
+    Primal acceleration can be used, since :math:`g` is strongly convex with parameter ``gamma_g = 2``.
+
+    >>> pdhg = PDHG(f = f, g = g, operator = operator, gamma_g = 2)
+
+    For TV tomography reconstruction, see `CIL-Demos `_.
+
+
+
    A first-order primal-dual algorithm for convex optimization problems with known saddle-point structure with applications in imaging.
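Why ``gamma_g = 2`` is admissible in the example above: with :math:`g(u) = \|u - b\|^{2}`, the difference :math:`g(u) - \frac{\gamma}{2}\|u\|^{2} = (1 - \frac{\gamma}{2})\|u\|^{2} - 2\langle u, b\rangle + \|b\|^{2}` is convex precisely when :math:`\gamma \leq 2`, so 2 is the largest valid constant. A hedged sketch of the accelerated call, reusing the names from the example (iteration counts arbitrary):

>>> pdhg_sc = PDHG(f = f, g = g, operator = operator, gamma_g = 2, max_iteration = 10)
>>> pdhg_sc.run(10)
>>> pdhg_sc.sigma, pdhg_sc.tau   # rescaled at each iteration by update_step_sizes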
The general problem considered in the PDHG algorithm is the generic saddle-point problem @@ -49,9 +104,9 @@ class PDHG(Algorithm): The PDHG algorithm consists of three steps: - a) gradient ascent step for the dual problem, - b) gradient descent step for the primal problem and - c) an over-relaxation of the primal variable. + * gradient ascent step for the dual problem, + * gradient descent step for the primal problem and + * an over-relaxation of the primal variable. Notes @@ -69,49 +124,21 @@ class PDHG(Algorithm): \sigma = \frac{1}{\|K\|}, \tau = \frac{1}{\|K\|} - - PDHG algorithm can be accelerated if the functions :math:`f^{*}` and/or :math:`g` are strongly convex. - - A function :math:`f` is strongly convex with constant :math:`\gamma>0` if - - .. math:: - - f(x) - \frac{\gamma}{2}\|x\|^{2} + PDHG algorithm can be accelerated if the functions :math:`f^{*}` and/or :math:`g` are strongly convex. A function :math:`f` is strongly convex with constant :math:`\gamma>0` if - is convex. - - For instance the :math:`\frac{1}{2}\|x\|^{2}_{2}` is :math:`\gamma` strongly convex\ - for :math:`\gamma\in(-\infty,1]`. We say it is 1-strongly convex because it is the largest constant for which \ - :math:`f - \frac{1}{2}\|\cdot\|^{2}` is convex. - - The :math:`\|\cdot\|_{1}` norm is not strongly convex. - - For more information, see https://en.wikipedia.org/wiki/Convex_function#Strongly_convex_functions - - Example - ------- - Least Squares minimisation with PDHG. - - .. math:: \min_{u\in X} \|A u - g\|^{2} - - >>> operator = A - >>> f = L2NormSquared(b = g) - >>> g = ZeroFunction() - >>> pdhg = PDHG(f = f, g = g, operator = operator) - >>> pdhg.run(10) + .. math:: - Example - ------- - Total variation denoising with with PDHG. + f(x) - \frac{\gamma}{2}\|x\|^{2} - .. math:: \min_{x\in X} \|u - g\|^{2} + \alpha\|\nabla u\|_{2,1} + is convex. + + For instance the :math:`\frac{1}{2}\|x\|^{2}_{2}` is :math:`\gamma` strongly convex\ + for :math:`\gamma\in(-\infty,1]`. We say it is 1-strongly convex because it is the largest constant for which \ + :math:`f - \frac{1}{2}\|\cdot\|^{2}` is convex. - >>> ig = g.geometry - >>> operator = GradientOperator(ig) - >>> f = MixedL21Norm() - >>> g = L2NormSquared(b=g) - >>> pdhg = PDHG(f = f, g = g, operator = operator) - >>> pdhg.run(10) + The :math:`\|\cdot\|_{1}` norm is not strongly convex. For more information, see `Strongly Convex `_. + References ---------- @@ -120,21 +147,12 @@ class PDHG(Algorithm): """ - def __init__(self, f=None, g=None, operator=None, tau=None, sigma=1.,initial=None, use_axpby=True, **kwargs): - '''PDHG algorithm creator - - Optional parameters + def __init__(self, f, g, operator, tau=None, sigma=1.,initial=None, use_axpby=True, **kwargs): + """ + Constructor method + """ - :param operator: a Linear Operator - :param f: Convex function with "simple" proximal of its conjugate. - :param g: Convex function with "simple" proximal . - :param tau: Step size parameter for Primal problem - :param sigma: Step size parameter for Dual problem - :param initial: Initial guess ( Default initial = 0) - :param gamma_g: Strongly convex constant for the function g. - :param gamma_fconj: Strongly convex constant for the convex conjugate of the function f. 
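Before the refactored ``update`` method below, the three steps listed above read naturally as code. A plain NumPy sketch, assuming ``K`` is a dense matrix and the proximal maps are supplied as callables; all names here are hypothetical and for illustration only (CIL itself works with ``operator.direct``/``operator.adjoint`` and in-place arithmetic):

import numpy as np

def pdhg_iteration(x, x_old, y, K, sigma, tau, theta, prox_sigma_fconj, prox_tau_g):
    # over-relaxation of the primal variable, carried over from the previous iterates
    x_bar = x + theta * (x - x_old)
    # gradient ascent step for the dual problem
    y_new = prox_sigma_fconj(y + sigma * (K @ x_bar))
    # gradient descent step for the primal problem
    x_new = prox_tau_g(x - tau * (K.T @ y_new))
    return x_new, x, y_new

Here ``K.T`` stands in for the adjoint :math:`K^{*}`, and the minus sign in the primal step matches the corrected update formula that appears later in this series.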
-        '''
         super(PDHG, self).__init__(**kwargs)
         if kwargs.get('x_init', None) is not None:
             if initial is None:
@@ -150,15 +168,8 @@ def __init__(self, f=None, g=None, operator=None, tau=None, sigma=1.,initial=Non
             self.set_up(f=f, g=g, operator=operator, tau=tau, sigma=sigma, initial=initial, **kwargs)

     def set_up(self, f, g, operator, tau=None, sigma=1., initial=None, **kwargs):
-        '''initialisation of the algorithm
-
-        :param operator: a Linear Operator
-        :param f: Convex function with "simple" proximal of its conjugate.
-        :param g: Convex function with "simple" proximal
-        :param tau: Step size parameter for Primal problem
-        :param sigma: Step size parameter for Dual problem
-        :param initial: Initial guess ( Default initial = 0)'''
-
+        """initialisation of the algorithm
+        """
         print("{} setting up".format(self.__class__.__name__, ))

@@ -224,6 +235,29 @@ def get_output(self):

     def update(self):

+        r"""
+
+        Performs a single iteration of the PDHG algorithm.
+
+
+        .. math::
+
+            y^{n+1} = \mathrm{prox}_{\sigma f^{*}}(y^{n} + \sigma K \bar{x}^{n})
+
+        .. math::
+
+            x^{n+1} = \mathrm{prox}_{\tau g}(x^{n} + \tau K^{*}y^{n+1})
+
+        .. math::
+
+            \bar{x}^{n+1} = x^{n+1} + \theta (x^{n+1} - x^{n})
+
+        In the case of primal/dual acceleration using the strong convexity property of the function :math:`f^{*}` or :math:`g`,
+        the step sizes :math:`\sigma` and :math:`\tau` are updated using the :meth:`update_step_sizes` method.
+
+
+        """
+
         #calculate x-bar and store in self.x_tmp
         if self._use_axpby:
             self.x_old.axpby((self.theta + 1.0), -self.theta , self.x, out=self.x_tmp)
@@ -262,6 +296,20 @@ def update(self):

     def update_step_sizes(self):
+
+        r"""
+
+        Updates the step sizes :math:`\sigma`, :math:`\tau` and :math:`\theta` in the cases of primal or dual acceleration using the strong convexity property.
+
+        - :math:`g` is strongly convex with constant :math:`\gamma=` ``gamma_g``.
+        - :math:`f^{*}` is strongly convex with constant :math:`\gamma=` ``gamma_fconj``.
+
+        Note
+        ----
+
+        The case where both functions are strongly convex is not available at the moment.
+
+        """

         # Update sigma and tau based on the strong convexity of G
         if self.gamma_g is not None:
@@ -282,6 +330,48 @@ def update_step_sizes(self):

     def update_objective(self):

+        """
+
+        Evaluates the primal objective, the dual objective and the primal-dual gap.
+
+        Primal objective:
+
+        .. math::
+
+            f(Kx) + g(x)
+
+        Dual objective:
+
+        .. math::
+
+            - g^{*}(-K^{*}y) - f^{*}(y)
+
+        Primal-Dual gap (or Duality gap):
+
+        .. math::
+
+            f(Kx) + g(x) + g^{*}(-K^{*}y) + f^{*}(y)
+
+        Note
+        ----
+
+        - The primal objective is printed if `verbose=1`.
+        - All the objectives are printed if `verbose=2`.
+
+        Computing these objectives can be costly, so it is better to compute them only every few iterations. To do this, use ``update_objective_interval``.
+
+        Note
+        ----
+
+        The primal-dual gap (or duality gap) measures how close the primal-dual pair (x,y) is to the primal-dual solution.
+        It is always non-negative and is used to monitor convergence of the PDHG algorithm.
+
+
+
+        For more information, see `Duality Gap `_.
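In practice the gap is tracked through ``loss`` and the ``primal_dual_gap`` property shown further down. A minimal sketch, assuming ``f``, ``g`` and ``operator`` are set up as in the denoising example earlier:

>>> pdhg = PDHG(f = f, g = g, operator = operator,
...             max_iteration = 100, update_objective_interval = 10)
>>> pdhg.run(100, verbose = 2)   # verbose=2 prints primal, dual and gap
>>> pdhg.primal_dual_gap[-1]     # most recently recorded gap; should tend to 0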
+ + """ + self.operator.direct(self.x_old, out=self.y_tmp) f_eval_p = self.f(self.y_tmp) g_eval_p = self.g(self.x_old) From 5b77d6d5646eb475e0691605603e1179b3c2e160 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Tue, 9 Nov 2021 13:46:53 +0000 Subject: [PATCH 37/88] add members to present --- docs/source/optimisation.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/optimisation.rst b/docs/source/optimisation.rst index 9341532250..a15b6d7ad3 100644 --- a/docs/source/optimisation.rst +++ b/docs/source/optimisation.rst @@ -64,7 +64,7 @@ print to screen of the status of the optimisation. :members: :special-members: .. autoclass:: cil.optimisation.algorithms.PDHG - :members: + :members: update, update_objective, update_step_sizes .. autoclass:: cil.optimisation.algorithms.LADMM :members: .. autoclass:: cil.optimisation.algorithms.SPDHG From acfc36c3df70cb1527ee529de2e274b3cf0e1070 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Tue, 9 Nov 2021 13:47:19 +0000 Subject: [PATCH 38/88] change to pydata style --- docs/source/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index bdac1dbfe7..f69dc6760e 100755 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -84,7 +84,7 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # -html_theme = 'sphinx_rtd_theme' +html_theme = 'pydata_sphinx_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the From 6f908cad94ba8148a817265e8758292c7eff7bfe Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Tue, 9 Nov 2021 13:50:39 +0000 Subject: [PATCH 39/88] add pydata theme --- docs/requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/requirements.txt b/docs/requirements.txt index 6424a8320a..e557409c8d 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,5 +1,6 @@ sphinx_rtd_theme sphinxcontrib-bibtex +pydata_sphinx_theme numpy scipy matplotlib >=3.3.0,<=3.4.2 # temporary fix due to issue with installing matplotlib version 3.4.3 From aea94c4f694a7eaf54152a6bcd7b625f6cad4995 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Tue, 9 Nov 2021 14:09:59 +0000 Subject: [PATCH 40/88] add CIL refs --- docs/source/refs.bib | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/docs/source/refs.bib b/docs/source/refs.bib index 252a445a9c..efd2a4bbd9 100644 --- a/docs/source/refs.bib +++ b/docs/source/refs.bib @@ -36,3 +36,42 @@ @article{EZXC2010 } } + + + +@article{Papoutsellis_et_al_2021, +author = {Papoutsellis, Evangelos and Ametova, Evelina and Delplancke, Claire and Fardell, Gemma and J\"{o}rgensen, Jakob S. and Pasca, Edoardo and Turner, Martin and Warr, Ryan and Lionheart, William R. B. and Withers, Philip J. 
},
+title = {Core Imaging Library - Part II: multichannel reconstruction for dynamic and spectral tomography},
+journal = {Philosophical Transactions of the Royal Society A: Mathematical, Physical and Engineering Sciences},
+volume = {379},
+number = {2204},
+pages = {20200193},
+year = {2021},
+doi = {10.1098/rsta.2020.0193},
+url = {https://royalsocietypublishing.org/doi/abs/10.1098/rsta.2020.0193},
+eprint = {https://royalsocietypublishing.org/doi/pdf/10.1098/rsta.2020.0193},
+abstract = { The newly developed core imaging library (CIL) is a flexible plug and play library for tomographic imaging with a specific focus on iterative reconstruction. CIL provides building blocks for tailored regularized reconstruction algorithms and explicitly supports multichannel tomographic data. In the first part of this two-part publication, we introduced the fundamentals of CIL. This paper focuses on applications of CIL for multichannel data, e.g. dynamic and spectral. We formalize different optimization problems for colour processing, dynamic and hyperspectral tomography and demonstrate CIL’s capabilities for designing state-of-the-art reconstruction methods through case studies and code snapshots. This article is part of the theme issue ‘Synergistic tomographic image reconstruction: part 2’. }
+}
+
+
+@article{Jorgensen_et_al_2021,
+author = {J\"{o}rgensen, J. S. and Ametova, E. and Burca, G. and Fardell, G. and Papoutsellis, E. and Pasca, E. and Thielemans, K. and Turner, M. and Warr, R. and Lionheart, W. R. B. and Withers, P. J.},
+title = {Core Imaging Library - Part I: a versatile Python framework for tomographic imaging},
+journal = {Philosophical Transactions of the Royal Society A: Mathematical, Physical and Engineering Sciences},
+volume = {379},
+number = {2204},
+pages = {20200192},
+year = {2021},
+doi = {10.1098/rsta.2020.0192},
+url = {https://royalsocietypublishing.org/doi/abs/10.1098/rsta.2020.0192},
+eprint = {https://royalsocietypublishing.org/doi/pdf/10.1098/rsta.2020.0192},
+abstract = { We present the Core Imaging Library (CIL), an open-source Python framework for tomographic imaging with particular emphasis on reconstruction of challenging datasets. Conventional filtered back-projection reconstruction tends to be insufficient for highly noisy, incomplete, non-standard or multi-channel data arising for example in dynamic, spectral and in situ tomography. CIL provides an extensive modular optimization framework for prototyping reconstruction methods including sparsity and total variation regularization, as well as tools for loading, preprocessing and visualizing tomographic data. The capabilities of CIL are demonstrated on a synchrotron example dataset and three challenging cases spanning golden-ratio neutron tomography, cone-beam X-ray laminography and positron emission tomography. This article is part of the theme issue ‘Synergistic tomographic image reconstruction: part 2’.
} +} + From 30cd0e0b60a43a65b1e4bd889ce2acd63174ebad Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Tue, 9 Nov 2021 14:10:33 +0000 Subject: [PATCH 41/88] fix PDHG exception, f, g, operator are required --- Wrappers/Python/test/test_algorithms.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Wrappers/Python/test/test_algorithms.py b/Wrappers/Python/test/test_algorithms.py index eb9fcda891..b28a38ac95 100755 --- a/Wrappers/Python/test/test_algorithms.py +++ b/Wrappers/Python/test/test_algorithms.py @@ -561,7 +561,7 @@ def test_exception_initial_LADMM(self): def test_exception_initial_PDHG(self): initial = 1 try: - algo = PDHG(initial = initial, x_init=initial) + algo = PDHG(initial = initial, x_init=initial, f=None, g=None, operator=None) assert False except ValueError as ve: assert True From 119a74722fbd524018afedb8cffc091fe055d006 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Tue, 9 Nov 2021 14:10:54 +0000 Subject: [PATCH 42/88] update docs --- Wrappers/Python/cil/optimisation/algorithms/PDHG.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py index 51478b4443..faddf48cc5 100644 --- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py @@ -74,9 +74,8 @@ class PDHG(Algorithm): >>> pdhg = PDHG(f = f, g = g, operator = operator, gamma_g = 2) - For TV tomography reconstruction, see `CIL-Demos `_. - - + For a TV tomography reconstruction example, see `CIL-Demos `_. + More examples can be found in :cite:`Jorgensen_et_al_2021`, :cite:`Papoutsellis_et_al_2021`. A first-order primal-dual algorithm for convex optimization problems with known saddle-point structure with applications in imaging. From 604de38f675099d3bb63d84b648af18f7cfe1e94 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Tue, 9 Nov 2021 14:45:03 +0000 Subject: [PATCH 43/88] split test for strongly convex cases --- Wrappers/Python/test/test_algorithms.py | 63 +++++++++++++++++++++---- 1 file changed, 55 insertions(+), 8 deletions(-) diff --git a/Wrappers/Python/test/test_algorithms.py b/Wrappers/Python/test/test_algorithms.py index b28a38ac95..089ba1a518 100755 --- a/Wrappers/Python/test/test_algorithms.py +++ b/Wrappers/Python/test/test_algorithms.py @@ -395,7 +395,7 @@ def setup(data, dnoise): print ("RMSE", rmse) self.assertLess(rmse, 2e-4) - def test_PDHG_strongly_convex(self): + def test_PDHG_step_sizes(self): ig = ImageGeometry(3,3) data = ig.allocate('random') @@ -404,26 +404,73 @@ def test_PDHG_strongly_convex(self): g = L2NormSquared() operator = IdentityOperator(ig) + # sigma, tau no update. Operator norm is 1. 
sigma = 1.0
        tau = 1.0

        pdhg = PDHG(f=f, g=g, operator=operator, max_iteration=10)
        pdhg.run(verbose=0)
        self.assertEqual(pdhg.sigma, sigma)
-        self.assertEqual(pdhg.tau, sigma)
+        self.assertEqual(pdhg.tau, tau)

-        pdhg = PDHG(f=f, g=g, operator=operator, max_iteration=10, gamma_g=0.5)
-        pdhg.run(verbose=0)
+    def test_PDHG_strongly_convex_gamma_g(self):
+
+        ig = ImageGeometry(3,3)
+        data = ig.allocate('random')
+
+        f = L2NormSquared(b=data)
+        g = L2NormSquared()
+        operator = IdentityOperator(ig)
+
+        # sigma, tau
+        sigma = 1.0
+        tau = 1.0
+
+        pdhg = PDHG(f=f, g=g, operator=operator, sigma = sigma, tau=tau,
+                    max_iteration=5, gamma_g=0.5)
+        pdhg.run(1, verbose=0)
+        self.assertEqual(pdhg.theta, 1.0/ np.sqrt(1 + 2 * pdhg.gamma_g * tau))
+        self.assertEqual(pdhg.tau, tau * pdhg.theta)
+        self.assertEqual(pdhg.sigma, sigma / pdhg.theta)
+        pdhg.run(4, verbose=0)
        self.assertNotEqual(pdhg.sigma, sigma)
-        self.assertNotEqual(pdhg.tau, tau)
+        self.assertNotEqual(pdhg.tau, tau)

-        pdhg = PDHG(f=f, g=g, operator=operator, max_iteration=10, gamma_fconj=0.5)
-        pdhg.run(verbose=0)
+    def test_PDHG_strongly_convex_gamma_fconj(self):
+
+        ig = ImageGeometry(3,3)
+        data = ig.allocate('random')
+
+        f = L2NormSquared(b=data)
+        g = L2NormSquared()
+        operator = IdentityOperator(ig)
+
+        # sigma, tau
+        sigma = 1.0
+        tau = 1.0
+
+        pdhg = PDHG(f=f, g=g, operator=operator, sigma = sigma, tau=tau,
+                    max_iteration=5, gamma_fconj=0.5)
+        pdhg.run(1, verbose=0)
+        self.assertEqual(pdhg.theta, 1.0/ np.sqrt(1 + 2 * pdhg.gamma_fconj * sigma))
+        self.assertEqual(pdhg.tau, tau / pdhg.theta)
+        self.assertEqual(pdhg.sigma, sigma * pdhg.theta)
+        pdhg.run(4, verbose=0)
        self.assertNotEqual(pdhg.sigma, sigma)
        self.assertNotEqual(pdhg.tau, tau)

+    def test_PDHG_strongly_convex_both_fconj_or_g(self):
+
+        ig = ImageGeometry(3,3)
+        data = ig.allocate('random')
+
+        f = L2NormSquared(b=data)
+        g = L2NormSquared()
+        operator = IdentityOperator(ig)

        try:
-            pdhg = PDHG(f=f, g=g, operator=operator, max_iteration=10, gamma_g = 0.5, gamma_fconj=0.5)
+            pdhg = PDHG(f=f, g=g, operator=operator, max_iteration=10,
+                        gamma_g = 0.5, gamma_fconj=0.5)
            pdhg.run(verbose=0)
        except NotImplementedError as err:
            print(err)

From d90c455e9ea02fe4b408151f8c26e125872ec5aa Mon Sep 17 00:00:00 2001
From: epapoutsellis
Date: Tue, 9 Nov 2021 14:46:53 +0000
Subject: [PATCH 44/88] remove init docs

---
 Wrappers/Python/cil/optimisation/algorithms/PDHG.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
index faddf48cc5..5cd7d85134 100644
--- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
+++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
@@ -147,8 +147,7 @@ class PDHG(Algorithm):
     """

     def __init__(self, f, g, operator, tau=None, sigma=1.,initial=None, use_axpby=True, **kwargs):
-        """
-        Constructor method
+        """Constructor method
         """

From bca20719025ce50ef23ec979129f69993ada59f8 Mon Sep 17 00:00:00 2001
From: epapoutsellis
Date: Tue, 9 Nov 2021 14:48:18 +0000
Subject: [PATCH 45/88] remove init docs

---
 Wrappers/Python/cil/optimisation/algorithms/PDHG.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
index 5cd7d85134..f253e4f524 100644
--- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
+++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
@@ -149,8 +149,6 @@ class PDHG(Algorithm):

     def __init__(self, f, g, operator, tau=None,
sigma=1.,initial=None, use_axpby=True, **kwargs): """Constructor method """ - - super(PDHG, self).__init__(**kwargs) if kwargs.get('x_init', None) is not None: if initial is None: From 0026f2792b8c0c680031ad80e539028c84d12f63 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Tue, 9 Nov 2021 14:52:43 +0000 Subject: [PATCH 46/88] update docs --- Wrappers/Python/cil/optimisation/algorithms/PDHG.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py index f253e4f524..e203d5f842 100644 --- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py @@ -231,10 +231,7 @@ def get_output(self): def update(self): - r""" - - Performs a single iteration of the PDHG algorithm - + r""" Performs a single iteration of the PDHG algorithm .. math:: From 328c97bb224b862b632db05985040b10e616e529 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Tue, 9 Nov 2021 21:20:44 +0000 Subject: [PATCH 47/88] update PDHG docs --- .../cil/optimisation/algorithms/PDHG.py | 101 ++++++++++++------ docs/source/conf.py | 2 +- docs/source/optimisation.rst | 2 +- 3 files changed, 69 insertions(+), 36 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py index e203d5f842..de29fa67fb 100644 --- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py @@ -32,17 +32,17 @@ class PDHG(Algorithm): A convex function with a "simple" proximal. operator : LinearOperator A Linear Operator. - sigma : :obj:`float`, optional, default=1 + sigma : positive :obj:`float`, optional, default=None Step size for the dual problem. - tau : :obj:`float`, optional, default=None + tau : positive :obj:`float`, optional, default=None Step size for the primal problem. initial : DataContainer, optional, default=None Initial point for the PDHG algorithm. use_axbpy: :obj:`bool`, optional, default=True Computes a*x + b*y in C. - gamma_g : :obj:`float`, optional, default=None + gamma_g : positive :obj:`float`, optional, default=None Strongly convex constant if the function g is strongly convex. Allows primal acceleration of the PDHG algorithm. - gamma_fconj : :obj:`float`, optional, default=None + gamma_fconj : positive :obj:`float`, optional, default=None Strongly convex constant if the convex conjugate of f is strongly convex. Allows dual acceleration of the PDHG algorithm. @@ -115,7 +115,7 @@ class PDHG(Algorithm): .. math:: - \tau \sigma \|L\|^2 < 1 + \tau \sigma \|K\|^2 < 1 - By default, the step sizes :math:`\sigma` and :math:`\tau` are: @@ -123,19 +123,15 @@ class PDHG(Algorithm): \sigma = \frac{1}{\|K\|}, \tau = \frac{1}{\|K\|} - PDHG algorithm can be accelerated if the functions :math:`f^{*}` and/or :math:`g` are strongly convex. A function :math:`f` is strongly convex with constant :math:`\gamma>0` if + - PDHG algorithm can be accelerated if the functions :math:`f^{*}` and/or :math:`g` are strongly convex. A function :math:`f` is strongly convex with constant :math:`\gamma>0` if .. math:: - f(x) - \frac{\gamma}{2}\|x\|^{2} - - is convex. + f(x) - \frac{\gamma}{2}\|x\|^{2} \quad\mbox{ is convex. } - For instance the :math:`\frac{1}{2}\|x\|^{2}_{2}` is :math:`\gamma` strongly convex\ - for :math:`\gamma\in(-\infty,1]`. We say it is 1-strongly convex because it is the largest constant for which \ - :math:`f - \frac{1}{2}\|\cdot\|^{2}` is convex. 
+ - For instance the function :math:`\frac{1}{2}\|x\|^{2}_{2}` is :math:`\gamma` strongly convex for :math:`\gamma\in(-\infty,1]`. We say it is 1-strongly convex because it is the largest constant for which :math:`f - \frac{1}{2}\|\cdot\|^{2}` is convex. - The :math:`\|\cdot\|_{1}` norm is not strongly convex. For more information, see `Strongly Convex `_. + - The :math:`\|\cdot\|_{1}` norm is not strongly convex. For more information, see `Strongly Convex `_. References @@ -146,7 +142,7 @@ class PDHG(Algorithm): """ - def __init__(self, f, g, operator, tau=None, sigma=1.,initial=None, use_axpby=True, **kwargs): + def __init__(self, f, g, operator, tau=None, sigma=None,initial=None, use_axpby=True, **kwargs): """Constructor method """ super(PDHG, self).__init__(**kwargs) @@ -163,22 +159,22 @@ def __init__(self, f, g, operator, tau=None, sigma=1.,initial=None, use_axpby=Tr if f is not None and operator is not None and g is not None: self.set_up(f=f, g=g, operator=operator, tau=tau, sigma=sigma, initial=initial, **kwargs) - def set_up(self, f, g, operator, tau=None, sigma=1., initial=None, **kwargs): - """initialisation of the algorithm + def set_up(self, f, g, operator, tau=None, sigma=None, initial=None, **kwargs): + """Initialisation of the algorithm """ print("{} setting up".format(self.__class__.__name__, )) - # can't happen with default sigma - if sigma is None and tau is None: - raise ValueError('Need sigma*tau||K||^2<1') - # algorithmic parameters + # Triplet (f, g, K) self.f = f self.g = g self.operator = operator - normK = self.operator.norm() - self.tau = 1./normK - self.sigma = 1./normK + # step sizes + self.sigma = sigma + self.tau = tau + + # Default values for the pdhg stepsizes + self.pdhg_step_sizes() if initial is None: self.x_old = self.operator.domain_geometry().allocate(0) @@ -198,7 +194,7 @@ def set_up(self, f, g, operator, tau=None, sigma=1., initial=None, **kwargs): # Dual Acceleration : Convex conjugate of f is strongly convex self.gamma_fconj = kwargs.get('gamma_fconj', None) - + try: self.gamma_g = self.g.gamma except AttributeError: @@ -209,13 +205,12 @@ def set_up(self, f, g, operator, tau=None, sigma=1., initial=None, **kwargs): except AttributeError: pass - if self.gamma_g is not None: - warnings.warn("Primal Acceleration of PDHG: The function g is assumed to be strongly convex with parameter `gamma_g`. Need to be sure that gamma_g = {} is the correct strongly convex constant for g. ".format(self.gamma_g)) + if self.gamma_g is not None: + warnings.warn("Primal Acceleration of PDHG: The function g is assumed to be strongly convex with positive parameter `gamma_g`. Need to be sure that gamma_g = {} is the correct strongly convex constant for g. ".format(self.gamma_g)) if self.gamma_fconj is not None: - warnings.warn("Dual Acceleration of PDHG: The convex conjugate of function f is assumed to be strongly convex with parameter `gamma_fconj`. Need to be sure that gamma_fconj = {} is the correct strongly convex constant".format(self.gamma_fconj)) + warnings.warn("Dual Acceleration of PDHG: The convex conjugate of function f is assumed to be strongly convex with positive parameter `gamma_fconj`. Need to be sure that gamma_fconj = {} is the correct strongly convex constant".format(self.gamma_fconj)) - self.configured = True print("{} configured".format(self.__class__.__name__, )) @@ -239,7 +234,7 @@ def update(self): .. math:: - x^{n+1} = \mathrm{prox}_{\tau g}(x^{n} + \tau K^{*}y^{n+1}) + x^{n+1} = \mathrm{prox}_{\tau g}(x^{n} - \tau K^{*}y^{n+1}) .. 
math:: @@ -287,6 +282,44 @@ def update(self): # update the step sizes for special cases self.update_step_sizes() + def pdhg_step_sizes(self): + + r"""Default step sizes for the PDHG algorithm + + * If ``sigma`` is ``None`` and ``tau`` is ``None``: + + .. math:: + + \sigma = \frac{1}{\|K\|}, \tau = \frac{1}{\|K\|} + + * If ``tau`` is ``None``: + + .. math:: + + \tau = \frac{1}{\sigma\|K\|^{2}} + + * If ``sigma`` is ``None``: + + .. math:: + + \sigma = \frac{1}{\tau\|K\|^{2}} + + + """ + + # Compute operator norm + self.norm_op = self.operator.norm() + + if self.tau is None and self.sigma is None: + self.sigma = 1./self.norm_op + self.tau = 1./self.norm_op + elif self.tau is None: + self.tau = 1./self.sigma*self.norm_op**2 + elif self.sigma is None: + self.sigma = 1./self.tau*self.norm_op**2 + else: + pass + def update_step_sizes(self): @@ -325,9 +358,9 @@ def update_objective(self): """ - Evaluate the primal objective, the dual objective and the primal-dual gap + Evaluates the primal objective, the dual objective and the primal-dual gap. - Primal objective + Primal objective: .. math:: @@ -339,7 +372,7 @@ def update_objective(self): - g^{*}(-K^{*}y) - f^{*}(y) - Primal-Dual gap (or Duality gap) + Primal-Dual gap (or Duality gap): .. math:: @@ -348,8 +381,8 @@ def update_objective(self): Note ---- - - The primal objective is printed if `verbose=1`. - - All the objective are printed if `verbose=2`. + - The primal objective is printed if `verbose=1`, ``pdhg.run(verbose=1)``. + - All the objective are printed if `verbose=2`, ``pdhg.run(verbose=2)``. Computing these objective can be costly, so it is better to compute every some iterations. To do this, use ``update_objective_interval = #number``. diff --git a/docs/source/conf.py b/docs/source/conf.py index f69dc6760e..00ded75c8d 100755 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -48,7 +48,7 @@ 'sphinx.ext.coverage', 'sphinx.ext.mathjax', 'sphinx.ext.viewcode', - 'sphinxcontrib.bibtex', + 'sphinxcontrib.bibtex' ] # Add any paths that contain templates here, relative to this directory. diff --git a/docs/source/optimisation.rst b/docs/source/optimisation.rst index a15b6d7ad3..8a65106316 100644 --- a/docs/source/optimisation.rst +++ b/docs/source/optimisation.rst @@ -64,7 +64,7 @@ print to screen of the status of the optimisation. :members: :special-members: .. autoclass:: cil.optimisation.algorithms.PDHG - :members: update, update_objective, update_step_sizes + :members: update, pdhg_step_sizes, update_step_sizes, update_objective .. autoclass:: cil.optimisation.algorithms.LADMM :members: .. autoclass:: cil.optimisation.algorithms.SPDHG From 37609907b391652b708f14d5e0e312de530e6602 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Wed, 10 Nov 2021 08:50:03 +0000 Subject: [PATCH 48/88] remove method, is called from base class --- .../cil/optimisation/algorithms/PDHG.py | 32 ++++++++++--------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py index de29fa67fb..8edfbf857e 100644 --- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py @@ -77,6 +77,16 @@ class PDHG(Algorithm): For a TV tomography reconstruction example, see `CIL-Demos `_. More examples can be found in :cite:`Jorgensen_et_al_2021`, :cite:`Papoutsellis_et_al_2021`. + Note + ---- + + Currently, the strongly convex constants are passed as parameters of PDHG. 
+    In the future, these parameters will be properties of the corresponding functions.
+
+
+
+    Notes
+    -----

     A first-order primal-dual algorithm for convex optimization problems with known saddle-point structure with applications in imaging.

@@ -107,7 +117,6 @@ class PDHG(Algorithm):
     * gradient descent step for the primal problem and
     * an over-relaxation of the primal variable.
-
     Notes
     -----
@@ -222,12 +232,6 @@ def set_up(self, f, g, operator, tau=None, sigma=None, initial=None, **kwargs):
         self.configured = True
         print("{} configured".format(self.__class__.__name__, ))

-    def update_previous_solution(self):
-        # swap the pointers to current and previous solution
-        tmp = self.x_old
-        self.x_old = self.x
-        self.x = tmp
-
     def get_output(self):
         # returns the current solution
         return self.x_old
@@ -333,9 +335,13 @@ def update_step_sizes(self):
     Note
     ----
-    The case where both functions are strongly convex is not available at the moment.
-
+    The case where both functions are strongly convex is not available at the moment.
+
+
+    .. todo:: Implement acceleration of PDHG when both functions are strongly convex.
+
     """
+
         # Update sigma and tau based on the strong convexity of G
         if self.gamma_g is not None:
@@ -390,11 +396,7 @@ def update_objective(self):
     ----

     The primal-dual gap (or duality gap) measures how close the primal-dual pair (x,y) is to the primal-dual solution.
-    It is always non-negative and is used to monitor convergence of the PDHG algorithm.
-
-
-
-    For more information, see `Duality Gap `_.
+    It is always non-negative and is used to monitor convergence of the PDHG algorithm. For more information, see `Duality Gap `_.

     """

From c2400c531b9567f1ffe1259387a6cc761d9b8c84 Mon Sep 17 00:00:00 2001
From: epapoutsellis
Date: Thu, 11 Nov 2021 06:47:43 +0000
Subject: [PATCH 49/88] improve docs

---
 Wrappers/Python/cil/optimisation/algorithms/PDHG.py | 11 ++++++++++-
 docs/source/optimisation.rst                        |  4 ++--
 2 files changed, 12 insertions(+), 3 deletions(-)

diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
index 8edfbf857e..13978064ee 100644
--- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
+++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
@@ -45,7 +45,6 @@ class PDHG(Algorithm):
     gamma_fconj : positive :obj:`float`, optional, default=None
         Strongly convex constant if the convex conjugate of f is strongly convex. Allows dual acceleration of the PDHG algorithm.
-
     **kwargs:
         Keyword arguments used from the base class :class:`Algorithm`.
@@ -53,6 +53,13 @@ class PDHG(Algorithm):
         update_objective_interval : :obj:`int`, optional, default=1
             Evaluates objectives, e.g., primal/dual/primal-dual gap every ``update_objective_interval``.
+
+
+
+    :param f1: A convex function with a "simple" proximal method of its conjugate.
+    :type f1: Function
+
+
     Example
     -------
     Total variation denoising with PDHG.
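The restriction recorded in the ``.. todo::`` above is enforced at run time: requesting both accelerations raises ``NotImplementedError`` from ``update_step_sizes``. A minimal sketch mirroring the unit test added earlier (same geometry and functions; the constants are arbitrary):

>>> from cil.framework import ImageGeometry
>>> from cil.optimisation.functions import L2NormSquared
>>> from cil.optimisation.operators import IdentityOperator
>>> ig = ImageGeometry(3, 3)
>>> f = L2NormSquared(b = ig.allocate('random'))
>>> g = L2NormSquared()
>>> pdhg = PDHG(f = f, g = g, operator = IdentityOperator(ig),
...             max_iteration = 10, gamma_g = 0.5, gamma_fconj = 0.5)
>>> pdhg.run(verbose = 0)   # raises NotImplementedError on the first update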
@@ -182,6 +188,7 @@ def set_up(self, f, g, operator, tau=None, sigma=None, initial=None, **kwargs):
         self.tau = tau

         # Default values for the pdhg stepsizes
+        # self.set_step_sizes(sigma, tau)
         self.pdhg_step_sizes()

         if initial is None:
             self.x_old = self.operator.domain_geometry().allocate(0)
@@ -427,3 +434,5 @@ def dual_objective(self):
     def primal_dual_gap(self):
         return [x[2] for x in self.loss]

+
+
diff --git a/docs/source/optimisation.rst b/docs/source/optimisation.rst
index 8a65106316..95e85aa1d5 100644
--- a/docs/source/optimisation.rst
+++ b/docs/source/optimisation.rst
@@ -22,8 +22,7 @@ Gradient Descent (GD), Conjugate Gradient Least Squares (CGLS),
 Simultaneous Iterative Reconstruction Technique (SIRT), Primal
 Dual Hybrid Gradient (PDHG) and Fast Iterative Shrinkage Thresholding Algorithm (FISTA).

-An algorithm is designed for a
-particular generic optimisation problem accepts and number of
+An algorithm is designed for a particular generic optimisation problem and accepts a number of
 :code:`Function`s and/or :code:`Operator`s as input to define a specific instance of
 the generic optimisation problem to be solved.
 They are iterable objects which can be run in a for loop.
@@ -65,6 +64,7 @@ print to screen of the status of the optimisation.
     :special-members:
 .. autoclass:: cil.optimisation.algorithms.PDHG
    :members: update, pdhg_step_sizes, update_step_sizes, update_objective
+   :member-order: bysource
 .. autoclass:: cil.optimisation.algorithms.LADMM
    :members:
 .. autoclass:: cil.optimisation.algorithms.SPDHG

From c5568bf3757b66822a1773d2d3dfeff590d45339 Mon Sep 17 00:00:00 2001
From: epapoutsellis
Date: Fri, 12 Nov 2021 13:13:26 +0000
Subject: [PATCH 50/88] stepsizes as properties

---
 .../cil/optimisation/algorithms/PDHG.py | 33 ++++++++++---------
 1 file changed, 18 insertions(+), 15 deletions(-)

diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
index 13978064ee..a364e165fe 100644
--- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
+++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
@@ -169,10 +169,19 @@ def __init__(self, f, g, operator, tau=None, sigma=None,initial=None, use_axpby=
             raise ValueError('{} received both initial and the deprecated x_init parameter.
It is not clear which one we should use.'\ .format(self.__class__.__name__)) self._use_axpby = use_axpby + self._tau = None + self._sigma = None if f is not None and operator is not None and g is not None: self.set_up(f=f, g=g, operator=operator, tau=tau, sigma=sigma, initial=initial, **kwargs) + @property + def tau(self): + return self._tau + @property + def sigma(self): + return self._sigma + def set_up(self, f, g, operator, tau=None, sigma=None, initial=None, **kwargs): """Initialisation of the algorithm """ @@ -183,13 +192,8 @@ def set_up(self, f, g, operator, tau=None, sigma=None, initial=None, **kwargs): self.g = g self.operator = operator - # step sizes - self.sigma = sigma - self.tau = tau - # Default values for the pdhg stepsizes - # self.set_step_sizes(sigma, tau) - self.pdhg_step_sizes() + self.set_step_sizes(sigma, tau) if initial is None: self.x_old = self.operator.domain_geometry().allocate(0) @@ -291,7 +295,7 @@ def update(self): # update the step sizes for special cases self.update_step_sizes() - def pdhg_step_sizes(self): + def set_step_sizes(self, sigma=None, tau=None): r"""Default step sizes for the PDHG algorithm @@ -319,13 +323,13 @@ def pdhg_step_sizes(self): # Compute operator norm self.norm_op = self.operator.norm() - if self.tau is None and self.sigma is None: - self.sigma = 1./self.norm_op - self.tau = 1./self.norm_op - elif self.tau is None: - self.tau = 1./self.sigma*self.norm_op**2 - elif self.sigma is None: - self.sigma = 1./self.tau*self.norm_op**2 + if tau is None and sigma is None: + self._sigma = 1./self.norm_op + self._tau = 1./self.norm_op + elif tau is None: + self._tau = 1./(sigma*self.norm_op**2) + elif sigma is None: + self._sigma = 1./(tau*self.norm_op**2) else: pass @@ -435,4 +439,3 @@ def primal_dual_gap(self): return [x[2] for x in self.loss] - From 25a803a91c8ca2dc155c155bf3a83bb249bc2fbd Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Fri, 12 Nov 2021 14:02:53 +0000 Subject: [PATCH 51/88] improve dovs --- .../cil/optimisation/algorithms/PDHG.py | 50 +++++++------------ 1 file changed, 18 insertions(+), 32 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py index a364e165fe..26363eafd1 100644 --- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py @@ -54,12 +54,6 @@ class PDHG(Algorithm): Evaluates objectives, e.g., primal/dual/primal-dual gap every ``update_objective_interval``. - - - :param f1: A convex function with a "simple" proximal method of its conjugate. - :type f1: Function - - Example ------- Total variation denoising with with PDHG. @@ -89,7 +83,6 @@ class PDHG(Algorithm): Currently, the strongly convex constants are passed as parameters of PDHG. In the future, these parameters will be properties of the corresponding functions. - Notes ----- @@ -134,10 +127,25 @@ class PDHG(Algorithm): - By default, the step sizes :math:`\sigma` and :math:`\tau` are: - .. math:: + * If ``sigma`` is ``None`` and ``tau`` is ``None``: + .. math:: + \sigma = \frac{1}{\|K\|}, \tau = \frac{1}{\|K\|} + * If ``tau`` is ``None``: + + .. math:: + + \tau = \frac{1}{\sigma\|K\|^{2}} + + * If ``sigma`` is ``None``: + + .. math:: + + \sigma = \frac{1}{\tau\|K\|^{2}} + + - PDHG algorithm can be accelerated if the functions :math:`f^{*}` and/or :math:`g` are strongly convex. A function :math:`f` is strongly convex with constant :math:`\gamma>0` if .. 
math:: @@ -256,7 +264,6 @@ def update(self): In the case of primal/dual acceleration using the strongly convexity property of function :math:`f^{*}` or :math:`g` \ the stepsize :math:`\sigma` and :math:`\tau` are updated using the :meth:`update_step_sizes` method. - """ #calculate x-bar and store in self.x_tmp @@ -297,29 +304,8 @@ def update(self): def set_step_sizes(self, sigma=None, tau=None): - r"""Default step sizes for the PDHG algorithm - - * If ``sigma`` is ``None`` and ``tau`` is ``None``: - - .. math:: - - \sigma = \frac{1}{\|K\|}, \tau = \frac{1}{\|K\|} - - * If ``tau`` is ``None``: - - .. math:: - - \tau = \frac{1}{\sigma\|K\|^{2}} - - * If ``sigma`` is ``None``: - - .. math:: - - \sigma = \frac{1}{\tau\|K\|^{2}} - - - """ - + """Default step sizes for the PDHG algorithm""" + # Compute operator norm self.norm_op = self.operator.norm() From a71abb01ffe69b25a4574ac15402d2138bed85bf Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Mon, 15 Nov 2021 08:16:47 +0000 Subject: [PATCH 52/88] add properties --- .../cil/optimisation/algorithms/PDHG.py | 106 +++++++++++++++--- 1 file changed, 92 insertions(+), 14 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py index 26363eafd1..82ac1bae4f 100644 --- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py @@ -18,6 +18,7 @@ from cil.optimisation.algorithms import Algorithm import warnings import numpy as np +from numpy.core.numeric import identity class PDHG(Algorithm): @@ -179,16 +180,17 @@ def __init__(self, f, g, operator, tau=None, sigma=None,initial=None, use_axpby= self._use_axpby = use_axpby self._tau = None self._sigma = None + # Default values for the pdhg stepsizes if f is not None and operator is not None and g is not None: self.set_up(f=f, g=g, operator=operator, tau=tau, sigma=sigma, initial=initial, **kwargs) @property def tau(self): - return self._tau + return self._tau @property def sigma(self): - return self._sigma + return self._sigma def set_up(self, f, g, operator, tau=None, sigma=None, initial=None, **kwargs): """Initialisation of the algorithm @@ -200,9 +202,9 @@ def set_up(self, f, g, operator, tau=None, sigma=None, initial=None, **kwargs): self.g = g self.operator = operator - # Default values for the pdhg stepsizes - self.set_step_sizes(sigma, tau) - + #Default step sizes + self.set_step_sizes(sigma=sigma, tau=tau) + if initial is None: self.x_old = self.operator.domain_geometry().allocate(0) else: @@ -313,13 +315,13 @@ def set_step_sizes(self, sigma=None, tau=None): self._sigma = 1./self.norm_op self._tau = 1./self.norm_op elif tau is None: - self._tau = 1./(sigma*self.norm_op**2) + self._tau = 1./(self.sigma*self.norm_op**2) elif sigma is None: - self._sigma = 1./(tau*self.norm_op**2) + self._sigma = 1./(self.tau*self.norm_op**2) else: - pass + self._sigma = sigma + self._tau = tau - def update_step_sizes(self): r""" @@ -339,19 +341,18 @@ def update_step_sizes(self): """ - # Update sigma and tau based on the strong convexity of G if self.gamma_g is not None: self.theta = 1.0/ np.sqrt(1 + 2 * self.gamma_g * self.tau) - self.tau *= self.theta - self.sigma /= self.theta + self._tau *= self.theta + self._sigma /= self.theta # Update sigma and tau based on the strong convexity of F # Following operations are reversed due to symmetry, sigma --> tau, tau -->sigma if self.gamma_fconj is not None: self.theta = 1.0 / np.sqrt(1 + 2 * self.gamma_fconj * self.sigma) - self.sigma *= 
self.theta - self.tau /= self.theta + self._sigma *= self.theta + self._tau /= self.theta if self.gamma_g is not None and self.gamma_fconj is not None: raise NotImplementedError("This case is not implemented") @@ -425,3 +426,80 @@ def primal_dual_gap(self): return [x[2] for x in self.loss] + + + +if __name__ == "__main__": + + from cil.optimisation.functions import L2NormSquared, MixedL21Norm, L1Norm, KullbackLeibler + from cil.optimisation.operators import IdentityOperator + from cil.framework import ImageGeometry + from cil.utilities import dataexample + from cil.utilities import noise as applynoise + from cil.optimisation.operators import GradientOperator + + data = dataexample.PEPPERS.get(size=(256,256)) + ig = data.geometry + ag = ig + + which_noise = 0 + # Create noisy data. + noises = ['gaussian', 'poisson', 's&p'] + dnoise = noises[which_noise] + + def setup(data, dnoise): + if dnoise == 's&p': + n1 = applynoise.saltnpepper(data, salt_vs_pepper = 0.9, amount=0.2, seed=10) + elif dnoise == 'poisson': + scale = 5 + n1 = applynoise.poisson( data.as_array()/scale, seed = 10)*scale + elif dnoise == 'gaussian': + n1 = applynoise.gaussian(data.as_array(), seed = 10) + else: + raise ValueError('Unsupported Noise ', noise) + noisy_data = ig.allocate() + noisy_data.fill(n1) + + # Regularisation Parameter depending on the noise distribution + if dnoise == 's&p': + alpha = 0.8 + elif dnoise == 'poisson': + alpha = 1 + elif dnoise == 'gaussian': + alpha = .3 + # fidelity + if dnoise == 's&p': + g = L1Norm(b=noisy_data) + elif dnoise == 'poisson': + g = KullbackLeibler(b=noisy_data) + elif dnoise == 'gaussian': + g = 0.5 * L2NormSquared(b=noisy_data) + return noisy_data, alpha, g + + noisy_data, alpha, g = setup(data, dnoise) + operator = GradientOperator(ig, correlation=GradientOperator.CORRELATION_SPACE) + + f1 = alpha * MixedL21Norm() + + + + # Compute operator Norm + normK = operator.norm() + + # Primal & dual stepsizes + sigma = 1. 
+ tau = 1/(sigma*normK**2) + + # Setup and run the PDHG algorithm + pdhg1 = PDHG(f=f1,g=g,operator=operator, sigma=sigma, tau=tau) + pdhg1.max_iteration = 2000 + pdhg1.update_objective_interval = 200 + pdhg1.run(1000, verbose=0) + print(pdhg1.sigma) + print(pdhg1.tau) + + rmse = (pdhg1.get_output() - data).norm() / data.as_array().size + print(rmse) + # if debug_print: + # print ("RMSE", rmse) + # self.assertLess(rmse, 2e-4) \ No newline at end of file From 8ba4fbc2317a4afaa5c53735a73dffd8f85f23bf Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Mon, 15 Nov 2021 09:28:12 +0000 Subject: [PATCH 53/88] add properties test failed --- .../cil/optimisation/algorithms/PDHG.py | 24 ++++++------------- 1 file changed, 7 insertions(+), 17 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py index 82ac1bae4f..7bb1576ed7 100644 --- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py @@ -179,8 +179,7 @@ def __init__(self, f, g, operator, tau=None, sigma=None,initial=None, use_axpby= .format(self.__class__.__name__)) self._use_axpby = use_axpby self._tau = None - self._sigma = None - # Default values for the pdhg stepsizes + self._sigma = None if f is not None and operator is not None and g is not None: self.set_up(f=f, g=g, operator=operator, tau=tau, sigma=sigma, initial=initial, **kwargs) @@ -223,17 +222,7 @@ def set_up(self, f, g, operator, tau=None, sigma=None, initial=None, **kwargs): # Dual Acceleration : Convex conjugate of f is strongly convex self.gamma_fconj = kwargs.get('gamma_fconj', None) - - try: - self.gamma_g = self.g.gamma - except AttributeError: - pass - - try: - self.gamma_fconj = self.f.conjugate.gamma - except AttributeError: - pass - + if self.gamma_g is not None: warnings.warn("Primal Acceleration of PDHG: The function g is assumed to be strongly convex with positive parameter `gamma_g`. Need to be sure that gamma_g = {} is the correct strongly convex constant for g. 
".format(self.gamma_g)) @@ -304,6 +293,7 @@ def update(self): # update the step sizes for special cases self.update_step_sizes() + def set_step_sizes(self, sigma=None, tau=None): """Default step sizes for the PDHG algorithm""" @@ -344,15 +334,15 @@ def update_step_sizes(self): # Update sigma and tau based on the strong convexity of G if self.gamma_g is not None: self.theta = 1.0/ np.sqrt(1 + 2 * self.gamma_g * self.tau) - self._tau *= self.theta - self._sigma /= self.theta + self.tau *= self.theta + self.sigma /= self.theta # Update sigma and tau based on the strong convexity of F # Following operations are reversed due to symmetry, sigma --> tau, tau -->sigma if self.gamma_fconj is not None: self.theta = 1.0 / np.sqrt(1 + 2 * self.gamma_fconj * self.sigma) - self._sigma *= self.theta - self._tau /= self.theta + self.sigma *= self.theta + self.tau /= self.theta if self.gamma_g is not None and self.gamma_fconj is not None: raise NotImplementedError("This case is not implemented") From 61f789893dacad259e0409312bc7ccf35e305218 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Mon, 15 Nov 2021 10:09:42 +0000 Subject: [PATCH 54/88] add back update_prev_sol --- .../Python/cil/optimisation/algorithms/PDHG.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py index 7bb1576ed7..fc10243f7b 100644 --- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py @@ -232,6 +232,12 @@ def set_up(self, f, g, operator, tau=None, sigma=None, initial=None, **kwargs): self.configured = True print("{} configured".format(self.__class__.__name__, )) + def update_previous_solution(self): + # swap the pointers to current and previous solution + tmp = self.x_old + self.x_old = self.x + self.x = tmp + def get_output(self): # returns the current solution return self.x_old @@ -287,7 +293,7 @@ def update(self): self.g.proximal(self.x_tmp, self.tau, out=self.x) - #update_previous_solution() called after update by base class + # update_previous_solution() #called after update by base class #i.e current solution is now in x_old, previous solution is now in x # update the step sizes for special cases @@ -334,15 +340,15 @@ def update_step_sizes(self): # Update sigma and tau based on the strong convexity of G if self.gamma_g is not None: self.theta = 1.0/ np.sqrt(1 + 2 * self.gamma_g * self.tau) - self.tau *= self.theta - self.sigma /= self.theta + self._tau *= self.theta + self._sigma /= self.theta # Update sigma and tau based on the strong convexity of F # Following operations are reversed due to symmetry, sigma --> tau, tau -->sigma if self.gamma_fconj is not None: self.theta = 1.0 / np.sqrt(1 + 2 * self.gamma_fconj * self.sigma) - self.sigma *= self.theta - self.tau /= self.theta + self._sigma *= self.theta + self._tau /= self.theta if self.gamma_g is not None and self.gamma_fconj is not None: raise NotImplementedError("This case is not implemented") @@ -485,8 +491,6 @@ def setup(data, dnoise): pdhg1.max_iteration = 2000 pdhg1.update_objective_interval = 200 pdhg1.run(1000, verbose=0) - print(pdhg1.sigma) - print(pdhg1.tau) rmse = (pdhg1.get_output() - data).norm() / data.as_array().size print(rmse) From 16e990600b90bcc77a5327a2ddcf38076c11bd64 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Mon, 15 Nov 2021 10:14:10 +0000 Subject: [PATCH 55/88] remove main tests --- .../cil/optimisation/algorithms/PDHG.py | 79 
+------------------ 1 file changed, 1 insertion(+), 78 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py index fc10243f7b..271c7bec50 100644 --- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py @@ -419,81 +419,4 @@ def dual_objective(self): @property def primal_dual_gap(self): - return [x[2] for x in self.loss] - - - - - -if __name__ == "__main__": - - from cil.optimisation.functions import L2NormSquared, MixedL21Norm, L1Norm, KullbackLeibler - from cil.optimisation.operators import IdentityOperator - from cil.framework import ImageGeometry - from cil.utilities import dataexample - from cil.utilities import noise as applynoise - from cil.optimisation.operators import GradientOperator - - data = dataexample.PEPPERS.get(size=(256,256)) - ig = data.geometry - ag = ig - - which_noise = 0 - # Create noisy data. - noises = ['gaussian', 'poisson', 's&p'] - dnoise = noises[which_noise] - - def setup(data, dnoise): - if dnoise == 's&p': - n1 = applynoise.saltnpepper(data, salt_vs_pepper = 0.9, amount=0.2, seed=10) - elif dnoise == 'poisson': - scale = 5 - n1 = applynoise.poisson( data.as_array()/scale, seed = 10)*scale - elif dnoise == 'gaussian': - n1 = applynoise.gaussian(data.as_array(), seed = 10) - else: - raise ValueError('Unsupported Noise ', noise) - noisy_data = ig.allocate() - noisy_data.fill(n1) - - # Regularisation Parameter depending on the noise distribution - if dnoise == 's&p': - alpha = 0.8 - elif dnoise == 'poisson': - alpha = 1 - elif dnoise == 'gaussian': - alpha = .3 - # fidelity - if dnoise == 's&p': - g = L1Norm(b=noisy_data) - elif dnoise == 'poisson': - g = KullbackLeibler(b=noisy_data) - elif dnoise == 'gaussian': - g = 0.5 * L2NormSquared(b=noisy_data) - return noisy_data, alpha, g - - noisy_data, alpha, g = setup(data, dnoise) - operator = GradientOperator(ig, correlation=GradientOperator.CORRELATION_SPACE) - - f1 = alpha * MixedL21Norm() - - - - # Compute operator Norm - normK = operator.norm() - - # Primal & dual stepsizes - sigma = 1. - tau = 1/(sigma*normK**2) - - # Setup and run the PDHG algorithm - pdhg1 = PDHG(f=f1,g=g,operator=operator, sigma=sigma, tau=tau) - pdhg1.max_iteration = 2000 - pdhg1.update_objective_interval = 200 - pdhg1.run(1000, verbose=0) - - rmse = (pdhg1.get_output() - data).norm() / data.as_array().size - print(rmse) - # if debug_print: - # print ("RMSE", rmse) - # self.assertLess(rmse, 2e-4) \ No newline at end of file + return [x[2] for x in self.loss] \ No newline at end of file From 085e28da51bf767955085668a753e114a556ea04 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Mon, 15 Nov 2021 10:42:10 +0000 Subject: [PATCH 56/88] merge master --- docs/source/conf.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index 61f406edb3..b9c0022f2c 100755 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -192,10 +192,8 @@ # If true, `todo` and `todoList` produce output, else they produce nothing. 
todo_include_todos = True - +# Add bibtex files and style bibtex_bibfiles = ['refs.bib'] bibtex_encoding = 'latin' bibtex_reference_style = 'label' -bibtex_default_style = 'plain' - - +bibtex_default_style = 'plain' \ No newline at end of file From 051a6fff09b9c7efdf55b0b3ed6846eee458e2dc Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Mon, 15 Nov 2021 10:44:28 +0000 Subject: [PATCH 57/88] merge master --- docs/source/conf.py | 399 ++++++++++++++++++++++---------------------- 1 file changed, 199 insertions(+), 200 deletions(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index 879c6e4c3e..b9c0022f2c 100755 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -1,200 +1,199 @@ -# -*- coding: utf-8 -*- -# -# Configuration file for the Sphinx documentation builder. -# -# This file does only contain a selection of the most common options. For a -# full list see the documentation: -# http://www.sphinx-doc.org/en/master/config - -# -- Path setup -------------------------------------------------------------- - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# -import os -import sys -import re - -sys.path.insert(0, os.path.abspath('../../Wrappers/Python/')) - -from cil import version - -# -- Project information ----------------------------------------------------- - -project = 'CIL' - -copyright = '2017-2021 UKRI-STFC, University of Manchester' - -author = 'Edoardo Pasca' - -version = version.version -release = version - -# -- General configuration --------------------------------------------------- - -# If your documentation needs a minimal Sphinx version, state it here. -# -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - 'sphinx.ext.napoleon', - 'sphinx.ext.autodoc', - 'sphinx.ext.doctest', - 'sphinx.ext.todo', - 'sphinx.ext.coverage', - 'sphinx.ext.mathjax', - 'sphinx.ext.viewcode', - 'sphinxcontrib.bibtex' -] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -# -source_suffix = ['.rst', '.md'] -#source_suffix = '.rst' - -# The master toctree document. -master_doc = 'index' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = 'en' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -# This pattern also affects html_static_path and html_extra_path. -exclude_patterns = [] - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = None - - -# -- Options for HTML output ------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -# -html_theme = 'pydata_sphinx_theme' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. 
-# -html_sidebars = { - '**': ['globaltoc.html', 'sourcelink.html', 'searchbox.html'], - 'using/windows': ['windowssidebar.html', 'searchbox.html'], -} -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['docsstatic'] - -# Custom sidebar templates, must be a dictionary that maps document names -# to template names. -# -# The default sidebars (for documents that don't match any pattern) are -# defined by theme itself. Builtin themes are using these templates by -# default: ``['localtoc.html', 'relations.html', 'sourcelink.html', -# 'searchbox.html']``. -# -# html_sidebars = {} - - -# -- Options for HTMLHelp output --------------------------------------------- - -# Output file base name for HTML help builder. -htmlhelp_basename = 'CILdoc' - - -# -- Options for LaTeX output ------------------------------------------------ - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # - # 'papersize': 'letterpaper', - - # The font size ('10pt', '11pt' or '12pt'). - # - # 'pointsize': '10pt', - - # Additional stuff for the LaTeX preamble. - # - # 'preamble': '', - - # Latex figure (float) alignment - # - # 'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - (master_doc, 'CIL.tex', 'CIL Documentation', - 'Edoardo Pasca', 'manual'), -] - - -# -- Options for manual page output ------------------------------------------ - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, 'cil', 'CIL Documentation', - [author], 1) -] - - -# -- Options for Texinfo output ---------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - (master_doc, 'CIL', 'CIL Documentation', - author, 'CIL', 'One line description of project.', - 'Miscellaneous'), -] - - -# -- Options for Epub output ------------------------------------------------- - -# Bibliographic Dublin Core info. -epub_title = project - -# The unique identifier of the text. This can be a ISBN number -# or the project homepage. -# -# epub_identifier = '' - -# A unique identification for the text. -# -# epub_uid = '' - -# A list of files that should not be packed into the epub file. -epub_exclude_files = ['search.html'] - - -# -- Extension configuration ------------------------------------------------- - -# -- Options for todo extension ---------------------------------------------- - -# If true, `todo` and `todoList` produce output, else they produce nothing. -todo_include_todos = True - -# Add bibtex files and style -bibtex_bibfiles = ['refs.bib'] -bibtex_encoding = 'latin' -bibtex_reference_style = 'label' -bibtex_default_style = 'plain' - +# -*- coding: utf-8 -*- +# +# Configuration file for the Sphinx documentation builder. +# +# This file does only contain a selection of the most common options. 
For a +# full list see the documentation: +# http://www.sphinx-doc.org/en/master/config + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +import os +import sys +import re + +sys.path.insert(0, os.path.abspath('../../Wrappers/Python/')) + +from cil import version + +# -- Project information ----------------------------------------------------- + +project = 'CIL' + +copyright = '2017-2021 UKRI-STFC, University of Manchester' + +author = 'Edoardo Pasca' + +version = version.version +release = version + +# -- General configuration --------------------------------------------------- + +# If your documentation needs a minimal Sphinx version, state it here. +# +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.napoleon', + 'sphinx.ext.autodoc', + 'sphinx.ext.doctest', + 'sphinx.ext.todo', + 'sphinx.ext.coverage', + 'sphinx.ext.mathjax', + 'sphinx.ext.viewcode', + 'sphinxcontrib.bibtex' +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +source_suffix = ['.rst', '.md'] +#source_suffix = '.rst' + +# The master toctree document. +master_doc = 'index' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = 'en' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = [] + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = None + + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'pydata_sphinx_theme' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# +html_sidebars = { + '**': ['globaltoc.html', 'sourcelink.html', 'searchbox.html'], + 'using/windows': ['windowssidebar.html', 'searchbox.html'], +} +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['docsstatic'] + +# Custom sidebar templates, must be a dictionary that maps document names +# to template names. +# +# The default sidebars (for documents that don't match any pattern) are +# defined by theme itself. Builtin themes are using these templates by +# default: ``['localtoc.html', 'relations.html', 'sourcelink.html', +# 'searchbox.html']``. 
+# +# html_sidebars = {} + + +# -- Options for HTMLHelp output --------------------------------------------- + +# Output file base name for HTML help builder. +htmlhelp_basename = 'CILdoc' + + +# -- Options for LaTeX output ------------------------------------------------ + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + # + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', + + # Latex figure (float) alignment + # + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (master_doc, 'CIL.tex', 'CIL Documentation', + 'Edoardo Pasca', 'manual'), +] + + +# -- Options for manual page output ------------------------------------------ + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + (master_doc, 'cil', 'CIL Documentation', + [author], 1) +] + + +# -- Options for Texinfo output ---------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (master_doc, 'CIL', 'CIL Documentation', + author, 'CIL', 'One line description of project.', + 'Miscellaneous'), +] + + +# -- Options for Epub output ------------------------------------------------- + +# Bibliographic Dublin Core info. +epub_title = project + +# The unique identifier of the text. This can be a ISBN number +# or the project homepage. +# +# epub_identifier = '' + +# A unique identification for the text. +# +# epub_uid = '' + +# A list of files that should not be packed into the epub file. +epub_exclude_files = ['search.html'] + + +# -- Extension configuration ------------------------------------------------- + +# -- Options for todo extension ---------------------------------------------- + +# If true, `todo` and `todoList` produce output, else they produce nothing. 
+todo_include_todos = True + +# Add bibtex files and style +bibtex_bibfiles = ['refs.bib'] +bibtex_encoding = 'latin' +bibtex_reference_style = 'label' +bibtex_default_style = 'plain' \ No newline at end of file From 2635ca5f3435d1a0b2d99999cbdc2ddb5ad32613 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Mon, 15 Nov 2021 10:46:33 +0000 Subject: [PATCH 58/88] merge master --- Wrappers/Python/cil/optimisation/functions/BlockFunction.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/functions/BlockFunction.py b/Wrappers/Python/cil/optimisation/functions/BlockFunction.py index 78f9a02394..8caa7a83e0 100644 --- a/Wrappers/Python/cil/optimisation/functions/BlockFunction.py +++ b/Wrappers/Python/cil/optimisation/functions/BlockFunction.py @@ -57,7 +57,7 @@ def L(self): tmp_L = None break return tmp_L - + def __call__(self, x): r""" Returns the value of the BlockFunction :math:`F` @@ -204,4 +204,4 @@ def __rmul__(self, other): - \ No newline at end of file + From 7787418d5e4e2a953f426aef8c51a1f4a937b268 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Mon, 15 Nov 2021 10:48:08 +0000 Subject: [PATCH 59/88] merge master --- docs/source/refs.bib | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/refs.bib b/docs/source/refs.bib index af6c1b1b23..8860f2ce75 100644 --- a/docs/source/refs.bib +++ b/docs/source/refs.bib @@ -73,4 +73,4 @@ @article{Jorgensen_et_al_2021 eprint = {https://royalsocietypublishing.org/doi/pdf/10.1098/rsta.2020.0192} , abstract = { We present the Core Imaging Library (CIL), an open-source Python framework for tomographic imaging with particular emphasis on reconstruction of challenging datasets. Conventional filtered back-projection reconstruction tends to be insufficient for highly noisy, incomplete, non-standard or multi-channel data arising for example in dynamic, spectral and in situ tomography. CIL provides an extensive modular optimization framework for prototyping reconstruction methods including sparsity and total variation regularization, as well as tools for loading, preprocessing and visualizing tomographic data. The capabilities of CIL are demonstrated on a synchrotron example dataset and three challenging cases spanning golden-ratio neutron tomography, cone-beam X-ray laminography and positron emission tomography. This article is part of the theme issue β€˜Synergistic tomographic image reconstruction: part 2’. } -} \ No newline at end of file +} From 9f863cefe69ff20309a2686a06a0feccfe21d081 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Mon, 15 Nov 2021 11:35:22 +0000 Subject: [PATCH 60/88] improve docs --- Wrappers/Python/cil/optimisation/algorithms/PDHG.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py index 271c7bec50..36ceedba73 100644 --- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py @@ -224,10 +224,10 @@ def set_up(self, f, g, operator, tau=None, sigma=None, initial=None, **kwargs): self.gamma_fconj = kwargs.get('gamma_fconj', None) if self.gamma_g is not None: - warnings.warn("Primal Acceleration of PDHG: The function g is assumed to be strongly convex with positive parameter `gamma_g`. Need to be sure that gamma_g = {} is the correct strongly convex constant for g. 
".format(self.gamma_g)) + warnings.warn("Primal Acceleration of PDHG: The function g is assumed to be strongly convex with positive parameter `gamma_g`. You need to be sure that gamma_g = {} is the correct strongly convex constant for g. ".format(self.gamma_g)) if self.gamma_fconj is not None: - warnings.warn("Dual Acceleration of PDHG: The convex conjugate of function f is assumed to be strongly convex with positive parameter `gamma_fconj`. Need to be sure that gamma_fconj = {} is the correct strongly convex constant".format(self.gamma_fconj)) + warnings.warn("Dual Acceleration of PDHG: The convex conjugate of function f is assumed to be strongly convex with positive parameter `gamma_fconj`. You need to be sure that gamma_fconj = {} is the correct strongly convex constant".format(self.gamma_fconj)) self.configured = True print("{} configured".format(self.__class__.__name__, )) From 9a9ed187f725fc8aba03c8f3457e98e277f326c7 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Mon, 15 Nov 2021 12:07:52 +0000 Subject: [PATCH 61/88] revert Function module --- .../cil/optimisation/functions/Function.py | 60 +++++++++++++------ 1 file changed, 42 insertions(+), 18 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/functions/Function.py b/Wrappers/Python/cil/optimisation/functions/Function.py index a035d98899..e308adb764 100644 --- a/Wrappers/Python/cil/optimisation/functions/Function.py +++ b/Wrappers/Python/cil/optimisation/functions/Function.py @@ -15,6 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import warnings from numbers import Number import numpy as np @@ -24,18 +25,17 @@ class Function(object): """ Abstract class representing a function :param L: Lipschitz constant of the gradient of the function F(x), when it is differentiable. - :type L: number, positive, default None + :type L: number, positive, default None + :param domain: The domain of the function. - # TODO Add definition of Lipschitz - # Lipschitz of the gradient of the function; it is a positive real number, such that |f'(x) - f'(y)| <= L ||x-y||, assuming f: IG --> R + Lipschitz of the gradient of the function; it is a positive real number, such that |f'(x) - f'(y)| <= L ||x-y||, assuming f: IG --> R """ def __init__(self, L = None): - - # Lipschitz constant for the gradient of the Function - self._L = L + # overrides the type check to allow None as initial value + self._L = L def __call__(self,x): @@ -100,6 +100,8 @@ def proximal_conjugate(self, x, tau, out = None): if out is None: return val + + # Algebra for Function Class # Add functions @@ -151,10 +153,11 @@ def centered_at(self, center): @property def L(self): - '''Lipschitz constant of the gradient of function f.''' - + '''Lipschitz of the gradient of function f. 
+ + L is positive real number, such that |f'(x) - f'(y)| <= L ||x-y||, assuming f: IG --> R''' return self._L - + # return self._L @L.setter def L(self, value): '''Setter for Lipschitz constant''' @@ -162,7 +165,6 @@ def L(self, value): self._L = value else: raise TypeError('The Lipschitz constant is a real positive number') - class SumFunction(Function): @@ -250,7 +252,7 @@ def L(self): @L.setter def L(self, value): # call base class setter - super(ScaledFunction, self.__class__).L.fset(self, value ) + super(ScaledFunction, self.__class__).L.fset(self, value ) @property def scalar(self): @@ -312,6 +314,29 @@ def proximal(self, x, tau, out=None): return self.function.proximal(x, tau*self.scalar, out=out) + def proximal_conjugate(self, x, tau, out = None): + r"""This returns the proximal operator for the function at x, tau + """ + try: + tmp = x + x.divide(tau, out = tmp) + except TypeError: + tmp = x.divide(tau, dtype=np.float32) + + if out is None: + val = self.function.proximal(tmp, self.scalar/tau ) + else: + self.function.proximal(tmp, self.scalar/tau, out = out) + val = out + + if id(tmp) == id(x): + x.multiply(tau, out = x) + + val.axpby(-tau, 1.0, x, out=val) + + if out is None: + return val + class SumScalarFunction(SumFunction): """ SumScalarFunction represents the sum a function with a scalar. @@ -330,8 +355,7 @@ class SumScalarFunction(SumFunction): def __init__(self, function, constant): - super(SumScalarFunction, self).__init__(function, - ConstantFunction(constant)) + super(SumScalarFunction, self).__init__(function, ConstantFunction(constant)) self.constant = constant self.function = function @@ -471,8 +495,11 @@ class TranslateFunction(Function): """ def __init__(self, function, center): - - super(TranslateFunction, self).__init__(L = function.L) + try: + L = function.L + except NotImplementedError as nie: + L = None + super(TranslateFunction, self).__init__(L = L) self.function = function self.center = center @@ -558,6 +585,3 @@ def convex_conjugate(self, x): """ return self.function.convex_conjugate(x) + self.center.dot(x) - - - From 83319751cf1d9cf0528064ddb07367749f5bb8a0 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Mon, 15 Nov 2021 12:08:43 +0000 Subject: [PATCH 62/88] remove import --- Wrappers/Python/cil/optimisation/algorithms/PDHG.py | 1 - 1 file changed, 1 deletion(-) diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py index 36ceedba73..d26e11dc65 100644 --- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py @@ -18,7 +18,6 @@ from cil.optimisation.algorithms import Algorithm import warnings import numpy as np -from numpy.core.numeric import identity class PDHG(Algorithm): From 429c72ac3a8b7dedccdfee3051af08b181e7cc88 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Mon, 15 Nov 2021 12:10:09 +0000 Subject: [PATCH 63/88] remove space --- Wrappers/Python/cil/optimisation/algorithms/PDHG.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py index d26e11dc65..535cdfdab0 100644 --- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py @@ -235,7 +235,7 @@ def update_previous_solution(self): # swap the pointers to current and previous solution tmp = self.x_old self.x_old = self.x - self.x = tmp + self.x = tmp def get_output(self): # returns the current solution 
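A note on the buffer swap shown above: `update_previous_solution` exchanges the two references instead of copying array contents, so each PDHG iteration reuses the same pair of preallocated containers and the inner loop stays allocation-free. A minimal, self-contained sketch of the same idiom (plain NumPy stand-ins for the CIL containers; the class and variable names here are illustrative, not part of the patches):

import numpy as np

class TwoBufferLoop:
    def __init__(self, shape):
        # two persistent buffers; neither is reallocated while iterating
        self.x = np.zeros(shape)
        self.x_old = np.zeros(shape)

    def update_previous_solution(self):
        # swap the pointers to current and previous solution
        tmp = self.x_old
        self.x_old = self.x
        self.x = tmp

loop = TwoBufferLoop((2, 2))
loop.x += 1.0                     # pretend this is the newly computed iterate
loop.update_previous_solution()
assert loop.x_old.sum() == 4.0    # the new iterate is now held in "x_old"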
From 097087dc90d66ca47db980c42acb9ff9bde3b292 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Mon, 15 Nov 2021 12:11:09 +0000 Subject: [PATCH 64/88] fix comment --- Wrappers/Python/cil/optimisation/algorithms/PDHG.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py index 535cdfdab0..56a560fa6b 100644 --- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py @@ -292,7 +292,7 @@ def update(self): self.g.proximal(self.x_tmp, self.tau, out=self.x) - # update_previous_solution() #called after update by base class + # update_previous_solution() called after update by base class #i.e current solution is now in x_old, previous solution is now in x # update the step sizes for special cases From 89fb41e414303c7c0eed37518edc9bf8924b0aa7 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Mon, 15 Nov 2021 12:22:46 +0000 Subject: [PATCH 65/88] fix comment --- Wrappers/Python/cil/optimisation/algorithms/PDHG.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py index 56a560fa6b..cd8e816cba 100644 --- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py @@ -336,6 +336,9 @@ def update_step_sizes(self): """ + if self.gamma_g is not None and self.gamma_fconj is not None: + raise NotImplementedError("This case is not implemented.") + # Update sigma and tau based on the strong convexity of G if self.gamma_g is not None: self.theta = 1.0/ np.sqrt(1 + 2 * self.gamma_g * self.tau) @@ -348,11 +351,7 @@ def update_step_sizes(self): self.theta = 1.0 / np.sqrt(1 + 2 * self.gamma_fconj * self.sigma) self._sigma *= self.theta self._tau /= self.theta - - if self.gamma_g is not None and self.gamma_fconj is not None: - raise NotImplementedError("This case is not implemented") - - + def update_objective(self): """ From 9109b7eca622c037f7d7654eac0dff0e36b865c0 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Mon, 15 Nov 2021 20:30:23 +0000 Subject: [PATCH 66/88] update PDHG --- .../cil/optimisation/algorithms/PDHG.py | 77 +++++++++++++++---- 1 file changed, 61 insertions(+), 16 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py index cd8e816cba..9c49f1510d 100644 --- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py @@ -15,9 +15,11 @@ # See the License for the specific language governing permissions and # limitations under the License. +from cil.framework.framework import DataContainer from cil.optimisation.algorithms import Algorithm import warnings import numpy as np +from numbers import Number class PDHG(Algorithm): @@ -175,10 +177,20 @@ def __init__(self, f, g, operator, tau=None, sigma=None,initial=None, use_axpby= initial = kwargs.get('x_init', None) else: raise ValueError('{} received both initial and the deprecated x_init parameter. 
It is not clear which one we should use.'\ - .format(self.__class__.__name__)) + .format(self.__class__.__name__)) self._use_axpby = use_axpby + self._tau = None self._sigma = None + + # check for gamma_g, gamma_fconj, strongly convex constants + self._gamma_g = None + self._gamma_fconj = None + + self.set_gamma_g(kwargs.get('gamma_g', None)) + self.set_gamma_fconj(kwargs.get('gamma_fconj', None)) + if self.gamma_g is not None and self.gamma_fconj is not None: + raise ValueError("The adaptive update of the PDHG stepsizes in the case where both functions are strongly convex is not implemented at the moment.") if f is not None and operator is not None and g is not None: self.set_up(f=f, g=g, operator=operator, tau=tau, sigma=sigma, initial=initial, **kwargs) @@ -186,10 +198,33 @@ def __init__(self, f, g, operator, tau=None, sigma=None,initial=None, use_axpby= @property def tau(self): return self._tau + @property def sigma(self): - return self._sigma + return self._sigma + @property + def gamma_g(self): + return self._gamma_g + + @property + def gamma_fconj(self): + return self._gamma_fconj + + def set_gamma_g(self, value): + + if isinstance (value, Number): + if value <= 0: + raise ValueError("Strongly convex constant is a positive number, {} is passed for the strongly convex function g.".format(value)) + self._gamma_g = value + + def set_gamma_fconj(self, value): + + if isinstance (value, Number): + if value <= 0: + raise ValueError("Strongly convex constant is positive, {} is passed for the strongly convex conjugate function of f.".format(value)) + self._gamma_fconj = value + def set_up(self, f, g, operator, tau=None, sigma=None, initial=None, **kwargs): """Initialisation of the algorithm """ @@ -217,10 +252,10 @@ def set_up(self, f, g, operator, tau=None, sigma=None, initial=None, **kwargs): self.theta = kwargs.get('theta',1.0) # Primal Acceleration: Function g is strongly convex - self.gamma_g = kwargs.get('gamma_g', None) + # self.gamma_g = kwargs.get('gamma_g', None) # Dual Acceleration : Convex conjugate of f is strongly convex - self.gamma_fconj = kwargs.get('gamma_fconj', None) + # self.gamma_fconj = kwargs.get('gamma_fconj', None) if self.gamma_g is not None: warnings.warn("Primal Acceleration of PDHG: The function g is assumed to be strongly convex with positive parameter `gamma_g`. You need to be sure that gamma_g = {} is the correct strongly convex constant for g. ".format(self.gamma_g)) @@ -306,17 +341,28 @@ def set_step_sizes(self, sigma=None, tau=None): # Compute operator norm self.norm_op = self.operator.norm() + # check if tau/sigma are numbers positive + # check if the inequality is satisfied, raise a Warning if not + # check if there are array with the correct shape. 
+ + if isinstance(tau, Number) and tau<=0: + raise ValueError("The step-sizes of PDHG are positive, {} is passed".format(tau)) + + if isinstance(sigma, Number) and sigma<=0: + raise ValueError("The step-sizes of PDHG are positive, {} is passed".format(sigma)) + if tau is None and sigma is None: - self._sigma = 1./self.norm_op - self._tau = 1./self.norm_op - elif tau is None: - self._tau = 1./(self.sigma*self.norm_op**2) - elif sigma is None: - self._sigma = 1./(self.tau*self.norm_op**2) + self._sigma = 1.0/self.norm_op + self._tau = 1.0/self.norm_op + elif tau is None and sigma>0: + self._tau = 1./(sigma*self.norm_op**2) + elif sigma is None and tau>0: + self._sigma = 1./(tau*self.norm_op**2) else: self._sigma = sigma self._tau = tau - + + def update_step_sizes(self): r""" @@ -336,9 +382,6 @@ def update_step_sizes(self): """ - if self.gamma_g is not None and self.gamma_fconj is not None: - raise NotImplementedError("This case is not implemented.") - # Update sigma and tau based on the strong convexity of G if self.gamma_g is not None: self.theta = 1.0/ np.sqrt(1 + 2 * self.gamma_g * self.tau) @@ -347,7 +390,7 @@ def update_step_sizes(self): # Update sigma and tau based on the strong convexity of F # Following operations are reversed due to symmetry, sigma --> tau, tau -->sigma - if self.gamma_fconj is not None: + if self.gamma_fconj is not None: self.theta = 1.0 / np.sqrt(1 + 2 * self.gamma_fconj * self.sigma) self._sigma *= self.theta self._tau /= self.theta @@ -417,4 +460,6 @@ def dual_objective(self): @property def primal_dual_gap(self): - return [x[2] for x in self.loss] \ No newline at end of file + return [x[2] for x in self.loss] + + From 482bebeb1ddec9028ee866a96a1f746e3b0d3d02 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Mon, 15 Nov 2021 20:48:58 +0000 Subject: [PATCH 67/88] udpate test --- .../cil/optimisation/algorithms/PDHG.py | 20 +++++-------------- Wrappers/Python/test/test_algorithms.py | 2 +- 2 files changed, 6 insertions(+), 16 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py index 9c49f1510d..fb327e6107 100644 --- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py @@ -15,7 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from cil.framework.framework import DataContainer +from cil.framework import DataContainer, BlockDataContainer from cil.optimisation.algorithms import Algorithm import warnings import numpy as np @@ -127,7 +127,7 @@ class PDHG(Algorithm): \tau \sigma \|K\|^2 < 1 - - By default, the step sizes :math:`\sigma` and :math:`\tau` are: + - By default, the step sizes :math:`\sigma` and :math:`\tau` are positive scalars and defined as below: * If ``sigma`` is ``None`` and ``tau`` is ``None``: @@ -145,8 +145,8 @@ class PDHG(Algorithm): .. math:: - \sigma = \frac{1}{\tau\|K\|^{2}} - + \sigma = \frac{1}{\tau\|K\|^{2}} + - PDHG algorithm can be accelerated if the functions :math:`f^{*}` and/or :math:`g` are strongly convex. 
A function :math:`f` is strongly convex with constant :math:`\gamma>0` if @@ -250,12 +250,6 @@ def set_up(self, f, g, operator, tau=None, sigma=None, initial=None, **kwargs): # relaxation parameter, default value is 1.0 self.theta = kwargs.get('theta',1.0) - - # Primal Acceleration: Function g is strongly convex - # self.gamma_g = kwargs.get('gamma_g', None) - - # Dual Acceleration : Convex conjugate of f is strongly convex - # self.gamma_fconj = kwargs.get('gamma_fconj', None) if self.gamma_g is not None: warnings.warn("Primal Acceleration of PDHG: The function g is assumed to be strongly convex with positive parameter `gamma_g`. You need to be sure that gamma_g = {} is the correct strongly convex constant for g. ".format(self.gamma_g)) @@ -341,15 +335,11 @@ def set_step_sizes(self, sigma=None, tau=None): # Compute operator norm self.norm_op = self.operator.norm() - # check if tau/sigma are numbers positive - # check if the inequality is satisfied, raise a Warning if not - # check if there are array with the correct shape. - if isinstance(tau, Number) and tau<=0: raise ValueError("The step-sizes of PDHG are positive, {} is passed".format(tau)) if isinstance(sigma, Number) and sigma<=0: - raise ValueError("The step-sizes of PDHG are positive, {} is passed".format(sigma)) + raise ValueError("The step-sizes of PDHG are positive, {} is passed".format(sigma)) if tau is None and sigma is None: self._sigma = 1.0/self.norm_op diff --git a/Wrappers/Python/test/test_algorithms.py b/Wrappers/Python/test/test_algorithms.py index 089ba1a518..e09c9a16e5 100755 --- a/Wrappers/Python/test/test_algorithms.py +++ b/Wrappers/Python/test/test_algorithms.py @@ -472,7 +472,7 @@ def test_PDHG_strongly_convex_both_fconj_or_g(self): pdhg = PDHG(f=f, g=g, operator=operator, max_iteration=10, gamma_g = 0.5, gamma_fconj=0.5) pdhg.run(verbose=0) - except NotImplementedError as err: + except ValueError as err: print(err) From 71babb8b69876299f9784fe2f88037f4e22dd6cc Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Tue, 16 Nov 2021 07:27:33 +0000 Subject: [PATCH 68/88] imporove docs --- .../cil/optimisation/algorithms/PDHG.py | 182 ++++++++++-------- 1 file changed, 98 insertions(+), 84 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py index fb327e6107..47fe15a0e8 100644 --- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py @@ -118,45 +118,122 @@ class PDHG(Algorithm): * gradient descent step for the primal problem and * an over-relaxation of the primal variable. + .. math:: + + y^{n+1} = \mathrm{prox}_{\sigma f^{*}}(y^{n} + \sigma K \bar{x}^{n}) + + .. math:: + + x^{n+1} = \mathrm{prox}_{\tau g}(x^{n} - \tau K^{*}y^{n+1}) + + .. math:: + + \bar{x}^{n+1} = x^{n+1} + \theta (x^{n+1} - x^{n}) + Notes ----- - - Convergence is guaranteed if the operator norm :math:`\|K\|`, \the dual step size :math:`\sigma` and the primal step size :math:`\tau`, satisfy the following inequality: + - Convergence is guaranteed if :math:`\theta` = 1.0, the operator norm :math:`\|K\|`, \the dual step size :math:`\sigma` and the primal step size :math:`\tau`, satisfy the following inequality: + + .. math:: + + \tau \sigma \|K\|^2 < 1 + + + - By default, the step sizes :math:`\sigma` and :math:`\tau` are positive scalars and defined as below: + + * If ``sigma`` is ``None`` and ``tau`` is ``None``: - .. math:: + .. 
math:: + + \sigma = \frac{1}{\|K\|}, \tau = \frac{1}{\|K\|} - \tau \sigma \|K\|^2 < 1 + * If ``tau`` is ``None``: + + .. math:: + + \tau = \frac{1}{\sigma\|K\|^{2}} + + * If ``sigma`` is ``None``: - - By default, the step sizes :math:`\sigma` and :math:`\tau` are positive scalars and defined as below: + .. math:: - * If ``sigma`` is ``None`` and ``tau`` is ``None``: + \sigma = \frac{1}{\tau\|K\|^{2}} - .. math:: + + - To monitor the convergence of the algorithm, we compute the primal/dual objectives and the primal-dual gap in :meth:`update_objective`.\ - \sigma = \frac{1}{\|K\|}, \tau = \frac{1}{\|K\|} + The primal objective is + + .. math:: + + f(Kx) + g(x) - * If ``tau`` is ``None``: + and the dual objective is - .. math:: + .. math:: + + - g^{*}(-K^{*}y) - f^{*}(y) - \tau = \frac{1}{\sigma\|K\|^{2}} + The primal-dual gap (or duality gap) is + + .. math:: - * If ``sigma`` is ``None``: + f(Kx) + g(x) + g^{*}(-K^{*}y) + f^{*}(y) + + and measures how close is the primal-dual pair (x,y) to the primal-dual solution. It is always non-negative and is used to monitor convergence of the PDHG algorithm. \ + For more information, see `Duality Gap `_. + - .. math:: + Note + ---- + + - The primal objective is printed if `verbose=1`, ``pdhg.run(verbose=1)``. + - All the objectives are printed if `verbose=2`, ``pdhg.run(verbose=2)``. + + Computing these objectives can be costly, so it is better to compute every some iterations. To do this, use ``update_objective_interval = #number``. + + + + + - PDHG algorithm can be accelerated if the functions :math:`f^{*}` and/or :math:`g` are strongly convex. In these cases, the step-sizes :math:`\sigma` and :math:`\tau` are updated using the :meth:`update_step_sizes` method. A function :math:`f` is strongly convex with constant :math:`\gamma>0` if + + .. math:: + + f(x) - \frac{\gamma}{2}\|x\|^{2} \quad\mbox{ is convex. } - \sigma = \frac{1}{\tau\|K\|^{2}} - + + * For instance the function :math:`\frac{1}{2}\|x\|^{2}_{2}` is :math:`\gamma` strongly convex for :math:`\gamma\in(-\infty,1]`. We say it is 1-strongly convex because it is the largest constant for which :math:`f - \frac{1}{2}\|\cdot\|^{2}` is convex. + + + * The :math:`\|\cdot\|_{1}` norm is not strongly convex. For more information, see `Strongly Convex `_. + + + * If :math:`g` is strongly convex with constant :math:`\gamma` then the step-sizes :math:`\sigma`, :math:`\tau` and :math:`\theta` are updated as: - - PDHG algorithm can be accelerated if the functions :math:`f^{*}` and/or :math:`g` are strongly convex. A function :math:`f` is strongly convex with constant :math:`\gamma>0` if - .. math:: + .. math:: + :nowrap: - f(x) - \frac{\gamma}{2}\|x\|^{2} \quad\mbox{ is convex. } + + \begin{aligned} + + \theta_{n} & = \frac{1}{\sqrt{1 + 2\gamma\tau_{n}}}\\ + \tau_{n+1} & = \theta_{n}\tau_{n}\\ + \sigma_{n+1} & = \frac{\sigma_{n}}{\theta_{n}} - - For instance the function :math:`\frac{1}{2}\|x\|^{2}_{2}` is :math:`\gamma` strongly convex for :math:`\gamma\in(-\infty,1]`. We say it is 1-strongly convex because it is the largest constant for which :math:`f - \frac{1}{2}\|\cdot\|^{2}` is convex. + \end{aligned} - - The :math:`\|\cdot\|_{1}` norm is not strongly convex. For more information, see `Strongly Convex `_. + * If :math:`f^{*}` is strongly convex, we swap :math:`\sigma` with :math:`\tau`. + + Note + ---- + + The case where both functions are strongly convex is not available at the moment. + + + .. todo:: Implement acceleration of PDHG when both functions are strongly convex. 
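A short numerical sketch of the update rule above (illustrative only, not text added by this patch; `gamma` stands for the strongly convex constant of g and the starting values are arbitrary):

import numpy as np

sigma, tau, gamma = 1.0, 1.0, 2.0          # illustrative starting values
for n in range(3):
    theta = 1.0 / np.sqrt(1 + 2 * gamma * tau)
    tau, sigma = theta * tau, sigma / theta
    print(n, theta, tau, sigma)
# theta < 1 at every step, so tau decreases and sigma increases while the
# product tau * sigma (and hence tau * sigma * ||K||**2) stays unchanged,
# which is why the convergence condition keeps holding under acceleration.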
+ References ---------- @@ -273,22 +350,6 @@ def get_output(self): def update(self): r""" Performs a single iteration of the PDHG algorithm - - .. math:: - - y^{n+1} = \mathrm{prox}_{\sigma f^{*}}(y^{n} + \sigma K \bar{x}^{n}) - - .. math:: - - x^{n+1} = \mathrm{prox}_{\tau g}(x^{n} - \tau K^{*}y^{n+1}) - - .. math:: - - \bar{x}^{n+1} = x^{n+1} + \theta (x^{n+1} - x^{n}) - - In the case of primal/dual acceleration using the strongly convexity property of function :math:`f^{*}` or :math:`g` \ - the stepsize :math:`\sigma` and :math:`\tau` are updated using the :meth:`update_step_sizes` method. - """ #calculate x-bar and store in self.x_tmp @@ -355,21 +416,7 @@ def set_step_sizes(self, sigma=None, tau=None): def update_step_sizes(self): - r""" - - Updates the step sizes :math:`\sigma` and :math:`\tau` and :math:`\theta` in the cases of primal or dual acceleration using the strongly convexity property. - - - :math:`g` is strongly convex with constant :math:`\gamma=` ``gamma_g``. - - :math:`f^{*}` is strongly convex with constant :math:`\gamma=` ``gamma_fconj``. - - Note - ---- - - The case where both functions are strongly convex is not available at the moment. - - - .. todo:: Implement acceleration of PDHG when both functions are strongly convex. - + r""" Updates step sizes in the cases of primal or dual acceleration using the strongly convexity property. The case where both functions are strongly convex is not available at the moment. """ # Update sigma and tau based on the strong convexity of G @@ -388,43 +435,10 @@ def update_step_sizes(self): def update_objective(self): """ - Evaluates the primal objective, the dual objective and the primal-dual gap. - - Primal objective: - - .. math:: - - f(Kx) + g(x) - - Dual objective: - - .. math:: - - - g^{*}(-K^{*}y) - f^{*}(y) - - Primal-Dual gap (or Duality gap): - - .. math:: - - f(Kx) + g(x) + g^{*}(-K^{*}y) + f^{*}(y) - - Note - ---- - - - The primal objective is printed if `verbose=1`, ``pdhg.run(verbose=1)``. - - All the objective are printed if `verbose=2`, ``pdhg.run(verbose=2)``. - - Computing these objective can be costly, so it is better to compute every some iterations. To do this, use ``update_objective_interval = #number``. - - Note - ---- - - The primal-dual gap (or duality gap) measures how close is the primal-dual pair (x,y) to the primal-dual solution. \ - It is always non-negative and is used to monitor convergence of the PDHG algorithm. For more information, see `Duality Gap `_. - """ + self.operator.direct(self.x_old, out=self.y_tmp) f_eval_p = self.f(self.y_tmp) g_eval_p = self.g(self.x_old) From 661bca029938edfcca86ab16aeee0e9b5746cb4e Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Tue, 16 Nov 2021 07:37:24 +0000 Subject: [PATCH 69/88] imporove docs --- Wrappers/Python/cil/optimisation/algorithms/PDHG.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py index 47fe15a0e8..2d2393dba2 100644 --- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py @@ -72,7 +72,7 @@ class PDHG(Algorithm): >>> pdhg.run(10) >>> solution = pdhg.solution - Primal acceleration can be used, since :math:`g` is strongly convex with parameter ``gamma_g = 2``. + Primal acceleration can also be used, since :math:`g` is strongly convex with parameter ``gamma_g = 2``. 
>>> pdhg = PDHG(f = f, g = g, operator = operator, gamma_g = 2)

@@ -194,8 +194,6 @@ class PDHG(Algorithm):
 Computing these objectives can be costly, so it is better to compute every some iterations. To do this, use ``update_objective_interval = #number``.
-
-
 - PDHG algorithm can be accelerated if the functions :math:`f^{*}` and/or :math:`g` are strongly convex. In these cases, the step-sizes :math:`\sigma` and :math:`\tau` are updated using the :meth:`update_step_sizes` method. A function :math:`f` is strongly convex with constant :math:`\gamma>0` if

 .. math::

 f(x) - \frac{\gamma}{2}\|x\|^{2} \quad\mbox{ is convex. }

From 6a9daf32132aec5a1e325342b271d10f10d7b11e Mon Sep 17 00:00:00 2001
From: epapoutsellis
Date: Tue, 16 Nov 2021 07:40:26 +0000
Subject: [PATCH 70/88] improve docs

---
 docs/source/optimisation.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/source/optimisation.rst b/docs/source/optimisation.rst
index 95e85aa1d5..f4f1872cd9 100644
--- a/docs/source/optimisation.rst
+++ b/docs/source/optimisation.rst
@@ -63,7 +63,7 @@ print to screen of the status of the optimisation.
 :members:
 :special-members:
 .. autoclass:: cil.optimisation.algorithms.PDHG
- :members: update, pdhg_step_sizes, update_step_sizes, update_objective
+ :members: update, set_step_sizes, update_step_sizes, update_objective
 :member-order: bysource
 .. autoclass:: cil.optimisation.algorithms.LADMM
 :members:

From f13796f20e7db59c0808765ba83f79587d4313f0 Mon Sep 17 00:00:00 2001
From: epapoutsellis
Date: Tue, 16 Nov 2021 07:42:50 +0000
Subject: [PATCH 71/88] improve docs

---
 Wrappers/Python/cil/optimisation/algorithms/PDHG.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
index 2d2393dba2..e8f7c32557 100644
--- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
+++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
@@ -389,7 +389,8 @@ def update(self):

 def set_step_sizes(self, sigma=None, tau=None):
- """Default step sizes for the PDHG algorithm"""
+ """Default step sizes for the PDHG algorithm
+ """

 # Compute operator norm
 self.norm_op = self.operator.norm()

From e0c2b027399702b5ea25632862bf1b1b6c6b8d48 Mon Sep 17 00:00:00 2001
From: epapoutsellis
Date: Tue, 16 Nov 2021 10:47:54 +0000
Subject: [PATCH 72/88] add checks for sigma, tau and strongly convex constant, supports step-sizes as scalar and array-like objects

---
 .../cil/optimisation/algorithms/PDHG.py | 139 ++++++++++++++++--
 1 file changed, 125 insertions(+), 14 deletions(-)

diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
index 3f493cab08..72911fe98f 100644
--- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
+++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
@@ -22,6 +22,7 @@
 from numbers import Number

+
 class PDHG(Algorithm):
 r"""Primal Dual Hybrid Gradient (PDHG) algorithm, see :cite:`CP2011`, :cite:`EZXC2010`.

 Parameters
 ----------
 f : Function
 A convex function with a "simple" proximal method of its conjugate.
 g : Function
 A convex function with a "simple" proximal.
 operator : LinearOperator
 A Linear Operator.
- sigma : positive :obj:`float`, optional, default=None
+ sigma : positive :obj:`float`, `np.ndarray`, `DataContainer`, `BlockDataContainer`, optional, default=None
 Step size for the dual problem.
- tau : positive :obj:`float`, optional, default=None
+ tau : positive :obj:`float`, `np.ndarray`, `DataContainer`, `BlockDataContainer`, optional, default=None
 Step size for the primal problem.
initial : DataContainer, optional, default=None Initial point for the PDHG algorithm. @@ -213,7 +214,6 @@ class PDHG(Algorithm): .. math:: :nowrap: - \begin{aligned} \theta_{n} & = \frac{1}{\sqrt{1 + 2\gamma\tau_{n}}}\\ @@ -267,8 +267,7 @@ def __init__(self, f, g, operator, tau=None, sigma=None,initial=None, use_axpby= if self.gamma_g is not None and self.gamma_fconj is not None: raise ValueError("The adaptive update of the PDHG stepsizes in the case where both functions are strongly convex is not implemented at the moment.") - if f is not None and operator is not None and g is not None: - self.set_up(f=f, g=g, operator=operator, tau=tau, sigma=sigma, initial=initial, **kwargs) + self.set_up(f=f, g=g, operator=operator, tau=tau, sigma=sigma, initial=initial, **kwargs) @property def tau(self): @@ -291,6 +290,9 @@ def set_gamma_g(self, value): if isinstance (value, Number): if value <= 0: raise ValueError("Strongly convex constant is a positive number, {} is passed for the strongly convex function g.".format(value)) + else: + if value is not None: + raise ValueError("Positive float is expected for the strongly convex constant of function g, {} is passed".format(value)) self._gamma_g = value def set_gamma_fconj(self, value): @@ -298,6 +300,9 @@ def set_gamma_fconj(self, value): if isinstance (value, Number): if value <= 0: raise ValueError("Strongly convex constant is positive, {} is passed for the strongly convex conjugate function of f.".format(value)) + else: + if value is not None: + raise ValueError("Positive float is expected for the strongly convex constant of the convex conjugate of function f, {} is passed".format(value)) self._gamma_fconj = value def set_up(self, f, g, operator, tau=None, sigma=None, initial=None, **kwargs): @@ -389,28 +394,48 @@ def update(self): def set_step_sizes(self, sigma=None, tau=None): - """Default step sizes for the PDHG algorithm + """Default scalar step sizes for the PDHG algorithm """ # Compute operator norm self.norm_op = self.operator.norm() - if isinstance(tau, Number) and tau<=0: - raise ValueError("The step-sizes of PDHG are positive, {} is passed".format(tau)) - - if isinstance(sigma, Number) and sigma<=0: - raise ValueError("The step-sizes of PDHG are positive, {} is passed".format(sigma)) - + if isinstance(tau, Number): + if tau<=0: + raise ValueError("The step-sizes of PDHG are positive, {} is passed".format(tau)) + elif isinstance(tau, (DataContainer, np.ndarray, BlockDataContainer)): + if tau.shape!= self.operator.domain_geometry().shape: + raise ValueError(" The shape of tau = {} is not the same as the shape of the domain_geometry = {}".format(tau.shape, self.operator.domain_geometry().shape)) + else: + raise ValueError( "Only scalar values and array-type objects are supported for the step-size tau , {} is passed".format(tau)) + + if isinstance(sigma, Number): + if sigma<=0: + raise ValueError("The step-sizes of PDHG are positive, {} is passed".format(sigma)) + elif isinstance(sigma, (DataContainer, np.ndarray, BlockDataContainer)): + if sigma.shape!= self.operator.range_geometry().shape: + raise ValueError(" The shape of sigma = {} is not the same as the shape of the range_geometry = {}".format(sigma.shape, self.operator.range_geometry().shape)) + else: + raise ValueError( "Only scalar values and array-type objects are supported for the step-size sigma , {} is passed".format(tau)) + if tau is None and sigma is None: self._sigma = 1.0/self.norm_op self._tau = 1.0/self.norm_op - elif tau is None and sigma>0: + elif tau is None: self._tau = 
1./(sigma*self.norm_op**2) - elif sigma is None and tau>0: + elif sigma is None: self._sigma = 1./(tau*self.norm_op**2) else: self._sigma = sigma self._tau = tau + try: + # convergence criterion for scalar step-sizes + condition = sigma * tau * self.norm_op**2 <=1 + if not condition: + raise Warning("Convergence criterion of PDHG for scalar step-sizes is not satisfied.") + except: + pass + def update_step_sizes(self): @@ -466,3 +491,89 @@ def primal_dual_gap(self): return [x[2] for x in self.loss] +if __name__ == "__main__": + + from cil.utilities import dataexample + from cil.utilities import noise as applynoise + from cil.framework import ImageGeometry + from cil.optimisation.operators import IdentityOperator, GradientOperator + from cil.optimisation.functions import L2NormSquared, MixedL21Norm + + data = dataexample.PEPPERS.get(size=(256,256)) + ig = data.geometry + ag = ig + + which_noise = 0 + # Create noisy data. + noises = ['gaussian', 'poisson', 's&p'] + dnoise = noises[which_noise] + + def setup(data, dnoise): + if dnoise == 's&p': + n1 = applynoise.saltnpepper(data, salt_vs_pepper = 0.9, amount=0.2, seed=10) + elif dnoise == 'poisson': + scale = 5 + n1 = applynoise.poisson( data.as_array()/scale, seed = 10)*scale + elif dnoise == 'gaussian': + n1 = applynoise.gaussian(data.as_array(), seed = 10) + else: + raise ValueError('Unsupported Noise ', noise) + noisy_data = ig.allocate() + noisy_data.fill(n1) + + # Regularisation Parameter depending on the noise distribution + if dnoise == 's&p': + alpha = 0.8 + elif dnoise == 'poisson': + alpha = 1 + elif dnoise == 'gaussian': + alpha = .3 + # fidelity + if dnoise == 's&p': + g = L1Norm(b=noisy_data) + elif dnoise == 'poisson': + g = KullbackLeibler(b=noisy_data) + elif dnoise == 'gaussian': + g = 0.5 * L2NormSquared(b=noisy_data) + return noisy_data, alpha, g + + noisy_data, alpha, g = setup(data, dnoise) + operator = GradientOperator(ig, correlation=GradientOperator.CORRELATION_SPACE) + + f1 = alpha * MixedL21Norm() + + # Compute operator Norm + normK = operator.norm() + + # Primal & dual stepsizes + sigma = operator.range_geometry().allocate(3) + # print(sigma.dtype) + tau = operator.domain_geometry().allocate(3) # + + # Setup and run the PDHG algorithm + pdhg1 = PDHG(f=f1,g=g,operator=operator, tau= tau , sigma=sigma, gamma_fconj = 1, use_axpby=False) + pdhg1.max_iteration = 2000 + pdhg1.update_objective_interval = 200 + pdhg1.run(1000, verbose=0) + + rmse = (pdhg1.get_output() - data).norm() / data.as_array().size + # if debug_print: + print ("RMSE", rmse) + # self.assertLess(rmse, 2e-4) + + + + +# from cil.framework import ImageGeometry +# from cil.optimisation.operators import IdentityOperator, GradientOperator +# from cil.optimisation.functions import L2NormSquared, MixedL21Norm +# ig = ImageGeometry(3,3) +# Id = IdentityOperator(ig) +# b = ig.allocate('random') +# G = GradientOperator(ig) +# pdhg = PDHG(f=MixedL21Norm(b=b), g=L2NormSquared(), operator=G, max_iteration=10, +# tau = ig.allocate(2), sigma = ig.allocate(4)) +# pdhg.run() +# print(pdhg.tau) + + From c8b4f4fcb04ae06eebf5035f4b96eccc55f48970 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Tue, 16 Nov 2021 10:48:11 +0000 Subject: [PATCH 73/88] add checks for sigma, tau and strongly convex constant, supports step-sizes as scalar and array-like objects --- .../cil/optimisation/algorithms/PDHG.py | 86 ------------------- 1 file changed, 86 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py 
index db8a1f7dad..a9afa458d2 100644 --- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py @@ -491,89 +491,3 @@ def primal_dual_gap(self): return [x[2] for x in self.loss] -if __name__ == "__main__": - - from cil.utilities import dataexample - from cil.utilities import noise as applynoise - from cil.framework import ImageGeometry - from cil.optimisation.operators import IdentityOperator, GradientOperator - from cil.optimisation.functions import L2NormSquared, MixedL21Norm - - data = dataexample.PEPPERS.get(size=(256,256)) - ig = data.geometry - ag = ig - - which_noise = 0 - # Create noisy data. - noises = ['gaussian', 'poisson', 's&p'] - dnoise = noises[which_noise] - - def setup(data, dnoise): - if dnoise == 's&p': - n1 = applynoise.saltnpepper(data, salt_vs_pepper = 0.9, amount=0.2, seed=10) - elif dnoise == 'poisson': - scale = 5 - n1 = applynoise.poisson( data.as_array()/scale, seed = 10)*scale - elif dnoise == 'gaussian': - n1 = applynoise.gaussian(data.as_array(), seed = 10) - else: - raise ValueError('Unsupported Noise ', noise) - noisy_data = ig.allocate() - noisy_data.fill(n1) - - # Regularisation Parameter depending on the noise distribution - if dnoise == 's&p': - alpha = 0.8 - elif dnoise == 'poisson': - alpha = 1 - elif dnoise == 'gaussian': - alpha = .3 - # fidelity - if dnoise == 's&p': - g = L1Norm(b=noisy_data) - elif dnoise == 'poisson': - g = KullbackLeibler(b=noisy_data) - elif dnoise == 'gaussian': - g = 0.5 * L2NormSquared(b=noisy_data) - return noisy_data, alpha, g - - noisy_data, alpha, g = setup(data, dnoise) - operator = GradientOperator(ig, correlation=GradientOperator.CORRELATION_SPACE) - - f1 = alpha * MixedL21Norm() - - # Compute operator Norm - normK = operator.norm() - - # Primal & dual stepsizes - sigma = operator.range_geometry().allocate(3) - # print(sigma.dtype) - tau = operator.domain_geometry().allocate(3) # - - # Setup and run the PDHG algorithm - pdhg1 = PDHG(f=f1,g=g,operator=operator, tau= tau , sigma=sigma, gamma_fconj = 1, use_axpby=False) - pdhg1.max_iteration = 2000 - pdhg1.update_objective_interval = 200 - pdhg1.run(1000, verbose=0) - - rmse = (pdhg1.get_output() - data).norm() / data.as_array().size - # if debug_print: - print ("RMSE", rmse) - # self.assertLess(rmse, 2e-4) - - - - -# from cil.framework import ImageGeometry -# from cil.optimisation.operators import IdentityOperator, GradientOperator -# from cil.optimisation.functions import L2NormSquared, MixedL21Norm -# ig = ImageGeometry(3,3) -# Id = IdentityOperator(ig) -# b = ig.allocate('random') -# G = GradientOperator(ig) -# pdhg = PDHG(f=MixedL21Norm(b=b), g=L2NormSquared(), operator=G, max_iteration=10, -# tau = ig.allocate(2), sigma = ig.allocate(4)) -# pdhg.run() -# print(pdhg.tau) - - From a9dde160cef586f2ec6ac0c531442d2488754ada Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Tue, 16 Nov 2021 11:20:06 +0000 Subject: [PATCH 74/88] add types for step-sizes in docs --- Wrappers/Python/cil/optimisation/algorithms/PDHG.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py index a9afa458d2..4c7d55cdf5 100644 --- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py @@ -35,9 +35,9 @@ class PDHG(Algorithm): A convex function with a "simple" proximal. operator : LinearOperator A Linear Operator. 
- sigma : positive :obj:`float`, `np.ndarray`, `DataContainer`, `BlockDataContainer`, optional, default=None + sigma : positive :obj:`float`, or `np.ndarray`, `DataContainer`, `BlockDataContainer`, optional, default=None Step size for the dual problem. - tau : positive :obj:`float`, `np.ndarray`, `DataContainer`, `BlockDataContainer`, optional, default=None + tau : positive :obj:`float`, or `np.ndarray`, `DataContainer`, `BlockDataContainer`, optional, default=None Step size for the primal problem. initial : DataContainer, optional, default=None Initial point for the PDHG algorithm. From 26d71cc55a9a387414d4c81ffe141ef9b605c0de Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Tue, 16 Nov 2021 11:28:42 +0000 Subject: [PATCH 75/88] add types for step-sizes in docs --- Wrappers/Python/cil/optimisation/algorithms/PDHG.py | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py index 4c7d55cdf5..3f493cab08 100644 --- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py @@ -406,18 +406,14 @@ def set_step_sizes(self, sigma=None, tau=None): elif isinstance(tau, (DataContainer, np.ndarray, BlockDataContainer)): if tau.shape!= self.operator.domain_geometry().shape: raise ValueError(" The shape of tau = {} is not the same as the shape of the domain_geometry = {}".format(tau.shape, self.operator.domain_geometry().shape)) - else: - raise ValueError( "Only scalar values and array-type objects are supported for the step-size tau , {} is passed".format(tau)) - + if isinstance(sigma, Number): if sigma<=0: raise ValueError("The step-sizes of PDHG are positive, {} is passed".format(sigma)) elif isinstance(sigma, (DataContainer, np.ndarray, BlockDataContainer)): if sigma.shape!= self.operator.range_geometry().shape: raise ValueError(" The shape of sigma = {} is not the same as the shape of the range_geometry = {}".format(sigma.shape, self.operator.range_geometry().shape)) - else: - raise ValueError( "Only scalar values and array-type objects are supported for the step-size sigma , {} is passed".format(tau)) - + if tau is None and sigma is None: self._sigma = 1.0/self.norm_op self._tau = 1.0/self.norm_op @@ -435,8 +431,6 @@ def set_step_sizes(self, sigma=None, tau=None): raise Warning("Convergence criterion of PDHG for scalar step-sizes is not satisfied.") except: pass - - def update_step_sizes(self): From 1ecaf69a0c86708b40ea007852df5b9d0c246b08 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Tue, 16 Nov 2021 14:25:46 +0000 Subject: [PATCH 76/88] fix checks in PDHG parameters --- .../cil/optimisation/algorithms/PDHG.py | 146 ++++++++++++++---- 1 file changed, 118 insertions(+), 28 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py index 3f493cab08..72911fe98f 100644 --- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py @@ -288,22 +288,26 @@ def gamma_fconj(self): def set_gamma_g(self, value): if isinstance (value, Number): - if value <= 0: + if value <= 0: raise ValueError("Strongly convex constant is a positive number, {} is passed for the strongly convex function g.".format(value)) - else: - if value is not None: - raise ValueError("Positive float is expected for the strongly convex constant of function g, {} is passed".format(value)) - self._gamma_g = value + self._gamma_g = 
value
+        elif value is None:
+            pass
+        else:
+            raise ValueError("Positive float is expected for the strongly convex constant of function g, {} is passed".format(value))
+
     def set_gamma_fconj(self, value):
 
         if isinstance (value, Number):
             if value <= 0:
-                raise ValueError("Strongly convex constant is positive, {} is passed for the strongly convex conjugate function of f.".format(value))
-        else:
-            if value is not None:
-                raise ValueError("Positive float is expected for the strongly convex constant of the convex conjugate of function f, {} is passed".format(value))
-        self._gamma_fconj = value
+                raise ValueError("Strongly convex constant is positive, {} is passed for the strongly convex conjugate function of f.".format(value))
+            self._gamma_fconj = value
+        elif value is None:
+            pass
+        else:
+            raise ValueError("Positive float is expected for the strongly convex constant of the convex conjugate of function f, {} is passed".format(value))
+
 
     def set_up(self, f, g, operator, tau=None, sigma=None, initial=None, **kwargs):
         """Initialisation of the algorithm
@@ -400,35 +404,44 @@ def set_step_sizes(self, sigma=None, tau=None):
         # Compute operator norm
         self.norm_op = self.operator.norm()
 
-        if isinstance(tau, Number):
-            if tau<=0:
-                raise ValueError("The step-sizes of PDHG are positive, {} is passed".format(tau))
-        elif isinstance(tau, (DataContainer, np.ndarray, BlockDataContainer)):
-            if tau.shape!= self.operator.domain_geometry().shape:
+        # Check acceptable type of the primal-dual step-sizes
+        if tau is not None:
+            if isinstance(tau, Number):
+                if tau<=0:
+                    raise ValueError("The step-sizes of PDHG are positive, {} is passed".format(tau))
+            elif tau.shape!= self.operator.domain_geometry().shape:
                 raise ValueError(" The shape of tau = {} is not the same as the shape of the domain_geometry = {}".format(tau.shape, self.operator.domain_geometry().shape))
-
-        if isinstance(sigma, Number):
-            if sigma<=0:
-                raise ValueError("The step-sizes of PDHG are positive, {} is passed".format(sigma))
-        elif isinstance(sigma, (DataContainer, np.ndarray, BlockDataContainer)):
-            if sigma.shape!= self.operator.range_geometry().shape:
-                raise ValueError(" The shape of sigma = {} is not the same as the shape of the range_geometry = {}".format(sigma.shape, self.operator.range_geometry().shape))
+
+        if sigma is not None:
+            if isinstance(sigma, Number):
+                if sigma<=0:
+                    raise ValueError("The step-sizes of PDHG are positive, {} is passed".format(sigma))
+            elif sigma.shape!= self.operator.range_geometry().shape:
+                raise ValueError(" The shape of sigma = {} is not the same as the shape of the range_geometry = {}".format(sigma.shape, self.operator.range_geometry().shape))
 
         if tau is None and sigma is None:
            self._sigma = 1.0/self.norm_op
            self._tau = 1.0/self.norm_op
-        elif tau is None:
-            self._tau = 1./(sigma*self.norm_op**2)
+        elif tau is None:
+            if isinstance(sigma, Number):
+                self._tau = 1./(sigma*self.norm_op**2)
+                self._sigma = sigma
+            else:
+                raise ValueError(" Sigma is not a positive float, {} is passed".format(sigma))
         elif sigma is None:
-            self._sigma = 1./(tau*self.norm_op**2)
+            if isinstance(tau, Number):
+                self._sigma = 1./(tau*self.norm_op**2)
+                self._tau = tau
+            else:
+                raise ValueError(" Tau is not a positive float, {} is passed".format(tau))
         else:
             self._sigma = sigma
             self._tau = tau
 
         try:
             # convergence criterion for scalar step-sizes
-            condition = sigma * tau * self.norm_op**2 <=1
-            if not condition:
-                raise Warning("Convergence criterion of PDHG for scalar step-sizes is not satisfied.")
+            condition = sigma * tau * 
self.norm_op**2 > 1 + if condition: + warnings.warn("Convergence criterion of PDHG for scalar step-sizes is not satisfied.") except: pass @@ -485,3 +498,80 @@ def primal_dual_gap(self): return [x[2] for x in self.loss] +if __name__ == "__main__": + + from cil.utilities import dataexample + from cil.utilities import noise as applynoise + from cil.optimisation.operators import IdentityOperator + from cil.optimisation.operators import GradientOperator, BlockOperator, FiniteDifferenceOperator + + from cil.optimisation.functions import LeastSquares, ZeroFunction, \ + L2NormSquared, OperatorCompositionFunction + from cil.optimisation.functions import MixedL21Norm, BlockFunction, L1Norm, KullbackLeibler + from cil.optimisation.functions import IndicatorBox + + print ("PDHG Denoising with 3 noises") + # adapted from demo PDHG_TV_Color_Denoising.py in CIL-Demos repository + + data = dataexample.PEPPERS.get(size=(256,256)) + ig = data.geometry + ag = ig + + which_noise = 0 + # Create noisy data. + noises = ['gaussian', 'poisson', 's&p'] + dnoise = noises[which_noise] + + def setup(data, dnoise): + if dnoise == 's&p': + n1 = applynoise.saltnpepper(data, salt_vs_pepper = 0.9, amount=0.2, seed=10) + elif dnoise == 'poisson': + scale = 5 + n1 = applynoise.poisson( data.as_array()/scale, seed = 10)*scale + elif dnoise == 'gaussian': + n1 = applynoise.gaussian(data.as_array(), seed = 10) + else: + raise ValueError('Unsupported Noise ', noise) + noisy_data = ig.allocate() + noisy_data.fill(n1) + + # Regularisation Parameter depending on the noise distribution + if dnoise == 's&p': + alpha = 0.8 + elif dnoise == 'poisson': + alpha = 1 + elif dnoise == 'gaussian': + alpha = .3 + # fidelity + if dnoise == 's&p': + g = L1Norm(b=noisy_data) + elif dnoise == 'poisson': + g = KullbackLeibler(b=noisy_data) + elif dnoise == 'gaussian': + g = 0.5 * L2NormSquared(b=noisy_data) + return noisy_data, alpha, g + + noisy_data, alpha, g = setup(data, dnoise) + operator = GradientOperator(ig, correlation=GradientOperator.CORRELATION_SPACE) + + f1 = alpha * MixedL21Norm() + + + + # Compute operator Norm + normK = operator.norm() + + # Primal & dual stepsizes + # sigma = operator.range_geometry().allocate(4) #1 + # tau = operator.domain_geometry().allocate(2) #1/(sigma*normK**2) + + # Setup and run the PDHG algorithm + pdhg1 = PDHG(f=f1,g=g,operator=operator, sigma=100, tau=1) + pdhg1.max_iteration = 2000 + pdhg1.update_objective_interval = 200 + pdhg1.run(1000, verbose=0) + + rmse = (pdhg1.get_output() - data).norm() / data.as_array().size + + print ("RMSE", rmse) + From ef63e4e12b12f3e52e71c3443c1026790b1201a8 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Tue, 16 Nov 2021 14:26:24 +0000 Subject: [PATCH 77/88] test for types/cases of sigma, tau and strongly convex constants --- Wrappers/Python/test/test_algorithms.py | 112 +++++++++++++++++++++--- 1 file changed, 100 insertions(+), 12 deletions(-) diff --git a/Wrappers/Python/test/test_algorithms.py b/Wrappers/Python/test/test_algorithms.py index e09c9a16e5..25f4a69a9c 100755 --- a/Wrappers/Python/test/test_algorithms.py +++ b/Wrappers/Python/test/test_algorithms.py @@ -45,6 +45,7 @@ from cil.utilities import dataexample from cil.utilities import noise as applynoise import os, sys, time +import warnings # Fast Gradient Projection algorithm for Total Variation(TV) @@ -402,17 +403,78 @@ def test_PDHG_step_sizes(self): f = L2NormSquared(b=data) g = L2NormSquared() - operator = IdentityOperator(ig) - - # sigma, tau no update. Operator norm is 1. 
- sigma = 1.0 - tau = 1.0 + operator = 3*IdentityOperator(ig) + #check if sigma, tau are None pdhg = PDHG(f=f, g=g, operator=operator, max_iteration=10) - pdhg.run(verbose=0) + self.assertEqual(pdhg.sigma, 1./operator.norm()) + self.assertEqual(pdhg.tau, 1./operator.norm()) + + #check if sigma is negative + try: + pdhg = PDHG(f=f, g=g, operator=operator, max_iteration=10, sigma = -1) + except ValueError as ve: + print(ve) + + #check if tau is negative + try: + pdhg = PDHG(f=f, g=g, operator=operator, max_iteration=10, tau = -1) + except ValueError as ve: + print(ve) + + #check if tau is None + sigma = 3.0 + pdhg = PDHG(f=f, g=g, operator=operator, sigma = sigma, max_iteration=10) self.assertEqual(pdhg.sigma, sigma) - self.assertEqual(pdhg.tau, tau) + self.assertEqual(pdhg.tau, 1./(sigma * operator.norm()**2)) + + #check if sigma is None + tau = 3.0 + pdhg = PDHG(f=f, g=g, operator=operator, tau = tau, max_iteration=10) + self.assertEqual(pdhg.tau, tau) + self.assertEqual(pdhg.sigma, 1./(tau * operator.norm()**2)) + + #check if sigma/tau are not None + tau = 1.0 + sigma = 1.0 + pdhg = PDHG(f=f, g=g, operator=operator, tau = tau, sigma = sigma, max_iteration=10) + self.assertEqual(pdhg.tau, tau) + self.assertEqual(pdhg.sigma, sigma) + + #check sigma/tau as arrays, sigma wrong shape + ig1 = ImageGeometry(2,2) + sigma = ig1.allocate() + try: + pdhg = PDHG(f=f, g=g, operator=operator, sigma = sigma, max_iteration=10) + except ValueError as ve: + print(ve) + + #check sigma/tau as arrays, tau wrong shape + tau = ig1.allocate() + try: + pdhg = PDHG(f=f, g=g, operator=operator, tau = tau, max_iteration=10) + except ValueError as ve: + print(ve) + # check sigma not Number or array-Like object + try: + pdhg = PDHG(f=f, g=g, operator=operator, sigma = "sigma", max_iteration=10) + except AttributeError as ve: + print(ve) + + # check tau not Number or array-Like object + try: + pdhg = PDHG(f=f, g=g, operator=operator, tau = "tau", max_iteration=10) + except AttributeError as ve: + print(ve) + + # check warning message if condition is not satisfied + sigma = 4 + tau = 1/3 + with warnings.catch_warnings(record=True) as wa: + pdhg = PDHG(f=f, g=g, operator=operator, tau = tau, sigma = sigma, max_iteration=10) + assert "Convergence criterion" in str(wa[0].message) + def test_PDHG_strongly_convex_gamma_g(self): ig = ImageGeometry(3,3) @@ -434,7 +496,21 @@ def test_PDHG_strongly_convex_gamma_g(self): self.assertEquals(pdhg.sigma, sigma / pdhg.theta) pdhg.run(4, verbose=0) self.assertNotEqual(pdhg.sigma, sigma) - self.assertNotEqual(pdhg.tau, tau) + self.assertNotEqual(pdhg.tau, tau) + + # check negative strongly convex constant + try: + pdhg = PDHG(f=f, g=g, operator=operator, sigma = sigma, tau=tau, + max_iteration=5, gamma_g=-0.5) + except ValueError as ve: + print(ve) + + # check strongly convex constant not a number + try: + pdhg = PDHG(f=f, g=g, operator=operator, sigma = sigma, tau=tau, + max_iteration=5, gamma_g="-0.5") + except ValueError as ve: + print(ve) def test_PDHG_strongly_convex_gamma_fcong(self): @@ -457,9 +533,23 @@ def test_PDHG_strongly_convex_gamma_fcong(self): self.assertEquals(pdhg.sigma, sigma * pdhg.theta) pdhg.run(4, verbose=0) self.assertNotEqual(pdhg.sigma, sigma) - self.assertNotEqual(pdhg.tau, tau) + self.assertNotEqual(pdhg.tau, tau) - def test_PDHG_strongly_convex_both_fconj_or_g(self): + # check negative strongly convex constant + try: + pdhg = PDHG(f=f, g=g, operator=operator, sigma = sigma, tau=tau, + max_iteration=5, gamma_fconj=-0.5) + except ValueError as ve: + print(ve) 
+ + # check strongly convex constant not a number + try: + pdhg = PDHG(f=f, g=g, operator=operator, sigma = sigma, tau=tau, + max_iteration=5, gamma_fconj="-0.5") + except ValueError as ve: + print(ve) + + def test_PDHG_strongly_convex_both_fconj_and_g(self): ig = ImageGeometry(3,3) data = ig.allocate('random') @@ -474,8 +564,6 @@ def test_PDHG_strongly_convex_both_fconj_or_g(self): pdhg.run(verbose=0) except ValueError as err: print(err) - - def test_FISTA_Denoising(self): if debug_print: From 7ca2f691ffcaebbe3f88a9454116f4481d22bc91 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Tue, 16 Nov 2021 14:26:50 +0000 Subject: [PATCH 78/88] fix checks in PDHG parameters --- .../cil/optimisation/algorithms/PDHG.py | 75 ------------------- 1 file changed, 75 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py index 72911fe98f..e11d6c920e 100644 --- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py @@ -498,80 +498,5 @@ def primal_dual_gap(self): return [x[2] for x in self.loss] -if __name__ == "__main__": - from cil.utilities import dataexample - from cil.utilities import noise as applynoise - from cil.optimisation.operators import IdentityOperator - from cil.optimisation.operators import GradientOperator, BlockOperator, FiniteDifferenceOperator - - from cil.optimisation.functions import LeastSquares, ZeroFunction, \ - L2NormSquared, OperatorCompositionFunction - from cil.optimisation.functions import MixedL21Norm, BlockFunction, L1Norm, KullbackLeibler - from cil.optimisation.functions import IndicatorBox - - print ("PDHG Denoising with 3 noises") - # adapted from demo PDHG_TV_Color_Denoising.py in CIL-Demos repository - - data = dataexample.PEPPERS.get(size=(256,256)) - ig = data.geometry - ag = ig - - which_noise = 0 - # Create noisy data. 
- noises = ['gaussian', 'poisson', 's&p'] - dnoise = noises[which_noise] - - def setup(data, dnoise): - if dnoise == 's&p': - n1 = applynoise.saltnpepper(data, salt_vs_pepper = 0.9, amount=0.2, seed=10) - elif dnoise == 'poisson': - scale = 5 - n1 = applynoise.poisson( data.as_array()/scale, seed = 10)*scale - elif dnoise == 'gaussian': - n1 = applynoise.gaussian(data.as_array(), seed = 10) - else: - raise ValueError('Unsupported Noise ', noise) - noisy_data = ig.allocate() - noisy_data.fill(n1) - - # Regularisation Parameter depending on the noise distribution - if dnoise == 's&p': - alpha = 0.8 - elif dnoise == 'poisson': - alpha = 1 - elif dnoise == 'gaussian': - alpha = .3 - # fidelity - if dnoise == 's&p': - g = L1Norm(b=noisy_data) - elif dnoise == 'poisson': - g = KullbackLeibler(b=noisy_data) - elif dnoise == 'gaussian': - g = 0.5 * L2NormSquared(b=noisy_data) - return noisy_data, alpha, g - - noisy_data, alpha, g = setup(data, dnoise) - operator = GradientOperator(ig, correlation=GradientOperator.CORRELATION_SPACE) - - f1 = alpha * MixedL21Norm() - - - - # Compute operator Norm - normK = operator.norm() - - # Primal & dual stepsizes - # sigma = operator.range_geometry().allocate(4) #1 - # tau = operator.domain_geometry().allocate(2) #1/(sigma*normK**2) - - # Setup and run the PDHG algorithm - pdhg1 = PDHG(f=f1,g=g,operator=operator, sigma=100, tau=1) - pdhg1.max_iteration = 2000 - pdhg1.update_objective_interval = 200 - pdhg1.run(1000, verbose=0) - - rmse = (pdhg1.get_output() - data).norm() / data.as_array().size - - print ("RMSE", rmse) From 9704f0c8ea11eb257740b2631900d02fb0e34c0e Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Tue, 16 Nov 2021 18:07:50 +0000 Subject: [PATCH 79/88] remove test --- Wrappers/Python/cil/optimisation/algorithms/PDHG.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py index e11d6c920e..6a5f4c9351 100644 --- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py @@ -495,8 +495,4 @@ def dual_objective(self): @property def primal_dual_gap(self): - return [x[2] for x in self.loss] - - - - + return [x[2] for x in self.loss] \ No newline at end of file From f4fc01ae7716c55253bee9218ad7e5f513b10278 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Tue, 16 Nov 2021 18:08:19 +0000 Subject: [PATCH 80/88] improve tests --- Wrappers/Python/test/test_algorithms.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Wrappers/Python/test/test_algorithms.py b/Wrappers/Python/test/test_algorithms.py index 25f4a69a9c..039d898cfd 100755 --- a/Wrappers/Python/test/test_algorithms.py +++ b/Wrappers/Python/test/test_algorithms.py @@ -454,7 +454,7 @@ def test_PDHG_step_sizes(self): try: pdhg = PDHG(f=f, g=g, operator=operator, tau = tau, max_iteration=10) except ValueError as ve: - print(ve) + print(ve) # check sigma not Number or array-Like object try: @@ -473,7 +473,7 @@ def test_PDHG_step_sizes(self): tau = 1/3 with warnings.catch_warnings(record=True) as wa: pdhg = PDHG(f=f, g=g, operator=operator, tau = tau, sigma = sigma, max_iteration=10) - assert "Convergence criterion" in str(wa[0].message) + assert "Convergence criterion" in str(wa[0].message) def test_PDHG_strongly_convex_gamma_g(self): From deadd5bd2b7f5d4902c0641ff65384845a353248 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Thu, 18 Nov 2021 09:36:31 +0000 Subject: [PATCH 81/88] improve docs, remove 
example; refer to CIL-Demos only
---
 .../cil/optimisation/algorithms/PDHG.py       | 32 +++++--------------
 1 file changed, 8 insertions(+), 24 deletions(-)

diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
index 6a5f4c9351..d6cf03a4a2 100644
--- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
+++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
@@ -59,27 +59,11 @@ class PDHG(Algorithm):
 
     Example
     -------
-    Total variation denoising with with PDHG.
-
-    .. math:: \min_{x\in X} \|u - b\|^{2} + \alpha\|\nabla u\|_{2,1}
-
-    >>> data = dataexample.CAMERA.get()
-    >>> noisy_data = noise.gaussian(data, seed = 10, var = 0.02)
-    >>> ig = data.geometry
-    >>> operator = GradientOperator(ig)
-    >>> f = MixedL21Norm()
-    >>> g = L2NormSquared(b=g)
-    >>> pdhg = PDHG(f = f, g = g, operator = operator, max_iteration = 10)
-    >>> pdhg.run(10)
-    >>> solution = pdhg.solution
-
-    Primal acceleration can also be used, since :math:`g` is strongly convex with parameter ``gamma_g = 2``.
-
-    >>> pdhg = PDHG(f = f, g = g, operator = operator, gamma_g = 2)
-
-    For a TV tomography reconstruction example, see `CIL-Demos `_.
-    More examples can be found in :cite:`Jorgensen_et_al_2021`, :cite:`Papoutsellis_et_al_2021`.
+    In our `CIL-Demos `_ repository\
+    you can find examples using the PDHG algorithm for different imaging problems, such as Total Variation denoising, Total Generalised Variation inpainting\
+    and Total Variation Tomography reconstruction. More examples can also be found in :cite:`Jorgensen_et_al_2021`, :cite:`Papoutsellis_et_al_2021`.
+
 
     Note
     ----
@@ -226,8 +226,7 @@ class PDHG(Algorithm):
 
     """

    def __init__(self, f, g, operator, tau=None, sigma=None,initial=None, use_axpby=True, **kwargs):
-        """Constructor method
-        """
+
         super(PDHG, self).__init__(**kwargs)
         if kwargs.get('x_init', None) is not None:
             if initial is None:
@@ -319,7 +302,7 @@ def set_up(self, f, g, operator, tau=None, sigma=None, initial=None, **kwargs):
         self.g = g
         self.operator = operator
 
-        #Default step sizes
+        #Set sigma and tau step-sizes for PDHG
         self.set_step_sizes(sigma=sigma, tau=tau)
 
         if initial is None:
@@ -398,7 +381,7 @@ def update(self):
 
     def set_step_sizes(self, sigma=None, tau=None):
-        """Default scalar step sizes for the PDHG algorithm
+        """ Sets sigma and tau step-sizes for the PDHG algorithm. The step sizes can be either scalar or array-objects. 
        """
 
         # Compute operator norm
@@ -419,6 +402,7 @@ def set_step_sizes(self, sigma=None, tau=None):
         elif sigma.shape!= self.operator.range_geometry().shape:
             raise ValueError(" The shape of sigma = {} is not the same as the shape of the range_geometry = {}".format(sigma.shape, self.operator.range_geometry().shape))
 
+        # Default sigma and tau step-sizes
         if tau is None and sigma is None:
             self._sigma = 1.0/self.norm_op
             self._tau = 1.0/self.norm_op

From 78075315aa8507c6f968b1538180447cd82b4dc9 Mon Sep 17 00:00:00 2001
From: Edoardo Pasca
Date: Thu, 18 Nov 2021 16:25:51 +0000
Subject: [PATCH 82/88] add missing docstring, format methods

---
 .../cil/optimisation/algorithms/PDHG.py       | 68 ++++++++++++++++---
 1 file changed, 57 insertions(+), 11 deletions(-)

diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
index d6cf03a4a2..8dafe56717 100644
--- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
+++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py
@@ -252,23 +252,34 @@ def __init__(self, f, g, operator, tau=None, sigma=None,initial=None, use_axpby=
         self.set_up(f=f, g=g, operator=operator, tau=tau, sigma=sigma, initial=initial, **kwargs)
 
+
     @property
     def tau(self):
         return self._tau
+
     @property
     def sigma(self):
         return self._sigma
+
     @property
     def gamma_g(self):
         return self._gamma_g
+
     @property
     def gamma_fconj(self):
         return self._gamma_fconj
-    
+
+
     def set_gamma_g(self, value):
+        '''Set the value of the strongly convex constant for function G
+
+        Parameters
+        ----------
+        value : a positive number
+        '''
 
         if isinstance (value, Number):
 
@@ -281,7 +292,12 @@ def set_gamma_g(self, value):
 
 
     def set_gamma_fconj(self, value):
-
+        '''Set the value of the strongly convex constant for the convex conjugate of function F
+
+        Parameters
+        ----------
+        value : a positive number
+        '''
         if isinstance (value, Number):
             if value <= 0:
                 raise ValueError("Strongly convex constant is positive, {} is passed for the strongly convex conjugate function of f.".format(value))
@@ -294,6 +310,22 @@ def set_gamma_fconj(self, value):
 
     def set_up(self, f, g, operator, tau=None, sigma=None, initial=None, **kwargs):
         """Initialisation of the algorithm
+
+        Parameters
+        ----------
+        f : Function
+            A convex function with a "simple" proximal method of its conjugate.
+        g : Function
+            A convex function with a "simple" proximal.
+        operator : LinearOperator
+            A Linear Operator.
+        sigma : positive :obj:`float`, or `np.ndarray`, `DataContainer`, `BlockDataContainer`, optional, default=None
+            Step size for the dual problem.
+        tau : positive :obj:`float`, or `np.ndarray`, `DataContainer`, `BlockDataContainer`, optional, default=None
+            Step size for the primal problem.
+        initial : DataContainer, optional, default=None
+            Initial point for the PDHG algorithm. 
+ theta : Relaxation parameter, Number, default 1.0 """ print("{} setting up".format(self.__class__.__name__, )) @@ -327,18 +359,21 @@ def set_up(self, f, g, operator, tau=None, sigma=None, initial=None, **kwargs): self.configured = True print("{} configured".format(self.__class__.__name__, )) + def update_previous_solution(self): # swap the pointers to current and previous solution tmp = self.x_old self.x_old = self.x self.x = tmp + def get_output(self): + '''Returns the solution found''' # returns the current solution return self.x_old - def update(self): + def update(self): r""" Performs a single iteration of the PDHG algorithm """ @@ -380,8 +415,18 @@ def update(self): def set_step_sizes(self, sigma=None, tau=None): - """ Sets sigma and tau step-sizes for the PDHG algorithm. The step sizes can be either scalar or array-objects. + + Parameters + ---------- + sigma : positive :obj:`float`, or `np.ndarray`, `DataContainer`, `BlockDataContainer`, optional, default=None + Step size for the dual problem. + tau : positive :obj:`float`, or `np.ndarray`, `DataContainer`, `BlockDataContainer`, optional, default=None + Step size for the primal problem. + + The user can set either, both or none. Values passed by the user will be accepted as long as they are positive numbers, + or correct shape array like objects. Warnings will be given in the case the values passed do not guarantee the algorithm + convergence. """ # Compute operator norm @@ -428,9 +473,9 @@ def set_step_sizes(self, sigma=None, tau=None): warnings.warn("Convergence criterion of PDHG for scalar step-sizes is not satisfied.") except: pass - - def update_step_sizes(self): + + def update_step_sizes(self): r""" Updates step sizes in the cases of primal or dual acceleration using the strongly convexity property. The case where both functions are strongly convex is not available at the moment. """ @@ -446,14 +491,13 @@ def update_step_sizes(self): self.theta = 1.0 / np.sqrt(1 + 2 * self.gamma_fconj * self.sigma) self._sigma *= self.theta self._tau /= self.theta - - def update_objective(self): + + def update_objective(self): """ Evaluates the primal objective, the dual objective and the primal-dual gap. 
""" - self.operator.direct(self.x_old, out=self.y_tmp) f_eval_p = self.f(self.y_tmp) g_eval_p = self.g(self.x_old) @@ -468,15 +512,17 @@ def update_objective(self): self.loss.append([p1, -d1, p1+d1]) + @property def objective(self): - '''alias of loss''' return [x[0] for x in self.loss] + @property def dual_objective(self): return [x[1] for x in self.loss] - + + @property def primal_dual_gap(self): return [x[2] for x in self.loss] \ No newline at end of file From a5e4c966e860563a6cb94971f8cc526526254808 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 18 Nov 2021 16:28:55 +0000 Subject: [PATCH 83/88] [ci skip] use appropriate function name --- Wrappers/Python/cil/optimisation/algorithms/PDHG.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py index 8dafe56717..ce387040f1 100644 --- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py @@ -274,7 +274,7 @@ def gamma_fconj(self): def set_gamma_g(self, value): - '''Set the value of the strongly convex constant for function G + '''Set the value of the strongly convex constant for function `g` Parameters ---------- @@ -292,7 +292,7 @@ def set_gamma_g(self, value): def set_gamma_fconj(self, value): - '''Set the value of the strongly convex constant for the convex conjugate of function F + '''Set the value of the strongly convex constant for the convex conjugate of function `f` Parameters ---------- From 522d6ea7097db03894de42d2006881bf6609425f Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 18 Nov 2021 16:31:10 +0000 Subject: [PATCH 84/88] [ci skip] remove useless comment --- Wrappers/Python/cil/optimisation/algorithms/PDHG.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py index ce387040f1..1bdad6575b 100644 --- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py @@ -235,7 +235,7 @@ def __init__(self, f, g, operator, tau=None, sigma=None,initial=None, use_axpby= initial = kwargs.get('x_init', None) else: raise ValueError('{} received both initial and the deprecated x_init parameter. 
It is not clear which one we should use.'\ - .format(self.__class__.__name__)) + .format(self.__class__.__name__)) self._use_axpby = use_axpby self._tau = None @@ -334,7 +334,6 @@ def set_up(self, f, g, operator, tau=None, sigma=None, initial=None, **kwargs): self.g = g self.operator = operator - #Set sigma and tau step-sizes for PDHG self.set_step_sizes(sigma=sigma, tau=tau) if initial is None: From d644f46219b09f040b90e19bbef3b8c19b6fb9a5 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Fri, 19 Nov 2021 12:18:37 +0000 Subject: [PATCH 85/88] move the check of both gamma specified in setter methods --- Wrappers/Python/cil/optimisation/algorithms/PDHG.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py index 1bdad6575b..259632ca03 100644 --- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py @@ -247,9 +247,7 @@ def __init__(self, f, g, operator, tau=None, sigma=None,initial=None, use_axpby= self.set_gamma_g(kwargs.get('gamma_g', None)) self.set_gamma_fconj(kwargs.get('gamma_fconj', None)) - if self.gamma_g is not None and self.gamma_fconj is not None: - raise ValueError("The adaptive update of the PDHG stepsizes in the case where both functions are strongly convex is not implemented at the moment.") - + self.set_up(f=f, g=g, operator=operator, tau=tau, sigma=sigma, initial=initial, **kwargs) @@ -280,7 +278,10 @@ def set_gamma_g(self, value): ---------- value : a positive number ''' - + if self.gamma_fconj is not None: + raise ValueError("The adaptive update of the PDHG stepsizes in the case where both functions are strongly convex is not implemented at the moment." +\ + "Currently the strongly convex constant of the convex conjugate of the function f has been specified.") + if isinstance (value, Number): if value <= 0: raise ValueError("Strongly convex constant is a positive number, {} is passed for the strongly convex function g.".format(value)) @@ -298,6 +299,10 @@ def set_gamma_fconj(self, value): ---------- value : a positive number ''' + if self.gamma_g is not None: + raise ValueError("The adaptive update of the PDHG stepsizes in the case where both functions are strongly convex is not implemented at the moment." 
+\ + "Currently the strongly convex constant of the function g has been specified.") + if isinstance (value, Number): if value <= 0: raise ValueError("Strongly convex constant is positive, {} is passed for the strongly convex conjugate function of f.".format(value)) From 17f6c5fa379ddd0af997815a5b2ffc8adc1cfa47 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Fri, 19 Nov 2021 12:18:52 +0000 Subject: [PATCH 86/88] addressing review comments --- Wrappers/Python/test/test_algorithms.py | 74 ++++++++++--------------- 1 file changed, 30 insertions(+), 44 deletions(-) diff --git a/Wrappers/Python/test/test_algorithms.py b/Wrappers/Python/test/test_algorithms.py index 039d898cfd..44662d0d17 100755 --- a/Wrappers/Python/test/test_algorithms.py +++ b/Wrappers/Python/test/test_algorithms.py @@ -407,67 +407,55 @@ def test_PDHG_step_sizes(self): #check if sigma, tau are None pdhg = PDHG(f=f, g=g, operator=operator, max_iteration=10) - self.assertEqual(pdhg.sigma, 1./operator.norm()) - self.assertEqual(pdhg.tau, 1./operator.norm()) + self.assertAlmostEqual(pdhg.sigma, 1./operator.norm()) + self.assertAlmostEqual(pdhg.tau, 1./operator.norm()) #check if sigma is negative - try: + with self.assertRaises(ValueError): pdhg = PDHG(f=f, g=g, operator=operator, max_iteration=10, sigma = -1) - except ValueError as ve: - print(ve) - + #check if tau is negative - try: + with self.assertRaises(ValueError): pdhg = PDHG(f=f, g=g, operator=operator, max_iteration=10, tau = -1) - except ValueError as ve: - print(ve) - + #check if tau is None sigma = 3.0 pdhg = PDHG(f=f, g=g, operator=operator, sigma = sigma, max_iteration=10) - self.assertEqual(pdhg.sigma, sigma) - self.assertEqual(pdhg.tau, 1./(sigma * operator.norm()**2)) + self.assertAlmostEqual(pdhg.sigma, sigma) + self.assertAlmostEqual(pdhg.tau, 1./(sigma * operator.norm()**2)) #check if sigma is None tau = 3.0 pdhg = PDHG(f=f, g=g, operator=operator, tau = tau, max_iteration=10) - self.assertEqual(pdhg.tau, tau) - self.assertEqual(pdhg.sigma, 1./(tau * operator.norm()**2)) + self.assertAlmostEqual(pdhg.tau, tau) + self.assertAlmostEqual(pdhg.sigma, 1./(tau * operator.norm()**2)) #check if sigma/tau are not None tau = 1.0 sigma = 1.0 pdhg = PDHG(f=f, g=g, operator=operator, tau = tau, sigma = sigma, max_iteration=10) - self.assertEqual(pdhg.tau, tau) - self.assertEqual(pdhg.sigma, sigma) + self.assertAlmostEqual(pdhg.tau, tau) + self.assertAlmostEqual(pdhg.sigma, sigma) #check sigma/tau as arrays, sigma wrong shape ig1 = ImageGeometry(2,2) sigma = ig1.allocate() - try: + with self.assertRaises(ValueError): pdhg = PDHG(f=f, g=g, operator=operator, sigma = sigma, max_iteration=10) - except ValueError as ve: - print(ve) #check sigma/tau as arrays, tau wrong shape tau = ig1.allocate() - try: + with self.assertRaises(ValueError): pdhg = PDHG(f=f, g=g, operator=operator, tau = tau, max_iteration=10) - except ValueError as ve: - print(ve) - - # check sigma not Number or array-Like object - try: + + # check sigma not Number or object with correct shape + with self.assertRaises(AttributeError): pdhg = PDHG(f=f, g=g, operator=operator, sigma = "sigma", max_iteration=10) - except AttributeError as ve: - print(ve) - - # check tau not Number or array-Like object - try: + + # check tau not Number or object with correct shape + with self.assertRaises(AttributeError): pdhg = PDHG(f=f, g=g, operator=operator, tau = "tau", max_iteration=10) - except AttributeError as ve: - print(ve) - + # check warning message if condition is not satisfied sigma = 4 tau = 1/3 @@ -491,26 +479,24 
@@ def test_PDHG_strongly_convex_gamma_g(self): pdhg = PDHG(f=f, g=g, operator=operator, sigma = sigma, tau=tau, max_iteration=5, gamma_g=0.5) pdhg.run(1, verbose=0) - self.assertEquals(pdhg.theta, 1.0/ np.sqrt(1 + 2 * pdhg.gamma_g * tau)) - self.assertEquals(pdhg.tau, tau * pdhg.theta) - self.assertEquals(pdhg.sigma, sigma / pdhg.theta) + self.assertAlmostEquals(pdhg.theta, 1.0/ np.sqrt(1 + 2 * pdhg.gamma_g * tau)) + self.assertAlmostEquals(pdhg.tau, tau * pdhg.theta) + self.assertAlmostEquals(pdhg.sigma, sigma / pdhg.theta) pdhg.run(4, verbose=0) - self.assertNotEqual(pdhg.sigma, sigma) - self.assertNotEqual(pdhg.tau, tau) + self.assertAlmostEquals(pdhg.sigma, sigma) + self.assertAlmostEquals(pdhg.tau, tau) # check negative strongly convex constant - try: + with self.assertRaises(ValueError): pdhg = PDHG(f=f, g=g, operator=operator, sigma = sigma, tau=tau, max_iteration=5, gamma_g=-0.5) - except ValueError as ve: - print(ve) + # check strongly convex constant not a number - try: + with self.assertRaises(ValueError): pdhg = PDHG(f=f, g=g, operator=operator, sigma = sigma, tau=tau, max_iteration=5, gamma_g="-0.5") - except ValueError as ve: - print(ve) + def test_PDHG_strongly_convex_gamma_fcong(self): From ce780fc16d5614d8042459e82c31dc71ec14e181 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Fri, 19 Nov 2021 12:20:33 +0000 Subject: [PATCH 87/88] [ci skip] update docstring --- Wrappers/Python/cil/optimisation/algorithms/PDHG.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py index 259632ca03..c20d25dfbb 100644 --- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py @@ -429,7 +429,7 @@ def set_step_sizes(self, sigma=None, tau=None): Step size for the primal problem. The user can set either, both or none. Values passed by the user will be accepted as long as they are positive numbers, - or correct shape array like objects. Warnings will be given in the case the values passed do not guarantee the algorithm + or correct shape array like objects. Warnings may be given in the case the scalar values passed do not guarantee the algorithm convergence. """ From 9892c894ecb2d6de4e3dbb76a2c3d658b3324c91 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Fri, 19 Nov 2021 13:29:48 +0000 Subject: [PATCH 88/88] bugfix gamma setters --- Wrappers/Python/cil/optimisation/algorithms/PDHG.py | 12 ++++++------ Wrappers/Python/test/test_algorithms.py | 4 ++-- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py index c20d25dfbb..a434f5b3aa 100644 --- a/Wrappers/Python/cil/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/cil/optimisation/algorithms/PDHG.py @@ -276,11 +276,11 @@ def set_gamma_g(self, value): Parameters ---------- - value : a positive number + value : a positive number or None ''' - if self.gamma_fconj is not None: + if self.gamma_fconj is not None and value is not None: raise ValueError("The adaptive update of the PDHG stepsizes in the case where both functions are strongly convex is not implemented at the moment." 
+\
-                "Currently the strongly convex constant of the convex conjugate of the function f has been specified.")
+                " Currently the strongly convex constant of the convex conjugate of the function f has been specified as {}.".format(self.gamma_fconj))
 
         if isinstance (value, Number):
             if value <= 0:
@@ -297,11 +297,11 @@ def set_gamma_fconj(self, value):
 
         Parameters
         ----------
-        value : a positive number
+        value : a positive number or None
         '''
-        if self.gamma_g is not None:
+        if self.gamma_g is not None and value is not None:
             raise ValueError("The adaptive update of the PDHG stepsizes in the case where both functions are strongly convex is not implemented at the moment." +\
-                "Currently the strongly convex constant of the function g has been specified.")
+                " Currently the strongly convex constant of the function g has been specified as {}.".format(self.gamma_g))
 
         if isinstance (value, Number):
             if value <= 0:
diff --git a/Wrappers/Python/test/test_algorithms.py b/Wrappers/Python/test/test_algorithms.py
index 44662d0d17..6f874e82f9 100755
--- a/Wrappers/Python/test/test_algorithms.py
+++ b/Wrappers/Python/test/test_algorithms.py
@@ -483,8 +483,8 @@ def test_PDHG_strongly_convex_gamma_g(self):
         self.assertAlmostEquals(pdhg.tau, tau * pdhg.theta)
         self.assertAlmostEquals(pdhg.sigma, sigma / pdhg.theta)
         pdhg.run(4, verbose=0)
-        self.assertAlmostEquals(pdhg.sigma, sigma)
-        self.assertAlmostEquals(pdhg.tau, tau)
+        self.assertNotEqual(pdhg.sigma, sigma)
+        self.assertNotEqual(pdhg.tau, tau)
 
         # check negative strongly convex constant
         with self.assertRaises(ValueError):
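
A minimal end-state usage sketch of the interface built up by this series, reusing the Gaussian-denoising setup of the removed __main__ examples above. The regularisation weight 0.3 follows those examples, and gamma_g = 1 is the strong convexity constant of 0.5 * L2NormSquared; both are illustrative choices, not part of any patch.

from cil.utilities import dataexample
from cil.utilities import noise as applynoise
from cil.optimisation.algorithms import PDHG
from cil.optimisation.operators import GradientOperator
from cil.optimisation.functions import L2NormSquared, MixedL21Norm

# Gaussian denoising: PEPPERS data, TV regulariser, L2 fidelity
data = dataexample.PEPPERS.get(size=(256,256))
ig = data.geometry
noisy_data = ig.allocate()
noisy_data.fill(applynoise.gaussian(data.as_array(), seed=10))

operator = GradientOperator(ig, correlation=GradientOperator.CORRELATION_SPACE)
f = 0.3 * MixedL21Norm()
g = 0.5 * L2NormSquared(b=noisy_data)

# default step-sizes: sigma = tau = 1/operator.norm()
pdhg = PDHG(f=f, g=g, operator=operator, max_iteration=2000)
pdhg.update_objective_interval = 200
pdhg.run(1000, verbose=0)

# primal acceleration: g is 1-strongly convex, so theta, tau and sigma
# are updated at every iteration by update_step_sizes
pdhg_accel = PDHG(f=f, g=g, operator=operator, gamma_g=1, max_iteration=2000)
pdhg_accel.update_objective_interval = 200
pdhg_accel.run(1000, verbose=0)

rmse = (pdhg_accel.get_output() - data).norm() / data.as_array().size
print("RMSE", rmse)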