
Commit

Merge pull request #89 from sfarrens/step_size
Added generic step_size parameter to algorithm SetUp
sfarrens authored Feb 11, 2020
2 parents 6f14135 + 62eb615 commit 7d0f82d
Showing 3 changed files with 55 additions and 6 deletions.
4 changes: 4 additions & 0 deletions .travis.yml
@@ -13,6 +13,10 @@ branches:
only:
- master

+# update pre-installed packages
+before_install:
+    - pip list --outdated --format=freeze | grep -v '^\-e' | cut -d = -f 1 | xargs -n1 pip install -U

# install package and dependencies
install:
- pip install coverage nose pytest pytest-cov
45 changes: 39 additions & 6 deletions modopt/opt/algorithms.py
@@ -58,17 +58,33 @@ class SetUp(Observable):
"""Algorithm Set-Up
This class contains methods for checking the set-up of an optimisation
-algorithm and produces warnings if they do not comply
+algorithm and produces warnings if they do not comply.
+Parameters
+----------
+metric_call_period : int, optional
+    Metric call period (default is ``5``)
+metrics : dict, optional
+    Metrics to be used (default is ``{}``)
+verbose : bool, optional
+    Option for verbose output (default is ``False``)
+progress : bool, optional
+    Option to display progress bar (default is ``True``)
+step_size : float, optional
+    Generic step size parameter that overrides the default algorithm
+    parameter (e.g. ``step_size`` will override the value set for
+    ``beta_param`` in ``ForwardBackward``)
"""

def __init__(self, metric_call_period=5, metrics={}, verbose=False,
-progress=True, **dummy_kwargs):
+progress=True, step_size=None, **dummy_kwargs):

self.converge = False
self.verbose = verbose
self.progress = progress
self.metrics = metrics
+self.step_size = step_size
self._op_parents = ('GradParent', 'ProximityParent', 'LinearParent',
'costObj')
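
A minimal sketch of the pattern this hunk introduces (simplified stand-in classes, not the actual ModOpt source): `SetUp` stores the generic `step_size`, and each subclass falls back to its own default when no override is given.

```python
class SetUp(object):
    """Simplified stand-in for modopt.opt.algorithms.SetUp."""

    def __init__(self, step_size=None, **dummy_kwargs):
        # None means "use the algorithm-specific default".
        self.step_size = step_size


class ForwardBackward(SetUp):
    """Simplified stand-in showing the override logic."""

    def __init__(self, beta_param=1.0, **kwargs):
        super(ForwardBackward, self).__init__(**kwargs)
        # step_size takes precedence over beta_param when set.
        self._beta = self.step_size or beta_param


fb = ForwardBackward(beta_param=1.0, step_size=0.5)
assert fb._beta == 0.5  # step_size overrides beta_param
assert ForwardBackward(beta_param=2.0)._beta == 2.0  # default path
```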

@@ -494,6 +510,10 @@ class ForwardBackward(SetUp):
Option to automatically begin iterations upon initialisation (default
is 'True')
+Notes
+-----
+The `beta_param` can also be set using the keyword `step_size`.
"""

def __init__(self, x, grad, prox, cost='auto', beta_param=1.0,
@@ -532,7 +552,7 @@ def __init__(self, x, grad, prox, cost='auto', beta_param=1.0,

# Set the algorithm parameters
(self._check_param(param) for param in (beta_param, lambda_param))
-self._beta = beta_param
+self._beta = self.step_size or beta_param
self._lambda = lambda_param

# Set the algorithm parameter update methods
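
In use, the override just means passing `step_size` alongside (or instead of) `beta_param`. A hedged usage sketch, assuming the `GradBasic` and `Positivity` operators available in ModOpt releases of this period (the identity operators and data values are purely illustrative):

```python
import numpy as np

from modopt.opt.algorithms import ForwardBackward
from modopt.opt.gradient import GradBasic
from modopt.opt.proximity import Positivity

data = np.arange(9, dtype=float)
# Identity forward/adjoint operators keep the example trivial.
grad_op = GradBasic(data, lambda x: x, lambda x: x)

fb = ForwardBackward(data, grad=grad_op, prox=Positivity(),
                     beta_param=1.0, step_size=0.5, cost=None,
                     auto_iterate=False)
print(fb._beta)  # 0.5: taken from step_size, not beta_param
```

Note in passing that `(self._check_param(param) for param in ...)` above is a generator expression that is never consumed, so those checks do not actually run.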
@@ -678,6 +698,10 @@ class GenForwardBackward(SetUp):
Option to automatically begin iterations upon initialisation (default
is 'True')
+Notes
+-----
+The `gamma_param` can also be set using the keyword `step_size`.
"""

def __init__(self, x, grad, prox_list, cost='auto', gamma_param=1.0,
@@ -716,7 +740,7 @@ def __init__(self, x, grad, prox_list, cost='auto', gamma_param=1.0,

# Set the algorithm parameters
(self._check_param(param) for param in (gamma_param, lambda_param))
-self._gamma = gamma_param
+self._gamma = self.step_size or gamma_param
self._lambda_param = lambda_param

# Set the algorithm parameter update methods
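
One caveat of the `self.step_size or gamma_param` idiom: `or` treats any falsy override as unset, so an explicit `step_size=0` would silently fall back to the default instead of raising. A short illustration (plain Python, not ModOpt code):

```python
gamma_param = 1.0

print(None or gamma_param)  # 1.0 -> no override, default used
print(0.5 or gamma_param)   # 0.5 -> override wins
print(0 or gamma_param)     # 1.0 -> a zero step size is ignored

# An explicit None test is the more defensive spelling:
step_size = 0
gamma = gamma_param if step_size is None else step_size  # gamma == 0
```

A zero step size is not meaningful for these algorithms, so the shorter idiom is harmless in practice.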
@@ -914,6 +938,10 @@ class Condat(SetUp):
n_rewightings : int, optional
Number of reweightings to perform (default is 1)
+Notes
+-----
+The `tau` parameter can also be set using the keyword `step_size`.
"""

def __init__(self, x, y, grad, prox, prox_dual, linear=None, cost='auto',
@@ -952,7 +980,7 @@ def __init__(self, x, y, grad, prox, prox_dual, linear=None, cost='auto',
(self._check_param(param) for param in (rho, sigma, tau))
self._rho = rho
self._sigma = sigma
-self._tau = tau
+self._tau = self.step_size or tau

# Set the algorithm parameter update methods
(self._check_param_update(param_update) for param_update in
@@ -1110,6 +1138,11 @@ class POGM(SetUp):
auto_iterate : bool, optional
Option to automatically begin iterations upon initialisation (default
is 'True')
+Notes
+-----
+The `beta_param` can also be set using the keyword `step_size`.
"""
def __init__(self, u, x, y, z, grad, prox, cost='auto', linear=None,
beta_param=1.0, sigma_bar=1.0, auto_iterate=True,
@@ -1142,7 +1175,7 @@ def __init__(self, u, x, y, z, grad, prox, cost='auto', linear=None,
(self._check_param(param) for param in (beta_param, sigma_bar))
if not (0 <= sigma_bar <= 1):
raise ValueError('The sigma bar parameter needs to be in [0, 1]')
-self._beta = beta_param
+self._beta = self.step_size or beta_param
self._sigma_bar = sigma_bar
self._xi = self._sigma = self._t_old = 1.0
self._grad.get_grad(self._x_old)
12 changes: 12 additions & 0 deletions modopt/tests/test_opt.py
@@ -83,6 +83,12 @@ def setUp(self):
prox_list=[prox_inst,
prox_dual_inst],
cost=cost_inst)
+self.gfb3 = algorithms.GenForwardBackward(self.data1,
+                                           grad=grad_inst,
+                                           prox_list=[prox_inst,
+                                                      prox_dual_inst],
+                                           cost=cost_inst,
+                                           step_size=2)
self.condat1 = algorithms.Condat(self.data1, self.data2,
grad=grad_inst,
prox=prox_inst,
@@ -165,6 +171,12 @@ def test_gen_forward_backward(self):
npt.assert_array_equal(self.gfb2.x_final, self.data1,
err_msg='Incorrect GenForwardBackward result.')

+npt.assert_array_equal(self.gfb3.x_final, self.data1,
+                       err_msg='Incorrect GenForwardBackward result.')
+
+npt.assert_equal(self.gfb3.step_size, 2,
+                 err_msg='Incorrect step size.')

npt.assert_raises(TypeError, algorithms.GenForwardBackward,
self.data1, self.dummy, [self.dummy], weights=1)

