
Commit

Merge pull request #42 from sfarrens/new_release
Added pycodestyle tests and updated release
sfarrens authored Mar 27, 2019
2 parents 45fc19d + 7793ebe commit 6000877
Showing 11 changed files with 147 additions and 162 deletions.
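The commit title refers to pycodestyle, the PEP 8 style checker (formerly known as pep8). As a rough sketch of what the new style test can do, the package can be checked programmatically through pycodestyle's public Python API; the path and settings below are illustrative assumptions, not taken from this commit:

    # Sketch: run PEP 8 style checks over the package with pycodestyle.
    import pycodestyle

    style = pycodestyle.StyleGuide()        # default settings (79-char lines, etc.)
    report = style.check_files(['modopt'])  # recursively check the package
    print('{0} style violation(s) found'.format(report.total_errors))

A check along these lines is typically wired into the test suite, so a release like 1.3.0 fails its tests when new violations appear; the per-file diffs below are the resulting reformatting.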
4 changes: 2 additions & 2 deletions README.rst
@@ -23,9 +23,9 @@ ModOpt
 
 :Author: Samuel Farrens `(samuel.farrens@cea.fr) <samuel.farrens@cea.fr>`_
 
-:Version: 1.2.0
+:Version: 1.3.0
 
-:Date: 21/11/2018
+:Date: 27/03/2019
 
 :Documentation: |link-to-docs|
4 changes: 2 additions & 2 deletions docs/source/index.rst
@@ -8,9 +8,9 @@ ModOpt Documentation
 
 :Author: Samuel Farrens <samuel.farrens@cea.fr>
 
-:Version: 1.2.0
+:Version: 1.3.0
 
-:Date: 21/11/2018
+:Date: 27/03/2019
 
 ModOpt is a series of Modular Optimisation tools for solving inverse problems.
4 changes: 2 additions & 2 deletions modopt/info.py
@@ -6,12 +6,12 @@
 :Author: Samuel Farrens <samuel.farrens@cea.fr>
-:Version: 1.2.0
+:Version: 1.3.0
 """
 
 # Package Version
-version_info = (1, 2, 0)
+version_info = (1, 3, 0)
 __version__ = '.'.join(str(c) for c in version_info)
 
 __about__ = ('ModOpt \n\n '
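For reference, the updated version tuple is joined into the dotted release string exactly as the line above shows; a one-line illustration:

    version_info = (1, 3, 0)
    __version__ = '.'.join(str(c) for c in version_info)  # -> '1.3.0'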
129 changes: 56 additions & 73 deletions modopt/opt/algorithms.py
@@ -4,7 +4,8 @@
 This module contains class implementations of various optimisation algoritms.
-:Author: Samuel Farrens <samuel.farrens@cea.fr>, Zaccharie Ramzi <zaccharie.ramzi@cea.fr>
+:Author: Samuel Farrens <samuel.farrens@cea.fr>,
+    Zaccharie Ramzi <zaccharie.ramzi@cea.fr>
 NOTES
 -----
@@ -260,58 +261,43 @@ class FISTA(object):
         None,  # no restarting
     ]
 
-    def __init__(
-        self,
-        restart_strategy=None,
-        min_beta=None,
-        s_greedy=None,
-        xi_restart=None,
-        a_cd=None,
-        p_lazy=1,
-        q_lazy=1,
-        r_lazy=4,
-    ):
+    def __init__(self, restart_strategy=None, min_beta=None, s_greedy=None,
+                 xi_restart=None, a_cd=None, p_lazy=1, q_lazy=1, r_lazy=4):
 
         if isinstance(a_cd, type(None)):
             self.mode = 'regular'
             self.p_lazy = p_lazy
             self.q_lazy = q_lazy
             self.r_lazy = r_lazy
 
         elif a_cd > 2:
             self.mode = 'CD'
             self.a_cd = a_cd
             self._n = 0
 
         else:
-            raise ValueError(
-                "a_cd must either be None (for regular mode) or a number > 2",
-            )
+            raise ValueError('a_cd must either be None (for regular mode) or '
+                             'a number > 2')
 
         if restart_strategy in self.__class__.__restarting_strategies__:
-            self._check_restart_params(
-                restart_strategy,
-                min_beta,
-                s_greedy,
-                xi_restart,
-            )
+            self._check_restart_params(restart_strategy, min_beta, s_greedy,
+                                       xi_restart)
             self.restart_strategy = restart_strategy
             self.min_beta = min_beta
             self.s_greedy = s_greedy
             self.xi_restart = xi_restart
 
         else:
-            raise ValueError(
-                "Restarting strategy must be one of %s." %
-                ", ".join(self.__class__.__restarting_strategies__)
-            )
+            raise ValueError('Restarting strategy must be one of {}.'.format(
+                             ', '.join(
+                                 self.__class__.__restarting_strategies__)))
         self._t_now = 1.0
         self._t_prev = 1.0
         self._delta_0 = None
         self._safeguard = False
 
-    def _check_restart_params(
-        self,
-        restart_strategy,
-        min_beta,
-        s_greedy,
-        xi_restart,
-    ):
+    def _check_restart_params(self, restart_strategy, min_beta, s_greedy,
+                              xi_restart):
         r""" Check restarting parameters
         This method checks that the restarting parameters are set and satisfy
@@ -346,23 +332,24 @@ def _check_restart_params(
         When a parameter that should be set isn't or doesn't verify the
         correct assumptions.
         """
+
         if restart_strategy is None:
             return True
+
         if self.mode != 'regular':
-            raise ValueError(
-                "Restarting strategies can only be used with regular mode."
-            )
-        greedy_params_check = (
-            min_beta is None or s_greedy is None or s_greedy <= 1
-        )
+            raise ValueError('Restarting strategies can only be used with '
+                             'regular mode.')
+
+        greedy_params_check = (min_beta is None or s_greedy is None or
+                               s_greedy <= 1)
+
         if restart_strategy == 'greedy' and greedy_params_check:
-            raise ValueError(
-                "You need a min_beta and an s_greedy > 1 for greedy restart."
-            )
+            raise ValueError('You need a min_beta and an s_greedy > 1 for '
+                             'greedy restart.')
+
         if xi_restart is None or xi_restart >= 1:
-            raise ValueError(
-                "You need a xi_restart < 1 for restart."
-            )
+            raise ValueError('You need a xi_restart < 1 for restart.')
+
         return True
 
     def is_restart(self, z_old, x_new, x_old):
@@ -393,18 +380,22 @@ def is_restart(self, z_old, x_new, x_old):
         """
         if self.restart_strategy is None:
             return False
+
         criterion = np.vdot(z_old - x_new, x_new - x_old) >= 0
+
         if criterion:
             if 'adaptive' in self.restart_strategy:
                 self.r_lazy *= self.xi_restart
                 if self.restart_strategy in ['adaptive-ii', 'adaptive-2']:
                     self._t_now = 1
+
             if self.restart_strategy == 'greedy':
                 cur_delta = np.linalg.norm(x_new - x_old)
                 if self._delta_0 is None:
                     self._delta_0 = self.s_greedy * cur_delta
                 else:
                     self._safeguard = cur_delta >= self._delta_0
+
         return criterion
 
     def update_beta(self, beta):
@@ -422,9 +413,11 @@ def update_beta(self, beta):
         -------
         float: the new value for the beta parameter
         """
+
         if self._safeguard:
             beta *= self.xi_restart
             beta = max(beta, self.min_beta)
+
         return beta
 
     def update_lambda(self, *args, **kwargs):
@@ -441,12 +434,17 @@ def update_lambda(self, *args, **kwargs):
         Implements steps 3 and 4 from algoritm 10.7 in [B2011]_
         """
+
         if self.restart_strategy == 'greedy':
             return 2
+
         # Steps 3 and 4 from alg.10.7.
         self._t_prev = self._t_now
+
         if self.mode == 'regular':
-            self._t_now = (self.p_lazy + np.sqrt(self.r_lazy * self._t_prev ** 2 + self.q_lazy)) * 0.5
+            self._t_now = (self.p_lazy + np.sqrt(self.r_lazy *
+                           self._t_prev ** 2 + self.q_lazy)) * 0.5
+
         elif self.mode == 'CD':
             self._t_now = (self._n + self.a_cd - 1) / self.a_cd
             self._n += 1
@@ -538,7 +536,7 @@ def __init__(self, x, grad, prox, cost='auto', beta_param=1.0,
         else:
             self._check_param_update(lambda_update)
             self._lambda_update = lambda_update
-        self._is_restart = lambda *args, **kwargs:False
+        self._is_restart = lambda *args, **kwargs: False
 
         # Automatically run the algorithm
         if auto_iterate:
@@ -688,8 +686,8 @@ def __init__(self, x, grad, prox_list, cost='auto', gamma_param=1.0,
         self._x_old = np.copy(x)
 
         # Set the algorithm operators
-        (self._check_operator(operator) for operator in [grad, cost]
-         + prox_list)
+        (self._check_operator(operator) for operator in [grad, cost] +
+         prox_list)
         self._grad = grad
         self._prox_list = np.array(prox_list)
         self._linear = linear
@@ -910,7 +908,7 @@ class Condat(SetUp):
     """
 
     def __init__(self, x, y, grad, prox, prox_dual, linear=None, cost='auto',
-                 reweight=None, rho=0.5, sigma=1.0, tau=1.0, rho_update=None,
+                 reweight=None, rho=0.5, sigma=1.0, tau=1.0, rho_update=None,
                  sigma_update=None, tau_update=None, auto_iterate=True,
                  max_iter=150, n_rewightings=1, metric_call_period=5,
                  metrics={}):
@@ -1070,6 +1068,7 @@ def retrieve_outputs(self):
             metrics[obs.name] = obs.retrieve_metrics()
         self.metrics = metrics
 
+
 class POGM(SetUp):
     r"""Proximal Optimised Gradient Method
@@ -1103,28 +1102,13 @@ class POGM(SetUp):
         Option to automatically begin iterations upon initialisation (default
         is 'True')
     """
-    def __init__(
-        self,
-        u,
-        x,
-        y,
-        z,
-        grad,
-        prox,
-        cost='auto',
-        linear=None,
-        beta_param=1.0,
-        sigma_bar=1.0,
-        auto_iterate=True,
-        metric_call_period=5,
-        metrics={},
-    ):
+    def __init__(self, u, x, y, z, grad, prox, cost='auto', linear=None,
+                 beta_param=1.0, sigma_bar=1.0, auto_iterate=True,
+                 metric_call_period=5, metrics={}):
 
         # Set default algorithm properties
-        super(POGM, self).__init__(
-            metric_call_period=metric_call_period,
-            metrics=metrics,
-            linear=linear,
-        )
+        super(POGM, self).__init__(metric_call_period=metric_call_period,
+                                   metrics=metrics, linear=linear)
 
         # set the initial variable values
         (self._check_input_data(data) for data in (u, x, y, z))
@@ -1145,7 +1129,7 @@ def __init__(
 
         # Set the algorithm parameters
         (self._check_param(param) for param in (beta_param, sigma_bar))
-        if not (0 <= sigma_bar <=1):
+        if not (0 <= sigma_bar <= 1):
             raise ValueError('The sigma bar parameter needs to be in [0, 1]')
         self._beta = beta_param
         self._sigma_bar = sigma_bar
@@ -1169,7 +1153,7 @@ def _update(self):
         """
         # Step 4 from alg. 3
         self._grad.get_grad(self._x_old)
-        self._u_new = self._x_old - self._beta * self._grad.grad
+        self._u_new = self._x_old - self._beta * self._grad.grad
 
         # Step 5 from alg. 3
         self._t_new = 0.5 * (1 + np.sqrt(1 + 4 * self._t_old**2))
@@ -1218,7 +1202,6 @@ def _update(self):
         self.converge = self.any_convergence_flag() or \
             self._cost_func.get_cost(self._x_new)
 
-
     def iterate(self, max_iter=150):
         r"""Iterate
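The bulk of the reformatting above touches the FISTA parameter class. A hedged usage sketch, based only on the signature and validation rules visible in this diff (the numeric values are arbitrary examples, not recommendations):

    import numpy as np
    from modopt.opt.algorithms import FISTA

    # Per _check_restart_params above, 'greedy' restarting needs a min_beta,
    # an s_greedy > 1 and an xi_restart < 1.
    fista = FISTA(restart_strategy='greedy', min_beta=1e-10, s_greedy=1.1,
                  xi_restart=0.96)

    # In 'regular' mode with the lazy defaults (p=1, q=1, r=4), update_lambda
    # reproduces the classic FISTA momentum step t_new = (1 + sqrt(1 + 4*t**2)) / 2.
    t_prev = 1.0
    t_now = (1 + np.sqrt(4 * t_prev ** 2 + 1)) * 0.5  # ~1.618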
2 changes: 1 addition & 1 deletion modopt/opt/gradient.py
@@ -168,7 +168,7 @@ def cost(self, method):
         self._cost = check_callable(method)
 
     def trans_op_op(self, data):
-        """Transpose Operation of the Operator
+        r"""Transpose Operation of the Operator
 
         This method calculates the action of the transpose operator on
         the action of the operator on the data
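The change above is cosmetic (a raw-string docstring prefix so Sphinx maths renders cleanly). For context, the composition the docstring describes amounts to the following sketch, assuming the gradient parent class exposes op and trans_op methods:

    # Hypothetical stand-alone illustration of what trans_op_op composes.
    def trans_op_op(self, data):
        """Apply the operator, then its transpose: x -> H^T(H(x))."""
        return self.trans_op(self.op(data))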
1 change: 0 additions & 1 deletion modopt/opt/proximity.py
@@ -292,7 +292,6 @@ def __init__(self, linear_op, prox_op):
         self.op = self._op_method
         self.cost = self._cost_method
 
-
     def _op_method(self, data, extra_factor=1.0):
         r"""Operator method
2 changes: 1 addition & 1 deletion modopt/tests/test_base.py
@@ -42,7 +42,7 @@ def test_rotate_stack(self):
         npt.assert_array_equal(np_adjust.rotate_stack(self.data2),
                                np.array([[[8, 7, 6], [5, 4, 3], [2, 1, 0]],
                                          [[17, 16, 15], [14, 13, 12],
-                                         [11, 10, 9]]]),
+                                          [11, 10, 9]]]),
                                err_msg='Incorrect stack rotation')
 
     def test_pad2d(self):
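The indentation fix above sits inside the expected value for np_adjust.rotate_stack, which rotates every 2D slice of a 3D stack by 180 degrees. The test's expected array can be reproduced with plain NumPy (an equivalent computation, not the library's implementation, and assuming data2 is the usual arange stack used in these tests):

    import numpy as np

    data2 = np.arange(18).reshape(2, 3, 3)
    expected = np.array([np.rot90(s, 2) for s in data2])
    # expected[0] == [[8, 7, 6], [5, 4, 3], [2, 1, 0]]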
1 change: 1 addition & 0 deletions modopt/tests/test_opt.py
@@ -187,6 +187,7 @@ def test_pogm(self):
             err_msg='Incorrect POGM result.',
         )
 
+
 class CostTestCase(TestCase):
 
     def setUp(self):