Merge pull request #137 from mrava87/doc-genproxgrad
doc: improved consistency in code and doc in GeneralizedProximalGradient
mrava87 authored Sep 17, 2023
2 parents 488dcbd + 8e3bcbb commit c894e85
Showing 3 changed files with 67 additions and 15 deletions.
29 changes: 15 additions & 14 deletions pyproximal/optimization/primal.py
```diff
@@ -308,7 +308,7 @@ def GeneralizedProximalGradient(proxfs, proxgs, x0, tau=None,
     .. math::
 
         \mathbf{x} = \argmin_\mathbf{x} \sum_{i=1}^n f_i(\mathbf{x})
-        + \sum_{j=1}^m \tau_j g_j(\mathbf{x}),~~n,m \in \mathbb{N}^+
+        + \sum_{j=1}^m \epsilon_j g_j(\mathbf{x}),~~n,m \in \mathbb{N}^+
 
     where the :math:`f_i(\mathbf{x})` are smooth convex functions with a uniquely
     defined gradient and the :math:`g_j(\mathbf{x})` are any convex function that
@@ -329,7 +329,7 @@ def GeneralizedProximalGradient(proxfs, proxgs, x0, tau=None,
         backtracking is used to adaptively estimate the best tau at each
         iteration.
     epsg : :obj:`float` or :obj:`np.ndarray`, optional
-        Scaling factor of g function
+        Scaling factor(s) of ``g`` function(s)
     niter : :obj:`int`, optional
         Number of iterations of iterative scheme
     acceleration: :obj:`str`, optional
@@ -352,11 +352,12 @@ def GeneralizedProximalGradient(proxfs, proxgs, x0, tau=None,
     .. math::
         \text{for } j=1,\cdots,n, \\
-        ~~~~\mathbf z_j^{k+1} = \mathbf z_j^{k} + \eta_k (prox_{\frac{\tau^k}{\omega_j} g_j}(2 \mathbf{x}^{k} - z_j^{k})
-        - \tau^k \sum_{i=1}^n \nabla f_i(\mathbf{x}^{k})) - \mathbf{x}^{k} \\
+        ~~~~\mathbf z_j^{k+1} = \mathbf z_j^{k} + \epsilon_j
+        \left[prox_{\frac{\tau^k}{\omega_j} g_j}\left(2 \mathbf{x}^{k} - \mathbf{z}_j^{k}
+        - \tau^k \sum_{i=1}^n \nabla f_i(\mathbf{x}^{k})\right) - \mathbf{x}^{k} \right] \\
         \mathbf{x}^{k+1} = \sum_{j=1}^n \omega_j \mathbf{z}_j^{k+1} \\
 
-    where :math:`\sum_{j=1}^n \omega_j=1`.
+    where :math:`\sum_{j=1}^n \omega_j=1`. In the current implementation :math:`\omega_j=1/n`.
 
     """
     # check if epsg is a vector
     if np.asarray(epsg).size == 1.:
```
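For context, a minimal usage sketch of the solver touched by this commit (not part of the diff). It assumes pyproximal's `L1` and `L2` functionals and pylops' `MatrixMult`; the toy operator `G`, data `y`, and step size `tau` are illustrative only:

```python
import numpy as np
from pylops import MatrixMult
from pyproximal import L1, L2
from pyproximal.optimization.primal import GeneralizedProximalGradient

# toy problem: min_x 0.5 ||G x - y||_2^2 + ||x||_1 (one f, one g)
np.random.seed(0)
n = 20
A = np.random.normal(0., 1., (n, n))
G = MatrixMult(A)
xtrue = np.zeros(n)
xtrue[[2, 7, 12]] = 1.
y = G @ xtrue

l2 = L2(Op=G, b=y)  # smooth term f_1: gradient is available
l1 = L1()           # non-smooth term g_1: prox is available
tau = 0.95 / np.linalg.norm(A.T @ A, 2)  # step below 1/L, L = Lipschitz constant of grad f_1
xinv = GeneralizedProximalGradient([l2], [l1], x0=np.zeros(n),
                                   tau=tau, epsg=1., niter=200)
```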
```diff
@@ -393,23 +394,23 @@ def GeneralizedProximalGradient(proxfs, proxgs, x0, tau=None,
     for iiter in range(niter):
         xold = x.copy()
 
-        # proximal step
+        # gradient
         grad = np.zeros_like(x)
         for i, proxf in enumerate(proxfs):
             grad += proxf.grad(x)
 
-        sol = np.zeros_like(x)
+        # proximal step
+        x = np.zeros_like(x)
         for i, proxg in enumerate(proxgs):
-            tmp = 2 * y - zs[i] - tau * grad
-            tmp[:] = proxg.prox(tmp, tau *len(proxgs) )
-            zs[i] += epsg * (tmp - y)
-            sol += zs[i] / len(proxgs)
-        x[:] = sol.copy()
+            ztmp = 2 * y - zs[i] - tau * grad
+            ztmp = proxg.prox(ztmp, tau * len(proxgs))
+            zs[i] += epsg * (ztmp - y)
+            x += zs[i] / len(proxgs)
 
         # update y
         if acceleration == 'vandenberghe':
             omega = iiter / (iiter + 3)
-        elif acceleration== 'fista':
+        elif acceleration == 'fista':
             told = t
             t = (1. + np.sqrt(1. + 4. * t ** 2)) / 2.
             omega = ((told - 1.) / t)
```
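The rewritten loop follows the docstring scheme with ω_j = 1/n: the prox is evaluated with scaling `tau * len(proxgs)` (the τ/ω_j of the docstring) and `x` accumulates the average of the auxiliary variables. A self-contained sketch of a single iteration, with hypothetical `grad_f` and `prox_gs` callables standing in for the `proxf.grad` and `proxg.prox` calls above:

```python
import numpy as np

def gpg_step(y, zs, grad_f, prox_gs, tau, epsg):
    """One generalized proximal gradient iteration (illustrative only).

    y       : current (possibly accelerated) iterate
    zs      : list of auxiliary variables, one per g_j
    grad_f  : callable returning sum_i grad f_i(y)
    prox_gs : list of callables, prox_gs[j](v, sigma) = prox_{sigma g_j}(v)
    """
    n = len(prox_gs)                       # omega_j = 1 / n for every j
    grad = grad_f(y)                       # gradient of the smooth part
    x_new = np.zeros_like(y)
    for j, prox_g in enumerate(prox_gs):
        ztmp = 2 * y - zs[j] - tau * grad  # 2 x^k - z_j^k - tau sum_i grad f_i(x^k)
        ztmp = prox_g(ztmp, tau * n)       # prox of g_j with scaling tau / omega_j
        zs[j] += epsg * (ztmp - y)         # relaxed update of z_j
        x_new += zs[j] / n                 # x^{k+1} = sum_j omega_j z_j^{k+1}
    return x_new, zs
```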
```diff
@@ -781,7 +782,7 @@ def ADMML2(proxg, Op, b, A, x0, tau, niter=10, callback=None, show=False, **kwar
 
         if show:
             if iiter < 10 or niter - iiter < 10 or iiter % (niter // 10) == 0:
-                pf, pg = np.linalg.norm(Op @ x - b), proxg(Ax)
+                pf, pg = 0.5 * np.linalg.norm(Op @ x - b) ** 2, proxg(Ax)
                 msg = '%6g %12.5e %10.3e %10.3e %10.3e' % \
                       (iiter + 1, x[0], pf, pg, pf + pg)
                 print(msg)
```
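The logged primal cost now equals the L2 data term being minimized, 0.5‖Op x − b‖², rather than the raw residual norm. A quick sanity check (not from the diff), assuming pyproximal's `L2` with default weighting evaluates to that same expression:

```python
import numpy as np
from pylops import MatrixMult
from pyproximal import L2

np.random.seed(0)
Op = MatrixMult(np.random.normal(0., 1., (5, 5)))
b = np.random.normal(0., 1., 5)
x = np.random.normal(0., 1., 5)

pf = 0.5 * np.linalg.norm(Op @ x - b) ** 2   # value printed by ADMML2 after this fix
assert np.isclose(pf, L2(Op=Op, b=b)(x))     # matches the L2 functional's own evaluation
```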
10 changes: 10 additions & 0 deletions pyproximal/proximal/VStack.py
```diff
@@ -51,10 +51,16 @@ def __init__(self, ops, nn=None, restr=None):
             self.xin = cum_nn[:-1]
             self.xin = np.insert(self.xin, 0, 0)
             self.xend = cum_nn
+            # store required size of input
+            self.nx = cum_nn[-1]
         else:
             self.restr = restr
+            # store required size of input
+            self.nx = np.sum([restr.iava.size for restr in self.restr])
 
     def __call__(self, x):
+        if x.size != self.nx:
+            raise ValueError(f'x must have size {self.nx}, instead the provided x has size {x.size}')
         f = 0.
         if hasattr(self, 'nn'):
             for iop, op in enumerate(self.ops):
@@ -66,6 +72,8 @@ def __call__(self, x):
 
     @_check_tau
     def prox(self, x, tau):
+        if x.size != self.nx:
+            raise ValueError(f'x must have size {self.nx}, instead the provided x has size {x.size}')
         if hasattr(self, 'nn'):
             f = np.hstack([op.prox(x[self.xin[iop]:self.xend[iop]], tau)
                            for iop, op in enumerate(self.ops)])
@@ -76,6 +84,8 @@ def prox(self, x, tau):
         return f
 
     def grad(self, x):
+        if x.size != self.nx:
+            raise ValueError(f'x must have size {self.nx}, instead the provided x has size {x.size}')
         if hasattr(self, 'nn'):
             f = np.hstack([op.grad(x[self.xin[iop]:self.xend[iop]])
                            for iop, op in enumerate(self.ops)])
```
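With `self.nx` stored at construction time, `__call__`, `prox`, and `grad` all fail fast on inputs of the wrong size. A short sketch of the new behaviour, assuming `VStack` and `L2` are importable from the package root as in the tests below; sizes are illustrative:

```python
import numpy as np
from pyproximal import L2, VStack

vstack = VStack([L2()] * 2, [10, 10])  # stacked functional expects inputs of size 20
xok = vstack.prox(np.ones(20), 2.)     # correct size: works as before

try:
    vstack.prox(np.ones(10), 2.)       # wrong size: now raises immediately
except ValueError as err:
    print(err)                         # "x must have size 20, instead the provided x has size 10"
```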
43 changes: 42 additions & 1 deletion pytests/test_proximal.py
```diff
@@ -2,7 +2,7 @@
 
 import numpy as np
 from numpy.testing import assert_array_equal, assert_array_almost_equal
-from pylops import MatrixMult, Identity
+from pylops import Identity, MatrixMult, Restriction
 
 import pyproximal
 from pyproximal.utils import moreau
@@ -84,6 +84,20 @@ def test_Orthogonal(par):
     assert moreau(orth, x, tau)
 
 
+@pytest.mark.parametrize("par", [(par1), (par2)])
+def test_VStack_error(par):
+    """VStack operator error when input has wrong dimensions
+    """
+    np.random.seed(10)
+    nxs = [par['nx'] // 4] * 4
+    nxs[-1] = par['nx'] - np.sum(nxs[:-1])
+    l2 = L2()
+    vstack = VStack([l2] * 4, nxs)
+
+    with pytest.raises(ValueError):
+        vstack.prox(np.ones(nxs[0]), 2)
+
+
 @pytest.mark.parametrize("par", [(par1), (par2)])
 def test_VStack(par):
     """L2 functional with VStack operator of multiple L1s
@@ -109,6 +123,33 @@ def test_VStack(par):
     assert moreau(vstack, x, tau)
 
 
+@pytest.mark.parametrize("par", [(par1), ])
+def test_VStack_restriction(par):
+    """L2 functional with VStack operator of multiple L2s using restriction
+    """
+    np.random.seed(10)
+    nxs = [par['nx'] // 2] * 2
+    nxs[-1] = par['nx'] - np.sum(nxs[:-1])
+    l2 = L2()
+    vstack = VStack([l2] * 2,
+                    restr=[Restriction(par['nx'], np.arange(par['nx'] // 2)),
+                           Restriction(par['nx'], par['nx'] // 2 + np.arange(par['nx'] // 2))])
+
+    # functional
+    x = np.random.normal(0., 1., par['nx']).astype(par['dtype'])
+    assert_array_almost_equal(l2(x), vstack(x), decimal=4)
+
+    # gradient
+    assert_array_almost_equal(l2.grad(x), vstack.grad(x), decimal=4)
+
+    # prox / dualprox
+    tau = 2.
+    assert_array_equal(l2.prox(x, tau), vstack.prox(x, tau))
+
+    # moreau
+    assert moreau(vstack, x, tau)
+
+
 def test_Nonlinear():
     """Nonlinear proximal operator. Since this is a template class simply check
     that errors are raised when not used properly
```
