Skip to content

Commit

Permalink
Adds WaveletOperator and L1Sparsity
Browse files Browse the repository at this point in the history
Signed-off-by: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com>
Co-authored-by: Edoardo Pasca <edo@mandorla.home>
Co-authored-by: Tommi <heik.tommi@gmail.com>
Co-authored-by: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com>
  • Loading branch information
4 people authored Apr 26, 2024
1 parent bc86eab commit 2f92286
Show file tree
Hide file tree
Showing 13 changed files with 993 additions and 34 deletions.
6 changes: 5 additions & 1 deletion CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,6 +1,10 @@
*XX.X.X

* XX.X.X
- Added wavelet operator, wrapping PyWavelets operator as a CIL operator
- Added L1Sparsity function, allowing calculations of `|Ax-b|_1` and its proximal, in the case of orthogonal operators, A
- Bug fix in gradient descent: `update_objective` was called twice on the initial point.


* 24.0.0
- Update to new CCPi-Regularisation toolkit v24.0.0. This is a backward incompatible release of the toolkit.
- CIL plugin support for TIGRE version v2.6
Expand Down
77 changes: 53 additions & 24 deletions Wrappers/Python/cil/optimisation/functions/L1Norm.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,24 +19,52 @@
from cil.optimisation.functions import Function
from cil.framework import BlockDataContainer
import numpy as np
import warnings

def soft_shrinkage(x, tau, out=None):

r"""Returns the value of the soft-shrinkage operator at x.
r"""Returns the value of the soft-shrinkage operator at x. This is used for the calculation of the proximal.
.. math:: \mathrm{soft\_shrinkage}(x) = \begin{cases}
x-\tau, & \mbox{if } x > \tau \\
x+\tau, & \mbox{if } x < -\tau \\
0, & \mbox{otherwise}
\end{cases}.
Parameters
-----------
x : DataContainer
where to evaluate the soft-shrinkage operator.
tau : float, numpy ndarray, DataContainer
tau : float, non-negative real, numpy ndarray, DataContainer
out : DataContainer, default None
where to store the result. If None, a new DataContainer is created.
Returns
--------
the value of the soft-shrinkage operator at x: DataContainer.
Note
------
Note that this function can deal with complex inputs, defining the `sgn` function as:
.. math:: sgn (z) = \begin{cases}
0, \mbox{if } z = 0 \
\frac{z}{|z|}, \mbox{otherwise}
\end{cases}.
"""
should_return = False
should_return = False

if np.min(tau) < 0:
warnings.warn(
"tau should be non-negative!", UserWarning)
if np.linalg.norm(np.imag(tau))>0:
raise ValueError("tau should be real!")



# get the sign of the input
dsign = np.exp(1j*np.angle(x.as_array())) if np.iscomplexobj(x) else x.sign()

Expand Down Expand Up @@ -71,18 +99,18 @@ class L1Norm(Function):
a) .. math:: F(x) = ||x||_{1}
b) .. math:: F(x) = ||x - b||_{1}
In the weighted case, :math:`w` is an array of positive weights.
In the weighted case, :math:`w` is an array of non-negative weights.
a) .. math:: F(x) = ||x||_{L^1(w)}
b) .. math:: F(x) = ||x - b||_{L^1(w)}
with :math:`||x||_{L^1(w)} = || x \cdot w||_1 = \sum_{i=1}^{n} |x_i| w_i`.
with :math:`||x||_{L^1(w)} = || x w||_1 = \sum_{i=1}^{n} |x_i| w_i`.
Parameters
-----------
weight: DataContainer, numpy ndarray, default None
Array of positive weights. If :code:`None` returns the L1 Norm.
Array of non-negative weights. If :code:`None` returns the L1 Norm.
b: DataContainer, default None
Translation of the function.
Expand Down Expand Up @@ -119,15 +147,15 @@ def convex_conjugate(self, x):
\end{cases}
In the weighted case the convex conjugate is the indicator of the unit
:math:`L^{\infty}` norm.
:math:`L^{\infty}(w^{-1})` norm.
See:
https://math.stackexchange.com/questions/1533217/convex-conjugate-of-l1-norm-function-with-weight
a) .. math:: F^{*}(x^{*}) = \mathbb{I}_{\{\|\cdot\|_{L^\infty(w^{-1})}\leq 1\}}(x^{*})
b) .. math:: F^{*}(x^{*}) = \mathbb{I}_{\{\|\cdot\|_{L^\infty(w^{-1})}\leq 1\}}(x^{*}) + \langle x^{*},b\rangle
with :math:`\|x\|_{L^\infty(w^{-1})} = \max_{i} \frac{|x_i|}{w_i}`.
with :math:`\|x\|_{L^\infty(w^{-1})} = \max_{i} \frac{|x_i|}{w_i}`, where possible cases of 0/0 are defined to be 1.
Parameters
-----------
Expand All @@ -148,8 +176,8 @@ def proximal(self, x, tau, out=None):
Consider the following cases:
a) .. math:: \mathrm{prox}_{\tau F}(x) = \mathrm{ShinkOperator}(x)
b) .. math:: \mathrm{prox}_{\tau F}(x) = \mathrm{ShinkOperator}(x) + b
a) .. math:: \mathrm{prox}_{\tau F}(x) = \mathrm{ShrinkOperator}_\tau(x)
b) .. math:: \mathrm{prox}_{\tau F}(x) = \mathrm{ShrinkOperator}_\tau(x - b) + b
where,
Expand All @@ -159,13 +187,13 @@ def proximal(self, x, tau, out=None):
by Amir Beck, SIAM 2017 https://archive.siam.org/books/mo25/mo25_ch6.pdf
a) .. math:: \mathrm{prox}_{\tau F}(x) = \mathrm{ShrinkOperator}_{\tau*w}(x)
b) .. math:: \mathrm{prox}_{\tau F}(x) = \mathrm{ShrinkOperator}_{\tau*w}(x) + b
b) .. math:: \mathrm{prox}_{\tau F}(x) = \mathrm{ShrinkOperator}_{\tau*w}(x - b) + b
Parameters
-----------
x: DataContainer
tau: float, ndarray, DataContainer
tau: float, real, ndarray, DataContainer
out: DataContainer, default None
If not None, the result will be stored in this object.
Expand All @@ -174,6 +202,7 @@ def proximal(self, x, tau, out=None):
The value of the proximal operator of the L1 norm function at x: DataContainer.
"""

return self.function.proximal(x, tau, out=out)


Expand Down Expand Up @@ -207,8 +236,7 @@ def __call__(self, x):
return y.abs().sum()

def convex_conjugate(self,x):
tmp = x.abs().max() - 1
if tmp<=1e-5:
if x.abs().max() - 1 <=0:
if self.b is not None:
return self.b.dot(x)
else:
Expand Down Expand Up @@ -238,31 +266,32 @@ def __init__(self, weight, b=None):
self.weight = weight
self.b = b

if np.min(weight) <= 0:
raise ValueError("Weights should be strictly positive!")
if np.min(weight) < 0:
raise ValueError("Weights should be non-negative!")

if np.linalg.norm(np.imag(weight))>0:
raise ValueError("Weights should be real!")

def __call__(self, x):
y = x*self.weight

if self.b is not None:
y -= self.b
y -= self.b*self.weight

return y.abs().sum()

def convex_conjugate(self,x):
tmp = (x.abs()/self.weight).max() - 1

if tmp<=1e-5:
if np.any(x.abs() > self.weight): # This handles weight being zero problems
return np.inf
else:
if self.b is not None:
return self.b.dot(x)
else:
return 0.
return np.inf


def proximal(self, x, tau, out=None):
tau *= self.weight
ret = _L1Norm.proximal(self, x, tau, out=out)
tau /= self.weight
ret = _L1Norm.proximal(self, x, tau*self.weight, out=out)
return ret

class MixedL11Norm(Function):
Expand Down
152 changes: 152 additions & 0 deletions Wrappers/Python/cil/optimisation/functions/L1Sparsity.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,152 @@
# -*- coding: utf-8 -*-
# Copyright 2023 United Kingdom Research and Innovation
# Copyright 2023 The University of Manchester
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# CIL Developers, listed at: https://github.com/TomographicImaging/CIL/blob/master/NOTICE.txt

from cil.optimisation.functions import Function, L1Norm
import warnings

class L1Sparsity(Function):

    r"""L1Sparsity function

    Calculates the following cases, depending on if the optional parameter
    `weight` or data `b` is passed. For `weight=None`:

    a) .. math:: F(x) = ||Qx||_{1}
    b) .. math:: F(x) = ||Qx - b||_{1}

    In the weighted case, `weight` = :math:`w` is an array of non-negative
    weights.

    a) .. math:: F(x) = ||Qx||_{L^1(w)}
    b) .. math:: F(x) = ||Qx - b||_{L^1(w)}

    with :math:`||x||_{L^1(w)} = || x \cdot w||_1 = \sum_{i=1}^{n} |x_i| w_i`.

    In all cases :math:`Q` is an orthogonal operator.

    Parameters
    ----------
    Q: orthogonal Operator
        Note that for the correct calculation of the proximal the provided
        operator must be orthogonal.
    b : Data, DataContainer, default is None
        Translation of the function.
    weight: array, optional, default=None
        Non-negative weight array matching the size of the range of
        operator :math:`Q`.
    """

    def __init__(self, Q, b=None, weight=None):
        '''creator
        '''
        # Warn rather than raise: __call__ and convex_conjugate remain
        # meaningful for non-orthogonal Q, but proximal is exact only when
        # Q is orthogonal.
        if not Q.is_orthogonal():
            warnings.warn(
                f"Invalid operator: `{Q}`. L1Sparsity is properly defined only for orthogonal operators!", UserWarning)

        super().__init__()
        self.Q = Q

        # All L1 arithmetic (weighted/unweighted, translated/untranslated)
        # is delegated to an L1Norm acting on the range of Q.
        self.l1norm = L1Norm(b=b, weight=weight)

    def __call__(self, x):
        r"""Returns the value of the L1Sparsity function at x.

        Consider the following cases:

        a) .. math:: F(x) = ||Qx||_{1}
        b) .. math:: F(x) = ||Qx - b||_{1}

        In the weighted case, `weight` = :math:`w` is an array of
        non-negative weights.

        a) .. math:: F(x) = ||Qx||_{L^1(w)}
        b) .. math:: F(x) = ||Qx - b||_{L^1(w)}

        with :math:`|| y ||_{L^1(w)} = || y w ||_1 = \sum_{i=1}^{n} | y_i | w_i`.
        """
        y = self.Q.direct(x)
        return self.l1norm(y)

    def convex_conjugate(self, x):
        r"""Returns the value of the convex conjugate of the L1Sparsity function at x.

        Here, we need to use the convex conjugate of L1Sparsity, which is the
        Indicator of the unit :math:`\ell^{\infty}` norm on the range of the
        (bijective) operator Q.

        Consider the non-weighted case:

        a) .. math:: F^{*}(x^{*}) = \mathbb{I}_{\{\|\cdot\|_{\infty}\leq1\}}(Qx^{*})
        b) .. math:: F^{*}(x^{*}) = \mathbb{I}_{\{\|\cdot\|_{\infty}\leq1\}}(Qx^{*}) + \langle Qx^{*},b\rangle

        .. math:: \mathbb{I}_{\{\|\cdot\|_{\infty}\leq1\}}(x^{*})
            = \begin{cases}
            0, \mbox{if } \|x^{*}\|_{\infty}\leq1\\
            \infty, \mbox{otherwise}
            \end{cases}

        In the weighted case the convex conjugate is the indicator of the unit
        :math:`L^{\infty}( w^{-1} )` norm.

        See:
        https://math.stackexchange.com/questions/1533217/convex-conjugate-of-l1-norm-function-with-weight

        a) .. math:: F^{*}(x^{*}) = \mathbb{I}_{\{\|\cdot\|_{L^\infty(w^{-1})}\leq 1\}}(Qx^{*})
        b) .. math:: F^{*}(x^{*}) = \mathbb{I}_{\{\|\cdot\|_{L^\infty(w^{-1})}\leq 1\}}(Qx^{*}) + \langle Qx^{*},b\rangle

        with :math:`\|x\|_{L^\infty(w^{-1})} = \max_{i} \frac{|x_i|}{w_i}` and
        possible cases of 0 / 0 are defined to be 1.
        """
        y = self.Q.direct(x)
        return self.l1norm.convex_conjugate(y)

    def proximal(self, x, tau, out=None):
        r"""Returns the value of the proximal operator of the L1Sparsity function at x with scaling parameter `tau`.

        Consider the following cases:

        a) .. math:: \mathrm{prox}_{\tau F}(x) = Q^T \mathrm{ShrinkOperator}_{\tau}(Qx)
        b) .. math:: \mathrm{prox}_{\tau F}(x) = Q^T \left( \mathrm{ShrinkOperator}_\tau(Qx- b) + b \right)

        where,

        .. math :: \mathrm{prox}_{\tau | \cdot |}(x) = \mathrm{ShrinkOperator}(x) = sgn(x) * \max\{ |x| - \tau, 0 \}

        The weighted case follows from Example 6.23 in Chapter 6 of
        "First-Order Methods in Optimization" by Amir Beck, SIAM 2017
        https://archive.siam.org/books/mo25/mo25_ch6.pdf

        a) .. math:: \mathrm{prox}_{\tau F}(x) = Q^T \mathrm{ShrinkOperator}_{\tau*w}(Qx)
        b) .. math:: \mathrm{prox}_{\tau F}(x) = Q^T \left( \mathrm{ShrinkOperator}_{\tau*w}(Qx-b) + b \right)

        Parameters
        -----------
        x: DataContainer
        tau: float, ndarray, DataContainer
        out: DataContainer, default None
            If not None, the result will be stored in this object.

        Returns
        --------
        The value of the proximal operator of the L1 norm function at x: DataContainer.
        """
        # Orthogonality of Q makes prox(F o Q) = Q^T o prox(F) o Q exact.
        y = self.Q.direct(x)
        # y is reused as scratch space for the in-place shrinkage result.
        self.l1norm.proximal(y, tau, out=y)
        return self.Q.adjoint(y, out=out)
1 change: 1 addition & 0 deletions Wrappers/Python/cil/optimisation/functions/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@
from .KullbackLeibler import KullbackLeibler
from .Rosenbrock import Rosenbrock
from .TotalVariation import TotalVariation
from .L1Sparsity import L1Sparsity
from .ApproximateGradientSumFunction import ApproximateGradientSumFunction
from .SGFunction import SGFunction

Original file line number Diff line number Diff line change
Expand Up @@ -80,3 +80,11 @@ def sum_abs_row(self):
def sum_abs_col(self):

return self.gm_domain.allocate(1)

def is_orthogonal(self):
    '''Determine whether this operator is orthogonal.

    Returns
    -------
    `Bool`
        Always ``True``: this operator satisfies :math:`Q^T Q = I`.
    '''
    return True
9 changes: 9 additions & 0 deletions Wrappers/Python/cil/optimisation/operators/Operator.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,15 @@ def is_linear(self):
`Bool`
'''
return False

def is_orthogonal(self):
    '''Determine whether this operator is orthogonal.

    Returns
    -------
    `Bool`
        ``False`` by default; orthogonal operator subclasses override
        this to return ``True``.
    '''
    return False


def direct(self, x, out=None):
r"""Calls the operator
Expand Down
Loading

0 comments on commit 2f92286

Please sign in to comment.