Merge remote-tracking branch 'upstream/master' into binner_padder_patch
gfardell committed Aug 23, 2023
2 parents 4b28ca5 + b0503d1 commit 2f49aa7
Showing 14 changed files with 455 additions and 137 deletions.
10 changes: 10 additions & 0 deletions CHANGELOG.md
@@ -1,3 +1,13 @@

+* x.x.x
+  - Fix bug in IndicatorBox proximal_conjugate
+  - Allow CCPi Regulariser functions for non-CIL objects
+  - Add norm for CompositionOperator
+  - Refactor SIRT algorithm to make it more computationally and memory efficient
+  - Optimisation in L2NormSquared
+  - Fix for show_geometry bug for 2D data
+  - FBP split processing bug fix - now respects panel origin
+
* 23.0.1
  - Fix bug with NikonReader requiring ROI to be set in constructor.

1 change: 1 addition & 0 deletions NOTICE.txt
@@ -52,6 +52,7 @@ Richard Brown (2020-2021) - 5
Sam Tygier (2022) - 1
Andrew Sharits (2022) - 8
Kyle Pidgeon (2023) - 1
+Letizia Protopapa (2023) - 1

CIL Advisory Board:
Llion Evans - 9
133 changes: 87 additions & 46 deletions Wrappers/Python/cil/optimisation/algorithms/SIRT.py
@@ -19,9 +19,9 @@

from cil.optimisation.algorithms import Algorithm
from cil.optimisation.functions import IndicatorBox
+from cil.framework import BlockDataContainer
-from numpy import inf
import numpy
-import warnings
import logging

class SIRT(Algorithm):
@@ -35,36 +35,38 @@ class SIRT(Algorithm):

    The SIRT algorithm is

-    .. math:: x^{k+1} = \mathrm{proj}_{C}( x^{k} + D ( A^{T} ( M * (b - Ax) ) ) ),
+    .. math:: x^{k+1} = \mathrm{proj}_{C}( x^{k} + \omega * D ( A^{T} ( M * (b - Ax) ) ) ),

-    where :math:`M = \frac{1}{A*\mathbb{1}}`, :math:`D = \frac{1}{A^{T}\mathbb{1}}`, :math:`\mathbb{1}` is a :code:`DataContainer` of ones
-    and :math:`\mathrm{prox}_{C}` is the projection over a set :math:`C`.
+    where,
+    :math:`M = \frac{1}{A*\mathbb{1}}`,
+    :math:`D = \frac{1}{A^{T}\mathbb{1}}`,
+    :math:`\mathbb{1}` is a :code:`DataContainer` of ones,
+    :math:`\mathrm{proj}_{C}` is the projection onto a set :math:`C`,
+    and :math:`\omega` is the relaxation parameter.

    Parameters
    ----------
    initial : DataContainer, default = None
        Starting point of the algorithm, default value = Zero DataContainer
    operator : LinearOperator
        The operator A.
    data : DataContainer
        The data b.
    lower : :obj:`float`, default = None
-        Lower bound constraint, default value = :code:`-inf`.
+        Lower bound constraint
    upper : :obj:`float`, default = None
-        Upper bound constraint, default value = :code:`-inf`.
+        Upper bound constraint
    constraint : Function, default = None
        A function with :code:`proximal` method, e.g., :class:`.IndicatorBox` function and :meth:`.IndicatorBox.proximal`,
        or :class:`.TotalVariation` function and :meth:`.TotalVariation.proximal`.

    kwargs:
        Keyword arguments used from the base class :class:`.Algorithm`.

    Note
    ----
-    If :code:`constraint` is not passed, then :code:`lower` and :code:`upper` are looked at and an :class:`.IndicatorBox`
-    function is created.
+    If :code:`constraint` is not passed, :code:`lower` and :code:`upper` are used to create an :class:`.IndicatorBox` and apply its :code:`proximal`.

    If :code:`constraint` is passed, its :code:`proximal` method must be implemented.
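
To make the update rule concrete, here is a minimal numpy sketch of one SIRT iteration with a dense matrix standing in for the operator (illustrative only; the class in this diff works on CIL operators and DataContainers):

import numpy as np

def sirt_step(A, b, x, omega=1.0):
    # M = 1 / (A 1): reciprocal row sums; D = 1 / (A^T 1): reciprocal column sums.
    # Zero rows or columns would give inf here; the class replaces those with 1.0.
    M = 1.0 / A.dot(np.ones(A.shape[1]))
    D = 1.0 / A.T.dot(np.ones(A.shape[0]))
    r = b - A.dot(x)                        # residual
    return x + omega * D * A.T.dot(M * r)   # relaxed, weighted back-projection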
@@ -77,9 +79,6 @@ class SIRT(Algorithm):

    .. math:: D = \frac{1}{A^{T}\mathbb{1}} = \frac{1}{\sum_{i}a_{i,j}}

-    In case of division errors above, :meth:`.fix_weights` can be used, where :code:`np.nan`, :code:`+np.inf` and :code:`-np.inf` values
-    are replaced with 1.0.

    Examples
    --------
@@ -105,57 +104,104 @@ def set_up(self, initial, operator, data, lower=None, upper=None, constraint=None):

        logging.info("{} setting up".format(self.__class__.__name__, ))

        self.x = initial.copy()
+        self.tmp_x = self.x * 0.0
        self.operator = operator
        self.data = data

        self.r = data.copy()

-        self.relax_par = 1.0
-
        self.constraint = constraint
        if constraint is None:
            if lower is not None or upper is not None:
-                if lower is None:
-                    lower=-inf
-                if upper is None:
-                    upper=inf
+                # IndicatorBox accepts None for lower and/or upper
                self.constraint=IndicatorBox(lower=lower,upper=upper)

+        self._relaxation_parameter = 1
+
        # Set up scaling matrices D and M.
-        self.M = 1./self.operator.direct(self.operator.domain_geometry().allocate(value=1.0))
-        self.D = 1./self.operator.adjoint(self.operator.range_geometry().allocate(value=1.0))
+        self._set_up_weights()

        self.configured = True
        logging.info("{} configured".format(self.__class__.__name__, ))
-    def fix_weights(self):
-
-        r""" In case of division error when the preconditioning arrays :code:`M` and :code:`D`
-        are defined, the :code:`np.nan`, :code:`+np.inf` and :code:`-np.inf` values are replaced with one.
+    @property
+    def relaxation_parameter(self):
+        return self._relaxation_parameter
+
+    @property
+    def D(self):
+        return self._Dscaled / self._relaxation_parameter
+
+    def set_relaxation_parameter(self, value=1.0):
+        r"""Set the relaxation parameter :math:`\omega`
+
+        Parameters
+        ----------
+        value : float
+            The relaxation parameter to be applied to the update. Must be strictly between 0 and 2 to guarantee asymptotic convergence.
+        """
+        if value <= 0 or value >= 2:
+            raise ValueError("Expected relaxation parameter to be in range 0-2. Got {}".format(value))
+
+        self._relaxation_parameter = value
+        self._set_up_weights()
+        self._Dscaled *= self._relaxation_parameter
+
+    def _set_up_weights(self):
+        self.M = 1./self.operator.direct(self.operator.domain_geometry().allocate(value=1.0))
+        self._Dscaled = 1./self.operator.adjoint(self.operator.range_geometry().allocate(value=1.0))

        # fix for possible +inf, -inf, nan values
-        for arr in [self.M, self.D]:
-
-            tmp = arr.as_array()
-            arr_replace = numpy.isfinite(tmp)
-            tmp[~arr_replace] = 1.0
-            arr.fill(tmp)
+        for arr in [self.M, self._Dscaled]:
+            self._remove_nan_or_inf(arr, replace_with=1.0)
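
A usage sketch of the new relaxation-parameter API (the names `A` and `b` and the allocation call are assumptions for illustration, standing for any CIL LinearOperator and matching data):

from cil.optimisation.algorithms import SIRT

# A: a CIL LinearOperator, b: matching AcquisitionData (assumed to exist)
x0 = A.domain_geometry().allocate(0)
sirt = SIRT(initial=x0, operator=A, data=b, lower=0, max_iteration=100)
sirt.set_relaxation_parameter(1.5)   # must lie strictly in (0, 2)
sirt.run(100)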


+    def _remove_nan_or_inf(self, datacontainer, replace_with=1.0):
+        """Replace nan and inf values in datacontainer with a given value.
+
+        If the input is a :code:`BlockDataContainer`, the substitution is applied to each container it holds.
+
+        Parameters
+        ----------
+        datacontainer : DataContainer, BlockDataContainer
+        replace_with : float, default 1.0
+            Value to replace elements that evaluate to NaN or inf
+        """
+        if isinstance(datacontainer, BlockDataContainer):
+            for block in datacontainer.containers:
+                self._remove_nan_or_inf(block, replace_with=replace_with)
+            return
+
+        tmp = datacontainer.as_array()
+        numpy.nan_to_num(tmp, copy=False, nan=replace_with, posinf=replace_with, neginf=replace_with)
+        datacontainer.fill(tmp)
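
For reference, the in-place sanitisation above leans on numpy.nan_to_num; a standalone check of the behaviour:

import numpy

a = numpy.array([2.0, numpy.nan, numpy.inf, -numpy.inf])
numpy.nan_to_num(a, copy=False, nan=1.0, posinf=1.0, neginf=1.0)
print(a)   # [2. 1. 1. 1.]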


    def update(self):
        r""" Performs a single iteration of the SIRT algorithm

-        .. math:: x^{k+1} = \mathrm{proj}_{C}( x^{k} + D ( A^{T} ( M * (b - Ax) ) ) )
+        .. math:: x^{k+1} = \mathrm{proj}_{C}( x^{k} + \omega * D ( A^{T} ( M * (b - Ax) ) ) )
        """

-        self.r = self.data - self.operator.direct(self.x)
-
-        self.x += self.relax_par * (self.D*self.operator.adjoint(self.M*self.r))
+        # r = b - Ax, computed in place
+        self.operator.direct(self.x, out=self.r)
+        self.r.sapyb(-1, self.data, 1.0, out=self.r)
+
+        # self._Dscaled holds D prescaled by the relaxation parameter (default 1)
+        self.r *= self.M
+        self.operator.adjoint(self.r, out=self.tmp_x)
+        self.x.sapyb(1.0, self.tmp_x, self._Dscaled, out=self.x)

        if self.constraint is not None:
-            self.x = self.constraint.proximal(self.x, tau=1)
+            # IndicatorBox allows in-place operation for proximal
+            self.constraint.proximal(self.x, tau=1, out=self.x)
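
The in-place calls assume CIL's sapyb convention, where x.sapyb(a, y, b, out=z) computes z = a*x + b*y. A self-contained numpy analogue of the update body (a sketch of the arithmetic, not the CIL API):

import numpy as np

A = np.array([[1.0, 2.0], [3.0, 4.0]])
b = np.array([1.0, 1.0])
x = np.zeros(2)
M = 1.0 / A.sum(axis=1)            # 1 / (A 1), row sums
D_scaled = 1.0 / A.sum(axis=0)     # 1 / (A^T 1), column sums, times omega = 1

r = -1.0 * A.dot(x) + 1.0 * b      # mirrors r.sapyb(-1, self.data, 1.0, out=r)
r *= M
tmp_x = A.T.dot(r)
x = 1.0 * x + D_scaled * tmp_x     # mirrors x.sapyb(1.0, tmp_x, self._Dscaled, out=x)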

    def update_objective(self):
        r"""Returns the objective
@@ -164,8 +210,3 @@ def update_objective(self):
        """
        self.loss.append(self.r.squared_norm())

30 changes: 0 additions & 30 deletions Wrappers/Python/cil/optimisation/functions/IndicatorBox.py
@@ -142,36 +142,6 @@ def __call__(self, x):

            return self.evaluate(x)
        return 0.0

-    def proximal_conjugate(self, x, tau, out=None):
-        r'''Proximal operator of the convex conjugate of IndicatorBox at x:
-
-        .. math:: prox_{\tau * f^{*}}(x)
-
-        Parameters
-        ----------
-        x : DataContainer
-            Input to the proximal operator
-        tau : float
-            Step size. Notice it is ignored in IndicatorBox, see ``proximal`` for details
-        out : DataContainer, optional
-            Output of the proximal operator. If not provided, a new DataContainer is created.
-        '''
-
-        # x - tau * self.proximal(x/tau, tau)
-        should_return = False
-
-        if out is None:
-            out = self.proximal(x, tau)
-            should_return = True
-        else:
-            self.proximal(x, tau, out=out)
-
-        out.sapyb(-1., x, 1., out=out)
-
-        if should_return:
-            return out
-
    def proximal(self, x, tau, out=None):
        r'''Proximal operator of IndicatorBox at x
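
The deleted override is the changelog's "Fix bug in IndicatorBox proximal_conjugate": as its own comment notes, the Moreau decomposition gives prox_{tau f*}(x) = x - tau * prox_{f/tau}(x/tau), but the body computed x - prox(x, tau), dropping the tau scaling; removing it presumably defers to the generic implementation in the Function base class. A minimal numpy sketch of the correct identity for a box indicator (proj_box and the values are illustrative, not CIL code):

import numpy as np

def proj_box(x, lo, hi):                 # prox of the indicator; tau plays no role
    return np.clip(x, lo, hi)

def prox_conjugate(x, tau, lo, hi):
    # Moreau: prox_{tau f*}(x) = x - tau * prox_{f/tau}(x / tau)
    return x - tau * proj_box(x / tau, lo, hi)

x = np.array([-2.0, 0.5, 3.0])
print(prox_conjugate(x, tau=2.0, lo=0.0, hi=1.0))   # [-2. 0. 1.]
# the removed override would return x - proj_box(x, 0, 1) = [-2. 0. 2.]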
47 changes: 17 additions & 30 deletions Wrappers/Python/cil/optimisation/functions/L2NormSquared.py
@@ -88,21 +88,15 @@ def gradient(self, x, out=None):

        b) :math:`F'(x) = 2(x-b)`
        """

-        if out is not None:
-
-            out.fill(x)
-            if self.b is not None:
-                out -= self.b
-            out *= 2
-
+        if self.b is None:
+            ret = x.multiply(2, out=out)
        else:
-
-            y = x
-            if self.b is not None:
-                y = x - self.b
-            return 2*y
+            ret = x.sapyb(2, self.b, -2, out=out)
+
+        if out is None:
+            return ret
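
A quick finite-difference check of the rewritten gradient (a sketch; plain numpy arrays stand in for DataContainers):

import numpy as np

def F(x, b):                        # F(x) = ||x - b||_2^2
    return np.sum((x - b) ** 2)

x = np.array([1.0, -2.0]); b = np.array([0.5, 0.5])
g = 2 * x - 2 * b                   # what x.sapyb(2, self.b, -2) produces

eps = 1e-6
e0 = np.array([eps, 0.0])
print((F(x + e0, b) - F(x - e0, b)) / (2 * eps), g[0])   # both ~1.0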


def convex_conjugate(self, x):

@@ -119,7 +113,7 @@ def convex_conjugate(self, x):

        if self.b is not None:
            tmp = x.dot(self.b)

-        return (1./4.) * x.squared_norm() + tmp
+        return 0.25 * x.squared_norm() + tmp


    def proximal(self, x, tau, out=None):

@@ -134,23 +128,16 @@

        """

-        if out is None:
-
-            if self.b is None:
-                return x/(1+2*tau)
-            else:
-                tmp = x.subtract(self.b)
-                tmp /= (1+2*tau)
-                tmp += self.b
-                return tmp
+        mult = 1/(1+2*tau)
+
+        if self.b is None:
+            ret = x.multiply(mult, out=out)
        else:
-            if self.b is not None:
-                x.subtract(self.b, out=out)
-                out /= (1+2*tau)
-                out += self.b
-            else:
-                x.divide((1+2*tau), out=out)
+            ret = x.sapyb(mult, self.b, (1-mult), out=out)
+
+        if out is None:
+            return ret



class WeightedL2NormSquared(Function):
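The rewritten proximal uses the closed form prox_{tau F}(x) = (x + 2*tau*b)/(1 + 2*tau) = mult*x + (1 - mult)*b with mult = 1/(1 + 2*tau), i.e. a single sapyb. A numerical sanity check by direct minimisation (a sketch with plain numpy arrays):

import numpy as np

x = np.array([2.0, -1.0]); b = np.array([0.5, 0.5]); tau = 0.3
mult = 1 / (1 + 2 * tau)
closed = mult * x + (1 - mult) * b   # mirrors x.sapyb(mult, self.b, (1-mult), out=out)

# gradient descent on ||u - b||^2 + (1/(2*tau)) * ||u - x||^2
u = x.copy()
for _ in range(2000):
    u -= 0.01 * (2 * (u - b) + (u - x) / tau)
print(closed, u)                     # agree to ~1e-6 or better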
9 changes: 7 additions & 2 deletions Wrappers/Python/cil/optimisation/operators/Operator.py
@@ -547,5 +547,10 @@ def is_linear(self):

        return self.linear_flag

+    def calculate_norm(self):
+        '''Returns the norm of the CompositionOperator, that is the product of the norms
+        of its operators.'''
+        norm = 1.
+        for operator in self.operators:
+            norm *= operator.norm()
+        return norm
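
Worth noting: for a composition the product of norms is in general only an upper bound, ||A o B|| <= ||A|| * ||B||, and it can be loose; a small numpy illustration:

import numpy as np

A = np.array([[1.0, 0.0], [0.0, 0.0]])
B = np.array([[0.0, 0.0], [0.0, 1.0]])

print(np.linalg.norm(A, 2) * np.linalg.norm(B, 2))   # 1.0  (product of norms)
print(np.linalg.norm(A @ B, 2))                      # 0.0  (norm of the composition)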
