From 0ba2f8e2935db66ec3dfe8c88e407e8d0eb5609f Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Fri, 8 Sep 2023 08:10:17 +0100 Subject: [PATCH 1/4] bugfix in test_SIRF.py (#1502) Removed import line which is in a try/except loop --- Wrappers/Python/test/test_SIRF.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/Wrappers/Python/test/test_SIRF.py b/Wrappers/Python/test/test_SIRF.py index 9d8af9bb3e..5013ccf0f2 100644 --- a/Wrappers/Python/test/test_SIRF.py +++ b/Wrappers/Python/test/test_SIRF.py @@ -29,7 +29,6 @@ from cil.optimisation.algorithms import FISTA import os -from cil.plugins.ccpi_regularisation.functions import FGP_TV, TGV, TNV, FGP_dTV from cil.utilities.display import show2D from testclass import CCPiTestClass @@ -486,4 +485,4 @@ def setUp(self): recon.set_input(preprocessed_data) recon.process() self.image1 = recon.get_output() - self.image2 = self.image1 * 0.5 \ No newline at end of file + self.image2 = self.image1 * 0.5 From a3c5823bf59d3c2ff5251b73ac3fdbe58b6a305d Mon Sep 17 00:00:00 2001 From: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com> Date: Mon, 18 Sep 2023 16:55:09 +0100 Subject: [PATCH 2/4] Tv warmstart (#1493) *Added warm start functionality to the CIL total variation function --------- Signed-off-by: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com> Signed-off-by: Edoardo Pasca Signed-off-by: Gemma Fardell <47746591+gfardell@users.noreply.github.com> Co-authored-by: Claire Delplanke Co-authored-by: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com> Co-authored-by: Gemma Fardell <47746591+gfardell@users.noreply.github.com> Co-authored-by: Edoardo Pasca --- CHANGELOG.md | 2 + NOTICE.txt | 1 + .../optimisation/functions/TotalVariation.py | 219 ++++++++++-------- Wrappers/Python/test/test_functions.py | 150 +++++++++--- 4 files changed, 242 insertions(+), 130 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f694bc1ac3..6331754000 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,8 +6,10 @@ - Refactor SIRT algorithm to make it more computationally and memory efficient - Optimisation in L2NormSquared - Fix for show_geometry bug for 2D data + - Added warmstart capability to proximal evaluation of the CIL TotalVariation function. - FBP split processing bug fix - now respects panel origin + * 23.0.1 - Fix bug with NikonReader requiring ROI to be set in constructor. diff --git a/NOTICE.txt b/NOTICE.txt index a1d08b0d52..6d8bb29c62 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -40,6 +40,7 @@ Gemma Fardell (2019 - present) - 1 Kris Thielemans (2020 - present) - 5 Laura Murgatroyd (2021 - present) - 1 Evelina Ametova (2018-2020) - 3, (2020-2021) - 6 +Margaret Duff (2023 - present) - 1 CIL Contributors: Srikanth Nagella (2017-2018) - 1 diff --git a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py index 0934e01b4b..168dd60d47 100644 --- a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py +++ b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py @@ -18,6 +18,7 @@ # CIL Developers, listed at: https://github.com/TomographicImaging/CIL/blob/master/NOTICE.txt # Claire Delplancke (University of Bath) + from cil.optimisation.functions import Function, IndicatorBox, MixedL21Norm, MixedL11Norm from cil.optimisation.operators import GradientOperator import numpy as np @@ -26,9 +27,8 @@ import logging - class TotalVariation(Function): - + r""" Total variation Function .. 
math:: \mathrm{TV}(u) := \|\nabla u\|_{2,1} = \sum \|\nabla u\|_{2},\, (\mbox{isotropic}) @@ -38,8 +38,8 @@ class TotalVariation(Function): Notes ----- - The :code:`TotalVariation` (TV) :code:`Function` acts as a compositite function, i.e., - the composition of the :class:`.MixedL21Norm` fuction and the :class:`.GradientOperator` operator, + The :code:`TotalVariation` (TV) :code:`Function` acts as a composite function, i.e., + the composition of the :class:`.MixedL21Norm` function and the :class:`.GradientOperator` operator, .. math:: f(u) = \|u\|_{2,1}, \Rightarrow (f\circ\nabla)(u) = f(\nabla x) = \mathrm{TV}(u) @@ -47,19 +47,25 @@ class TotalVariation(Function): algorithm to solve: .. math:: \mathrm{prox}_{\tau \mathrm{TV}}(b) := \underset{u}{\mathrm{argmin}} \frac{1}{2\tau}\|u - b\|^{2} + \mathrm{TV}(u) - + The algorithm used for the proximal operator of TV is the Fast Gradient Projection algorithm (or FISTA) applied to the _dual problem_ of the above problem, see :cite:`BeckTeboulle_b`, :cite:`BeckTeboulle_a`, :cite:`Zhu2010`. + See also "Multicontrast MRI Reconstruction with Structure-Guided Total Variation", Ehrhardt, Betcke, 2016. + Parameters ---------- - max_iteration : :obj:`int`, default = 100 - Maximum number of iterations for the FGP algorithm. + max_iteration : :obj:`int`, default = 5 + Maximum number of iterations for the FGP algorithm to solve to solve the dual problem + of the Total Variation Denoising problem (ROF). If warm_start=False, this should be around 100, + or larger, with a set tolerance. tolerance : :obj:`float`, default = None - Stopping criterion for the FGP algorithm. - + Stopping criterion for the FGP algorithm used to to solve the dual problem + of the Total Variation Denoising problem (ROF). If the difference between iterates in the FGP algorithm is less than the tolerance + the iterations end before the max_iteration number. + .. math:: \|x^{k+1} - x^{k}\|_{2} < \mathrm{tolerance} correlation : :obj:`str`, default = `Space` @@ -81,7 +87,8 @@ class TotalVariation(Function): Splits the Gradient into spatial gradient and spectral or temporal gradient for multichannel data. info : :obj:`boolean`, default = False - Information is printed for the stopping criterion of the FGP algorithm + Information is printed for the stopping criterion of the FGP algorithm used to solve the dual problem + of the Total Variation Denoising problem (ROF). strong_convexity_constant : :obj:`float`, default = 0 A strongly convex term weighted by the :code:`strong_convexity_constant` (:math:`\gamma`) parameter is added to the Total variation. @@ -91,6 +98,17 @@ class TotalVariation(Function): .. math:: \underset{u}{\mathrm{argmin}} \frac{1}{2\frac{\tau}{1+\gamma\tau}}\|u - \frac{b}{1+\gamma\tau}\|^{2} + \mathrm{TV}(u) + warm_start : :obj:`boolean`, default = True + If set to true, the FGP algorithm used to solve the dual problem of the Total Variation Denoising problem (ROF) is initiated by the final value from the previous iteration and not at zero. + This allows the max_iteration value to be reduced to 5-10 iterations. + + + Note + ---- + + With warm_start set to the default, True, the TV function will keep in memory the range of the gradient of the image to be denoised, i.e. N times the dimensionality of the image. This increases the memory requirements. + However, during the evaluation of `proximal` the memory requirements will be unchanged as the same amount of memory will need to be allocated and deallocated. 
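+
+    A minimal warm-start sketch (the geometry, data and regularisation weight
+    below are assumed purely for illustration):
+
+    >>> from cil.framework import ImageGeometry
+    >>> from cil.optimisation.functions import TotalVariation
+    >>> ig = ImageGeometry(voxel_num_x=64, voxel_num_y=64)
+    >>> b = ig.allocate('random', seed=10)
+    >>> TV = 0.1 * TotalVariation(max_iteration=5, warm_start=True)
+    >>> x1 = TV.proximal(b, tau=1.0)  # first call: the dual variable starts at zero
+    >>> x2 = TV.proximal(b, tau=1.0)  # second call: FGP resumes from the stored dual variable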
+ Note ---- @@ -111,7 +129,7 @@ class TotalVariation(Function): >>> alpha = 2.0 >>> TV = TotalVariation() - >>> sol = TV.proxima(b, tau = alpha) + >>> sol = TV.proximal(b, tau = alpha) Examples -------- @@ -122,7 +140,7 @@ class TotalVariation(Function): >>> alpha = 2.0 >>> TV = TotalVariation(isotropic=False, lower=1.0, upper=2.0) - >>> sol = TV.proxima(b, tau = alpha) + >>> sol = TV.proximal(b, tau = alpha) Examples @@ -133,56 +151,52 @@ class TotalVariation(Function): >>> alpha = 2.0 >>> gamma = 1e-3 >>> TV = alpha * TotalVariation(isotropic=False, strong_convexity_constant=gamma) - >>> sol = TV.proxima(b, tau = 1.0) - - """ + >>> sol = TV.proximal(b, tau = 1.0) + """ def __init__(self, - max_iteration=100, - tolerance = None, - correlation = "Space", - backend = "c", - lower = None, - upper = None, - isotropic = True, - split = False, - info = False, - strong_convexity_constant = 0): - - - super(TotalVariation, self).__init__(L = None) + max_iteration=10, + tolerance=None, + correlation="Space", + backend="c", + lower=None, + upper=None, + isotropic=True, + split=False, + info=False, + strong_convexity_constant=0, + warm_start=True): + + super(TotalVariation, self).__init__(L=None) # Regularising parameter = alpha self.regularisation_parameter = 1. - - # Iterations for FGP_TV + self.iterations = max_iteration - - # Tolerance for FGP_TV + self.tolerance = tolerance - + # Total variation correlation (isotropic=Default) self.isotropic = isotropic - + # correlation space or spacechannels self.correlation = correlation - self.backend = backend - + self.backend = backend + # Define orthogonal projection onto the convex set C if lower is None: lower = -np.inf if upper is None: - upper = np.inf + upper = np.inf self.lower = lower self.upper = upper self.projection_C = IndicatorBox(lower, upper).proximal - - # Setup GradientOperator as None. This is to avoid domain argument in the __init__ + + # Setup GradientOperator as None. This is to avoid domain argument in the __init__ self._gradient = None self._domain = None - - # Print stopping information (iterations and tolerance error) of FGP_TV + self.info = info if self.info: warnings.warn(" `info` is deprecate. 
Please use logging instead.") @@ -190,6 +204,10 @@ def __init__(self, # splitting Gradient self.split = split + # For the warm_start functionality + self.warm_start = warm_start + self._p2 = None + # Strong convexity for TV self.strong_convexity_constant = strong_convexity_constant @@ -199,6 +217,15 @@ def __init__(self, else: self.func = MixedL11Norm() + def _get_p2(self): + r"""The initial value for the dual in the proximal calculation - allocated to zero in the case of warm_start=False + or initialised as the last iterate seen in the proximal calculation in the case warm_start=True .""" + + if self._p2 is None: + return self.gradient.range_geometry().allocate(0) + else: + return self._p2 + @property def regularisation_parameter(self): return self._regularisation_parameter @@ -206,11 +233,11 @@ def regularisation_parameter(self): @regularisation_parameter.setter def regularisation_parameter(self, value): if not isinstance(value, Number): - raise TypeError("regularisation_parameter: expected a number, got {}".format(type(value))) + raise TypeError( + "regularisation_parameter: expected a number, got {}".format(type(value))) self._regularisation_parameter = value def __call__(self, x): - r""" Returns the value of the TotalVariation function at :code:`x` .""" try: @@ -223,46 +250,42 @@ def __call__(self, x): if self._L is None: self.calculate_Lipschitz() - if self.strong_convexity_constant>0: - strongly_convex_term = (self.strong_convexity_constant/2)*x.squared_norm() + if self.strong_convexity_constant > 0: + strongly_convex_term = ( + self.strong_convexity_constant/2)*x.squared_norm() else: strongly_convex_term = 0 return self.regularisation_parameter * self.func(self.gradient.direct(x)) + strongly_convex_term - - def proximal(self, x, tau, out = None): - - r""" Returns the proximal operator of the TotalVariation function at :code:`x` .""" - + def proximal(self, x, tau, out=None): + r""" Returns the proximal operator of the TotalVariation function at :code:`x` .""" - if self.strong_convexity_constant>0: + if self.strong_convexity_constant > 0: strongly_convex_factor = (1 + tau * self.strong_convexity_constant) x /= strongly_convex_factor tau /= strongly_convex_factor - + if out is None: solution = self._fista_on_dual_rof(x, tau) else: - self._fista_on_dual_rof(x, tau, out = out) + self._fista_on_dual_rof(x, tau, out=out) - if self.strong_convexity_constant>0: + if self.strong_convexity_constant > 0: x *= strongly_convex_factor tau *= strongly_convex_factor if out is None: - return solution - + return solution - def _fista_on_dual_rof(self, x, tau, out = None): - - r""" Implements the Fast Gradient Projection algorithm on the dual problem + def _fista_on_dual_rof(self, x, tau, out=None): + r""" Runs the Fast Gradient Projection (FGP) algorithm to solve the dual problem of the Total Variation Denoising problem (ROF). .. math: \max_{\|y\|_{\infty}<=1.} \frac{1}{2}\|\nabla^{*} y + x \|^{2} - \frac{1}{2}\|x\|^{2} - """ + """ try: self._domain = x.geometry except: @@ -271,14 +294,15 @@ def _fista_on_dual_rof(self, x, tau, out = None): # Compute Lipschitz constant provided that domain is not None. 
# Lipschitz constant depends on the GradientOperator, which is configured only if domain is not None if self._L is None: - self.calculate_Lipschitz() - + self.calculate_Lipschitz() + # initialise - t = 1 + t = 1 - p1 = self.gradient.range_geometry().allocate(0) - p2 = self.gradient.range_geometry().allocate(0) - tmp_q = self.gradient.range_geometry().allocate(0) + # dual variable - its content is overwritten during iterations + p1 = self.gradient.range_geometry().allocate(None) + p2 = self._get_p2() + tmp_q = p2.copy() # multiply tau by -1 * regularisation_parameter here so it's not recomputed every iteration # when tau is an array this is done inplace so reverted at the end @@ -291,53 +315,48 @@ def _fista_on_dual_rof(self, x, tau, out = None): should_return = False if out is None: should_return = True - out = self.gradient.domain_geometry().allocate(0) + out = self.gradient.domain_geometry().allocate(0) - should_break = False for k in range(self.iterations): - + t0 = t - self.gradient.adjoint(tmp_q, out = out) + self.gradient.adjoint(tmp_q, out=out) out.sapyb(tau_reg_neg, x, 1.0, out=out) - self.projection_C(out, tau=None, out = out) - - if should_break: - break + self.projection_C(out, tau=None, out=out) self.gradient.direct(out, out=p1) multip = (-self.L)/tau_reg_neg - p1.multiply(multip,out=p1) - - tmp_q += p1 - if self.tolerance is not None and k%5==0: + tmp_q.sapyb(1., p1, multip, out=tmp_q) + + if self.tolerance is not None and k % 5 == 0: + p1 *= multip error = p1.norm() error /= tmp_q.norm() - if error <= self.tolerance: - should_break = True + if error < self.tolerance: + break - # Depending on the case, isotropic or anisotropic, the proximal conjugate of the MixedL21Norm (isotropic case), - # or the proximal conjugate of the MixedL11Norm (anisotropic case) is computed. self.func.proximal_conjugate(tmp_q, 1.0, out=p1) - + t = (1 + np.sqrt(1 + 4 * t0 ** 2)) / 2 - p1.subtract(p2, out=tmp_q) tmp_q *= (t0-1)/t tmp_q += p1 - #switch p1 and p2 references + # switch p1 and p2 references tmp = p1 p1 = p2 p2 = tmp + if self.warm_start: + self._p2 = p2 - # Print stopping information (iterations and tolerance error) of FGP_TV if self.info: if self.tolerance is not None: - logging.info("Stop at {} iterations with tolerance {} .".format(k, error)) + logging.info( + "Stop at {} iterations with tolerance {} .".format(k, error)) else: - logging.info("Stop at {} iterations.".format(k)) + logging.info("Stop at {} iterations.".format(k)) # return tau to its original state if it was modified if id(tau_reg_neg) == id(tau): @@ -346,18 +365,17 @@ def _fista_on_dual_rof(self, x, tau, out = None): if should_return: return out + def convex_conjugate(self, x): + r""" Returns the value of convex conjugate of the TotalVariation function at :code:`x` .""" + return 0.0 - def convex_conjugate(self,x): - r""" Returns the value of convex conjugate of the TotalVariation function at :code:`x` .""" - return 0.0 - def calculate_Lipschitz(self): r""" Default value for the Lipschitz constant.""" - + # Compute the Lipschitz parameter from the operator if possible # Leave it initialised to None otherwise - self._L = (1./self.gradient.norm())**2 - + self._L = (1./self.gradient.norm())**2 + @property def gradient(self): r""" GradientOperator is created if it is not instantiated yet. 
The domain of the `_gradient`, @@ -365,14 +383,17 @@ def gradient(self): """ if self._domain is not None: - self._gradient = GradientOperator(self._domain, correlation = self.correlation, backend = self.backend) + self._gradient = GradientOperator( + self._domain, correlation=self.correlation, backend=self.backend) else: - raise ValueError(" The domain of the TotalVariation is {}. Please use the __call__ or proximal methods first before calling gradient.".format(self._domain)) + raise ValueError( + " The domain of the TotalVariation is {}. Please use the __call__ or proximal methods first before calling gradient.".format(self._domain)) return self._gradient def __rmul__(self, scalar): - if not isinstance (scalar, Number): - raise TypeError("scalar: Expected a number, got {}".format(type(scalar))) + if not isinstance(scalar, Number): + raise TypeError( + "scalar: Expected a number, got {}".format(type(scalar))) self.regularisation_parameter *= scalar return self diff --git a/Wrappers/Python/test/test_functions.py b/Wrappers/Python/test/test_functions.py index 304729bef0..082779a9b8 100644 --- a/Wrappers/Python/test/test_functions.py +++ b/Wrappers/Python/test/test_functions.py @@ -869,8 +869,8 @@ def test_proximal_conjugate(self): (L1Norm(), ag), (L2NormSquared(), ag), (MixedL21Norm(), bg), - (TotalVariation(backend='c'), ig), - (TotalVariation(backend='numpy'), ig), + (TotalVariation(backend='c', warm_start=False, max_iteration=100), ig), + (TotalVariation(backend='numpy', warm_start=False, max_iteration=100), ig), ] for func, geom in func_geom_test_list: @@ -907,12 +907,48 @@ def setUp(self) -> None: self.grad = GradientOperator(self.ig_real) self.alpha_arr = self.ig_real.allocate(0.15) - def test_regularisation_parameter(self): - np.testing.assert_almost_equal(self.tv.regularisation_parameter, 1.) - - def test_regularisation_parameter2(self): + def test_configure_tv_defaults(self): + self.assertEquals(self.tv.warm_start, True) + self.assertEquals(self.tv.iterations, 10) + self.assertEquals(self.tv.correlation, "Space") + self.assertEquals(self.tv.backend, "c") + self.assertEquals(self.tv.lower, -np.inf) + self.assertEquals(self.tv.upper, np.inf) + self.assertEquals(self.tv.isotropic, True) + self.assertEquals(self.tv.split, False) + self.assertEquals(self.tv.info, False) + self.assertEquals(self.tv.strong_convexity_constant, 0) + self.assertEquals(self.tv.tolerance, None) + + def test_configure_tv_not_defaults(self): + tv=TotalVariation( max_iteration=100, + tolerance = 1e-5, + correlation = "SpaceChannels", + backend = "numpy", + lower = 0., + upper = 1., + isotropic = False, + split = True, + info = True, + strong_convexity_constant = 1., + warm_start=False) + self.assertEquals(tv.warm_start, False) + self.assertEquals(tv.iterations, 100) + self.assertEquals(tv.correlation, "SpaceChannels") + self.assertEquals(tv.backend, "numpy") + self.assertEquals(tv.lower, 0.) + self.assertEquals(tv.upper, 1.) + self.assertEquals(tv.isotropic, False) + self.assertEquals(tv.split, True) + self.assertEquals(tv.info, True) + self.assertEquals(tv.strong_convexity_constant, 1.) 
+ self.assertEquals(tv.tolerance, 1e-5) + + + + def test_scaled_regularisation_parameter(self): np.testing.assert_almost_equal(self.tv_scaled.regularisation_parameter, - self.alpha) + self.alpha, err_msg='Multiplying the TV functional did not change the regularisation parameter') def test_rmul(self): assert isinstance(self.tv_scaled, TotalVariation) @@ -927,29 +963,29 @@ def test_rmul2(self): tv = alpha * TotalVariation() def test_rmul3(self): - self.assertEqual(self.alpha, self.tv_scaled.regularisation_parameter) + self.assertEqual(self.alpha, self.tv_scaled.regularisation_parameter, msg='Failed to scale the TV regularisation parameter') alpha = 2. tv2 = alpha * self.tv_scaled - self.assertEqual(self.alpha * alpha, tv2.regularisation_parameter) + self.assertEqual(self.alpha * alpha, tv2.regularisation_parameter, msg="Failed at scaling regularisation parameter of already scaled TotalVariation") def test_call_real_isotropic(self): x_real = self.ig_real.allocate('random', seed=4) res1 = self.tv_iso(x_real) res2 = self.grad.direct(x_real).pnorm(2).sum() - np.testing.assert_equal(res1, res2) + np.testing.assert_equal(res1, res2, err_msg="Error with isotropic TV calculation") def test_call_real_anisotropic(self): x_real = self.ig_real.allocate('random', seed=4) res1 = self.tv_aniso(x_real) res2 = self.grad.direct(x_real).pnorm(1).sum() - np.testing.assert_almost_equal(res1, res2, decimal=3) + np.testing.assert_almost_equal(res1, res2, decimal=3, err_msg="Error with anisotropic TV calculation") - def test_strongly_convex_TV(self): + def test_strongly_convex_CIL_TV(self): TV_no_strongly_convex = self.alpha * TotalVariation() - self.assertEqual(TV_no_strongly_convex.strong_convexity_constant, 0) + self.assertEqual(TV_no_strongly_convex.strong_convexity_constant, 0, msg="Error strong convexity constant not set to zero by default") # TV as strongly convex, with "small" strongly convex constant TV_strongly_convex = self.alpha * TotalVariation( @@ -961,7 +997,7 @@ def test_strongly_convex_TV(self): res2 = TV_no_strongly_convex( x_real) + (TV_strongly_convex.strong_convexity_constant / 2) * x_real.squared_norm() - np.testing.assert_allclose(res1, res2, atol=1e-3) + np.testing.assert_allclose(res1, res2, atol=1e-3, err_msg='TV calculation with and without strong convexity not equal') # check proximal x_real = self.ig_real.allocate('random', seed=4) @@ -970,17 +1006,17 @@ def test_strongly_convex_TV(self): tmp_x_real = x_real.copy() res2 = TV_strongly_convex.proximal(x_real, tau=1.0) # check input remain the same after proximal - np.testing.assert_array_equal(tmp_x_real.array, x_real.array) + np.testing.assert_array_equal(tmp_x_real.array, x_real.array, err_msg='Input not the same after calling proximal') - np.testing.assert_allclose(res1.array, res2.array, atol=1e-3) + np.testing.assert_allclose(res1.array, res2.array, atol=1e-3, err_msg="TV proximal calculation not the same with and without strong convexity") @unittest.skipUnless(has_ccpi_regularisation, "Regularisation Toolkit not present") - def test_strongly_convex_CIL_FGP_TV(self): + def test_strongly_convex_FGP_TV(self): FGP_TV_no_strongly_convex = self.alpha * FGP_TV() self.assertEqual(FGP_TV_no_strongly_convex.strong_convexity_constant, - 0) + 0, msg="Default strong-convexity constant not set to zero") # TV as strongly convex, with "small" strongly convex constant FGP_TV_strongly_convex = self.alpha * FGP_TV( @@ -993,7 +1029,7 @@ def test_strongly_convex_CIL_FGP_TV(self): res2 = FGP_TV_no_strongly_convex( x_real) + 
(FGP_TV_strongly_convex.strong_convexity_constant / 2) * x_real.squared_norm() - np.testing.assert_allclose(res1, res2, atol=1e-3) + np.testing.assert_allclose(res1, res2, atol=1e-3, err_msg='Failed at comparing FGP_TV call with and without strong convexity') # check proximal x_real = self.ig_real.allocate('random', seed=4) @@ -1002,14 +1038,14 @@ def test_strongly_convex_CIL_FGP_TV(self): tmp_x_real = x_real.copy() res2 = FGP_TV_strongly_convex.proximal(x_real, tau=1.0) # check input remain the same after proximal - np.testing.assert_array_equal(tmp_x_real.array, x_real.array) + np.testing.assert_array_equal(tmp_x_real.array, x_real.array, err_msg='Failed at checking input remains the same after calling proximal') - np.testing.assert_allclose(res1.array, res2.array, atol=1e-3) + np.testing.assert_allclose(res1.array, res2.array, atol=1e-3, err_msg="For FGP_TV comparing prox with and without strong convexity") @unittest.skipUnless(has_ccpi_regularisation, "Regularisation Toolkit not present") def test_compare_regularisation_toolkit(self): - data = dataexample.SHAPES.get(size=(64, 64)) + data = dataexample.SHAPES.get(size=(16, 16)) ig = data.geometry ag = ig @@ -1024,7 +1060,7 @@ def test_compare_regularisation_toolkit(self): # CIL_FGP_TV no tolerance g_CIL = alpha * TotalVariation( - iters, tolerance=None, lower=0, info=True) + iters, tolerance=None, lower=0, info=True, warm_start=False) t0 = timer() res1 = g_CIL.proximal(noisy_data, 1.) t1 = timer() @@ -1047,12 +1083,12 @@ def test_compare_regularisation_toolkit(self): np.testing.assert_array_almost_equal(res1.as_array(), res2.as_array(), - decimal=4) + decimal=4, err_msg='Comparing the CCPi proximal against the CIL TV proximal, no tolerance') # print("Compare CIL_FGP_TV vs CCPiReg_FGP_TV with iterations.") iters = 408 - # CIL_FGP_TV no tolerance - g_CIL = alpha * TotalVariation(iters, tolerance=1e-9, lower=0.) + # CIL_FGP_TV with tolerance + g_CIL = alpha * TotalVariation(iters, tolerance=1e-9, lower=0., warm_start=False) t0 = timer() res1 = g_CIL.proximal(noisy_data, 1.) t1 = timer() @@ -1075,14 +1111,23 @@ def test_compare_regularisation_toolkit(self): # print(t3-t2) np.testing.assert_array_almost_equal(res1.as_array(), res2.as_array(), - decimal=3) + decimal=3, err_msg='Comparing the CCPi proximal against the CIL TV proximal, with tolerance') + + # CIL_FGP_TV with warm_start + iters=10 + g_CIL = alpha * TotalVariation(iters, lower=0., warm_start=True) + for i in range(6): + res1 = g_CIL.proximal(noisy_data, 1.) 
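+        # Each proximal call in the loop above resumes FGP from the dual variable
+        # stored by the previous call, so a few short warm-started solves should
+        # approach the converged toolkit result held in res2.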
+ np.testing.assert_array_almost_equal(res1.as_array(), + res2.as_array(), + decimal=3, err_msg='Comparing the CCPi proximal against the CIL TV proximal, with warm_start') @unittest.skipUnless(has_tomophantom and has_ccpi_regularisation, "Missing Tomophantom or Regularisation-Toolkit") def test_compare_regularisation_toolkit_tomophantom(self): # print ("Building 3D phantom using TomoPhantom software") model = 13 # select a model number from the library - N_size = 64 # Define phantom dimensions using a scalar value (cubic phantom) + N_size = 16 # Define phantom dimensions using a scalar value (cubic phantom) #This will generate a N_size x N_size x N_size phantom (3D) ig = ImageGeometry(N_size, N_size, N_size) @@ -1095,7 +1140,7 @@ def test_compare_regularisation_toolkit_tomophantom(self): # print("Use tau as an array of ones") # CIL_TotalVariation no tolerance - g_CIL = alpha * TotalVariation(iters, tolerance=None, info=True) + g_CIL = alpha * TotalVariation(iters, tolerance=None, info=True, warm_start=False) # res1 = g_CIL.proximal(noisy_data, ig.allocate(1.)) t0 = timer() res1 = g_CIL.proximal(noisy_data, ig.allocate(1.)) @@ -1122,7 +1167,20 @@ def test_compare_regularisation_toolkit_tomophantom(self): np.testing.assert_allclose(res1.as_array(), res2.as_array(), - atol=7.5e-2) + atol=5e-2, err_msg='Comparing the CCPi proximal against the CIL TV proximal, without tolerance without warm_start') + + #with warm start + iters=10 + g_CIL = alpha * TotalVariation(iters, tolerance=None, info=True, warm_start=True) + # res1 = g_CIL.proximal(noisy_data, ig.allocate(1.)) + t0 = timer() + for i in range(4): + res1 = g_CIL.proximal(noisy_data, ig.allocate(1.)) + t1 = timer() + np.testing.assert_allclose(res1.as_array(), + res2.as_array(), + atol=4e-2, err_msg='Comparing the CCPi proximal against the CIL TV proximal, without tolerance with warm_start') + def test_non_scalar_tau_cil_tv(self): @@ -1134,7 +1192,37 @@ def test_non_scalar_tau_cil_tv(self): # use the alpha * TV res2 = self.tv_scaled.proximal(x_real, tau=1.0) - np.testing.assert_allclose(res1.array, res2.array, atol=1e-3) + np.testing.assert_allclose(res1.array, res2.array, atol=1e-3, err_msg="Testing non scalar tau in prox calculation") + + def test_get_p2_with_warm_start(self): + data = dataexample.SHAPES.get(size=(16, 16)) + tv=TotalVariation(warm_start=True, max_iteration=10) + self.assertEquals(tv._p2, None, msg="tv._p2 not initialised to None") + tv(data) + checkp2=tv.gradient.range_geometry().allocate(0) + for i, x in enumerate(tv._get_p2()): + np.testing.assert_allclose(x.as_array(), checkp2[i].as_array(), rtol=1e-8, atol=1e-8, err_msg="P2 not initially set to zero") + test=tv.proximal(data, 1.) + print(test) + a=np.sum(np.linalg.norm(test)) + print(np.linalg.norm(test)) + for i, x in enumerate(tv._get_p2()): + np.testing.assert_equal(np.any(np.not_equal(x.as_array(), checkp2[i].as_array())), True, err_msg="The stored value of p2 doesn't change after calling proximal") + np.testing.assert_almost_equal(np.sum(np.linalg.norm(test)),126.337265, err_msg="Incorrect value of the proximal") + + + def test_get_p2_without_warm_start(self): + data = dataexample.SHAPES.get(size=(16, 16)) + tv=TotalVariation(warm_start=False) + self.assertEquals(tv._p2, None, msg="tv._p2 not initialised to None") + tv(data) + checkp2=tv.gradient.range_geometry().allocate(0) + for i, x in enumerate(tv._get_p2()): + np.testing.assert_allclose(x.as_array(), checkp2[i].as_array(), rtol=1e-8, atol=1e-8, err_msg="P2 not initially set to zero") + tv.proximal(data, 1.) 
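+        # With warm_start=False the proximal call never stores its dual iterate
+        # (tv._p2 stays None), so _get_p2() should again return a freshly
+        # allocated, zero-filled dual variable.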
+ for i, x in enumerate(tv._get_p2()): + np.testing.assert_allclose(x.as_array(), checkp2[i].as_array(), rtol=1e-8, atol=1e-8, err_msg="P2 not reset to zero after a call to proximal") + class TestLeastSquares(unittest.TestCase): From a6ce31c3e354ff236692871f4f1bf4a9ee36b0ad Mon Sep 17 00:00:00 2001 From: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com> Date: Fri, 22 Sep 2023 14:21:54 +0100 Subject: [PATCH 3/4] Partitioner - to fix error if partitions have size 1 (#1499) fix error if partitions have size 1 Signed-off-by: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com> Co-authored-by: Gemma Fardell <47746591+gfardell@users.noreply.github.com> Co-authored-by: Edoardo Pasca --- CHANGELOG.md | 1 + Wrappers/Python/cil/framework/framework.py | 7 +-- .../Python/test/test_BlockDataContainer.py | 45 ++++++++++++++++--- 3 files changed, 43 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6331754000..0c6794ead4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ - Add norm for CompositionOperator - Refactor SIRT algorithm to make it more computationally and memory efficient - Optimisation in L2NormSquared + - Added support for partitioner, when partitions have size 1 - Fix for show_geometry bug for 2D data - Added warmstart capability to proximal evaluation of the CIL TotalVariation function. - FBP split processing bug fix - now respects panel origin diff --git a/Wrappers/Python/cil/framework/framework.py b/Wrappers/Python/cil/framework/framework.py index bd4615340a..bf03d4b11f 100644 --- a/Wrappers/Python/cil/framework/framework.py +++ b/Wrappers/Python/cil/framework/framework.py @@ -126,7 +126,6 @@ def _construct_BlockGeometry_from_indices(self, indices): ag = self.geometry.copy() ag.config.angles.angle_data = numpy.take(self.geometry.angles, mask, axis=0) ags.append(ag) - return BlockGeometry(*ags) def partition(self, num_batches, mode, seed=None): @@ -200,8 +199,11 @@ def _partition_deterministic(self, num_batches, stagger=False, indices=None): for i in range(num_batches): out[i].fill( - numpy.take(self.array, partition_indices[i], axis=axis) + numpy.squeeze( + numpy.take(self.array, partition_indices[i], axis=axis) + ) ) + return out def _partition_random_permutation(self, num_batches, seed=None): @@ -2111,7 +2113,6 @@ def shape(self): AcquisitionGeometry.ANGLE: self.config.angles.num_positions, AcquisitionGeometry.VERTICAL: self.config.panel.num_pixels[1], AcquisitionGeometry.HORIZONTAL: self.config.panel.num_pixels[0]} - shape = [] for label in self.dimension_labels: shape.append(shape_dict[label]) diff --git a/Wrappers/Python/test/test_BlockDataContainer.py b/Wrappers/Python/test/test_BlockDataContainer.py index c608c7e5f1..11cab04682 100644 --- a/Wrappers/Python/test/test_BlockDataContainer.py +++ b/Wrappers/Python/test/test_BlockDataContainer.py @@ -882,30 +882,61 @@ def test_partition(self): # split in num_batches num_batches = 4 + #Testing sequential partitioning data = self.data.partition(num_batches, 'sequential') idxs = self.data._partition_indices(num_batches, indices=self.data.geometry.num_projections, stagger=False) - - # ig = data.get_ImageGeometry() assert len(data.containers) == num_batches self.assertDataIsTheSame(data, idxs) + #Testing staggered partitioning data = self.data.partition(num_batches, Partitioner.STAGGERED) idxs = self.data._partition_indices(num_batches, indices=self.data.geometry.num_projections, stagger=True) - - # ig = data.get_ImageGeometry() assert len(data.containers) == num_batches 
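+        # Staggered mode is expected to deal the projections out round-robin,
+        # e.g. 9 angles in 4 batches split as [0, 4, 8], [1, 5], [2, 6], [3, 7].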
self.assertDataIsTheSame(data, idxs) + def test_partition_diff_num_batches(self): + + #Check what happens when the number of batches is equal to the number of projection angles + num_batches=9 + data = self.data.partition(num_batches, 'sequential') + idxs = self.data._partition_indices(num_batches, indices=self.data.geometry.num_projections, stagger=False) + assert len(data.containers) == num_batches + self.assertDataIsTheSame(data, idxs, msg='Failed when num_batches=number of projections') + + #Check what happens when the number of batches is one, the whole set of projection angles + num_batches=1 + data = self.data.partition(num_batches, 'sequential') + idxs = self.data._partition_indices(num_batches, indices=self.data.geometry.num_projections, stagger=False) + assert len(data.containers) == num_batches + self.assertDataIsTheSame(data, idxs, msg="Failed when num_batches=1") + + #Check what happens when the number of batches is zero + num_batches=0 + with self.assertRaises(ZeroDivisionError): + data = self.data.partition(num_batches, 'sequential') + + #Check what happens when the number of batches is greater than the number of projection angles + num_batches=10 + with self.assertRaises(ValueError): + data = self.data.partition(num_batches, 'sequential') + + + + - def assertDataIsTheSame(self, data, idxs): + def assertDataIsTheSame(self, data, idxs, msg=None): # let's check that the data is the same k = 0 wrong = 0 for i, el in enumerate(data): - for j in range(el.shape[0]): + if len(el.shape)>1: + j_range=el.shape[0] + else: + j_range=1 + for j in range(j_range): idx = idxs[i][j] try: - np.testing.assert_array_equal(el.as_array()[j], self.data.as_array()[idx]) + np.testing.assert_array_equal(el.as_array()[j], self.data.as_array()[idx], err_msg=msg) except AssertionError: wrong += 1 k += 1 From f5a9f97855680402ce416a20c86f2f8d58f35fa6 Mon Sep 17 00:00:00 2001 From: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com> Date: Wed, 27 Sep 2023 13:24:36 +0100 Subject: [PATCH 4/4] Updated documentation (#1506) * Updated documentation * Changes to the parameter list for sapyb * Spelling corrections in documentation --- CHANGELOG.md | 1 + .../cil/framework/BlockDataContainer.py | 40 +++++++++---------- Wrappers/Python/cil/framework/framework.py | 32 +++++++-------- 3 files changed, 35 insertions(+), 38 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0c6794ead4..3b8ce3c690 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ - Fix for show_geometry bug for 2D data - Added warmstart capability to proximal evaluation of the CIL TotalVariation function. - FBP split processing bug fix - now respects panel origin + - Tidied up documentation in the framework folder * 23.0.1 diff --git a/Wrappers/Python/cil/framework/BlockDataContainer.py b/Wrappers/Python/cil/framework/BlockDataContainer.py index 35b94f84d5..d624c201e2 100644 --- a/Wrappers/Python/cil/framework/BlockDataContainer.py +++ b/Wrappers/Python/cil/framework/BlockDataContainer.py @@ -33,7 +33,7 @@ class BlockDataContainer(object): will fail 2) algebra between `BlockDataContainer`s and `list` or `numpy array` will work as long as the number of `rows` and element of the arrays match, - indipendently on the fact that the `BlockDataContainer` could be nested + independently on the fact that the `BlockDataContainer` could be nested 3) algebra between `BlockDataContainer` and one `DataContainer` is possible. 
It will require all the `DataContainers` in the block to be compatible with the `DataContainer` we want to operate with. @@ -147,7 +147,7 @@ def subtract(self, other, *args, **kwargs): '''Algebra: subtract method of BlockDataContainer with number/DataContainer or BlockDataContainer :param: other (number, DataContainer or subclasses or BlockDataContainer - :param: out (optional): provides a placehold for the resul. + :param: out (optional): provides a placeholder for the result. ''' out = kwargs.get('out', None) if out is not None: @@ -157,8 +157,8 @@ def subtract(self, other, *args, **kwargs): def multiply(self, other, *args, **kwargs): '''Algebra: multiply method of BlockDataContainer with number/DataContainer or BlockDataContainer - :param: other (number, DataContainer or subclasses or BlockDataContainer - :param: out (optional): provides a placehold for the resul. + :param: other (number, DataContainer or subclasses or BlockDataContainer) + :param: out (optional): provides a placeholder for the result. ''' out = kwargs.get('out', None) if out is not None: @@ -168,8 +168,8 @@ def multiply(self, other, *args, **kwargs): def divide(self, other, *args, **kwargs): '''Algebra: divide method of BlockDataContainer with number/DataContainer or BlockDataContainer - :param: other (number, DataContainer or subclasses or BlockDataContainer - :param: out (optional): provides a placehold for the resul. + :param: other (number, DataContainer or subclasses or BlockDataContainer) + :param: out (optional): provides a placeholder for the result. ''' out = kwargs.get('out', None) if out is not None: @@ -180,7 +180,7 @@ def power(self, other, *args, **kwargs): '''Algebra: power method of BlockDataContainer with number/DataContainer or BlockDataContainer :param: other (number, DataContainer or subclasses or BlockDataContainer - :param: out (optional): provides a placehold for the resul. + :param: out (optional): provides a placeholder for the result. ''' out = kwargs.get('out', None) if out is not None: @@ -190,8 +190,8 @@ def power(self, other, *args, **kwargs): def maximum(self, other, *args, **kwargs): '''Algebra: power method of BlockDataContainer with number/DataContainer or BlockDataContainer - :param: other (number, DataContainer or subclasses or BlockDataContainer - :param: out (optional): provides a placehold for the resul. + :param: other (number, DataContainer or subclasses or BlockDataContainer) + :param: out (optional): provides a placeholder for the result. ''' out = kwargs.get('out', None) if out is not None: @@ -201,8 +201,8 @@ def maximum(self, other, *args, **kwargs): def minimum(self, other, *args, **kwargs): '''Algebra: power method of BlockDataContainer with number/DataContainer or BlockDataContainer - :param: other (number, DataContainer or subclasses or BlockDataContainer - :param: out (optional): provides a placehold for the resul. + :param: other (number, DataContainer or subclasses or BlockDataContainer) + :param: out (optional): provides a placeholder for the result. 
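+
+        Example:
+        --------
+        A minimal sketch of an elementwise minimum against a scalar (the
+        geometry here is assumed for illustration):
+
+        >>> ig = ImageGeometry(10,11)
+        >>> bdc = BlockDataContainer(ig.allocate(1), ig.allocate(3))
+        >>> res = bdc.minimum(2)  # first block stays at 1, second is capped at 2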
''' out = kwargs.get('out', None) if out is not None: @@ -224,14 +224,14 @@ def sapyb(self, a, y, b, out, num_threads = NUM_THREADS): Example: -------- - a = 2 - b = 3 - ig = ImageGeometry(10,11) - x = ig.allocate(1) - y = ig.allocate(2) - bdc1 = BlockDataContainer(2*x, y) - bdc2 = BlockDataContainer(x, 2*y) - out = bdc1.sapyb(a,bdc2,b) + >>> a = 2 + >>> b = 3 + >>> ig = ImageGeometry(10,11) + >>> x = ig.allocate(1) + >>> y = ig.allocate(2) + >>> bdc1 = BlockDataContainer(2*x, y) + >>> bdc2 = BlockDataContainer(x, 2*y) + >>> out = bdc1.sapyb(a,bdc2,b) ''' if out is None: raise ValueError("out container cannot be None") @@ -249,7 +249,7 @@ def binary_operations(self, operation, other, *args, **kwargs): '''Algebra: generic method of algebric operation with BlockDataContainer with number/DataContainer or BlockDataContainer Provides commutativity with DataContainer and subclasses, i.e. this - class's reverse algebric methods take precedence w.r.t. direct algebric + class's reverse algebraic methods take precedence w.r.t. direct algebraic methods of DataContainer and subclasses. This method is not to be used directly diff --git a/Wrappers/Python/cil/framework/framework.py b/Wrappers/Python/cil/framework/framework.py index bf03d4b11f..02558e00bd 100644 --- a/Wrappers/Python/cil/framework/framework.py +++ b/Wrappers/Python/cil/framework/framework.py @@ -2951,12 +2951,12 @@ def __str__ (self, representation=False): def get_data_axes_order(self,new_order=None): '''returns the axes label of self as a list - if new_order is None returns the labels of the axes as a sorted-by-key list - if new_order is a list of length number_of_dimensions, returns a list + If new_order is None returns the labels of the axes as a sorted-by-key list. + If new_order is a list of length number_of_dimensions, returns a list with the indices of the axes in new_order with respect to those in self.dimension_labels: i.e. - self.dimension_labels = {0:'horizontal',1:'vertical'} - new_order = ['vertical','horizontal'] + >>> self.dimension_labels = {0:'horizontal',1:'vertical'} + >>> new_order = ['vertical','horizontal'] returns [1,0] ''' if new_order is None: @@ -3084,19 +3084,18 @@ def sapyb(self, a, y, b, out=None, num_threads=NUM_THREADS): out : return DataContainer, if None a new DataContainer is returned, default None. out can be self or y. num_threads : number of threads to use during the calculation, using the CIL C library + It will try to use the CIL C library and default to numpy operations, in case the C library does not handle the types. - It will try to use the CIL C library and default to numpy operations, in case the C library does - not handle the types. - Example: + Example ------- - a = 2 - b = 3 - ig = ImageGeometry(10,11) - x = ig.allocate(1) - y = ig.allocate(2) - out = x.sapyb(a,y,b) + >>> a = 2 + >>> b = 3 + >>> ig = ImageGeometry(10,11) + >>> x = ig.allocate(1) + >>> y = ig.allocate(2) + >>> out = x.sapyb(a,y,b) ''' ret_out = False @@ -3287,11 +3286,8 @@ def norm(self, **kwargs): return numpy.sqrt(self.squared_norm(**kwargs)) def dot(self, other, *args, **kwargs): - '''return the inner product of 2 DataContainers viewed as vectors - - applies to real and complex data. In such case the dot method returns - - a.dot(b.conjugate()) + '''returns the inner product of 2 DataContainers viewed as vectors. Suitable for real and complex data. + For complex data, the dot method returns a.dot(b.conjugate()) ''' method = kwargs.get('method', 'numpy') if method not in ['numpy','reduce']: