From 8fa44c389882d89ec55c623fb71f390cfdba4656 Mon Sep 17 00:00:00 2001
From: Margaret Duff
Date: Thu, 3 Aug 2023 13:26:24 +0000
Subject: [PATCH 01/48] TV warm_start file

---
 .../optimisation/functions/TotalVariation.py | 75 ++++++++++++-------
 1 file changed, 47 insertions(+), 28 deletions(-)

diff --git a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
index 0934e01b4b..33c5d32459 100644
--- a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
+++ b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
@@ -16,7 +16,7 @@
 #
 # Authors:
 # CIL Developers, listed at: https://github.com/TomographicImaging/CIL/blob/master/NOTICE.txt
-# Claire Delplancke (University of Bath)
+# Margaret Duff, Claire Delplancke (University of Bath)
 
 from cil.optimisation.functions import Function, IndicatorBox, MixedL21Norm, MixedL11Norm
 from cil.optimisation.operators import GradientOperator
@@ -91,6 +91,11 @@ class TotalVariation(Function):
 
     .. math:: \underset{u}{\mathrm{argmin}} \frac{1}{2\frac{\tau}{1+\gamma\tau}}\|u - \frac{b}{1+\gamma\tau}\|^{2} + \mathrm{TV}(u)
 
+    warmstart : :obj`boolean`, default = False
+        If set to true, the FGP aglorithm to calculate the TV proximal is initiated by the final value from the previous iteration and not at zero.
+        This allows the max_iteration value to be reduced to 5-10 iterations.
+
+
 
     Note
     ----
@@ -111,7 +116,7 @@ class TotalVariation(Function):
 
     >>> alpha = 2.0
     >>> TV = TotalVariation()
-    >>> sol = TV.proxima(b, tau = alpha)
+    >>> sol = TV.proximal(b, tau = alpha)
 
     Examples
     --------
@@ -122,7 +127,7 @@ class TotalVariation(Function):
 
     >>> alpha = 2.0
     >>> TV = TotalVariation(isotropic=False, lower=1.0, upper=2.0)
-    >>> sol = TV.proxima(b, tau = alpha)
+    >>> sol = TV.proximal(b, tau = alpha)
 
 
     Examples
     --------
@@ -133,7 +138,7 @@ class TotalVariation(Function):
 
     >>> alpha = 2.0
     >>> gamma = 1e-3
     >>> TV = alpha * TotalVariation(isotropic=False, strong_convexity_constant=gamma)
-    >>> sol = TV.proxima(b, tau = 1.0)
+    >>> sol = TV.proximal(b, tau = 1.0)
 
     """
 
 
     def __init__(self,
                  isotropic = True,
                  split = False,
                  info = False,
-                 strong_convexity_constant = 0):
+                 strong_convexity_constant = 0,
+                 warmstart=False):
 
         super(TotalVariation, self).__init__(L = None)
@@ -190,6 +196,11 @@ def __init__(self,
         # splitting Gradient
         self.split = split
 
+        # warm-start
+        self.warmstart = warmstart
+        if self.warmstart:
+            self.hasstarted = False
+
         # Strong convexity for TV
         self.strong_convexity_constant = strong_convexity_constant
@@ -234,7 +245,7 @@ def __call__(self, x):
 
     def proximal(self, x, tau, out = None):
         r""" Returns the proximal operator of the TotalVariation function at :code:`x` ."""
-
+        self.tau=tau #Introduced for testing
         if self.strong_convexity_constant>0:
@@ -274,11 +285,19 @@ def _fista_on_dual_rof(self, x, tau, out = None):
             self.calculate_Lipschitz()
 
         # initialise
-        t = 1
+        t = 1 # line 2
+
+        p1 = self.gradient.range_geometry().allocate(0) # dua variable - value alloacated here is not used - is overwritten during iterations
+        if not self.warmstart:
+            self.p2 = self.gradient.range_geometry().allocate(0) # previous dual variable - needs loading for warm start!
+            tmp_q = self.gradient.range_geometry().allocate(0) # should be equal to self.p2 - i.e. needs loading for warm start
+        else:
+            if not self.hasstarted:
+                self.p2 = self.gradient.range_geometry().allocate(0) # previous dual variable - needs loading for warm start!
+                self.hasstarted = True
+            tmp_q = self.p2.copy() # should be equal to self.p2 - i.e. needs loading for warm start
+
 
-        p1 = self.gradient.range_geometry().allocate(0)
-        p2 = self.gradient.range_geometry().allocate(0)
-        tmp_q = self.gradient.range_geometry().allocate(0)
 
         # multiply tau by -1 * regularisation_parameter here so it's not recomputed every iteration
         # when tau is an array this is done inplace so reverted at the end
@@ -294,24 +313,24 @@ def _fista_on_dual_rof(self, x, tau, out = None):
             out = self.gradient.domain_geometry().allocate(0)
 
         should_break = False
-        for k in range(self.iterations):
+        for k in range(self.iterations): # line 3 in alogirhtm one of "Multicontrast MRI Reconstruction with Structure-Guided Total Variation", Ehrhardt, Betcke, 2016.
 
             t0 = t
-            self.gradient.adjoint(tmp_q, out = out)
-            out.sapyb(tau_reg_neg, x, 1.0, out=out)
-            self.projection_C(out, tau=None, out = out)
+            self.gradient.adjoint(tmp_q, out = out) # line 4
+            out.sapyb(tau_reg_neg, x, 1.0, out=out)# line 4
+            self.projection_C(out, tau=None, out = out)# line 4
 
             if should_break:
                 break
-            self.gradient.direct(out, out=p1)
+            self.gradient.direct(out, out=p1) # line 4
 
-            multip = (-self.L)/tau_reg_neg
-            p1.multiply(multip,out=p1)
+            multip = (-self.L)/tau_reg_neg# line 4
+            p1.multiply(multip,out=p1) #line 4/5
 
-            tmp_q += p1
+            tmp_q += p1 # line 5
 
-            if self.tolerance is not None and k%5==0:
+            if self.tolerance is not None and k%5==0: # testing convergence criterion
                 error = p1.norm()
                 error /= tmp_q.norm()
                 if error <= self.tolerance:
                     should_break = True
 
             # Depending on the case, isotropic or anisotropic, the proximal conjugate of the MixedL21Norm (isotropic case),
             # or the proximal conjugate of the MixedL11Norm (anisotropic case) is computed.
-            self.func.proximal_conjugate(tmp_q, 1.0, out=p1)
+            self.func.proximal_conjugate(tmp_q, 1.0, out=p1) # line 5
 
-            t = (1 + np.sqrt(1 + 4 * t0 ** 2)) / 2
+            t = (1 + np.sqrt(1 + 4 * t0 ** 2)) / 2 # line 6
 
-            p1.subtract(p2, out=tmp_q)
-            tmp_q *= (t0-1)/t
-            tmp_q += p1
+            p1.subtract(self.p2, out=tmp_q) # line 7
+            tmp_q *= (t0-1)/t # line 7
+            tmp_q += p1 # line 7
 
-            #switch p1 and p2 references
+            #switch p1 and self.p2 references
             tmp = p1
-            p1 = p2
-            p2 = tmp
+            p1 = self.p2
+            self.p2 = tmp
 
         # Print stopping information (iterations and tolerance error) of FGP_TV
         if self.info:
@@ -375,4 +394,4 @@ def __rmul__(self, scalar):
         if not isinstance (scalar, Number):
             raise TypeError("scalar: Expected a number, got {}".format(type(scalar)))
         self.regularisation_parameter *= scalar
-        return self
+        return self
\ No newline at end of file

From 7b735668ee78a503d41bf771f1076d42e5a07539 Mon Sep 17 00:00:00 2001
From: Margaret Duff
Date: Thu, 3 Aug 2023 13:27:36 +0000
Subject: [PATCH 02/48] Naming

---
 Wrappers/Python/cil/optimisation/functions/TotalVariation.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
index 33c5d32459..29225b4148 100644
--- a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
+++ b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
@@ -16,7 +16,8 @@
 #
 # Authors:
 # CIL Developers, listed at: https://github.com/TomographicImaging/CIL/blob/master/NOTICE.txt
-# Margaret Duff, Claire Delplancke (University of Bath)
+# Claire Delplancke (University of Bath)
+
 
 from cil.optimisation.functions import Function, IndicatorBox, MixedL21Norm, MixedL11Norm
 from cil.optimisation.operators import GradientOperator

From d39387b3f6b5bf04cb5596f0f42f9c771d3f16d7 Mon Sep 17 00:00:00 2001
From: Margaret Duff
Date: Mon, 7 Aug 2023 14:18:15 +0000
Subject: [PATCH 03/48] Removed hasstarted and replaced with a property and
 setter for p2

---
 .../optimisation/functions/TotalVariation.py | 52 ++++++++++++-------
 1 file changed, 34 insertions(+), 18 deletions(-)

diff --git a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
index 29225b4148..cea7c30b43 100644
--- a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
+++ b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
@@ -95,6 +95,8 @@ class TotalVariation(Function):
     warmstart : :obj`boolean`, default = False
         If set to true, the FGP aglorithm to calculate the TV proximal is initiated by the final value from the previous iteration and not at zero.
         This allows the max_iteration value to be reduced to 5-10 iterations.
+        With warmstart this function will keep in memory the range of the gradient of the image to be denoised, i.e. N times the dimensionality of the image.
+        However, during the evaluation of `proximal` the memory requirements will be unchanged as the same amount of memory will need to be allocated and deallocated.
 
 
     Note
     ----
@@ -197,10 +199,9 @@ def __init__(self,
         # splitting Gradient
         self.split = split
 
-        # warm-start
-        self.warmstart = warmstart
-        if self.warmstart:
-            self.hasstarted = False
+        #For the warmstart functionality
+        self.warmstart=warmstart
+        self._p2=None
 
         # Strong convexity for TV
         self.strong_convexity_constant = strong_convexity_constant
@@ -211,6 +212,24 @@ def __init__(self,
         else:
             self.func = MixedL11Norm()
 
+    # setting the dual value for the proximal calculation - important for warmstart
+    @property
+    def p2(self):
+        if self._p2 is not None:
+            return self._p2
+        else:
+            p2 = self.gradient.range_geometry().allocate(0)
+            if self.warmstart:
+                self.p2 = p2
+            return p2
+
+    @p2.setter
+    def p2(self, value):
+        if isinstance(value, type(self.gradient.range_geometry().allocate(0))):
+            self._p2 = value
+        else:
+            raise TypeError('p2 should be in the range of the gradient of the image')
+
     @property
     def regularisation_parameter(self):
         return self._regularisation_parameter
@@ -288,16 +307,11 @@ def _fista_on_dual_rof(self, x, tau, out = None):
 
         # initialise
         t = 1 # line 2
 
-        p1 = self.gradient.range_geometry().allocate(0) # dua variable - value alloacated here is not used - is overwritten during iterations
-        if not self.warmstart:
-            self.p2 = self.gradient.range_geometry().allocate(0) # previous dual variable - needs loading for warm start!
-            tmp_q = self.gradient.range_geometry().allocate(0) # should be equal to self.p2 - i.e. needs loading for warm start
-        else:
-            if not self.hasstarted:
-                self.p2 = self.gradient.range_geometry().allocate(0) # previous dual variable - needs loading for warm start!
-                self.hasstarted = True
-            tmp_q = self.p2.copy() # should be equal to self.p2 - i.e. needs loading for warm start
-
+        # dual variable - its content is overwritten during iterations
+        p1 = self.gradient.range_geometry().allocate(None)
+        p2 = self.p2 # sets p2 to be of size self.gradient.range_geometry() initialised at zero unless warmstart=True
+        # temp_q should be equal to p2
+        tmp_q = p2.copy()
 
 
         # multiply tau by -1 * regularisation_parameter here so it's not recomputed every iteration
@@ -343,14 +357,16 @@ def _fista_on_dual_rof(self, x, tau, out = None):
 
             t = (1 + np.sqrt(1 + 4 * t0 ** 2)) / 2 # line 6
 
-            p1.subtract(self.p2, out=tmp_q) # line 7
+            p1.subtract(p2, out=tmp_q) # line 7
             tmp_q *= (t0-1)/t # line 7
             tmp_q += p1 # line 7
 
-            #switch p1 and self.p2 references
+            #switch p1 and p2 references
             tmp = p1
-            p1 = self.p2
-            self.p2 = tmp
+            p1 = p2
+            p2 = tmp
+            if self.warmstart:
+                self.p2=p2
 
         # Print stopping information (iterations and tolerance error) of FGP_TV
         if self.info:

From 647dd41c57196bb75bc69d6cea786bac9f03fdcb Mon Sep 17 00:00:00 2001
From: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com>
Date: Tue, 8 Aug 2023 09:12:53 +0100
Subject: [PATCH 04/48] Update
 Wrappers/Python/cil/optimisation/functions/TotalVariation.py

Co-authored-by: Edoardo Pasca
Signed-off-by: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com>
---
 Wrappers/Python/cil/optimisation/functions/TotalVariation.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
index cea7c30b43..57dfa1323d 100644
--- a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
+++ b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
@@ -225,7 +225,7 @@ def p2(self):
 
     @p2.setter
     def p2(self, value):
-        if isinstance(value, type(self.gradient.range_geometry().allocate(0))):
+        if len(value.geometry.geometries) != len(self.gradient.range_geometry().geometries):
             self._p2 = value
         else:
             raise TypeError('p2 should be in the range of the gradient of the image')

From 76c9d0fd443cf24ed9c9e187ae79057c80c1d395 Mon Sep 17 00:00:00 2001
From: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com>
Date: Tue, 8 Aug 2023 09:13:17 +0100
Subject: [PATCH 05/48] Update
 Wrappers/Python/cil/optimisation/functions/TotalVariation.py

Co-authored-by: Edoardo Pasca
Signed-off-by: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com>
---
 Wrappers/Python/cil/optimisation/functions/TotalVariation.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
index 57dfa1323d..51ed089114 100644
--- a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
+++ b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
@@ -200,8 +200,8 @@ def __init__(self,
         self.split = split
 
         #For the warmstart functionality
-        self.warmstart=warmstart
-        self._p2=None
+        self.warmstart = warmstart
+        self._p2 = None
 
         # Strong convexity for TV
         self.strong_convexity_constant = strong_convexity_constant

From 7d539fe63e6ed9031b05780b3dac7053dc96fc95 Mon Sep 17 00:00:00 2001
From: Margaret Duff
Date: Tue, 8 Aug 2023 10:40:44 +0000
Subject: [PATCH 06/48] Updated docstrings

---
 Wrappers/Python/cil/optimisation/functions/TotalVariation.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
index 51ed089114..2b12046d1d 100644
--- a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
+++ b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
@@ -212,9 +212,11 @@ def __init__(self,
         else:
             self.func = MixedL11Norm()
 
-    # setting the dual value for the proximal calculation - important for warmstart
+
     @property
     def p2(self):
+        r"""The initial value for the dual in the proximal calculation - allocated to zero in the case of warmstart=False
+        or initialised as the last iterate seen in the proximal calculation in the case warmstart=True ."""
         if self._p2 is not None:
             return self._p2
         else:
@@ -225,6 +227,7 @@ def p2(self):
 
     @p2.setter
     def p2(self, value):
+        r""" Setter for the dual value in the proximal calculation."""
         if len(value.geometry.geometries) != len(self.gradient.range_geometry().geometries):
             self._p2 = value
         else:
             raise TypeError('p2 should be in the range of the gradient of the image')

From bc7ae27a29693d12db9107c396354273ba8466a0 Mon Sep 17 00:00:00 2001
From: Margaret Duff
Date: Tue, 8 Aug 2023 11:32:52 +0000
Subject: [PATCH 07/48] Testing in the p2 setter

---
 Wrappers/Python/cil/optimisation/functions/TotalVariation.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
index 2b12046d1d..8ef2b466ff 100644
--- a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
+++ b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
@@ -228,7 +228,7 @@ def p2(self):
     @p2.setter
     def p2(self, value):
         r""" Setter for the dual value in the proximal calculation."""
-        if len(value.geometry.geometries) != len(self.gradient.range_geometry().geometries):
+        if len(value) != len(self.gradient.range_geometry().geometries):
             self._p2 = value
         else:
             raise TypeError('p2 should be in the range of the gradient of the image')

From 2d6a48d78248a26be4d3d4d9ce28f284cc8bc2d3 Mon Sep 17 00:00:00 2001
From: Margaret Duff
Date: Tue, 8 Aug 2023 11:38:07 +0000
Subject: [PATCH 08/48] Try again with the test on the setter

---
 Wrappers/Python/cil/optimisation/functions/TotalVariation.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
index 8ef2b466ff..11df5e3a49 100644
--- a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
+++ b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
@@ -228,7 +228,7 @@ def p2(self):
     @p2.setter
     def p2(self, value):
         r""" Setter for the dual value in the proximal calculation."""
-        if len(value) != len(self.gradient.range_geometry().geometries):
+        if len(value) == len(self.gradient.range_geometry().geometries):
             self._p2 = value
         else:
             raise TypeError('p2 should be in the range of the gradient of the image')

From 8698a5a7bb339149eea305e47ac691cac18e0513 Mon Sep 17 00:00:00 2001
From: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com>
Date: Wed, 9 Aug 2023 16:34:50 +0100
Subject: [PATCH 09/48] Update
 Wrappers/Python/cil/optimisation/functions/TotalVariation.py

Co-authored-by: Gemma Fardell <47746591+gfardell@users.noreply.github.com>
Signed-off-by: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com>
---
 Wrappers/Python/cil/optimisation/functions/TotalVariation.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
index 11df5e3a49..2059856a6b 100644
--- a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
+++ b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
@@ -368,8 +368,8 @@ def _fista_on_dual_rof(self, x, tau, out = None):
             tmp = p1
             p1 = p2
             p2 = tmp
-            if self.warmstart:
-                self.p2=p2
+            if self.warmstart:
+                self._p2 = p2
 
         # Print stopping information (iterations and tolerance error) of FGP_TV
         if self.info:

From 85f9a54bb3f577c219a8ba6f644c854e4fd77489 Mon Sep 17 00:00:00 2001
From: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com>
Date: Wed, 9 Aug 2023 16:35:13 +0100
Subject: [PATCH 10/48] Update
 Wrappers/Python/cil/optimisation/functions/TotalVariation.py

Co-authored-by: Gemma Fardell <47746591+gfardell@users.noreply.github.com>
Signed-off-by: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com>
---
 .../cil/optimisation/functions/TotalVariation.py | 13 +++++--------
 1 file changed, 5 insertions(+), 8 deletions(-)

diff --git a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
index 2059856a6b..ae681806c2 100644
--- a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
+++ b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
@@ -213,17 +213,14 @@ def __init__(self,
             self.func = MixedL11Norm()
 
 
-    @property
-    def p2(self):
+    def _get_p2(self):
         r"""The initial value for the dual in the proximal calculation - allocated to zero in the case of warmstart=False
         or initialised as the last iterate seen in the proximal calculation in the case warmstart=True ."""
-        if self._p2 is not None:
-            return self._p2
+
+        if self._p2 is None:
+            return self.gradient.range_geometry().allocate(0)
         else:
-            p2 = self.gradient.range_geometry().allocate(0)
-            if self.warmstart:
-                self.p2 = p2
-            return p2
+            return self._p2
 
     @p2.setter
     def p2(self, value):

From c63355bc1ebda211d0148d8928623562893dee54 Mon Sep 17 00:00:00 2001
From: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com>
Date: Wed, 9 Aug 2023 16:35:19 +0100
Subject: [PATCH 11/48] Update
 Wrappers/Python/cil/optimisation/functions/TotalVariation.py

Co-authored-by: Gemma Fardell <47746591+gfardell@users.noreply.github.com>
Signed-off-by: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com>
---
 .../Python/cil/optimisation/functions/TotalVariation.py | 8 --------
 1 file changed, 8 deletions(-)

diff --git a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
index ae681806c2..8922c18af6 100644
--- a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
+++ b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
@@ -222,14 +222,6 @@ def _get_p2(self):
         else:
             return self._p2
 
-    @p2.setter
-    def p2(self, value):
-        r""" Setter for the dual value in the proximal calculation."""
-        if len(value) == len(self.gradient.range_geometry().geometries):
-            self._p2 = value
-        else:
-            raise TypeError('p2 should be in the range of the gradient of the image')
-
     @property
     def regularisation_parameter(self):
         return self._regularisation_parameter

From 0a46d9aadbb4a0203cf25bd0300cafb3adcefcd1 Mon Sep 17 00:00:00 2001
From: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com>
Date: Wed, 9 Aug 2023 16:35:35 +0100
Subject: [PATCH 12/48] Update
 Wrappers/Python/cil/optimisation/functions/TotalVariation.py

Co-authored-by: Gemma Fardell <47746591+gfardell@users.noreply.github.com>
Signed-off-by: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com>
---
 Wrappers/Python/cil/optimisation/functions/TotalVariation.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
index 8922c18af6..3610989d0c 100644
--- a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
+++ b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
@@ -301,7 +301,7 @@ def _fista_on_dual_rof(self, x, tau, out = None):
 
         # dual variable - its content is overwritten during iterations
         p1 = self.gradient.range_geometry().allocate(None)
-        p2 = self.p2 # sets p2 to be of size self.gradient.range_geometry() initialised at zero unless warmstart=True
+        p2 = self._get_p2()
         # temp_q should be equal to p2
         tmp_q = p2.copy()

From ee6895c625ec7f227f394a5e125f1e56b536f136 Mon Sep 17 00:00:00 2001
From: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com>
Date: Wed, 9 Aug 2023 16:36:29 +0100
Subject: [PATCH 13/48] Update
 Wrappers/Python/cil/optimisation/functions/TotalVariation.py

Co-authored-by: Gemma Fardell <47746591+gfardell@users.noreply.github.com>
Signed-off-by: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com>
---
 Wrappers/Python/cil/optimisation/functions/TotalVariation.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
index 3610989d0c..a58488e023 100644
--- a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
+++ b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
@@ -327,8 +327,6 @@ def _fista_on_dual_rof(self, x, tau, out = None):
             out.sapyb(tau_reg_neg, x, 1.0, out=out)# line 4
             self.projection_C(out, tau=None, out = out)# line 4
 
-            if should_break:
-                break
             self.gradient.direct(out, out=p1) # line 4

From af5f9f8bbd83d53094af6d20eaae66c0399d7cb9 Mon Sep 17 00:00:00 2001
From: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com>
Date: Wed, 9 Aug 2023 16:37:03 +0100
Subject: [PATCH 14/48] Update
 Wrappers/Python/cil/optimisation/functions/TotalVariation.py

Co-authored-by: Gemma Fardell <47746591+gfardell@users.noreply.github.com>
Signed-off-by: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com>
---
 Wrappers/Python/cil/optimisation/functions/TotalVariation.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
index a58488e023..8ea92c13e6 100644
--- a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
+++ b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
@@ -338,8 +338,8 @@ def _fista_on_dual_rof(self, x, tau, out = None):
             if self.tolerance is not None and k%5==0: # testing convergence criterion
                 error = p1.norm()
                 error /= tmp_q.norm()
-                if error <= self.tolerance:
-                    should_break = True
+                if error < self.tolerance:
+                    break
 
             # Depending on the case, isotropic or anisotropic, the proximal conjugate of the MixedL21Norm (isotropic case),
             # or the proximal conjugate of the MixedL11Norm (anisotropic case) is computed.

From b494a9859efc232ea443c5cd7624236ab02029f3 Mon Sep 17 00:00:00 2001
From: Margaret Duff
Date: Wed, 9 Aug 2023 15:40:10 +0000
Subject: [PATCH 15/48] Removing comments referencing lines in the algorithm

---
 .../optimisation/functions/TotalVariation.py | 29 ++++++++++---------
 1 file changed, 15 insertions(+), 14 deletions(-)

diff --git a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
index 8ea92c13e6..a4598aa932 100644
--- a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
+++ b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
@@ -52,6 +52,8 @@ class TotalVariation(Function):
     The algorithm used for the proximal operator of TV is the Fast Gradient Projection algorithm (or FISTA)
     applied to the _dual problem_ of the above problem, see :cite:`BeckTeboulle_b`, :cite:`BeckTeboulle_a`, :cite:`Zhu2010`.
 
+    See also "Multicontrast MRI Reconstruction with Structure-Guided Total Variation", Ehrhardt, Betcke, 2016.
+
 
     Parameters
     ----------
@@ -320,20 +322,20 @@ def _fista_on_dual_rof(self, x, tau, out = None):
             out = self.gradient.domain_geometry().allocate(0)
 
         should_break = False
-        for k in range(self.iterations): # line 3 in alogirhtm one of "Multicontrast MRI Reconstruction with Structure-Guided Total Variation", Ehrhardt, Betcke, 2016.
+        for k in range(self.iterations):
 
             t0 = t
-            self.gradient.adjoint(tmp_q, out = out) # line 4
-            out.sapyb(tau_reg_neg, x, 1.0, out=out)# line 4
-            self.projection_C(out, tau=None, out = out)# line 4
+            self.gradient.adjoint(tmp_q, out = out)
+            out.sapyb(tau_reg_neg, x, 1.0, out=out)
+            self.projection_C(out, tau=None, out = out)
 
-            self.gradient.direct(out, out=p1) # line 4
+            self.gradient.direct(out, out=p1)
 
-            multip = (-self.L)/tau_reg_neg# line 4
-            p1.multiply(multip,out=p1) #line 4/5
+            multip = (-self.L)/tau_reg_neg
+            p1.multiply(multip,out=p1)
 
-            tmp_q += p1 # line 5
+            tmp_q += p1
 
             if self.tolerance is not None and k%5==0: # testing convergence criterion
                 error = p1.norm()
                 error /= tmp_q.norm()
                 if error < self.tolerance:
                     break
 
             # Depending on the case, isotropic or anisotropic, the proximal conjugate of the MixedL21Norm (isotropic case),
             # or the proximal conjugate of the MixedL11Norm (anisotropic case) is computed.
-            self.func.proximal_conjugate(tmp_q, 1.0, out=p1) # line 5
+            self.func.proximal_conjugate(tmp_q, 1.0, out=p1)
 
-            t = (1 + np.sqrt(1 + 4 * t0 ** 2)) / 2 # line 6
-
-            p1.subtract(p2, out=tmp_q) # line 7
-            tmp_q *= (t0-1)/t # line 7
-            tmp_q += p1 # line 7
+            t = (1 + np.sqrt(1 + 4 * t0 ** 2)) / 2
+            p1.subtract(p2, out=tmp_q)
+            tmp_q *= (t0-1)/t
+            tmp_q += p1
 
             #switch p1 and p2 references
             tmp = p1

From 8643abde72aaf8dcbab35ace3852f4d383309e06 Mon Sep 17 00:00:00 2001
From: Margaret Duff
Date: Wed, 9 Aug 2023 15:47:40 +0000
Subject: [PATCH 16/48] Changelog and developer list

---
 CHANGELOG.md | 1 +
 NOTICE.txt   | 1 +
 2 files changed, 2 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index c0cc69188f..76d46acfbf 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,6 +6,7 @@
   - Refactor SIRT algorithm to make it more computationally and memory efficient
   - Optimisation in L2NormSquared
   - Fix for show_geometry bug for 2D data
+  - Added warmstart capability to proximal evaluation of the CIL TotalVariation function.
 
 * 23.0.1
   - Fix bug with NikonReader requiring ROI to be set in constructor.
diff --git a/NOTICE.txt b/NOTICE.txt
index a1d08b0d52..6d8bb29c62 100644
--- a/NOTICE.txt
+++ b/NOTICE.txt
@@ -40,6 +40,7 @@ Gemma Fardell (2019 - present) - 1
 Kris Thielemans (2020 - present) - 5
 Laura Murgatroyd (2021 - present) - 1
 Evelina Ametova (2018-2020) - 3, (2020-2021) - 6
+Margaret Duff (2023 - present) - 1
 
 CIL Contributors:
 Srikanth Nagella (2017-2018) - 1

From f422900da3ef0c126ed93cc2f8067f071cf96987 Mon Sep 17 00:00:00 2001
From: Margaret Duff
Date: Wed, 9 Aug 2023 15:59:26 +0000
Subject: [PATCH 17/48] Changed commenting to clear up ROF/FGP confusion

---
 .../optimisation/functions/TotalVariation.py | 21 +++++++++++--------
 1 file changed, 12 insertions(+), 9 deletions(-)

diff --git a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
index a4598aa932..e97e7146e1 100644
--- a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
+++ b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
@@ -59,9 +59,11 @@ class TotalVariation(Function):
     ----------
 
     max_iteration : :obj:`int`, default = 100
-        Maximum number of iterations for the FGP algorithm.
+        Maximum number of iterations for the FGP algorithm to solve to solve the dual problem
+        of the Total Variation Denoising problem (ROF).
 
     tolerance : :obj:`float`, default = None
-        Stopping criterion for the FGP algorithm.
+        Stopping criterion for the FGP algorithm used to to solve the dual problem
+        of the Total Variation Denoising problem (ROF)
 
         .. math:: \|x^{k+1} - x^{k}\|_{2} < \mathrm{tolerance}
 
@@ -84,7 +86,8 @@ class TotalVariation(Function):
         Splits the Gradient into spatial gradient and spectral or temporal gradient for multichannel data.
 
     info : :obj:`boolean`, default = False
-        Information is printed for the stopping criterion of the FGP algorithm
+        Information is printed for the stopping criterion of the FGP algorithm used to solve the dual problem
+        of the Total Variation Denoising problem (ROF).
 
     strong_convexity_constant : :obj:`float`, default = 0
 
        A strongly convex term weighted by the :code:`strong_convexity_constant` (:math:`\gamma`) parameter is added to the Total variation.
@@ -95,7 +98,7 @@ class TotalVariation(Function):
 
     .. math:: \underset{u}{\mathrm{argmin}} \frac{1}{2\frac{\tau}{1+\gamma\tau}}\|u - \frac{b}{1+\gamma\tau}\|^{2} + \mathrm{TV}(u)
 
     warmstart : :obj`boolean`, default = False
-        If set to true, the FGP aglorithm to calculate the TV proximal is initiated by the final value from the previous iteration and not at zero.
+        If set to true, the FGP aglorithm used to solve the dual problem of the Total Variation Denoising problem (ROF) is initiated by the final value from the previous iteration and not at zero.
         This allows the max_iteration value to be reduced to 5-10 iterations.
@@ -167,10 +170,10 @@ def __init__(self,
         # Regularising parameter = alpha
         self.regularisation_parameter = 1.
 
-        # Iterations for FGP_TV
+        # Iterations for FGP algorithm used to solve the dual problem of the Total Variation Denoising problem (ROF)
         self.iterations = max_iteration
 
-        # Tolerance for FGP_TV
+        # Tolerance for FGP algorithm used to solve the dual problem of the Total Variation Denoising problem (ROF)
         self.tolerance = tolerance
 
         # Total variation correlation (isotropic=Default)
@@ -193,7 +196,7 @@ def __init__(self,
         self._gradient = None
         self._domain = None
 
-        # Print stopping information (iterations and tolerance error) of FGP_TV
+        # Print stopping information (iterations and tolerance error) of FGP algorithm used to solve the dual problem of the Total Variation Denoising problem (ROF)
         self.info = info
         if self.info:
             warnings.warn(" `info` is deprecate. Please use logging instead.")
@@ -282,7 +285,7 @@ def proximal(self, x, tau, out = None):
 
     def _fista_on_dual_rof(self, x, tau, out = None):
 
-        r""" Implements the Fast Gradient Projection algorithm on the dual problem
+        r""" Implements the Fast Gradient Projection (FGP) algorithm to solve the dual problem
         of the Total Variation Denoising problem (ROF).
 
        .. math: \max_{\|y\|_{\infty}<=1.} \frac{1}{2}\|\nabla^{*} y + x \|^{2} - \frac{1}{2}\|x\|^{2}
 
@@ -359,7 +362,7 @@ def _fista_on_dual_rof(self, x, tau, out = None):
             if self.warmstart:
                 self._p2 = p2
 
-        # Print stopping information (iterations and tolerance error) of FGP_TV
+        # Print stopping information (iterations and tolerance error) of FGP algorithm used to solve the dual problem of the Total Variation Denoising problem (ROF)
         if self.info:
             if self.tolerance is not None:
                 logging.info("Stop at {} iterations with tolerance {} .".format(k, error))

From 579643dcf76c44f73248f44638c0a88ecad8c384 Mon Sep 17 00:00:00 2001
From: Margaret Duff
Date: Wed, 9 Aug 2023 16:05:28 +0000
Subject: [PATCH 18/48] Changes to how p1 is multiplied by multip - Edo's
 comments

---
 .../Python/cil/optimisation/functions/TotalVariation.py | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
index e97e7146e1..d77c6f5dd0 100644
--- a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
+++ b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
@@ -336,12 +336,11 @@ def _fista_on_dual_rof(self, x, tau, out = None):
             self.gradient.direct(out, out=p1)
 
             multip = (-self.L)/tau_reg_neg
-            p1.multiply(multip,out=p1)
-
-            tmp_q += p1
+            tmp_q.sapyb(1., p1, multip, out=tmp_q)
+
 
             if self.tolerance is not None and k%5==0: # testing convergence criterion
-                error = p1.norm()
+                error = p1.norm()*multip
                 error /= tmp_q.norm()
                 if error < self.tolerance:
                     break

From 6ac35bc4a91b341df584e5f0b3f4925398455a5a Mon Sep 17 00:00:00 2001
From: Edoardo Pasca
Date: Thu, 10 Aug 2023 22:30:44 +0100
Subject: [PATCH 19/48] fix for tau as image

Signed-off-by: Edoardo Pasca
---
 Wrappers/Python/cil/optimisation/functions/TotalVariation.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
index d77c6f5dd0..8311b99ce4 100644
--- a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
+++ b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
@@ -340,7 +340,8 @@ def _fista_on_dual_rof(self, x, tau, out = None):
             tmp_q.sapyb(1., p1, multip, out=tmp_q)
 
             if self.tolerance is not None and k%5==0: # testing convergence criterion
-                error = p1.norm()*multip
+                p1 *= multip
+                error = p1.norm()
                 error /= tmp_q.norm()
                 if error < self.tolerance:
                     break

From 9ef73b6aeafc355bcda29383cf3acc2740a62242 Mon Sep 17 00:00:00 2001
From: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com>
Date: Fri, 11 Aug 2023 14:23:02 +0100
Subject: [PATCH 20/48] Update
 Wrappers/Python/cil/optimisation/functions/TotalVariation.py

Co-authored-by: Edoardo Pasca
Signed-off-by: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com>
---
 Wrappers/Python/cil/optimisation/functions/TotalVariation.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
index 8311b99ce4..1adb25b47a 100644
--- a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
+++ b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
@@ -98,7 +98,7 @@ class TotalVariation(Function):
 
     .. math:: \underset{u}{\mathrm{argmin}} \frac{1}{2\frac{\tau}{1+\gamma\tau}}\|u - \frac{b}{1+\gamma\tau}\|^{2} + \mathrm{TV}(u)
 
     warmstart : :obj`boolean`, default = False
-        If set to true, the FGP aglorithm used to solve the dual problem of the Total Variation Denoising problem (ROF) is initiated by the final value from the previous iteration and not at zero.
+        If set to true, the FGP algorithm used to solve the dual problem of the Total Variation Denoising problem (ROF) is initiated by the final value from the previous iteration and not at zero.
         This allows the max_iteration value to be reduced to 5-10 iterations.
         With warmstart this function will keep in memory the range of the gradient of the image to be denoised, i.e. N times the dimensionality of the image.
         However, during the evaluation of `proximal` the memory requirements will be unchanged as the same amount of memory will need to be allocated and deallocated.

From ee6ffd9b91ccf7f4029fc659a1992806ed5cba98 Mon Sep 17 00:00:00 2001
From: Margaret Duff
Date: Fri, 11 Aug 2023 13:36:57 +0000
Subject: [PATCH 21/48] Changed defaults to True

---
 .../cil/optimisation/functions/TotalVariation.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
index 8311b99ce4..7c06e29230 100644
--- a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
+++ b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
@@ -58,9 +58,10 @@ class TotalVariation(Function):
     Parameters
     ----------
 
-    max_iteration : :obj:`int`, default = 100
+    max_iteration : :obj:`int`, default = 5
         Maximum number of iterations for the FGP algorithm to solve to solve the dual problem
-        of the Total Variation Denoising problem (ROF).
+        of the Total Variation Denoising problem (ROF). If warmstart=False, this should be around 100,
+        or larger with a set tolerance.
 
     tolerance : :obj:`float`, default = None
         Stopping criterion for the FGP algorithm used to to solve the dual problem
@@ -97,7 +98,7 @@ class TotalVariation(Function):
 
-    warmstart : :obj`boolean`, default = False
+    warmstart : :obj`boolean`, default = True
         If set to true, the FGP aglorithm used to solve the dual problem of the Total Variation Denoising problem (ROF) is initiated by the final value from the previous iteration and not at zero.
         This allows the max_iteration value to be reduced to 5-10 iterations.
         With warmstart this function will keep in memory the range of the gradient of the image to be denoised, i.e. N times the dimensionality of the image.
 
@@ -152,7 +153,7 @@ class TotalVariation(Function):
 
     def __init__(self,
-                 max_iteration=100,
+                 max_iteration=5,
                  tolerance = None,
                  correlation = "Space",
                  backend = "c",
@@ -162,7 +163,7 @@ def __init__(self,
                  split = False,
                  info = False,
                  strong_convexity_constant = 0,
-                 warmstart=False):
+                 warmstart=True):
 
         super(TotalVariation, self).__init__(L = None)
@@ -262,7 +263,6 @@ def __call__(self, x):
 
     def proximal(self, x, tau, out = None):
         r""" Returns the proximal operator of the TotalVariation function at :code:`x` ."""
-        self.tau=tau #Introduced for testing
 
         if self.strong_convexity_constant>0:

From 624aa433a088cb75541ae771fc597574f26821cc Mon Sep 17 00:00:00 2001
From: Margaret Duff
Date: Mon, 14 Aug 2023 10:06:42 +0000
Subject: [PATCH 22/48] Default max iterations =10 and started unit tests

---
 .../optimisation/functions/TotalVariation.py |  2 +-
 Wrappers/Python/test/test_functions.py       | 40 ++++++++++++++++++-
 2 files changed, 39 insertions(+), 3 deletions(-)

diff --git a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
index 7c06e29230..59b2a69a00 100644
--- a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
+++ b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
@@ -153,7 +153,7 @@ class TotalVariation(Function):
 
     def __init__(self,
-                 max_iteration=5,
+                 max_iteration=10,
                  tolerance = None,
                  correlation = "Space",
                  backend = "c",
diff --git a/Wrappers/Python/test/test_functions.py b/Wrappers/Python/test/test_functions.py
index 304729bef0..473ff08ca4 100644
--- a/Wrappers/Python/test/test_functions.py
+++ b/Wrappers/Python/test/test_functions.py
@@ -869,8 +869,8 @@ def test_proximal_conjugate(self):
             (L1Norm(), ag),
             (L2NormSquared(), ag),
             (MixedL21Norm(), bg),
-            (TotalVariation(backend='c'), ig),
-            (TotalVariation(backend='numpy'), ig),
+            (TotalVariation(backend='c', warmstart=False, max_iteration=100), ig),
+            (TotalVariation(backend='numpy', warmstart=False, max_iteration=100), ig),
         ]
 
         for func, geom in func_geom_test_list:
@@ -907,6 +907,42 @@ def setUp(self) -> None:
         self.grad = GradientOperator(self.ig_real)
         self.alpha_arr = self.ig_real.allocate(0.15)
 
+    def test_configure_tv_defaults(self):
+        self.assertEquals(self.tv.warmstart, True)
+        self.assertEquals(self.tv.iterations, 10)
+        self.assertEquals(self.tv.correlation, "Space")
+        self.assertEquals(self.tv.backend, "c")
+        self.assertEquals(self.tv.lower, -np.inf)
+        self.assertEquals(self.tv.upper, np.inf)
+        self.assertEquals(self.tv.isotropic, True)
+        self.assertEquals(self.tv.split, False)
+        self.assertEquals(self.tv.info, False)
+        self.assertEquals(self.tv.strong_convexity_constant, 0)
+        self.assertEquals(self.tv.tolerance, None)
+
+    def test_configure_tv_not_defaults(self):
+        tv=TotalVariation( max_iteration=100,
+            tolerance = 1e-5,
+            correlation = "SpaceChannels",
+            backend = "numpy",
+            lower = 0.,
+            upper = 1.,
+            isotropic = False,
+            split = True,
+            info = True,
+            strong_convexity_constant = 1.,
+            warmstart=False)
+        self.assertEquals(tv.warmstart, False)
+        self.assertEquals(tv.iterations, 100)
+        self.assertEquals(tv.correlation, "SpaceChannels")
+        self.assertEquals(tv.backend, "numpy")
+        self.assertEquals(tv.lower, 0.)
+        self.assertEquals(tv.upper, 1.)
+        self.assertEquals(tv.isotropic, False)
+        self.assertEquals(tv.split, True)
+        self.assertEquals(tv.info, True)
+        self.assertEquals(tv.strong_convexity_constant, 1.)
+        self.assertEquals(tv.tolerance, 1e-5)
 
     def test_regularisation_parameter(self):
         np.testing.assert_almost_equal(self.tv.regularisation_parameter, 1.)
 
     def test_regularisation_parameter2(self):

From e882e2dee38591fcc9fb64cc40aff1e707a91855 Mon Sep 17 00:00:00 2001
From: Margaret Duff
Date: Mon, 14 Aug 2023 10:40:07 +0000
Subject: [PATCH 23/48] Added test to check default p2 value

---
 Wrappers/Python/test/test_functions.py | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/Wrappers/Python/test/test_functions.py b/Wrappers/Python/test/test_functions.py
index 473ff08ca4..952cd81fb3 100644
--- a/Wrappers/Python/test/test_functions.py
+++ b/Wrappers/Python/test/test_functions.py
@@ -943,10 +943,10 @@ def test_configure_tv_not_defaults(self):
         self.assertEquals(tv.info, True)
         self.assertEquals(tv.strong_convexity_constant, 1.)
         self.assertEquals(tv.tolerance, 1e-5)
-    def test_regularisation_parameter(self):
-        np.testing.assert_almost_equal(self.tv.regularisation_parameter, 1.)
 
-    def test_regularisation_parameter2(self):
+
+
+    def test_scaled_regularisation_parameter(self):
         np.testing.assert_almost_equal(self.tv_scaled.regularisation_parameter,
                                        self.alpha)
@@ -1172,6 +1172,17 @@ def test_non_scalar_tau_cil_tv(self):
 
         np.testing.assert_allclose(res1.array, res2.array, atol=1e-3)
 
+    def test_get_p2(self):
+        self.assertEquals(self.tv._p2, None)
+        data = dataexample.SHAPES.get(size=(64, 64))
+        tv=TotalVariation()
+        tv(data)
+        a=tv.gradient.range_geometry().allocate(0)
+        b=tv._get_p2()
+        np.testing.assert_allclose(tv._get_p2()[0].array, tv.gradient.range_geometry().allocate(0)[0].array)
+        np.testing.assert_allclose(tv._get_p2()[1].array, tv.gradient.range_geometry().allocate(0)[1].array)
+
+
 
 class TestLeastSquares(unittest.TestCase):

From 70de544710c558675186b2c176150a1fe9ea8883 Mon Sep 17 00:00:00 2001
From: Margaret Duff
Date: Mon, 14 Aug 2023 12:43:50 +0000
Subject: [PATCH 24/48] Neatening block function tests

---
 Wrappers/Python/test/test_functions.py | 33 +++++++++++++++++++-------
 1 file changed, 25 insertions(+), 8 deletions(-)

diff --git a/Wrappers/Python/test/test_functions.py b/Wrappers/Python/test/test_functions.py
index 952cd81fb3..f8b1e6becc 100644
--- a/Wrappers/Python/test/test_functions.py
+++ b/Wrappers/Python/test/test_functions.py
@@ -1172,17 +1172,34 @@ def test_non_scalar_tau_cil_tv(self):
 
         np.testing.assert_allclose(res1.array, res2.array, atol=1e-3)
 
-    def test_get_p2(self):
-        self.assertEquals(self.tv._p2, None)
+    def test_get_p2_default(self):
         data = dataexample.SHAPES.get(size=(64, 64))
         tv=TotalVariation()
+        self.assertEquals(tv._p2, None)
         tv(data)
-        a=tv.gradient.range_geometry().allocate(0)
-        b=tv._get_p2()
-        np.testing.assert_allclose(tv._get_p2()[0].array, tv.gradient.range_geometry().allocate(0)[0].array)
-        np.testing.assert_allclose(tv._get_p2()[1].array, tv.gradient.range_geometry().allocate(0)[1].array)
-
+        if isinstance(tv._get_p2(), BlockDataContainer):
+            for xa,xb in zip(tv._get_p2(),tv.gradient.range_geometry().allocate(0)):
+                np.testing.assert_allclose(xa.as_array(), xb.as_array(),
+                                           rtol=1e-5, atol=1e-5)
+
+    def test_get_p2_after_prox_iteration_has_changed(self):
+        data = dataexample.SHAPES.get(size=(64, 64))
+        tv=TotalVariation()
+        self.assertEquals(tv._p2, None)
+        tv.proximal(data, 1.)
+        if isinstance(tv._get_p2(), BlockDataContainer):
+            for xa,xb in zip(tv._get_p2(),tv.gradient.range_geometry().allocate(0)):
+                np.testing.assert_equal(np.any(np.not_equal(xa.as_array(), xb.as_array())), True)
+
+    def test_get_p2_after_prox_iteration_has_not_changed(self):
+        data = dataexample.SHAPES.get(size=(64, 64))
+        tv=TotalVariation(warmstart=False)
+        self.assertEquals(tv._p2, None)
+        tv.proximal(data, 1.)
+        if isinstance(tv._get_p2(), BlockDataContainer):
+            for xa,xb in zip(tv._get_p2(),tv.gradient.range_geometry().allocate(0)):
+                np.testing.assert_allclose(xa.as_array(), xb.as_array(),
+                                           rtol=1e-5, atol=1e-5)
 
 class TestLeastSquares(unittest.TestCase):

From 72350c5ede709477383cca450d90906b9ec59b32 Mon Sep 17 00:00:00 2001
From: Margaret Duff
Date: Mon, 14 Aug 2023 13:24:24 +0000
Subject: [PATCH 25/48] Changed tolerances on testing

---
 Wrappers/Python/test/test_functions.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/Wrappers/Python/test/test_functions.py b/Wrappers/Python/test/test_functions.py
index f8b1e6becc..a7a7255557 100644
--- a/Wrappers/Python/test/test_functions.py
+++ b/Wrappers/Python/test/test_functions.py
@@ -1180,7 +1180,7 @@ def test_get_p2_default(self):
         if isinstance(tv._get_p2(), BlockDataContainer):
             for xa,xb in zip(tv._get_p2(),tv.gradient.range_geometry().allocate(0)):
                 np.testing.assert_allclose(xa.as_array(), xb.as_array(),
-                                           rtol=1e-5, atol=1e-5)
+                                           rtol=1e-8, atol=1e-8)
 
     def test_get_p2_after_prox_iteration_has_changed(self):
@@ -1199,7 +1199,7 @@ def test_get_p2_after_prox_iteration_has_not_changed(self):
         if isinstance(tv._get_p2(), BlockDataContainer):
             for xa,xb in zip(tv._get_p2(),tv.gradient.range_geometry().allocate(0)):
                 np.testing.assert_allclose(xa.as_array(), xb.as_array(),
-                                           rtol=1e-5, atol=1e-5)
+                                           rtol=1e-8, atol=1e-8)
 
 class TestLeastSquares(unittest.TestCase):

From 24b2e4e4268ae8367835141d5f940e58dc78b745 Mon Sep 17 00:00:00 2001
From: Margaret Duff
Date: Thu, 17 Aug 2023 09:41:37 +0000
Subject: [PATCH 26/48] Changes to unittests after discussion with Gemma

---
 Wrappers/Python/test/test_functions.py | 101 +++++++++++++------------
 1 file changed, 52 insertions(+), 49 deletions(-)

diff --git a/Wrappers/Python/test/test_functions.py b/Wrappers/Python/test/test_functions.py
index a7a7255557..954085c9f8 100644
--- a/Wrappers/Python/test/test_functions.py
+++ b/Wrappers/Python/test/test_functions.py
@@ -932,7 +932,7 @@ def test_configure_tv_not_defaults(self):
             info = True,
             strong_convexity_constant = 1.,
             warmstart=False)
-        self.assertEquals(tv.warmstart, False)
+        self.assertEquals(tv.warmstart, False) 
         self.assertEquals(tv.iterations, 100)
         self.assertEquals(tv.correlation, "SpaceChannels")
         self.assertEquals(tv.backend, "numpy")
@@ -948,7 +948,7 @@ def test_configure_tv_not_defaults(self):
 
     def test_scaled_regularisation_parameter(self):
         np.testing.assert_almost_equal(self.tv_scaled.regularisation_parameter,
-                                       self.alpha)
+                                       self.alpha, err_msg='Multiplying the TV functional did not change the regularisation parameter')
 
     def test_rmul(self):
         assert isinstance(self.tv_scaled, TotalVariation)
@@ -963,29 +963,29 @@ def test_rmul2(self):
             tv = alpha * TotalVariation()
 
     def test_rmul3(self):
-        self.assertEqual(self.alpha, self.tv_scaled.regularisation_parameter)
+        self.assertEqual(self.alpha, self.tv_scaled.regularisation_parameter, msg='Failed to scale the TV regularisation parameter')
         alpha = 2.
        tv2 = alpha * self.tv_scaled
-        self.assertEqual(self.alpha * alpha, tv2.regularisation_parameter)
+        self.assertEqual(self.alpha * alpha, tv2.regularisation_parameter, msg="Failed at scaling regularisation parameter of already scaled TotalVariation")
 
     def test_call_real_isotropic(self):
         x_real = self.ig_real.allocate('random', seed=4)
 
         res1 = self.tv_iso(x_real)
         res2 = self.grad.direct(x_real).pnorm(2).sum()
-        np.testing.assert_equal(res1, res2)
+        np.testing.assert_equal(res1, res2, err_msg="Error with isotrpopic TV calculation")
 
     def test_call_real_anisotropic(self):
         x_real = self.ig_real.allocate('random', seed=4)
 
         res1 = self.tv_aniso(x_real)
         res2 = self.grad.direct(x_real).pnorm(1).sum()
-        np.testing.assert_almost_equal(res1, res2, decimal=3)
+        np.testing.assert_almost_equal(res1, res2, decimal=3, err_msg="Error with anisotropic TV calculation")
 
-    def test_strongly_convex_TV(self):
+    def test_strongly_convex_CIL_TV(self):
 
         TV_no_strongly_convex = self.alpha * TotalVariation()
-        self.assertEqual(TV_no_strongly_convex.strong_convexity_constant, 0)
+        self.assertEqual(TV_no_strongly_convex.strong_convexity_constant, 0, msg="Error strong convexity constant not set to zero by default")
 
         # TV as strongly convex, with "small" strongly convex constant
         TV_strongly_convex = self.alpha * TotalVariation(
@@ -997,7 +997,7 @@ def test_strongly_convex_TV(self):
         res2 = TV_no_strongly_convex(
             x_real) + (TV_strongly_convex.strong_convexity_constant /
                        2) * x_real.squared_norm()
-        np.testing.assert_allclose(res1, res2, atol=1e-3)
+        np.testing.assert_allclose(res1, res2, atol=1e-3, err_msg='TV calculation with and without strong convexity not equal')
 
         # check proximal
         x_real = self.ig_real.allocate('random', seed=4)
@@ -1006,17 +1006,17 @@ def test_strongly_convex_TV(self):
         tmp_x_real = x_real.copy()
         res2 = TV_strongly_convex.proximal(x_real, tau=1.0)
         # check input remain the same after proximal
-        np.testing.assert_array_equal(tmp_x_real.array, x_real.array)
+        np.testing.assert_array_equal(tmp_x_real.array, x_real.array, err_msg='Input not the same after calling proximal')
 
-        np.testing.assert_allclose(res1.array, res2.array, atol=1e-3)
+        np.testing.assert_allclose(res1.array, res2.array, atol=1e-3, err_msg="TV proximal calculation not the same with and without strong convexity")
 
     @unittest.skipUnless(has_ccpi_regularisation,
                          "Regularisation Toolkit not present")
-    def test_strongly_convex_CIL_FGP_TV(self):
+    def test_strongly_convex_FGP_TV(self):
 
         FGP_TV_no_strongly_convex = self.alpha * FGP_TV()
         self.assertEqual(FGP_TV_no_strongly_convex.strong_convexity_constant,
-                         0)
+                         0, msg="Default strong-convexity constant not set to zero")
 
         # TV as strongly convex, with "small" strongly convex constant
         FGP_TV_strongly_convex = self.alpha * FGP_TV(
@@ -1029,7 +1029,7 @@ def test_strongly_convex_FGP_TV(self):
         res2 = FGP_TV_no_strongly_convex(
             x_real) + (FGP_TV_strongly_convex.strong_convexity_constant /
                        2) * x_real.squared_norm()
-        np.testing.assert_allclose(res1, res2, atol=1e-3)
+        np.testing.assert_allclose(res1, res2, atol=1e-3, err_msg='Failed at comparing FGP_TV call with and without strong convexity')
 
         # check proximal
         x_real = self.ig_real.allocate('random', seed=4)
@@ -1038,14 +1038,14 @@ def test_strongly_convex_FGP_TV(self):
         tmp_x_real = x_real.copy()
         res2 = FGP_TV_strongly_convex.proximal(x_real, tau=1.0)
         # check input remain the same after proximal
-        np.testing.assert_array_equal(tmp_x_real.array, x_real.array)
+        np.testing.assert_array_equal(tmp_x_real.array, x_real.array, err_msg='Failed at checking input remains the same after calling proximal')
 
-        np.testing.assert_allclose(res1.array, res2.array, atol=1e-3)
+        np.testing.assert_allclose(res1.array, res2.array, atol=1e-3, err_msg="For FGP_TV comparing prox with and without strong convexity")
 
     @unittest.skipUnless(has_ccpi_regularisation,
                          "Regularisation Toolkit not present")
     def test_compare_regularisation_toolkit(self):
-        data = dataexample.SHAPES.get(size=(64, 64))
+        data = dataexample.SHAPES.get(size=(32, 32))
         ig = data.geometry
         ag = ig
@@ -1060,7 +1060,7 @@ def test_compare_regularisation_toolkit(self):
 
         # CIL_FGP_TV no tolerance
         g_CIL = alpha * TotalVariation(
-            iters, tolerance=None, lower=0, info=True)
+            iters, tolerance=None, lower=0, info=True, warmstart=False)
         t0 = timer()
         res1 = g_CIL.proximal(noisy_data, 1.)
         t1 = timer()
@@ -1083,12 +1083,12 @@ def test_compare_regularisation_toolkit(self):
 
         np.testing.assert_array_almost_equal(res1.as_array(),
                                              res2.as_array(),
-                                             decimal=4)
+                                             decimal=4, err_msg='Comparing the CCPi proximal against the CIL TV proximal, no tolerance')
 
         # print("Compare CIL_FGP_TV vs CCPiReg_FGP_TV with iterations.")
         iters = 408
-        # CIL_FGP_TV no tolerance
-        g_CIL = alpha * TotalVariation(iters, tolerance=1e-9, lower=0.)
+        # CIL_FGP_TV with tolerance
+        g_CIL = alpha * TotalVariation(iters, tolerance=1e-9, lower=0., warmstart=False)
         t0 = timer()
         res1 = g_CIL.proximal(noisy_data, 1.)
         t1 = timer()
@@ -1111,14 +1111,14 @@ def test_compare_regularisation_toolkit(self):
         # print(t3-t2)
         np.testing.assert_array_almost_equal(res1.as_array(),
                                              res2.as_array(),
-                                             decimal=3)
+                                             decimal=3, err_msg='Comparing the CCPi proximal against the CIL TV proximal, with tolerance')
 
     @unittest.skipUnless(has_tomophantom and has_ccpi_regularisation,
                          "Missing Tomophantom or Regularisation-Toolkit")
     def test_compare_regularisation_toolkit_tomophantom(self):
         # print ("Building 3D phantom using TomoPhantom software")
         model = 13  # select a model number from the library
-        N_size = 64  # Define phantom dimensions using a scalar value (cubic phantom)
+        N_size = 32  # Define phantom dimensions using a scalar value (cubic phantom)
         #This will generate a N_size x N_size x N_size phantom (3D)
 
         ig = ImageGeometry(N_size, N_size, N_size)
@@ -1131,7 +1131,7 @@ def test_compare_regularisation_toolkit_tomophantom(self):
 
         # print("Use tau as an array of ones")
         # CIL_TotalVariation no tolerance
-        g_CIL = alpha * TotalVariation(iters, tolerance=None, info=True)
+        g_CIL = alpha * TotalVariation(iters, tolerance=None, info=True, warmstart=False)
         # res1 = g_CIL.proximal(noisy_data, ig.allocate(1.))
         t0 = timer()
         res1 = g_CIL.proximal(noisy_data, ig.allocate(1.))
@@ -1158,7 +1158,7 @@ def test_compare_regularisation_toolkit_tomophantom(self):
 
         np.testing.assert_allclose(res1.as_array(),
                                    res2.as_array(),
-                                   atol=7.5e-2)
+                                   atol=7.5e-2, err_msg='Comparing the CCPi proximal against the CIL TV proximal, with tolerance')
 
     def test_non_scalar_tau_cil_tv(self):
@@ -1170,36 +1170,39 @@ def test_non_scalar_tau_cil_tv(self):
         # use the alpha * TV
         res2 = self.tv_scaled.proximal(x_real, tau=1.0)
 
-        np.testing.assert_allclose(res1.array, res2.array, atol=1e-3)
+        np.testing.assert_allclose(res1.array, res2.array, atol=1e-3, err_msg="Testing non scalar tau in prox calculation")
 
-    def test_get_p2_default(self):
+    def test_get_p2_with_warmstart(self):
         data = dataexample.SHAPES.get(size=(16, 16))
-        tv=TotalVariation()
-        self.assertEquals(tv._p2, None)
+        tv=TotalVariation(warmstart=True)
+        self.assertEquals(tv._p2, None, msg="tv._p2 not initialised to None")
         tv(data)
-        if isinstance(tv._get_p2(), BlockDataContainer):
-            for xa,xb in zip(tv._get_p2(),tv.gradient.range_geometry().allocate(0)):
-                np.testing.assert_allclose(xa.as_array(), xb.as_array(),
-                                           rtol=1e-8, atol=1e-8)
+        checkp2=tv.gradient.range_geometry().allocate(0)
+        for i, x in enumerate(tv._get_p2()):
+            np.testing.assert_allclose(x.as_array(), checkp2[i].as_array(), rtol=1e-8, atol=1e-8, err_msg="P2 not initially set to zero")
+        tv.proximal(data, 1.)
+        for i, x in enumerate(tv._get_p2()):
+            np.testing.assert_equal(np.any(np.not_equal(x.as_array(), checkp2[i].as_array())), True, err_msg="The stored value of p2 doesn't change after calling proximal")
 
-    def test_get_p2_after_prox_iteration_has_changed(self):
-        data = dataexample.SHAPES.get(size=(64, 64))
-        tv=TotalVariation()
-        self.assertEquals(tv._p2, None)
-        tv.proximal(data, 1.)
-        if isinstance(tv._get_p2(), BlockDataContainer):
-            for xa,xb in zip(tv._get_p2(),tv.gradient.range_geometry().allocate(0)):
-                np.testing.assert_equal(np.any(np.not_equal(xa.as_array(), xb.as_array())), True)
+        #TODO: compare with some known values
 
-    def test_get_p2_after_prox_iteration_has_not_changed(self):
-        data = dataexample.SHAPES.get(size=(64, 64))
+    def test_p2_convergence(self):
+
+        #TODO:
+        pass
+    def test_get_p2_without_warmstart(self):
+        data = dataexample.SHAPES.get(size=(16, 16))
         tv=TotalVariation(warmstart=False)
-        self.assertEquals(tv._p2, None)
-        tv.proximal(data, 1.)
-        if isinstance(tv._get_p2(), BlockDataContainer):
-            for xa,xb in zip(tv._get_p2(),tv.gradient.range_geometry().allocate(0)):
-                np.testing.assert_allclose(xa.as_array(), xb.as_array(),
-                                           rtol=1e-8, atol=1e-8)
+        self.assertEquals(tv._p2, None, msg="tv._p2 not initialised to None")
+        tv(data)
+        checkp2=tv.gradient.range_geometry().allocate(0)
+        for i, x in enumerate(tv._get_p2()):
+            np.testing.assert_allclose(x.as_array(), checkp2[i].as_array(), rtol=1e-8, atol=1e-8, err_msg="P2 not initially set to zero")
+        tv.proximal(data, 1.)
+        for i, x in enumerate(tv._get_p2()):
+            np.testing.assert_allclose(x.as_array(), checkp2[i].as_array(), rtol=1e-8, atol=1e-8, err_msg="P2 not reset to zero after a call to proximal")
+
+
 
 class TestLeastSquares(unittest.TestCase):

From be508d6876fbbb0c5f837846f6f90af590881cf8 Mon Sep 17 00:00:00 2001
From: Margaret Duff
Date: Thu, 17 Aug 2023 09:45:10 +0000
Subject: [PATCH 27/48] Spelling

---
 Wrappers/Python/test/test_functions.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Wrappers/Python/test/test_functions.py b/Wrappers/Python/test/test_functions.py
index 954085c9f8..4f6aa6b4a0 100644
--- a/Wrappers/Python/test/test_functions.py
+++ b/Wrappers/Python/test/test_functions.py
@@ -973,7 +973,7 @@ def test_call_real_isotropic(self):
 
         res1 = self.tv_iso(x_real)
         res2 = self.grad.direct(x_real).pnorm(2).sum()
-        np.testing.assert_equal(res1, res2, err_msg="Error with isotrpopic TV calculation")
+        np.testing.assert_equal(res1, res2, err_msg="Error with isotropic TV calculation")

From 126545b2688892844bfaf64c9c973ff1da3ca9b4 Mon Sep 17 00:00:00 2001
From: Margaret Duff
Date: Thu, 17 Aug 2023 14:54:07 +0000
Subject: [PATCH 28/48] TV warmstart unittests

---
 Wrappers/Python/test/test_functions.py | 46 ++++++++++++++++++++------
 1 file changed, 35 insertions(+), 11 deletions(-)

diff --git a/Wrappers/Python/test/test_functions.py b/Wrappers/Python/test/test_functions.py
index 4f6aa6b4a0..3d940e5a1e 100644
--- a/Wrappers/Python/test/test_functions.py
+++ b/Wrappers/Python/test/test_functions.py
@@ -1045,7 +1045,7 @@ def test_strongly_convex_FGP_TV(self):
     @unittest.skipUnless(has_ccpi_regularisation,
                          "Regularisation Toolkit not present")
     def test_compare_regularisation_toolkit(self):
-        data = dataexample.SHAPES.get(size=(32, 32))
+        data = dataexample.SHAPES.get(size=(16, 16))
         ig = data.geometry
         ag = ig
@@ -1113,12 +1113,24 @@ def test_compare_regularisation_toolkit(self):
                                              res2.as_array(),
                                              decimal=3, err_msg='Comparing the CCPi proximal against the CIL TV proximal, with tolerance')
 
+        # CIL_FGP_TV with warmstart
+        iters=10
+        g_CIL = alpha * TotalVariation(iters, lower=0., warmstart=True)
+        t0 = timer()
+        for i in range(6):
+            res1 = g_CIL.proximal(noisy_data, 1.)
+        t1 = timer()
+        np.testing.assert_array_almost_equal(res1.as_array(),
+                                             res2.as_array(),
+                                             decimal=3, err_msg='Comparing the CCPi proximal against the CIL TV proximal, with warmstart')
+
+        # print(t1-t0)
 
     @unittest.skipUnless(has_tomophantom and has_ccpi_regularisation,
                          "Missing Tomophantom or Regularisation-Toolkit")
     def test_compare_regularisation_toolkit_tomophantom(self):
         # print ("Building 3D phantom using TomoPhantom software")
         model = 13  # select a model number from the library
-        N_size = 32  # Define phantom dimensions using a scalar value (cubic phantom)
+        N_size = 16  # Define phantom dimensions using a scalar value (cubic phantom)
         #This will generate a N_size x N_size x N_size phantom (3D)
 
         ig = ImageGeometry(N_size, N_size, N_size)
@@ -1158,7 +1170,20 @@ def test_compare_regularisation_toolkit_tomophantom(self):
 
         np.testing.assert_allclose(res1.as_array(),
                                    res2.as_array(),
-                                   atol=7.5e-2, err_msg='Comparing the CCPi proximal against the CIL TV proximal, with tolerance')
+                                   atol=5e-2, err_msg='Comparing the CCPi proximal against the CIL TV proximal, without tolerance without warmstart')
+
+        #with warm start
+        iters=10
+        g_CIL = alpha * TotalVariation(iters, tolerance=None, info=True, warmstart=True)
+        # res1 = g_CIL.proximal(noisy_data, ig.allocate(1.))
+        t0 = timer()
+        for i in range(4):
+            res1 = g_CIL.proximal(noisy_data, ig.allocate(1.))
+        t1 = timer()
+        np.testing.assert_allclose(res1.as_array(),
+                                   res2.as_array(),
+                                   atol=4e-2, err_msg='Comparing the CCPi proximal against the CIL TV proximal, without tolerance with warmstart')
+
 
     def test_non_scalar_tau_cil_tv(self):
@@ -1199,21 +1224,20 @@ def test_get_p2_with_warmstart(self):
     def test_get_p2_with_warmstart(self):
         data = dataexample.SHAPES.get(size=(16, 16))
-        tv=TotalVariation(warmstart=True)
+        tv=TotalVariation(warmstart=True, max_iteration=10)
         self.assertEquals(tv._p2, None, msg="tv._p2 not initialised to None")
         tv(data)
         checkp2=tv.gradient.range_geometry().allocate(0)
         for i, x in enumerate(tv._get_p2()):
             np.testing.assert_allclose(x.as_array(), checkp2[i].as_array(), rtol=1e-8, atol=1e-8, err_msg="P2 not initially set to zero")
-        tv.proximal(data, 1.)
+        test=tv.proximal(data, 1.)
+        print(test)
+        a=np.sum(np.linalg.norm(test))
+        print(np.linalg.norm(test))
         for i, x in enumerate(tv._get_p2()):
             np.testing.assert_equal(np.any(np.not_equal(x.as_array(), checkp2[i].as_array())), True, err_msg="The stored value of p2 doesn't change after calling proximal")
+        np.testing.assert_almost_equal(np.sum(np.linalg.norm(test)),126.337265, err_msg="Incorrect value of the proximal")
 
-        #TODO: compare with some known values
-
-    def test_p2_convergence(self):
-
-        #TODO:
-        pass
+
     def test_get_p2_without_warmstart(self):
         data = dataexample.SHAPES.get(size=(16, 16))
         tv=TotalVariation(warmstart=False)

From 34de0d3da8d9bf9faf811c7553bcb2d302583127 Mon Sep 17 00:00:00 2001
From: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com>
Date: Wed, 13 Sep 2023 15:46:04 +0100
Subject: [PATCH 29/48] Update
 Wrappers/Python/cil/optimisation/functions/TotalVariation.py

Co-authored-by: Gemma Fardell <47746591+gfardell@users.noreply.github.com>
Signed-off-by: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com>
---
 Wrappers/Python/cil/optimisation/functions/TotalVariation.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
index 59b2a69a00..1eb0c08e39 100644
--- a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
+++ b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
@@ -324,7 +324,6 @@ def _fista_on_dual_rof(self, x, tau, out = None):
             should_return = True
             out = self.gradient.domain_geometry().allocate(0)
 
-        should_break = False
         for k in range(self.iterations):
 
             t0 = t

From df50ae3e3146159ddb05f66d81a1ddc7982777b8 Mon Sep 17 00:00:00 2001
From: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com>
Date: Wed, 13 Sep 2023 15:46:18 +0100
Subject: [PATCH 30/48] Update
 Wrappers/Python/cil/optimisation/functions/TotalVariation.py

Co-authored-by: Gemma Fardell <47746591+gfardell@users.noreply.github.com>
Signed-off-by: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com>
---
 Wrappers/Python/cil/optimisation/functions/TotalVariation.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
index 1eb0c08e39..53bb406194 100644
--- a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
+++ b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
@@ -327,7 +327,7 @@ def _fista_on_dual_rof(self, x, tau, out = None):
         for k in range(self.iterations):
 
             t0 = t
-            self.gradient.adjoint(tmp_q, out = out) 
+            self.gradient.adjoint(tmp_q, out = out)
             out.sapyb(tau_reg_neg, x, 1.0, out=out)
             self.projection_C(out, tau=None, out = out)

From e7bc6ed9094ede0f460ee12e367890d9d84d16ca Mon Sep 17 00:00:00 2001
From: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com>
Date: Wed, 13 Sep 2023 15:46:26 +0100
Subject: [PATCH 31/48] Update
 Wrappers/Python/cil/optimisation/functions/TotalVariation.py

Co-authored-by: Gemma Fardell <47746591+gfardell@users.noreply.github.com>
Signed-off-by: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com>
---
 Wrappers/Python/cil/optimisation/functions/TotalVariation.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
index 53bb406194..e361f3578b 100644
--- a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py
+++
b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py @@ -332,7 +332,7 @@ def _fista_on_dual_rof(self, x, tau, out = None): self.projection_C(out, tau=None, out = out) - self.gradient.direct(out, out=p1) + self.gradient.direct(out, out=p1) multip = (-self.L)/tau_reg_neg From 9abaec9e4fcfc82d35f4077fad835a174675379e Mon Sep 17 00:00:00 2001 From: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com> Date: Wed, 13 Sep 2023 15:46:42 +0100 Subject: [PATCH 32/48] Update Wrappers/Python/cil/optimisation/functions/TotalVariation.py Co-authored-by: Gemma Fardell <47746591+gfardell@users.noreply.github.com> Signed-off-by: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com> --- Wrappers/Python/cil/optimisation/functions/TotalVariation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py index e361f3578b..cf7963a7bb 100644 --- a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py +++ b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py @@ -347,7 +347,7 @@ def _fista_on_dual_rof(self, x, tau, out = None): # Depending on the case, isotropic or anisotropic, the proximal conjugate of the MixedL21Norm (isotropic case), # or the proximal conjugate of the MixedL11Norm (anisotropic case) is computed. - self.func.proximal_conjugate(tmp_q, 1.0, out=p1) + self.func.proximal_conjugate(tmp_q, 1.0, out=p1) t = (1 + np.sqrt(1 + 4 * t0 ** 2)) / 2 p1.subtract(p2, out=tmp_q) From fa98b9b90b71ff8f137ea85ecd64b2df2f0d958c Mon Sep 17 00:00:00 2001 From: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com> Date: Wed, 13 Sep 2023 15:46:52 +0100 Subject: [PATCH 33/48] Update Wrappers/Python/cil/optimisation/functions/TotalVariation.py Co-authored-by: Gemma Fardell <47746591+gfardell@users.noreply.github.com> Signed-off-by: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com> --- Wrappers/Python/cil/optimisation/functions/TotalVariation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py index cf7963a7bb..45f45edb6a 100644 --- a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py +++ b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py @@ -324,7 +324,7 @@ def _fista_on_dual_rof(self, x, tau, out = None): should_return = True out = self.gradient.domain_geometry().allocate(0) - for k in range(self.iterations): + for k in range(self.iterations): t0 = t self.gradient.adjoint(tmp_q, out = out) From 2a0027bd5da24944c554211ec843ca76abd02770 Mon Sep 17 00:00:00 2001 From: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com> Date: Wed, 13 Sep 2023 15:46:58 +0100 Subject: [PATCH 34/48] Update Wrappers/Python/cil/optimisation/functions/TotalVariation.py Co-authored-by: Gemma Fardell <47746591+gfardell@users.noreply.github.com> Signed-off-by: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com> --- Wrappers/Python/cil/optimisation/functions/TotalVariation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py index 45f45edb6a..bcb9c40874 100644 --- a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py +++ b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py @@ -404,4 +404,4 @@ def __rmul__(self, 
scalar): if not isinstance (scalar, Number): raise TypeError("scalar: Expected a number, got {}".format(type(scalar))) self.regularisation_parameter *= scalar - return self \ No newline at end of file + return self From cb126b0a32289df7db076a9293dba1f6a3f5a90f Mon Sep 17 00:00:00 2001 From: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com> Date: Wed, 13 Sep 2023 15:47:48 +0100 Subject: [PATCH 35/48] Update Wrappers/Python/cil/optimisation/functions/TotalVariation.py Co-authored-by: Gemma Fardell <47746591+gfardell@users.noreply.github.com> Signed-off-by: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com> --- Wrappers/Python/cil/optimisation/functions/TotalVariation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py index bcb9c40874..23231e6689 100644 --- a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py +++ b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py @@ -87,7 +87,7 @@ class TotalVariation(Function): Splits the Gradient into spatial gradient and spectral or temporal gradient for multichannel data. info : :obj:`boolean`, default = False - Information is printed for the stopping criterion of the FGP algorithm used to solve the dual problem + Information is printed for the stopping criterion of the FGP algorithm used to solve the dual problem of the Total Variation Denoising problem (ROF). strong_convexity_constant : :obj:`float`, default = 0 From 6499a4379fa1c1823e12f66f6cf00c52f727c16a Mon Sep 17 00:00:00 2001 From: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com> Date: Wed, 13 Sep 2023 15:47:55 +0100 Subject: [PATCH 36/48] Update Wrappers/Python/cil/optimisation/functions/TotalVariation.py Co-authored-by: Gemma Fardell <47746591+gfardell@users.noreply.github.com> Signed-off-by: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com> --- Wrappers/Python/cil/optimisation/functions/TotalVariation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py index 23231e6689..cffe61c293 100644 --- a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py +++ b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py @@ -98,7 +98,7 @@ class TotalVariation(Function): .. math:: \underset{u}{\mathrm{argmin}} \frac{1}{2\frac{\tau}{1+\gamma\tau}}\|u - \frac{b}{1+\gamma\tau}\|^{2} + \mathrm{TV}(u) - warmstart : :obj`boolean`, default = True + warmstart : :obj:`boolean`, default = True If set to true, the FGP aglorithm used to solve the dual problem of the Total Variation Denoising problem (ROF) is initiated by the final value from the previous iteration and not at zero. This allows the max_iteration value to be reduced to 5-10 iterations. With warmstart this function will keep in memory the range of the gradient of the image to be denoised, i.e. N times the dimensionality of the image. 
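A minimal usage sketch of the warm-start behaviour documented in the patch above (the SHAPES test image, the value of `alpha` and the iteration count are illustrative assumptions, not part of the patch; at this point in the series the keyword is still spelled `warmstart`):

>>> from cil.optimisation.functions import TotalVariation
>>> from cil.utilities import dataexample
>>> data = dataexample.SHAPES.get(size=(32, 32))
>>> alpha = 0.1
>>> TV = alpha * TotalVariation(max_iteration=10, warmstart=True)
>>> sol = TV.proximal(data, tau=1.0)  # first call: the dual variable starts at zero
>>> sol = TV.proximal(data, tau=1.0)  # later calls: FGP restarts from the stored dual variable

Because the stored dual variable should already be close to the dual optimum, each later call typically needs only the 5-10 inner FGP iterations mentioned above, rather than the ~100 recommended for a cold start.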
From 995239ef05f0561d0d19f57aaa5fabd21d62ff7e Mon Sep 17 00:00:00 2001 From: Margaret Duff Date: Wed, 13 Sep 2023 14:58:43 +0000 Subject: [PATCH 37/48] Updated documentation --- .../optimisation/functions/TotalVariation.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py index cffe61c293..001a9ff951 100644 --- a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py +++ b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py @@ -39,8 +39,8 @@ class TotalVariation(Function): Notes ----- - The :code:`TotalVariation` (TV) :code:`Function` acts as a compositite function, i.e., - the composition of the :class:`.MixedL21Norm` fuction and the :class:`.GradientOperator` operator, + The :code:`TotalVariation` (TV) :code:`Function` acts as a composite function, i.e., + the composition of the :class:`.MixedL21Norm` function and the :class:`.GradientOperator` operator, .. math:: f(u) = \|u\|_{2,1}, \Rightarrow (f\circ\nabla)(u) = f(\nabla x) = \mathrm{TV}(u) @@ -61,7 +61,7 @@ class TotalVariation(Function): max_iteration : :obj:`int`, default = 5 Maximum number of iterations for the FGP algorithm to solve to solve the dual problem of the Total Variation Denoising problem (ROF). If warmstart=False, this should be around 100, - or larger with a set tolerance. + or larger, with a set tolerance. tolerance : :obj:`float`, default = None Stopping criterion for the FGP algorithm used to to solve the dual problem of the Total Variation Denoising problem (ROF) @@ -87,7 +87,7 @@ class TotalVariation(Function): Splits the Gradient into spatial gradient and spectral or temporal gradient for multichannel data. info : :obj:`boolean`, default = False - Information is printed for the stopping criterion of the FGP algorithm used to solve the dual problem + Information is printed for the stopping criterion of the FGP algorithm used to solve the dual problem of the Total Variation Denoising problem (ROF). strong_convexity_constant : :obj:`float`, default = 0 @@ -99,12 +99,16 @@ class TotalVariation(Function): .. math:: \underset{u}{\mathrm{argmin}} \frac{1}{2\frac{\tau}{1+\gamma\tau}}\|u - \frac{b}{1+\gamma\tau}\|^{2} + \mathrm{TV}(u) warmstart : :obj:`boolean`, default = True - If set to true, the FGP aglorithm used to solve the dual problem of the Total Variation Denoising problem (ROF) is initiated by the final value from the previous iteration and not at zero. + If set to true, the FGP algorithm used to solve the dual problem of the Total Variation Denoising problem (ROF) is initiated by the final value from the previous iteration and not at zero. This allows the max_iteration value to be reduced to 5-10 iterations. - With warmstart this function will keep in memory the range of the gradient of the image to be denoised, i.e. N times the dimensionality of the image. - However, during the evaluation of `proximal` the memory requirements will be unchanged as the same amount of memory will need to be allocated and deallocated. + + Note + ---- + With warmstart set to the default, True, the TV function will keep in memory the range of the gradient of the image to be denoised, i.e. N times the dimensionality of the image. This increases the memory requirements. + However, during the evaluation of `proximal` the memory requirements will be unchanged as the same amount of memory will need to be allocated and deallocated. 
+ Note ---- From c1fec463925cafb282683a7ea2f3068885c6bbf8 Mon Sep 17 00:00:00 2001 From: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com> Date: Thu, 14 Sep 2023 10:02:16 +0100 Subject: [PATCH 38/48] Update Wrappers/Python/cil/optimisation/functions/TotalVariation.py Co-authored-by: Edoardo Pasca Signed-off-by: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com> --- Wrappers/Python/cil/optimisation/functions/TotalVariation.py | 1 - 1 file changed, 1 deletion(-) diff --git a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py index 001a9ff951..0ca0e5855a 100644 --- a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py +++ b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py @@ -178,7 +178,6 @@ def __init__(self, # Iterations for FGP algorithm used to solve the dual problem of the Total Variation Denoising problem (ROF) self.iterations = max_iteration - # Tolerance for FGP algorithm used to solve the dual problem of the Total Variation Denoising problem (ROF) self.tolerance = tolerance # Total variation correlation (isotropic=Default) From f4eba6a04ecf02fd0dbb5b5765c5cff9293e5ea8 Mon Sep 17 00:00:00 2001 From: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com> Date: Thu, 14 Sep 2023 10:02:32 +0100 Subject: [PATCH 39/48] Update Wrappers/Python/cil/optimisation/functions/TotalVariation.py Co-authored-by: Edoardo Pasca Signed-off-by: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com> --- Wrappers/Python/cil/optimisation/functions/TotalVariation.py | 1 - 1 file changed, 1 deletion(-) diff --git a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py index 0ca0e5855a..0f836da3e7 100644 --- a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py +++ b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py @@ -175,7 +175,6 @@ def __init__(self, # Regularising parameter = alpha self.regularisation_parameter = 1. - # Iterations for FGP algorithm used to solve the dual problem of the Total Variation Denoising problem (ROF) self.iterations = max_iteration self.tolerance = tolerance From 3109a9f221c02c4e62abbc9db25cd3786f06464d Mon Sep 17 00:00:00 2001 From: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com> Date: Thu, 14 Sep 2023 10:03:03 +0100 Subject: [PATCH 40/48] Update Wrappers/Python/cil/optimisation/functions/TotalVariation.py Co-authored-by: Edoardo Pasca Signed-off-by: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com> --- Wrappers/Python/cil/optimisation/functions/TotalVariation.py | 1 - 1 file changed, 1 deletion(-) diff --git a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py index 0f836da3e7..c16bf8253f 100644 --- a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py +++ b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py @@ -199,7 +199,6 @@ def __init__(self, self._gradient = None self._domain = None - # Print stopping information (iterations and tolerance error) of FGP algorithm used to solve the dual problem of the Total Variation Denoising problem (ROF) self.info = info if self.info: warnings.warn(" `info` is deprecate. 
Please use logging instead.") From 6dc1da22ed2b489038c673eec1b0087ed05dfd69 Mon Sep 17 00:00:00 2001 From: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com> Date: Thu, 14 Sep 2023 10:03:24 +0100 Subject: [PATCH 41/48] Update Wrappers/Python/cil/optimisation/functions/TotalVariation.py Co-authored-by: Edoardo Pasca Signed-off-by: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com> --- Wrappers/Python/cil/optimisation/functions/TotalVariation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py index c16bf8253f..ef2a4d80c3 100644 --- a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py +++ b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py @@ -286,7 +286,7 @@ def proximal(self, x, tau, out = None): def _fista_on_dual_rof(self, x, tau, out = None): - r""" Implements the Fast Gradient Projection (FGP) algorithm to solve the dual problem + r""" Runs the Fast Gradient Projection (FGP) algorithm to solve the dual problem of the Total Variation Denoising problem (ROF). .. math: \max_{\|y\|_{\infty}<=1.} \frac{1}{2}\|\nabla^{*} y + x \|^{2} - \frac{1}{2}\|x\|^{2} From 8a4360102a0b447b5c66c2e7b9ab949f6f7028be Mon Sep 17 00:00:00 2001 From: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com> Date: Thu, 14 Sep 2023 10:03:36 +0100 Subject: [PATCH 42/48] Update Wrappers/Python/test/test_functions.py Co-authored-by: Edoardo Pasca Signed-off-by: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com> --- Wrappers/Python/test/test_functions.py | 1 - 1 file changed, 1 deletion(-) diff --git a/Wrappers/Python/test/test_functions.py b/Wrappers/Python/test/test_functions.py index 3d940e5a1e..707b68718b 100644 --- a/Wrappers/Python/test/test_functions.py +++ b/Wrappers/Python/test/test_functions.py @@ -1116,7 +1116,6 @@ def test_compare_regularisation_toolkit(self): # CIL_FGP_TV with warmstart iters=10 g_CIL = alpha * TotalVariation(iters, lower=0., warmstart=True) - t0 = timer() for i in range(6): res1 = g_CIL.proximal(noisy_data, 1.) t1 = timer() From 5e4edee670bd2e44ff09e5fe49e8fd3f6fc33b0b Mon Sep 17 00:00:00 2001 From: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com> Date: Thu, 14 Sep 2023 10:03:48 +0100 Subject: [PATCH 43/48] Update Wrappers/Python/test/test_functions.py Co-authored-by: Edoardo Pasca Signed-off-by: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com> --- Wrappers/Python/test/test_functions.py | 1 - 1 file changed, 1 deletion(-) diff --git a/Wrappers/Python/test/test_functions.py b/Wrappers/Python/test/test_functions.py index 707b68718b..c372358c34 100644 --- a/Wrappers/Python/test/test_functions.py +++ b/Wrappers/Python/test/test_functions.py @@ -1118,7 +1118,6 @@ def test_compare_regularisation_toolkit(self): g_CIL = alpha * TotalVariation(iters, lower=0., warmstart=True) for i in range(6): res1 = g_CIL.proximal(noisy_data, 1.) 
- t1 = timer() np.testing.assert_array_almost_equal(res1.as_array(), res2.as_array(), decimal=3, err_msg='Comparing the CCPi proximal against the CIL TV proximal, with warmstart') From 244586b5fe0e7872afb91416f8bbfe0dff46e4a8 Mon Sep 17 00:00:00 2001 From: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com> Date: Thu, 14 Sep 2023 10:04:11 +0100 Subject: [PATCH 44/48] Update Wrappers/Python/cil/optimisation/functions/TotalVariation.py Co-authored-by: Edoardo Pasca Signed-off-by: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com> --- Wrappers/Python/cil/optimisation/functions/TotalVariation.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py index ef2a4d80c3..13126e913a 100644 --- a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py +++ b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py @@ -346,8 +346,6 @@ def _fista_on_dual_rof(self, x, tau, out = None): if error < self.tolerance: break - # Depending on the case, isotropic or anisotropic, the proximal conjugate of the MixedL21Norm (isotropic case), - # or the proximal conjugate of the MixedL11Norm (anisotropic case) is computed. self.func.proximal_conjugate(tmp_q, 1.0, out=p1) t = (1 + np.sqrt(1 + 4 * t0 ** 2)) / 2 From 6f22d38afa2543832191143ca552682147b48f2a Mon Sep 17 00:00:00 2001 From: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com> Date: Thu, 14 Sep 2023 10:04:18 +0100 Subject: [PATCH 45/48] Update Wrappers/Python/test/test_functions.py Co-authored-by: Edoardo Pasca Signed-off-by: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com> --- Wrappers/Python/test/test_functions.py | 1 - 1 file changed, 1 deletion(-) diff --git a/Wrappers/Python/test/test_functions.py b/Wrappers/Python/test/test_functions.py index c372358c34..3621d4e208 100644 --- a/Wrappers/Python/test/test_functions.py +++ b/Wrappers/Python/test/test_functions.py @@ -1122,7 +1122,6 @@ def test_compare_regularisation_toolkit(self): res2.as_array(), decimal=3, err_msg='Comparing the CCPi proximal against the CIL TV proximal, with warmstart') - # print(t1-t0) @unittest.skipUnless(has_tomophantom and has_ccpi_regularisation, "Missing Tomophantom or Regularisation-Toolkit") def test_compare_regularisation_toolkit_tomophantom(self): From e6dd3deb8e13114ba196e237eb2e7a43029f04c2 Mon Sep 17 00:00:00 2001 From: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com> Date: Thu, 14 Sep 2023 10:04:28 +0100 Subject: [PATCH 46/48] Update Wrappers/Python/cil/optimisation/functions/TotalVariation.py Co-authored-by: Edoardo Pasca Signed-off-by: Margaret Duff <43645617+MargaretDuff@users.noreply.github.com> --- Wrappers/Python/cil/optimisation/functions/TotalVariation.py | 1 - 1 file changed, 1 deletion(-) diff --git a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py index 13126e913a..a7e01a1b72 100644 --- a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py +++ b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py @@ -360,7 +360,6 @@ def _fista_on_dual_rof(self, x, tau, out = None): if self.warmstart: self._p2 = p2 - # Print stopping information (iterations and tolerance error) of FGP algorithm used to solve the dual problem of the Total Variation Denoising problem (ROF) if self.info: if self.tolerance is not None: logging.info("Stop at {} iterations with tolerance {} .".format(k, error)) From 
34140736210871a22bdce085de04ec5f3ce4352d Mon Sep 17 00:00:00 2001 From: Margaret Duff Date: Thu, 14 Sep 2023 09:29:06 +0000 Subject: [PATCH 47/48] Changes requested by Edo --- .../optimisation/functions/TotalVariation.py | 180 +++++++++--------- 1 file changed, 87 insertions(+), 93 deletions(-) diff --git a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py index a7e01a1b72..168dd60d47 100644 --- a/Wrappers/Python/cil/optimisation/functions/TotalVariation.py +++ b/Wrappers/Python/cil/optimisation/functions/TotalVariation.py @@ -27,9 +27,8 @@ import logging - class TotalVariation(Function): - + r""" Total variation Function .. math:: \mathrm{TV}(u) := \|\nabla u\|_{2,1} = \sum \|\nabla u\|_{2},\, (\mbox{isotropic}) @@ -48,7 +47,7 @@ class TotalVariation(Function): algorithm to solve: .. math:: \mathrm{prox}_{\tau \mathrm{TV}}(b) := \underset{u}{\mathrm{argmin}} \frac{1}{2\tau}\|u - b\|^{2} + \mathrm{TV}(u) - + The algorithm used for the proximal operator of TV is the Fast Gradient Projection algorithm (or FISTA) applied to the _dual problem_ of the above problem, see :cite:`BeckTeboulle_b`, :cite:`BeckTeboulle_a`, :cite:`Zhu2010`. @@ -60,12 +59,13 @@ class TotalVariation(Function): max_iteration : :obj:`int`, default = 5 Maximum number of iterations for the FGP algorithm to solve to solve the dual problem - of the Total Variation Denoising problem (ROF). If warmstart=False, this should be around 100, + of the Total Variation Denoising problem (ROF). If warm_start=False, this should be around 100, or larger, with a set tolerance. tolerance : :obj:`float`, default = None Stopping criterion for the FGP algorithm used to to solve the dual problem - of the Total Variation Denoising problem (ROF) - + of the Total Variation Denoising problem (ROF). If the difference between iterates in the FGP algorithm is less than the tolerance + the iterations end before the max_iteration number. + .. math:: \|x^{k+1} - x^{k}\|_{2} < \mathrm{tolerance} correlation : :obj:`str`, default = `Space` @@ -98,17 +98,17 @@ class TotalVariation(Function): .. math:: \underset{u}{\mathrm{argmin}} \frac{1}{2\frac{\tau}{1+\gamma\tau}}\|u - \frac{b}{1+\gamma\tau}\|^{2} + \mathrm{TV}(u) - warmstart : :obj:`boolean`, default = True + warm_start : :obj:`boolean`, default = True If set to true, the FGP algorithm used to solve the dual problem of the Total Variation Denoising problem (ROF) is initiated by the final value from the previous iteration and not at zero. This allows the max_iteration value to be reduced to 5-10 iterations. - + Note ---- - With warmstart set to the default, True, the TV function will keep in memory the range of the gradient of the image to be denoised, i.e. N times the dimensionality of the image. This increases the memory requirements. + With warm_start set to the default, True, the TV function will keep in memory the range of the gradient of the image to be denoised, i.e. N times the dimensionality of the image. This increases the memory requirements. However, during the evaluation of `proximal` the memory requirements will be unchanged as the same amount of memory will need to be allocated and deallocated. 
- + Note ---- @@ -153,52 +153,50 @@ class TotalVariation(Function): >>> TV = alpha * TotalVariation(isotropic=False, strong_convexity_constant=gamma) >>> sol = TV.proximal(b, tau = 1.0) - """ - + """ def __init__(self, - max_iteration=10, - tolerance = None, - correlation = "Space", - backend = "c", - lower = None, - upper = None, - isotropic = True, - split = False, - info = False, - strong_convexity_constant = 0, - warmstart=True): - - - super(TotalVariation, self).__init__(L = None) + max_iteration=10, + tolerance=None, + correlation="Space", + backend="c", + lower=None, + upper=None, + isotropic=True, + split=False, + info=False, + strong_convexity_constant=0, + warm_start=True): + + super(TotalVariation, self).__init__(L=None) # Regularising parameter = alpha self.regularisation_parameter = 1. - + self.iterations = max_iteration - + self.tolerance = tolerance - + # Total variation correlation (isotropic=Default) self.isotropic = isotropic - + # correlation space or spacechannels self.correlation = correlation - self.backend = backend - + self.backend = backend + # Define orthogonal projection onto the convex set C if lower is None: lower = -np.inf if upper is None: - upper = np.inf + upper = np.inf self.lower = lower self.upper = upper self.projection_C = IndicatorBox(lower, upper).proximal - - # Setup GradientOperator as None. This is to avoid domain argument in the __init__ + + # Setup GradientOperator as None. This is to avoid domain argument in the __init__ self._gradient = None self._domain = None - + self.info = info if self.info: warnings.warn(" `info` is deprecate. Please use logging instead.") @@ -206,8 +204,8 @@ def __init__(self, # splitting Gradient self.split = split - #For the warmstart functionality - self.warmstart = warmstart + # For the warm_start functionality + self.warm_start = warm_start self._p2 = None # Strong convexity for TV @@ -219,11 +217,10 @@ def __init__(self, else: self.func = MixedL11Norm() - def _get_p2(self): - r"""The initial value for the dual in the proximal calculation - allocated to zero in the case of warmstart=False - or initialised as the last iterate seen in the proximal calculation in the case warmstart=True .""" - + r"""The initial value for the dual in the proximal calculation - allocated to zero in the case of warm_start=False + or initialised as the last iterate seen in the proximal calculation in the case warm_start=True .""" + if self._p2 is None: return self.gradient.range_geometry().allocate(0) else: @@ -236,11 +233,11 @@ def regularisation_parameter(self): @regularisation_parameter.setter def regularisation_parameter(self, value): if not isinstance(value, Number): - raise TypeError("regularisation_parameter: expected a number, got {}".format(type(value))) + raise TypeError( + "regularisation_parameter: expected a number, got {}".format(type(value))) self._regularisation_parameter = value def __call__(self, x): - r""" Returns the value of the TotalVariation function at :code:`x` .""" try: @@ -253,45 +250,42 @@ def __call__(self, x): if self._L is None: self.calculate_Lipschitz() - if self.strong_convexity_constant>0: - strongly_convex_term = (self.strong_convexity_constant/2)*x.squared_norm() + if self.strong_convexity_constant > 0: + strongly_convex_term = ( + self.strong_convexity_constant/2)*x.squared_norm() else: strongly_convex_term = 0 return self.regularisation_parameter * self.func(self.gradient.direct(x)) + strongly_convex_term - - def proximal(self, x, tau, out = None): - - r""" Returns the proximal operator of the 
TotalVariation function at :code:`x` .""" + def proximal(self, x, tau, out=None): + r""" Returns the proximal operator of the TotalVariation function at :code:`x` .""" - if self.strong_convexity_constant>0: + if self.strong_convexity_constant > 0: strongly_convex_factor = (1 + tau * self.strong_convexity_constant) x /= strongly_convex_factor tau /= strongly_convex_factor - + if out is None: solution = self._fista_on_dual_rof(x, tau) else: - self._fista_on_dual_rof(x, tau, out = out) + self._fista_on_dual_rof(x, tau, out=out) - if self.strong_convexity_constant>0: + if self.strong_convexity_constant > 0: x *= strongly_convex_factor tau *= strongly_convex_factor if out is None: - return solution - + return solution - def _fista_on_dual_rof(self, x, tau, out = None): - + def _fista_on_dual_rof(self, x, tau, out=None): r""" Runs the Fast Gradient Projection (FGP) algorithm to solve the dual problem of the Total Variation Denoising problem (ROF). .. math: \max_{\|y\|_{\infty}<=1.} \frac{1}{2}\|\nabla^{*} y + x \|^{2} - \frac{1}{2}\|x\|^{2} - """ + """ try: self._domain = x.geometry except: @@ -300,17 +294,15 @@ def _fista_on_dual_rof(self, x, tau, out = None): # Compute Lipschitz constant provided that domain is not None. # Lipschitz constant depends on the GradientOperator, which is configured only if domain is not None if self._L is None: - self.calculate_Lipschitz() - + self.calculate_Lipschitz() + # initialise - t = 1 # line 2 + t = 1 - # dual variable - its content is overwritten during iterations + # dual variable - its content is overwritten during iterations p1 = self.gradient.range_geometry().allocate(None) p2 = self._get_p2() - # temp_q should be equal to p2 - tmp_q = p2.copy() - + tmp_q = p2.copy() # multiply tau by -1 * regularisation_parameter here so it's not recomputed every iteration # when tau is an array this is done inplace so reverted at the end @@ -323,48 +315,48 @@ def _fista_on_dual_rof(self, x, tau, out = None): should_return = False if out is None: should_return = True - out = self.gradient.domain_geometry().allocate(0) + out = self.gradient.domain_geometry().allocate(0) for k in range(self.iterations): - + t0 = t - self.gradient.adjoint(tmp_q, out = out) + self.gradient.adjoint(tmp_q, out=out) out.sapyb(tau_reg_neg, x, 1.0, out=out) - self.projection_C(out, tau=None, out = out) - + self.projection_C(out, tau=None, out=out) self.gradient.direct(out, out=p1) multip = (-self.L)/tau_reg_neg tmp_q.sapyb(1., p1, multip, out=tmp_q) - - if self.tolerance is not None and k%5==0: # testing convergence criterion + + if self.tolerance is not None and k % 5 == 0: p1 *= multip error = p1.norm() error /= tmp_q.norm() - if error < self.tolerance: + if error < self.tolerance: break self.func.proximal_conjugate(tmp_q, 1.0, out=p1) - - t = (1 + np.sqrt(1 + 4 * t0 ** 2)) / 2 - p1.subtract(p2, out=tmp_q) - tmp_q *= (t0-1)/t + + t = (1 + np.sqrt(1 + 4 * t0 ** 2)) / 2 + p1.subtract(p2, out=tmp_q) + tmp_q *= (t0-1)/t tmp_q += p1 - #switch p1 and p2 references + # switch p1 and p2 references tmp = p1 p1 = p2 p2 = tmp - if self.warmstart: + if self.warm_start: self._p2 = p2 if self.info: if self.tolerance is not None: - logging.info("Stop at {} iterations with tolerance {} .".format(k, error)) + logging.info( + "Stop at {} iterations with tolerance {} .".format(k, error)) else: - logging.info("Stop at {} iterations.".format(k)) + logging.info("Stop at {} iterations.".format(k)) # return tau to its original state if it was modified if id(tau_reg_neg) == id(tau): @@ -373,18 +365,17 @@ def 
_fista_on_dual_rof(self, x, tau, out = None): if should_return: return out + def convex_conjugate(self, x): + r""" Returns the value of convex conjugate of the TotalVariation function at :code:`x` .""" + return 0.0 - def convex_conjugate(self,x): - r""" Returns the value of convex conjugate of the TotalVariation function at :code:`x` .""" - return 0.0 - def calculate_Lipschitz(self): r""" Default value for the Lipschitz constant.""" - + # Compute the Lipschitz parameter from the operator if possible # Leave it initialised to None otherwise - self._L = (1./self.gradient.norm())**2 - + self._L = (1./self.gradient.norm())**2 + @property def gradient(self): r""" GradientOperator is created if it is not instantiated yet. The domain of the `_gradient`, @@ -392,14 +383,17 @@ def gradient(self): """ if self._domain is not None: - self._gradient = GradientOperator(self._domain, correlation = self.correlation, backend = self.backend) + self._gradient = GradientOperator( + self._domain, correlation=self.correlation, backend=self.backend) else: - raise ValueError(" The domain of the TotalVariation is {}. Please use the __call__ or proximal methods first before calling gradient.".format(self._domain)) + raise ValueError( + " The domain of the TotalVariation is {}. Please use the __call__ or proximal methods first before calling gradient.".format(self._domain)) return self._gradient def __rmul__(self, scalar): - if not isinstance (scalar, Number): - raise TypeError("scalar: Expected a number, got {}".format(type(scalar))) + if not isinstance(scalar, Number): + raise TypeError( + "scalar: Expected a number, got {}".format(type(scalar))) self.regularisation_parameter *= scalar return self From 0b48716ba1deaf1502be7e156e531b5dcb5957bd Mon Sep 17 00:00:00 2001 From: Margaret Duff Date: Thu, 14 Sep 2023 10:03:58 +0000 Subject: [PATCH 48/48] Changed warmstart to warm_start in test functions --- Wrappers/Python/test/test_functions.py | 36 +++++++++++++------------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/Wrappers/Python/test/test_functions.py b/Wrappers/Python/test/test_functions.py index 3621d4e208..082779a9b8 100644 --- a/Wrappers/Python/test/test_functions.py +++ b/Wrappers/Python/test/test_functions.py @@ -869,8 +869,8 @@ def test_proximal_conjugate(self): (L1Norm(), ag), (L2NormSquared(), ag), (MixedL21Norm(), bg), - (TotalVariation(backend='c', warmstart=False, max_iteration=100), ig), - (TotalVariation(backend='numpy', warmstart=False, max_iteration=100), ig), + (TotalVariation(backend='c', warm_start=False, max_iteration=100), ig), + (TotalVariation(backend='numpy', warm_start=False, max_iteration=100), ig), ] for func, geom in func_geom_test_list: @@ -908,7 +908,7 @@ def setUp(self) -> None: self.alpha_arr = self.ig_real.allocate(0.15) def test_configure_tv_defaults(self): - self.assertEquals(self.tv.warmstart, True) + self.assertEquals(self.tv.warm_start, True) self.assertEquals(self.tv.iterations, 10) self.assertEquals(self.tv.correlation, "Space") self.assertEquals(self.tv.backend, "c") @@ -931,8 +931,8 @@ def test_configure_tv_not_defaults(self): split = True, info = True, strong_convexity_constant = 1., - warmstart=False) - self.assertEquals(tv.warmstart, False) + warm_start=False) + self.assertEquals(tv.warm_start, False) self.assertEquals(tv.iterations, 100) self.assertEquals(tv.correlation, "SpaceChannels") self.assertEquals(tv.backend, "numpy") @@ -1060,7 +1060,7 @@ def test_compare_regularisation_toolkit(self): # CIL_FGP_TV no tolerance g_CIL = alpha * TotalVariation( 
- iters, tolerance=None, lower=0, info=True, warmstart=False) + iters, tolerance=None, lower=0, info=True, warm_start=False) t0 = timer() res1 = g_CIL.proximal(noisy_data, 1.) t1 = timer() @@ -1088,7 +1088,7 @@ def test_compare_regularisation_toolkit(self): # print("Compare CIL_FGP_TV vs CCPiReg_FGP_TV with iterations.") iters = 408 # CIL_FGP_TV with tolerance - g_CIL = alpha * TotalVariation(iters, tolerance=1e-9, lower=0., warmstart=False) + g_CIL = alpha * TotalVariation(iters, tolerance=1e-9, lower=0., warm_start=False) t0 = timer() res1 = g_CIL.proximal(noisy_data, 1.) t1 = timer() @@ -1113,14 +1113,14 @@ def test_compare_regularisation_toolkit(self): res2.as_array(), decimal=3, err_msg='Comparing the CCPi proximal against the CIL TV proximal, with tolerance') - # CIL_FGP_TV with warmstart + # CIL_FGP_TV with warm_start iters=10 - g_CIL = alpha * TotalVariation(iters, lower=0., warmstart=True) + g_CIL = alpha * TotalVariation(iters, lower=0., warm_start=True) for i in range(6): res1 = g_CIL.proximal(noisy_data, 1.) np.testing.assert_array_almost_equal(res1.as_array(), res2.as_array(), - decimal=3, err_msg='Comparing the CCPi proximal against the CIL TV proximal, with warmstart') + decimal=3, err_msg='Comparing the CCPi proximal against the CIL TV proximal, with warm_start') @unittest.skipUnless(has_tomophantom and has_ccpi_regularisation, "Missing Tomophantom or Regularisation-Toolkit") @@ -1140,7 +1140,7 @@ def test_compare_regularisation_toolkit_tomophantom(self): # print("Use tau as an array of ones") # CIL_TotalVariation no tolerance - g_CIL = alpha * TotalVariation(iters, tolerance=None, info=True, warmstart=False) + g_CIL = alpha * TotalVariation(iters, tolerance=None, info=True, warm_start=False) # res1 = g_CIL.proximal(noisy_data, ig.allocate(1.)) t0 = timer() res1 = g_CIL.proximal(noisy_data, ig.allocate(1.)) @@ -1167,11 +1167,11 @@ def test_compare_regularisation_toolkit_tomophantom(self): np.testing.assert_allclose(res1.as_array(), res2.as_array(), - atol=5e-2, err_msg='Comparing the CCPi proximal against the CIL TV proximal, without tolerance without warmstart') + atol=5e-2, err_msg='Comparing the CCPi proximal against the CIL TV proximal, without tolerance without warm_start') #with warm start iters=10 - g_CIL = alpha * TotalVariation(iters, tolerance=None, info=True, warmstart=True) + g_CIL = alpha * TotalVariation(iters, tolerance=None, info=True, warm_start=True) # res1 = g_CIL.proximal(noisy_data, ig.allocate(1.)) t0 = timer() for i in range(4): @@ -1179,7 +1179,7 @@ def test_compare_regularisation_toolkit_tomophantom(self): t1 = timer() np.testing.assert_allclose(res1.as_array(), res2.as_array(), - atol=4e-2, err_msg='Comparing the CCPi proximal against the CIL TV proximal, without tolerance with warmstart') + atol=4e-2, err_msg='Comparing the CCPi proximal against the CIL TV proximal, without tolerance with warm_start') def test_non_scalar_tau_cil_tv(self): @@ -1194,9 +1194,9 @@ def test_non_scalar_tau_cil_tv(self): np.testing.assert_allclose(res1.array, res2.array, atol=1e-3, err_msg="Testing non scalar tau in prox calculation") - def test_get_p2_with_warmstart(self): + def test_get_p2_with_warm_start(self): data = dataexample.SHAPES.get(size=(16, 16)) - tv=TotalVariation(warmstart=True, max_iteration=10) + tv=TotalVariation(warm_start=True, max_iteration=10) self.assertEquals(tv._p2, None, msg="tv._p2 not initialised to None") tv(data) checkp2=tv.gradient.range_geometry().allocate(0) @@ -1211,9 +1211,9 @@ def test_get_p2_with_warmstart(self): 
np.testing.assert_almost_equal(np.sum(np.linalg.norm(test)),126.337265, err_msg="Incorrect value of the proximal") - def test_get_p2_without_warmstart(self): + def test_get_p2_without_warm_start(self): data = dataexample.SHAPES.get(size=(16, 16)) - tv=TotalVariation(warmstart=False) + tv=TotalVariation(warm_start=False) self.assertEquals(tv._p2, None, msg="tv._p2 not initialised to None") tv(data) checkp2=tv.gradient.range_geometry().allocate(0)
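Taken together, the renamed tests above pin down the warm-start contract on the internal dual variable `_p2`; a condensed sketch of what they assert (the 16x16 SHAPES image and `tau=1.0` are illustrative, and accessing the private `_p2` attribute directly is done here only for demonstration):

>>> from cil.optimisation.functions import TotalVariation
>>> from cil.utilities import dataexample
>>> data = dataexample.SHAPES.get(size=(16, 16))
>>> tv = TotalVariation(max_iteration=10, warm_start=True)
>>> tv._p2 is None   # nothing stored before the first proximal call
True
>>> sol = tv.proximal(data, 1.0)
>>> tv._p2 is None   # the final dual iterate is now kept for the next call
False
>>> tv_cold = TotalVariation(max_iteration=100, warm_start=False)
>>> sol = tv_cold.proximal(data, 1.0)
>>> tv_cold._p2 is None   # without warm starting, nothing is carried over
True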