From b8e7122d935503724cbeccb54ace581830aaee0a Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Mon, 20 Sep 2021 15:13:01 +0000 Subject: [PATCH 01/13] use tau parameter in all regulariser functions --- .../functions/regularisers.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py b/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py index 963b0641c2..f6945c096a 100644 --- a/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py +++ b/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py @@ -81,7 +81,7 @@ def __init__(self,lambdaReg,iterationsTV,tolerance,time_marchstep,device): def proximal_numpy(self, in_arr, tau, out = None): res , info = regularisers.ROF_TV(in_arr, - self.alpha, + self.alpha * tau, self.max_iteration, self.time_marchstep, self.tolerance, @@ -111,7 +111,7 @@ def __init__(self, alpha=1, max_iteration=100, tolerance=1e-6, isotropic=True, n def proximal_numpy(self, in_arr, tau, out = None): res , info = regularisers.FGP_TV(\ in_arr,\ - self.alpha*tau,\ + self.alpha * tau,\ self.max_iteration,\ self.tolerance,\ self.methodTV,\ @@ -136,7 +136,7 @@ def __call__(self,x): def proximal_numpy(self, in_arr, tau, out = None): res , info = regularisers.TGV(in_arr, - self.regularisation_parameter, + self.regularisation_parameter * tau, self.alpha1, self.alpha2, self.iter_TGV, @@ -172,8 +172,8 @@ def __call__(self,x): def proximal_numpy(self, in_arr, tau, out = None): res , info = regularisers.LLT_ROF(in_arr, - self.regularisation_parameterROF, - self.regularisation_parameterLLT, + self.regularisation_parameterROF * tau, + self.regularisation_parameterLLT * tau, self.iter_LLT_ROF, self.time_marching_parameter, self.torelance, @@ -218,7 +218,7 @@ def proximal_numpy(self, in_arr, tau, out = None): res , info = regularisers.FGP_dTV(\ in_arr,\ self.reference,\ - self.alpha*tau,\ + self.alpha * tau,\ self.max_iteration,\ self.tolerance,\ self.eta,\ @@ -243,7 +243,7 @@ def __init__(self,lambdaReg,iterationsTV,tolerance,methodTV,printing,device): def proximal_numpy(self, in_arr, tau, out = None): res , info = regularisers.SB_TV(in_arr, - self.alpha*tau, + self.alpha * tau, self.max_iteration, self.tolerance, self.methodTV, @@ -266,7 +266,7 @@ def __call__(self,x): def proximal_numpy(self, in_arr, tau, out = None): res = regularisers.TNV(in_arr, - self.regularisation_parameter, + self.regularisation_parameter * tau, self.iterationsTNV, self.tolerance) From 243b3505cdb1b08fb9d0378cbde340f3a6462658 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Fri, 15 Oct 2021 12:26:41 +0100 Subject: [PATCH 02/13] removed unused regularisers --- .../ccpi_regularisation/functions/__init__.py | 3 +- .../functions/regularisers.py | 116 +++++++----------- .../Python/test/test_PluginsRegularisation.py | 61 +++++---- 3 files changed, 82 insertions(+), 98 deletions(-) diff --git a/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/__init__.py b/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/__init__.py index aea6902e7d..1b27aef58e 100644 --- a/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/__init__.py +++ b/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/__init__.py @@ -15,5 +15,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
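[Editorial note, not part of the patch series] PATCH 01 above folds the proximal step size into the regularisation strength. Each plugin function represents F(x) = alpha * TV(x), and CIL's proximal interface evaluates

    prox_{tau F}(x) = argmin_u  (1/2) * ||u - x||_2^2  +  tau * alpha * TV(u)

while the toolkit routines solve this problem for a single regularisation parameter. Passing `self.alpha * tau` (rather than `self.alpha` alone) therefore makes `proximal(x, tau)` correct for any step size tau, which proximal-splitting algorithms such as FISTA and PDHG rely on.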
-from .regularisers import FGP_TV, ROF_TV, TGV, LLT_ROF, FGP_dTV,\ - SB_TV, TNV +from .regularisers import FGP_TV, TGV, FGP_dTV, TNV diff --git a/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py b/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py index f6945c096a..1cdb24ed27 100644 --- a/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py +++ b/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py @@ -29,6 +29,7 @@ from cil.optimisation.functions import Function import numpy as np import warnings +from numbers import Number class RegulariserFunction(Function): def proximal(self, x, tau, out=None): @@ -70,24 +71,6 @@ def __call__(self,x): def convex_conjugate(self,x): return 0.0 -class ROF_TV(TV_Base): - def __init__(self,lambdaReg,iterationsTV,tolerance,time_marchstep,device): - # set parameters - self.alpha = lambdaReg - self.max_iteration = iterationsTV - self.time_marchstep = time_marchstep - self.device = device # string for 'cpu' or 'gpu' - self.tolerance = tolerance - - def proximal_numpy(self, in_arr, tau, out = None): - res , info = regularisers.ROF_TV(in_arr, - self.alpha * tau, - self.max_iteration, - self.time_marchstep, - self.tolerance, - self.device) - - return res, info class FGP_TV(TV_Base): def __init__(self, alpha=1, max_iteration=100, tolerance=1e-6, isotropic=True, nonnegativity=True, printing=False, device='cpu'): @@ -118,16 +101,27 @@ def proximal_numpy(self, in_arr, tau, out = None): self.nonnegativity,\ self.device) return res, info + + def __rmul__(self, scalar): + '''Define the multiplication with a scalar + + this changes the regularisation parameter in the plugin''' + if not isinstance (scalar, Number): + raise NotImplemented + else: + self.alpha *= scalar class TGV(RegulariserFunction): - def __init__(self, regularisation_parameter, alpha1, alpha2, iter_TGV, LipshitzConstant, torelance, device ): - self.regularisation_parameter = regularisation_parameter + def __init__(self, alpha=1, alpha1=1, alpha2=1, iter_TGV=100, LipshitzConstant=12, tolerance=1e-6, device='cpu' ): + # Default values + # https://github.com/vais-ral/CCPi-Regularisation-Toolkit/blob/413c6001003c6f1272aeb43152654baaf0c8a423/src/Core/regularisers_CPU/TGV_core.c#L25-L32 + self.alpha = alpha self.alpha1 = alpha1 self.alpha2 = alpha2 self.iter_TGV = iter_TGV self.LipshitzConstant = LipshitzConstant - self.torelance = torelance + self.torelance = tolerance self.device = device def __call__(self,x): @@ -152,42 +146,16 @@ def proximal_numpy(self, in_arr, tau, out = None): def convex_conjugate(self, x): warnings.warn("{}: the convex_conjugate method is not implemented. Returning NaN.".format(self.__class__.__name__)) return np.nan - -class LLT_ROF(RegulariserFunction): - - def __init__(self, regularisation_parameterROF, - regularisation_parameterLLT, - iter_LLT_ROF, time_marching_parameter, torelance, device ): - self.regularisation_parameterROF = regularisation_parameterROF - self.regularisation_parameterLLT = regularisation_parameterLLT - self.iter_LLT_ROF = iter_LLT_ROF - self.time_marching_parameter = time_marching_parameter - self.torelance = torelance - self.device = device + def __rmul__(self, scalar): + '''Define the multiplication with a scalar - def __call__(self,x): - warnings.warn("{}: the __call__ method is not implemented. 
Returning NaN.".format(self.__class__.__name__)) - return np.nan - - def proximal_numpy(self, in_arr, tau, out = None): - res , info = regularisers.LLT_ROF(in_arr, - self.regularisation_parameterROF * tau, - self.regularisation_parameterLLT * tau, - self.iter_LLT_ROF, - self.time_marching_parameter, - self.torelance, - self.device) - - # info: return number of iteration and reached tolerance - # https://github.com/vais-ral/CCPi-Regularisation-Toolkit/blob/master/src/Core/regularisers_CPU/TGV_core.c#L168 - # Stopping Criteria || u^k - u^(k-1) ||_{2} / || u^{k} ||_{2} - - return res, info + this changes the regularisation parameter in the plugin''' + if not isinstance (scalar, Number): + raise NotImplemented + else: + self.alpha *= scalar - def convex_conjugate(self, x): - warnings.warn("{}: the convex_conjugate method is not implemented. Returning NaN.".format(self.__class__.__name__)) - return np.nan class FGP_dTV(RegulariserFunction): def __init__(self, reference, alpha=1, max_iteration=100, @@ -231,32 +199,21 @@ def convex_conjugate(self, x): warnings.warn("{}: the convex_conjugate method is not implemented. Returning NaN.".format(self.__class__.__name__)) return np.nan -class SB_TV(TV_Base): - def __init__(self,lambdaReg,iterationsTV,tolerance,methodTV,printing,device): - # set parameters - self.alpha = lambdaReg - self.max_iteration = iterationsTV - self.tolerance = tolerance - self.methodTV = methodTV - self.printing = printing - self.device = device # string for 'cpu' or 'gpu' - - def proximal_numpy(self, in_arr, tau, out = None): - res , info = regularisers.SB_TV(in_arr, - self.alpha * tau, - self.max_iteration, - self.tolerance, - self.methodTV, - self.device) + def __rmul__(self, scalar): + '''Define the multiplication with a scalar - return res, info + this changes the regularisation parameter in the plugin''' + if not isinstance (scalar, Number): + raise NotImplemented + else: + self.alpha *= scalar class TNV(RegulariserFunction): - def __init__(self,regularisation_parameter,iterationsTNV,tolerance): + def __init__(self,alpha=1, iterationsTNV=100, tolerance=1e-6): # set parameters - self.regularisation_parameter = regularisation_parameter + self.alpha = alpha self.iterationsTNV = iterationsTNV self.tolerance = tolerance @@ -266,7 +223,7 @@ def __call__(self,x): def proximal_numpy(self, in_arr, tau, out = None): res = regularisers.TNV(in_arr, - self.regularisation_parameter * tau, + self.alpha * tau, self.iterationsTNV, self.tolerance) @@ -275,3 +232,12 @@ def proximal_numpy(self, in_arr, tau, out = None): def convex_conjugate(self, x): warnings.warn("{}: the convex_conjugate method is not implemented. 
Returning NaN.".format(self.__class__.__name__)) return np.nan + + def __rmul__(self, scalar): + '''Define the multiplication with a scalar + + this changes the regularisation parameter in the plugin''' + if not isinstance (scalar, Number): + raise NotImplemented + else: + self.alpha *= scalar \ No newline at end of file diff --git a/Wrappers/Python/test/test_PluginsRegularisation.py b/Wrappers/Python/test/test_PluginsRegularisation.py index 52e00db4f6..720ea071f5 100644 --- a/Wrappers/Python/test/test_PluginsRegularisation.py +++ b/Wrappers/Python/test/test_PluginsRegularisation.py @@ -55,13 +55,7 @@ def test_import_FGP_TV(self): except ModuleNotFoundError as ie: print (ie) assert False - @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") - def test_import_ROF_TV(self): - try: - from cil.plugins.ccpi_regularisation.functions import ROF_TV - assert True - except ModuleNotFoundError as ie: - assert False + @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") def test_import_TGV(self): try: @@ -69,13 +63,7 @@ def test_import_TGV(self): assert True except ModuleNotFoundError as ie: assert False - @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") - def test_import_LLT_ROF(self): - try: - from cil.plugins.ccpi_regularisation.functions import LLT_ROF - assert True - except ModuleNotFoundError as ie: - assert False + @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") def test_import_FGP_dTV(self): try: @@ -83,13 +71,7 @@ def test_import_FGP_dTV(self): assert True except ModuleNotFoundError as ie: assert False - @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") - def test_import_SB_TV(self): - try: - from cil.plugins.ccpi_regularisation.functions import SB_TV - assert True - except ModuleNotFoundError as ie: - assert False + @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") def test_import_TNV(self): try: @@ -97,6 +79,7 @@ def test_import_TNV(self): assert True except ModuleNotFoundError as ie: assert False + @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") def test_FGP_TV_complex(self): data = dataexample.CAMERA.get(size=(256,256)) @@ -110,3 +93,39 @@ def test_FGP_TV_complex(self): outarr = out.as_array() np.testing.assert_almost_equal(outarr.imag, outarr.real) + def rmul_test(self, f): + + alpha = f.alpha + scalar = 2.123 + af = scalar*f + + assert af.alpha == scalar * alpha + + @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") + def test_FGP_TV_rmul(self): + from cil.plugins.ccpi_regularisation.functions import FGP_TV + f = FGP_TV() + + self.rmul_test(f) + + @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") + def test_FGP_TGV_rmul(self): + from cil.plugins.ccpi_regularisation.functions import FGP_TGV + f = FGP_TGV() + + self.rmul_test(f) + + @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") + def test_FGP_TGV_rmul(self): + from cil.plugins.ccpi_regularisation.functions import TNV + f = TNV() + + self.rmul_test(f) + @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not 
installed") + def test_FGP_dTV_rmul(self): + from cil.plugins.ccpi_regularisation.functions import FGP_dTV + data = dataexample.CAMERA.get(size=(256,256)) + f = FGP_dTV(data) + + self.rmul_test(f) + \ No newline at end of file From 3731f65d1d64776d4c3cc5c184f252433aa48ed2 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Fri, 15 Oct 2021 13:32:14 +0100 Subject: [PATCH 03/13] added unit tests --- .../functions/regularisers.py | 11 +- .../Python/test/test_PluginsRegularisation.py | 121 ++++++++++++++++++ 2 files changed, 128 insertions(+), 4 deletions(-) diff --git a/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py b/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py index b18cdf3f70..6806f398dc 100644 --- a/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py +++ b/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py @@ -110,6 +110,7 @@ def __rmul__(self, scalar): raise NotImplemented else: self.alpha *= scalar + return self class TGV(RegulariserFunction): @@ -121,7 +122,7 @@ def __init__(self, alpha=1, alpha1=1, alpha2=1, iter_TGV=100, LipshitzConstant=1 self.alpha2 = alpha2 self.iter_TGV = iter_TGV self.LipshitzConstant = LipshitzConstant - self.torelance = tolerance + self.tolerance = tolerance self.device = device def __call__(self,x): @@ -130,12 +131,12 @@ def __call__(self,x): def proximal_numpy(self, in_arr, tau, out = None): res , info = regularisers.TGV(in_arr, - self.regularisation_parameter * tau, + self.alpha * tau, self.alpha1, self.alpha2, self.iter_TGV, self.LipshitzConstant, - self.torelance, + self.tolerance, self.device) # info: return number of iteration and reached tolerance @@ -155,6 +156,7 @@ def __rmul__(self, scalar): raise NotImplemented else: self.alpha *= scalar + return self class FGP_dTV(RegulariserFunction): @@ -207,6 +209,7 @@ def __rmul__(self, scalar): raise NotImplemented else: self.alpha *= scalar + return self class TNV(RegulariserFunction): @@ -226,7 +229,6 @@ def proximal_numpy(self, in_arr, tau, out = None): self.alpha * tau, self.iterationsTNV, self.tolerance) - return res, [] def convex_conjugate(self, x): @@ -241,3 +243,4 @@ def __rmul__(self, scalar): raise NotImplemented else: self.alpha *= scalar + return self diff --git a/Wrappers/Python/test/test_PluginsRegularisation.py b/Wrappers/Python/test/test_PluginsRegularisation.py index 129c1a6ed9..2733c75961 100644 --- a/Wrappers/Python/test/test_PluginsRegularisation.py +++ b/Wrappers/Python/test/test_PluginsRegularisation.py @@ -40,6 +40,7 @@ # "Minimal version is 20.04") has_regularisation_toolkit = False print ("has_regularisation_toolkit", has_regularisation_toolkit) +TNV_fixed = False class TestPlugin(unittest.TestCase): def setUp(self): @@ -99,6 +100,7 @@ def rmul_test(self, f): scalar = 2.123 af = scalar*f + assert (id(af) == id(f)) assert af.alpha == scalar * alpha @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") @@ -129,3 +131,122 @@ def test_FGP_dTV_rmul(self): self.rmul_test(f) + + @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") + def test_functionality_FGP_TV(self): + + data = dataexample.CAMERA.get(size=(256,256)) + datarr = data.as_array() + from cil.plugins.ccpi_regularisation.functions import FGP_TV + from ccpi.filters import regularisers + + tau = 1. 
+ fcil = FGP_TV() + outcil = fcil.proximal(data, tau=tau) + # CIL defaults + # alpha=1, max_iteration=100, tolerance=1e-6, isotropic=True, nonnegativity=True, printing=False, device='cpu' + # in_arr,\ + # self.alpha * tau,\ + # self.max_iteration,\ + # self.tolerance,\ + # self.methodTV,\ + # self.nonnegativity,\ + # self.device + + # if nonnegativity == True: + # self.nonnegativity = 1 + # else: + # self.nonnegativity = 0 + outrgl, info = regularisers.FGP_TV(datarr, fcil.alpha*tau, fcil.max_iteration, fcil.tolerance, 0, 1, 'cpu' ) + np.testing.assert_almost_equal(outrgl, outcil.as_array()) + + @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") + def test_functionality_TGV(self): + + data = dataexample.CAMERA.get(size=(256,256)) + datarr = data.as_array() + from cil.plugins.ccpi_regularisation.functions import TGV + from ccpi.filters import regularisers + + tau = 1. + fcil = TGV() + outcil = fcil.proximal(data, tau=tau) + # CIL defaults + # alpha=1, alpha1=1, alpha2=1, iter_TGV=100, LipshitzConstant=12, tolerance=1e-6, device='cpu' + # self.alpha * tau, + # self.alpha1, + # self.alpha2, + # self.iter_TGV, + # self.LipshitzConstant, + # self.torelance, + # self.device + outrgl, info = regularisers.TGV(datarr, fcil.alpha*tau, 1,1, fcil.iter_TGV, 12, fcil.tolerance, 'cpu' ) + + np.testing.assert_almost_equal(outrgl, outcil.as_array()) + + @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") + def test_functionality_FGP_dTV(self): + + data = dataexample.CAMERA.get(size=(256,256)) + datarr = data.as_array() + ref = data*0.3 + from cil.plugins.ccpi_regularisation.functions import FGP_dTV + from ccpi.filters import regularisers + + tau = 1. + fcil = FGP_dTV(ref) + outcil = fcil.proximal(data, tau=tau) + # CIL defaults + # if isotropic == True: + # self.methodTV = 0 + # else: + # self.methodTV = 1 + + # if nonnegativity == True: + # self.nonnegativity = 1 + # else: + # self.nonnegativity = 0 + + # self.alpha = alpha + # self.max_iteration = max_iteration + # self.tolerance = tolerance + # self.device = device # string for 'cpu' or 'gpu' + # self.reference = np.asarray(reference.as_array(), dtype=np.float32) + # self.eta = eta + + # in_arr,\ + # self.reference,\ + # self.alpha * tau,\ + # self.max_iteration,\ + # self.tolerance,\ + # self.eta,\ + # self.methodTV,\ + # self.nonnegativity,\ + # self.device + # reference, alpha=1, max_iteration=100, + # tolerance=1e-6, eta=0.01, isotropic=True, nonnegativity=True, device='cpu' + outrgl, info = regularisers.FGP_dTV(datarr, ref.as_array(), fcil.alpha*tau, fcil.max_iteration, fcil.tolerance, 0.01, 0, 1, 'cpu' ) + np.testing.assert_almost_equal(outrgl, outcil.as_array()) + + @unittest.skipUnless(TNV_fixed, "Skipping as CCPi Regularisation Toolkit is not installed") + def test_functionality_TNV(self): + + data = dataexample.CAMERA.get(size=(256,256)) + datarr = data.as_array() + from cil.plugins.ccpi_regularisation.functions import TNV + from ccpi.filters import regularisers + + tau = 1. 
+ + # CIL defaults + # alpha=1, iterationsTNV=100, tolerance=1e-6 + # self.alpha * tau, + # self.iterationsTNV, + # self.tolerance + # outrgl, info = regularisers.TGV(datarr, fcil.alpha*tau, fcil.iterationsTNV, fcil.tolerance ) + outrgl = regularisers.TNV(datarr, 1, 100, 1e-6 ) + print ("test outrgl", type(outrgl)) + + fcil = TNV() + outcil = fcil.proximal(data, tau=tau) + np.testing.assert_almost_equal(outrgl, outcil.as_array()) \ No newline at end of file From 2438710e9419e72e3b706ad44e1491bc4e11bf60 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Fri, 15 Oct 2021 13:32:52 +0100 Subject: [PATCH 04/13] removed TNV as it does not work --- .../ccpi_regularisation/functions/__init__.py | 2 +- .../functions/regularisers.py | 56 +++++++++---------- 2 files changed, 29 insertions(+), 29 deletions(-) diff --git a/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/__init__.py b/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/__init__.py index 1b27aef58e..8c07aebbb3 100644 --- a/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/__init__.py +++ b/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/__init__.py @@ -15,4 +15,4 @@ # See the License for the specific language governing permissions and # limitations under the License. -from .regularisers import FGP_TV, TGV, FGP_dTV, TNV +from .regularisers import FGP_TV, TGV, FGP_dTV diff --git a/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py b/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py index 6806f398dc..c47f11e0af 100644 --- a/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py +++ b/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py @@ -211,36 +211,36 @@ def __rmul__(self, scalar): self.alpha *= scalar return self -class TNV(RegulariserFunction): +# class TNV(RegulariserFunction): - def __init__(self,alpha=1, iterationsTNV=100, tolerance=1e-6): +# def __init__(self,alpha=1, iterationsTNV=100, tolerance=1e-6): - # set parameters - self.alpha = alpha - self.iterationsTNV = iterationsTNV - self.tolerance = tolerance +# # set parameters +# self.alpha = alpha +# self.iterationsTNV = iterationsTNV +# self.tolerance = tolerance - def __call__(self,x): - warnings.warn("{}: the __call__ method is not implemented. Returning NaN.".format(self.__class__.__name__)) - return np.nan +# def __call__(self,x): +# warnings.warn("{}: the __call__ method is not implemented. Returning NaN.".format(self.__class__.__name__)) +# return np.nan - def proximal_numpy(self, in_arr, tau, out = None): - res = regularisers.TNV(in_arr, - self.alpha * tau, - self.iterationsTNV, - self.tolerance) - return res, [] - - def convex_conjugate(self, x): - warnings.warn("{}: the convex_conjugate method is not implemented. Returning NaN.".format(self.__class__.__name__)) - return np.nan - - def __rmul__(self, scalar): - '''Define the multiplication with a scalar +# def proximal_numpy(self, in_arr, tau, out = None): +# res = regularisers.TNV(in_arr, +# self.alpha * tau, +# self.iterationsTNV, +# self.tolerance) +# return res, [] + +# def convex_conjugate(self, x): +# warnings.warn("{}: the convex_conjugate method is not implemented. 
Returning NaN.".format(self.__class__.__name__)) +# return np.nan + +# def __rmul__(self, scalar): +# '''Define the multiplication with a scalar - this changes the regularisation parameter in the plugin''' - if not isinstance (scalar, Number): - raise NotImplemented - else: - self.alpha *= scalar - return self +# this changes the regularisation parameter in the plugin''' +# if not isinstance (scalar, Number): +# raise NotImplemented +# else: +# self.alpha *= scalar +# return self From c5868d2638b7eeb3a7216cc8e7b650cf449666fd Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Fri, 15 Oct 2021 14:00:54 +0100 Subject: [PATCH 05/13] add TNV with raise error if not 3D --- .../ccpi_regularisation/functions/__init__.py | 2 +- .../functions/regularisers.py | 59 ++++++++++--------- .../Python/test/test_PluginsRegularisation.py | 25 ++++++-- 3 files changed, 53 insertions(+), 33 deletions(-) diff --git a/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/__init__.py b/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/__init__.py index 8c07aebbb3..1b27aef58e 100644 --- a/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/__init__.py +++ b/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/__init__.py @@ -15,4 +15,4 @@ # See the License for the specific language governing permissions and # limitations under the License. -from .regularisers import FGP_TV, TGV, FGP_dTV +from .regularisers import FGP_TV, TGV, FGP_dTV, TNV diff --git a/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py b/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py index c47f11e0af..983d424baa 100644 --- a/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py +++ b/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py @@ -211,36 +211,39 @@ def __rmul__(self, scalar): self.alpha *= scalar return self -# class TNV(RegulariserFunction): +class TNV(RegulariserFunction): -# def __init__(self,alpha=1, iterationsTNV=100, tolerance=1e-6): + def __init__(self,alpha=1, iterationsTNV=100, tolerance=1e-6): -# # set parameters -# self.alpha = alpha -# self.iterationsTNV = iterationsTNV -# self.tolerance = tolerance + # set parameters + self.alpha = alpha + self.iterationsTNV = iterationsTNV + self.tolerance = tolerance -# def __call__(self,x): -# warnings.warn("{}: the __call__ method is not implemented. Returning NaN.".format(self.__class__.__name__)) -# return np.nan + def __call__(self,x): + warnings.warn("{}: the __call__ method is not implemented. Returning NaN.".format(self.__class__.__name__)) + return np.nan -# def proximal_numpy(self, in_arr, tau, out = None): -# res = regularisers.TNV(in_arr, -# self.alpha * tau, -# self.iterationsTNV, -# self.tolerance) -# return res, [] - -# def convex_conjugate(self, x): -# warnings.warn("{}: the convex_conjugate method is not implemented. Returning NaN.".format(self.__class__.__name__)) -# return np.nan - -# def __rmul__(self, scalar): -# '''Define the multiplication with a scalar + def proximal_numpy(self, in_arr, tau, out = None): + if in_arr.ndim != 3: + # https://github.com/vais-ral/CCPi-Regularisation-Toolkit/blob/413c6001003c6f1272aeb43152654baaf0c8a423/src/Python/src/cpu_regularisers.pyx#L584-L588 + raise ValueError('Only 3D data is supported. 
Passed data has {} dimensions'.format(in_arr.ndim)) + res = regularisers.TNV(in_arr, + self.alpha * tau, + self.iterationsTNV, + self.tolerance) + return res, [] + + def convex_conjugate(self, x): + warnings.warn("{}: the convex_conjugate method is not implemented. Returning NaN.".format(self.__class__.__name__)) + return np.nan + + def __rmul__(self, scalar): + '''Define the multiplication with a scalar -# this changes the regularisation parameter in the plugin''' -# if not isinstance (scalar, Number): -# raise NotImplemented -# else: -# self.alpha *= scalar -# return self + this changes the regularisation parameter in the plugin''' + if not isinstance (scalar, Number): + raise NotImplemented + else: + self.alpha *= scalar + return self diff --git a/Wrappers/Python/test/test_PluginsRegularisation.py b/Wrappers/Python/test/test_PluginsRegularisation.py index 2733c75961..cb1c81abed 100644 --- a/Wrappers/Python/test/test_PluginsRegularisation.py +++ b/Wrappers/Python/test/test_PluginsRegularisation.py @@ -228,10 +228,11 @@ def test_functionality_FGP_dTV(self): outrgl, info = regularisers.FGP_dTV(datarr, ref.as_array(), fcil.alpha*tau, fcil.max_iteration, fcil.tolerance, 0.01, 0, 1, 'cpu' ) np.testing.assert_almost_equal(outrgl, outcil.as_array()) - @unittest.skipUnless(TNV_fixed, "Skipping as CCPi Regularisation Toolkit is not installed") + @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") def test_functionality_TNV(self): - data = dataexample.CAMERA.get(size=(256,256)) + data = dataexample.SYNCHROTRON_PARALLEL_BEAM_DATA.get() + datarr = data.as_array() from cil.plugins.ccpi_regularisation.functions import TNV from ccpi.filters import regularisers @@ -245,8 +246,24 @@ def test_functionality_TNV(self): # self.tolerance # outrgl, info = regularisers.TGV(datarr, fcil.alpha*tau, fcil.iterationsTNV, fcil.tolerance ) outrgl = regularisers.TNV(datarr, 1, 100, 1e-6 ) - print ("test outrgl", type(outrgl)) fcil = TNV() outcil = fcil.proximal(data, tau=tau) - np.testing.assert_almost_equal(outrgl, outcil.as_array()) \ No newline at end of file + np.testing.assert_almost_equal(outrgl, outcil.as_array()) + + @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") + def test_TNV_raise_on_2D(self): + + # data = dataexample.SYNCHROTRON_PARALLEL_BEAM_DATA.get() + data = dataexample.CAMERA.get(size=(256,256)) + datarr = data.as_array() + from cil.plugins.ccpi_regularisation.functions import TNV + + tau = 1. 
+ + fcil = TNV() + try: + outcil = fcil.proximal(data, tau=tau) + assert False + except ValueError: + assert True \ No newline at end of file From c5dceae36cc565d52f8652b00d6e3a976570928d Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Mon, 18 Oct 2021 17:46:28 +0100 Subject: [PATCH 06/13] remove commented out code --- Wrappers/Python/cil/framework/framework.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Wrappers/Python/cil/framework/framework.py b/Wrappers/Python/cil/framework/framework.py index 9841ba8a8c..eb6c93bf0f 100644 --- a/Wrappers/Python/cil/framework/framework.py +++ b/Wrappers/Python/cil/framework/framework.py @@ -2401,11 +2401,6 @@ def log(self, *args, **kwargs): '''Applies log pixel-wise to the DataContainer''' return self.pixel_wise_unary(numpy.log, *args, **kwargs) - #def __abs__(self): - # operation = FM.OPERATION.ABS - # return self.callFieldMath(operation, None, self.mask, self.maskOnValue) - # __abs__ - ## reductions def sum(self, *args, **kwargs): return self.as_array().sum(*args, **kwargs) @@ -3085,7 +3080,12 @@ def get_order_for_engine(engine, geometry): if isinstance(geometry, AcquisitionGeometry): dim_order = DataOrder.TIGRE_AG_LABELS else: - dim_order = DataOrder.TIGRE_IG_LABELS + dim_order = DataOrder.TIGRE_IG_LABELS + elif engine == 'cil': + if isinstance(geometry, AcquisitionGeometry): + dim_order = DataOrder.CIL_AG_LABELS + else: + dim_order = DataOrder.CIL_IG_LABELS else: raise ValueError("Unknown engine expected 'tigre' or 'astra' got {}".format(engine)) From a461657ec0ad8cfec6e00ec5fa2434ca79caf1cd Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Mon, 18 Oct 2021 17:47:16 +0100 Subject: [PATCH 07/13] added check_input, docstrings and unittests --- .../functions/regularisers.py | 136 ++++++++++++++-- .../Python/test/test_PluginsRegularisation.py | 152 +++++++++++------- 2 files changed, 215 insertions(+), 73 deletions(-) diff --git a/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py b/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py index 983d424baa..450cacf895 100644 --- a/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py +++ b/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py @@ -25,7 +25,7 @@ "Minimal version is 20.04") -from cil.framework import DataContainer +from cil.framework import DataOrder from cil.optimisation.functions import Function import numpy as np import warnings @@ -33,6 +33,16 @@ class RegulariserFunction(Function): def proximal(self, x, tau, out=None): + '''Generic proximal method for a RegulariserFunction + + :param x: image to be regularised + :type x: an ImageData + :param tau: + :type tau: Number + :param out: a placeholder for the result + :type out: same as x: ImageData''' + + self.check_input(x) arr = x.as_array() if arr.dtype in [np.complex, np.complex64]: # do real and imag part indep @@ -62,6 +72,9 @@ def proximal(self, x, tau, out=None): def proximal_numpy(self, xarr, tau, out=None): raise NotImplementedError('Please implement proximal_numpy') + def check_input(self, input): + pass + class TV_Base(RegulariserFunction): def __call__(self,x): in_arr = np.asarray(x.as_array(), dtype=np.float32, order='C') @@ -73,8 +86,23 @@ def convex_conjugate(self,x): class FGP_TV(TV_Base): - def __init__(self, alpha=1, max_iteration=100, tolerance=1e-6, isotropic=True, nonnegativity=True, printing=False, device='cpu'): + def __init__(self, alpha=1, max_iteration=100, tolerance=1e-6, isotropic=True, 
nonnegativity=True, device='cpu'): + '''Creator of FGP_TV Function + + :param alpha: regularisation parameter + :type alpha: number, default 1 + :param isotropic: Whether it uses L2 (isotropic) or L1 (unisotropic) norm + :type isotropic: boolean, default True, can range between 1 and 2 + :param nonnegativity: Whether to add the non-negativity constraint + :type nonnegativity: boolean, default True + :param max_iteration: max number of sub iterations. The algorithm will iterate up to this number of iteration or up to when the tolerance has been reached + :type max_iteration: integer, default 100 + :param tolerance: minimum difference between previous iteration of the algorithm that determines the stop of the iteration earlier than num_iter + :type tolerance: float, default 1e-6 + :param device: determines if the code runs on CPU or GPU + :type device: string, default 'cpu', can be 'gpu' if GPU is installed + ''' if isotropic == True: self.methodTV = 0 else: @@ -111,30 +139,61 @@ def __rmul__(self, scalar): else: self.alpha *= scalar return self + def check_input(self, input): + if input.geometry.length > 3: + raise ValueError('{} cannot work on more than 3D. Got {}'.format(self.__class__.__name__, input.geometry.length)) class TGV(RegulariserFunction): - def __init__(self, alpha=1, alpha1=1, alpha2=1, iter_TGV=100, LipshitzConstant=12, tolerance=1e-6, device='cpu' ): - # Default values - # https://github.com/vais-ral/CCPi-Regularisation-Toolkit/blob/413c6001003c6f1272aeb43152654baaf0c8a423/src/Core/regularisers_CPU/TGV_core.c#L25-L32 + def __init__(self, alpha=1, gamma=1, max_iteration=100, tolerance=1e-6, device='cpu' , **kwargs): + '''Creator of Total Generalised Variation Function + + :param alpha: regularisation parameter + :type alpha: number, default 1 + :param gamma: ratio of TGV terms + :type gamma: number, default 1, can range between 1 and 2 + :param max_iteration: max number of sub iterations. The algorithm will iterate up to this number of iteration or up to when the tolerance has been reached + :type max_iteration: integer, default 100 + :param tolerance: minimum difference between previous iteration of the algorithm that determines the stop of the iteration earlier than num_iter + :type tolerance: float, default 1e-6 + :param device: determines if the code runs on CPU or GPU + :type device: string, default 'cpu', can be 'gpu' if GPU is installed + + ''' + self.alpha = alpha - self.alpha1 = alpha1 - self.alpha2 = alpha2 - self.iter_TGV = iter_TGV - self.LipshitzConstant = LipshitzConstant + self.gamma = gamma + self.max_iteration = max_iteration self.tolerance = tolerance self.device = device + + if kwargs.get('iter_TGV', None) is not None: + # raise ValueError('iter_TGV parameter has been superseded by num_iter. Use that instead.') + self.num_iter = kwargs.get('iter_TGV') def __call__(self,x): warnings.warn("{}: the __call__ method is not implemented. Returning NaN.".format(self.__class__.__name__)) return np.nan + @property + def gamma(self): + return self.__gamma + @gamma.setter + def gamma(self, value): + if value <= 2 and value >= 1: + self.__gamma = value + @property + def alpha2(self): + return self.alpha1 * self.gamma + @property + def alpha1(self): + return 1. 
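[Editorial note, not part of the patch] With the properties just above, the two TGV weights are no longer separate constructor arguments: `alpha1` is fixed at 1 and `alpha2 = gamma * alpha1`, so the single ratio `gamma` (whose setter only accepts values between 1 and 2) balances the first- and second-order terms, while `alpha` scales the whole functional. A hedged sketch of what this means for the toolkit call in `proximal_numpy` below (the numbers are purely illustrative):

    from cil.plugins.ccpi_regularisation.functions import TGV

    f = TGV(alpha=0.1, gamma=1.5)      # f.alpha1 == 1.0, f.alpha2 == 1.5
    # inside proximal_numpy(in_arr, tau) this becomes, schematically:
    #   regularisers.TGV(in_arr, 0.1 * tau, 1.0, 1.5,
    #                    f.max_iteration, f.LipshitzConstant, f.tolerance, f.device)
    # where LipshitzConstant is set to 12 (2D) or 16 (3D) by check_input.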
def proximal_numpy(self, in_arr, tau, out = None): res , info = regularisers.TGV(in_arr, self.alpha * tau, self.alpha1, self.alpha2, - self.iter_TGV, + self.max_iteration, self.LipshitzConstant, self.tolerance, self.device) @@ -158,8 +217,38 @@ def __rmul__(self, scalar): self.alpha *= scalar return self + # f = TGV() + # f = alpha * f + + def check_input(self, input): + if len(input.dimension_labels) == 2: + self.LipshitzConstant = 12 + elif len(input.dimension_labels) == 3: + self.LipshitzConstant = 16 # Vaggelis to confirm + else: + raise ValueError('{} cannot work on more than 3D. Got {}'.format(self.__class__.__name__, input.geometry.length)) + class FGP_dTV(RegulariserFunction): + '''Creator of FGP_dTV Function + + :param reference: reference image + :type reference: ImageData + :param alpha: regularisation parameter + :type alpha: number, default 1 + :param max_iteration: max number of sub iterations. The algorithm will iterate up to this number of iteration or up to when the tolerance has been reached + :type max_iteration: integer, default 100 + :param tolerance: minimum difference between previous iteration of the algorithm that determines the stop of the iteration earlier than num_iter + :type tolerance: float, default 1e-6 + :param eta: smoothing constant to calculate gradient of the reference + :type eta: number, default 0.01 + :param isotropic: Whether it uses L2 (isotropic) or L1 (unisotropic) norm + :type isotropic: boolean, default True, can range between 1 and 2 + :param nonnegativity: Whether to add the non-negativity constraint + :type nonnegativity: boolean, default True + :param device: determines if the code runs on CPU or GPU + :type device: string, default 'cpu', can be 'gpu' if GPU is installed + ''' def __init__(self, reference, alpha=1, max_iteration=100, tolerance=1e-6, eta=0.01, isotropic=True, nonnegativity=True, device='cpu'): @@ -211,13 +300,25 @@ def __rmul__(self, scalar): self.alpha *= scalar return self + def check_input(self, input): + if input.geometry.length > 3: + raise ValueError('{} cannot work on more than 3D. Got {}'.format(self.__class__.__name__, input.geometry.length)) + class TNV(RegulariserFunction): - def __init__(self,alpha=1, iterationsTNV=100, tolerance=1e-6): - + def __init__(self,alpha=1, max_iteration=100, tolerance=1e-6): + '''Creator of TNV Function + + :param alpha: regularisation parameter + :type alpha: number, default 1 + :param max_iteration: max number of sub iterations. The algorithm will iterate up to this number of iteration or up to when the tolerance has been reached + :type max_iteration: integer, default 100 + :param tolerance: minimum difference between previous iteration of the algorithm that determines the stop of the iteration earlier than num_iter + :type tolerance: float, default 1e-6 + ''' # set parameters self.alpha = alpha - self.iterationsTNV = iterationsTNV + self.max_iteration = max_iteration self.tolerance = tolerance def __call__(self,x): @@ -230,7 +331,7 @@ def proximal_numpy(self, in_arr, tau, out = None): raise ValueError('Only 3D data is supported. 
Passed data has {} dimensions'.format(in_arr.ndim)) res = regularisers.TNV(in_arr, self.alpha * tau, - self.iterationsTNV, + self.max_iteration, self.tolerance) return res, [] @@ -247,3 +348,10 @@ def __rmul__(self, scalar): else: self.alpha *= scalar return self + + def check_input(self, input): + '''TNV requires 2D+channel data with the first dimension as the channel dimension''' + DataOrder.check_order_for_engine('cil', input.geometry) + if ( input.geometry.channels == 1 ) or ( not input.geometry.length == 3) : + raise ValueError('TNV requires 2D+channel data. Got {}'.format(input.geometry.dimension_labels)) + \ No newline at end of file diff --git a/Wrappers/Python/test/test_PluginsRegularisation.py b/Wrappers/Python/test/test_PluginsRegularisation.py index cb1c81abed..4ae77e3918 100644 --- a/Wrappers/Python/test/test_PluginsRegularisation.py +++ b/Wrappers/Python/test/test_PluginsRegularisation.py @@ -143,20 +143,7 @@ def test_functionality_FGP_TV(self): tau = 1. fcil = FGP_TV() outcil = fcil.proximal(data, tau=tau) - # CIL defaults - # alpha=1, max_iteration=100, tolerance=1e-6, isotropic=True, nonnegativity=True, printing=False, device='cpu' - # in_arr,\ - # self.alpha * tau,\ - # self.max_iteration,\ - # self.tolerance,\ - # self.methodTV,\ - # self.nonnegativity,\ - # self.device - - # if nonnegativity == True: - # self.nonnegativity = 1 - # else: - # self.nonnegativity = 0 + # use CIL defaults outrgl, info = regularisers.FGP_TV(datarr, fcil.alpha*tau, fcil.max_iteration, fcil.tolerance, 0, 1, 'cpu' ) np.testing.assert_almost_equal(outrgl, outcil.as_array()) @@ -171,15 +158,7 @@ def test_functionality_TGV(self): tau = 1. fcil = TGV() outcil = fcil.proximal(data, tau=tau) - # CIL defaults - # alpha=1, alpha1=1, alpha2=1, iter_TGV=100, LipshitzConstant=12, tolerance=1e-6, device='cpu' - # self.alpha * tau, - # self.alpha1, - # self.alpha2, - # self.iter_TGV, - # self.LipshitzConstant, - # self.torelance, - # self.device + # use CIL defaults outrgl, info = regularisers.TGV(datarr, fcil.alpha*tau, 1,1, fcil.iter_TGV, 12, fcil.tolerance, 'cpu' ) np.testing.assert_almost_equal(outrgl, outcil.as_array()) @@ -196,42 +175,22 @@ def test_functionality_FGP_dTV(self): tau = 1. 
fcil = FGP_dTV(ref) outcil = fcil.proximal(data, tau=tau) - # CIL defaults - # if isotropic == True: - # self.methodTV = 0 - # else: - # self.methodTV = 1 - - # if nonnegativity == True: - # self.nonnegativity = 1 - # else: - # self.nonnegativity = 0 - - # self.alpha = alpha - # self.max_iteration = max_iteration - # self.tolerance = tolerance - # self.device = device # string for 'cpu' or 'gpu' - # self.reference = np.asarray(reference.as_array(), dtype=np.float32) - # self.eta = eta - - # in_arr,\ - # self.reference,\ - # self.alpha * tau,\ - # self.max_iteration,\ - # self.tolerance,\ - # self.eta,\ - # self.methodTV,\ - # self.nonnegativity,\ - # self.device - # reference, alpha=1, max_iteration=100, - # tolerance=1e-6, eta=0.01, isotropic=True, nonnegativity=True, device='cpu' + # use CIL defaults outrgl, info = regularisers.FGP_dTV(datarr, ref.as_array(), fcil.alpha*tau, fcil.max_iteration, fcil.tolerance, 0.01, 0, 1, 'cpu' ) np.testing.assert_almost_equal(outrgl, outcil.as_array()) @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") def test_functionality_TNV(self): - data = dataexample.SYNCHROTRON_PARALLEL_BEAM_DATA.get() + # fake a 2D+channel image + d = dataexample.SYNCHROTRON_PARALLEL_BEAM_DATA.get() + ig = ImageGeometry(160, 135, channels=91) + data = ig.allocate(None) + data.fill(d) + del d + + + print (data.dimension_labels, data.shape) datarr = data.as_array() from cil.plugins.ccpi_regularisation.functions import TNV @@ -240,11 +199,6 @@ def test_functionality_TNV(self): tau = 1. # CIL defaults - # alpha=1, iterationsTNV=100, tolerance=1e-6 - # self.alpha * tau, - # self.iterationsTNV, - # self.tolerance - # outrgl, info = regularisers.TGV(datarr, fcil.alpha*tau, fcil.iterationsTNV, fcil.tolerance ) outrgl = regularisers.TNV(datarr, 1, 100, 1e-6 ) fcil = TNV() @@ -266,4 +220,84 @@ def test_TNV_raise_on_2D(self): outcil = fcil.proximal(data, tau=tau) assert False except ValueError: - assert True \ No newline at end of file + assert True + @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") + def test_TNV_raise_on_3D_nochannel(self): + + # data = dataexample.SYNCHROTRON_PARALLEL_BEAM_DATA.get() + data = dataexample.CAMERA.get(size=(256,256)) + datarr = data.as_array() + from cil.plugins.ccpi_regularisation.functions import TNV + + tau = 1. + + fcil = TNV() + try: + outcil = fcil.proximal(data, tau=tau) + assert False + except ValueError: + assert True + @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") + def test_TNV_raise_on_4D(self): + + from cil.plugins.ccpi_regularisation.functions import TNV + + data = ImageGeometry(3,4,5,channels=5).allocate(1) + + tau = 1. + + fcil = TNV() + try: + outcil = fcil.proximal(data, tau=tau) + assert False + except ValueError: + assert True + + @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") + def test_FGP_TV_raise_on_4D_data(self): + + from cil.plugins.ccpi_regularisation.functions import FGP_TV + + tau = 1. + fcil = FGP_TV() + data = ImageGeometry(3,4,5,channels=10).allocate(0) + + + try: + outcil = fcil.proximal(data, tau=tau) + assert False + except ValueError: + assert True + + @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") + def test_TGV_raise_on_4D_data(self): + + from cil.plugins.ccpi_regularisation.functions import TGV + + tau = 1. 
+ fcil = TGV() + data = ImageGeometry(3,4,5,channels=10).allocate(0) + + + try: + outcil = fcil.proximal(data, tau=tau) + assert False + except ValueError: + assert True + @unittest.skipUnless(has_regularisation_toolkit, "Skipping as CCPi Regularisation Toolkit is not installed") + def test_FGP_dTV_raise_on_4D_data(self): + + from cil.plugins.ccpi_regularisation.functions import FGP_dTV + + tau = 1. + + data = ImageGeometry(3,4,5,channels=10).allocate(0) + ref = data * 2 + + fcil = FGP_dTV(ref) + + try: + outcil = fcil.proximal(data, tau=tau) + assert False + except ValueError: + assert True From eb2f7645cfad40bfb12723d61807d364f9e2619c Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Mon, 18 Oct 2021 17:55:53 +0100 Subject: [PATCH 08/13] removed out parameter --- .../functions/regularisers.py | 16 ++++++++-------- .../Python/test/test_PluginsRegularisation.py | 9 +++------ 2 files changed, 11 insertions(+), 14 deletions(-) diff --git a/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py b/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py index 450cacf895..5123e36e92 100644 --- a/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py +++ b/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py @@ -47,10 +47,10 @@ def proximal(self, x, tau, out=None): if arr.dtype in [np.complex, np.complex64]: # do real and imag part indep in_arr = np.asarray(arr.real, dtype=np.float32, order='C') - res, info = self.proximal_numpy(in_arr, tau, out) + res, info = self.proximal_numpy(in_arr, tau) arr.real = res[:] in_arr = np.asarray(arr.imag, dtype=np.float32, order='C') - res, info = self.proximal_numpy(in_arr, tau, out) + res, info = self.proximal_numpy(in_arr, tau) arr.imag = res[:] self.info = info if out is not None: @@ -61,7 +61,7 @@ def proximal(self, x, tau, out=None): return out else: arr = np.asarray(x.as_array(), dtype=np.float32, order='C') - res, info = self.proximal_numpy(arr, tau, out) + res, info = self.proximal_numpy(arr, tau) self.info = info if out is not None: out.fill(res) @@ -69,7 +69,7 @@ def proximal(self, x, tau, out=None): out = x.copy() out.fill(res) return out - def proximal_numpy(self, xarr, tau, out=None): + def proximal_numpy(self, xarr, tau): raise NotImplementedError('Please implement proximal_numpy') def check_input(self, input): @@ -119,7 +119,7 @@ def __init__(self, alpha=1, max_iteration=100, tolerance=1e-6, isotropic=True, n self.nonnegativity = nonnegativity self.device = device # string for 'cpu' or 'gpu' - def proximal_numpy(self, in_arr, tau, out = None): + def proximal_numpy(self, in_arr, tau): res , info = regularisers.FGP_TV(\ in_arr,\ self.alpha * tau,\ @@ -188,7 +188,7 @@ def alpha2(self): def alpha1(self): return 1. - def proximal_numpy(self, in_arr, tau, out = None): + def proximal_numpy(self, in_arr, tau): res , info = regularisers.TGV(in_arr, self.alpha * tau, self.alpha1, @@ -273,7 +273,7 @@ def __call__(self,x): warnings.warn("{}: the __call__ method is not implemented. Returning NaN.".format(self.__class__.__name__)) return np.nan - def proximal_numpy(self, in_arr, tau, out = None): + def proximal_numpy(self, in_arr, tau): res , info = regularisers.FGP_dTV(\ in_arr,\ self.reference,\ @@ -325,7 +325,7 @@ def __call__(self,x): warnings.warn("{}: the __call__ method is not implemented. 
Returning NaN.".format(self.__class__.__name__)) return np.nan - def proximal_numpy(self, in_arr, tau, out = None): + def proximal_numpy(self, in_arr, tau): if in_arr.ndim != 3: # https://github.com/vais-ral/CCPi-Regularisation-Toolkit/blob/413c6001003c6f1272aeb43152654baaf0c8a423/src/Python/src/cpu_regularisers.pyx#L584-L588 raise ValueError('Only 3D data is supported. Passed data has {} dimensions'.format(in_arr.ndim)) diff --git a/Wrappers/Python/test/test_PluginsRegularisation.py b/Wrappers/Python/test/test_PluginsRegularisation.py index 4ae77e3918..372ea5f676 100644 --- a/Wrappers/Python/test/test_PluginsRegularisation.py +++ b/Wrappers/Python/test/test_PluginsRegularisation.py @@ -44,7 +44,7 @@ class TestPlugin(unittest.TestCase): def setUp(self): - print ("test plugins") + # print ("test plugins") pass def tearDown(self): pass @@ -159,7 +159,7 @@ def test_functionality_TGV(self): fcil = TGV() outcil = fcil.proximal(data, tau=tau) # use CIL defaults - outrgl, info = regularisers.TGV(datarr, fcil.alpha*tau, 1,1, fcil.iter_TGV, 12, fcil.tolerance, 'cpu' ) + outrgl, info = regularisers.TGV(datarr, fcil.alpha*tau, 1,1, fcil.max_iteration, 12, fcil.tolerance, 'cpu' ) np.testing.assert_almost_equal(outrgl, outcil.as_array()) @@ -188,10 +188,7 @@ def test_functionality_TNV(self): data = ig.allocate(None) data.fill(d) del d - - - print (data.dimension_labels, data.shape) - + datarr = data.as_array() from cil.plugins.ccpi_regularisation.functions import TNV from ccpi.filters import regularisers From 5d9985f9ea482d7b7d8b5f8f865c6f0b7a2b209b Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Tue, 19 Oct 2021 13:57:04 +0100 Subject: [PATCH 09/13] removed printing parameter from regulariser call --- Wrappers/Python/test/test_functions.py | 57 ++------------------------ 1 file changed, 3 insertions(+), 54 deletions(-) diff --git a/Wrappers/Python/test/test_functions.py b/Wrappers/Python/test/test_functions.py index 328604b0f6..a061426a16 100644 --- a/Wrappers/Python/test/test_functions.py +++ b/Wrappers/Python/test/test_functions.py @@ -988,10 +988,8 @@ def test_compare_regularisation_toolkit(self): r_tolerance = 1e-9 r_iso = True r_nonneg = True - r_printing = 0 - # g_CCPI_reg_toolkit = alpha * FGP_TV(1., r_iterations, r_tolerance, r_iso, r_nonneg, r_printing, 'cpu') g_CCPI_reg_toolkit = alpha * FGP_TV(max_iteration=r_iterations, tolerance=r_tolerance, - isotropic=r_iso, nonnegativity=r_nonneg, printing=r_printing, device='cpu') + isotropic=r_iso, nonnegativity=r_nonneg, device='cpu') t2 = timer() res2 = g_CCPI_reg_toolkit.proximal(noisy_data, 1.) @@ -1021,10 +1019,8 @@ def test_compare_regularisation_toolkit(self): r_tolerance = 1e-9 r_iso = True r_nonneg = True - r_printing = 0 - # g_CCPI_reg_toolkit = alpha * FGP_TV(1., r_iterations, r_tolerance, r_iso, r_nonneg, r_printing, 'cpu') g_CCPI_reg_toolkit = alpha * FGP_TV(max_iteration=r_iterations, tolerance=r_tolerance, - isotropic=r_iso, nonnegativity=r_nonneg, printing=r_printing, device='cpu') + isotropic=r_iso, nonnegativity=r_nonneg, device='cpu') t2 = timer() res2 = g_CCPI_reg_toolkit.proximal(noisy_data, 1.) 
@@ -1073,9 +1069,8 @@ def test_compare_regularisation_toolkit_tomophantom(self): r_tolerance = 1e-9 r_iso = True r_nonneg = True - r_printing = 0 g_CCPI_reg_toolkit = alpha * FGP_TV(max_iteration=r_iterations, tolerance=r_tolerance, - isotropic=r_iso, nonnegativity=r_nonneg, printing=r_printing, device='cpu') + isotropic=r_iso, nonnegativity=r_nonneg, device='cpu') t2 = timer() @@ -1085,52 +1080,6 @@ def test_compare_regularisation_toolkit_tomophantom(self): np.testing.assert_allclose(res1.as_array(), res2.as_array(), atol=7.5e-2) - # the following were in the unit tests but didn't assert anything - # # CIL_FGP_TV no tolerance - # g_CIL.tolerance = None - # t0 = timer() - # res1 = g_CIL.proximal(noisy_data, 1.) - # t1 = timer() - # # print(t1-t0) - - # ################################################################### - # ################################################################### - # ################################################################### - # ################################################################### - - # data = dataexample.PEPPERS.get(size=(256, 256)) - # ig = data.geometry - # ag = ig - - # noisy_data = noise.gaussian(data, seed=10) - - # alpha = 0.1 - # iters = 1000 - - # # CIL_FGP_TV no tolerance - # g_CIL = alpha * TotalVariation(iters, tolerance=None) - # t0 = timer() - # res1 = g_CIL.proximal(noisy_data, 1.) - # t1 = timer() - # # print(t1-t0) - - # # CCPi Regularisation toolkit high tolerance - - # r_alpha = alpha - # r_iterations = iters - # r_tolerance = 1e-9 - # r_iso = True - # r_nonneg = True - # r_printing = 0 - # g_CCPI_reg_toolkit = alpha * FGP_TV(max_iteration=r_iterations, tolerance=r_tolerance, - # isotropic=r_iso, nonnegativity=r_nonneg, printing=r_printing, device='cpu') - - - # t2 = timer() - # res2 = g_CCPI_reg_toolkit.proximal(noisy_data, 1.) 
- # t3 = timer() - # # print (t3-t2) - class TestKullbackLeiblerNumba(unittest.TestCase): def setUp(self): From dd71eedba36e6334fefd5ef927b62d69d1cd49b6 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 21 Oct 2021 09:33:45 +0100 Subject: [PATCH 10/13] set tolerance to 0 --- .../plugins/ccpi_regularisation/functions/regularisers.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py b/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py index 5123e36e92..3e823d62e4 100644 --- a/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py +++ b/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py @@ -86,7 +86,7 @@ def convex_conjugate(self,x): class FGP_TV(TV_Base): - def __init__(self, alpha=1, max_iteration=100, tolerance=1e-6, isotropic=True, nonnegativity=True, device='cpu'): + def __init__(self, alpha=1, max_iteration=100, tolerance=0, isotropic=True, nonnegativity=True, device='cpu'): '''Creator of FGP_TV Function @@ -145,7 +145,7 @@ def check_input(self, input): class TGV(RegulariserFunction): - def __init__(self, alpha=1, gamma=1, max_iteration=100, tolerance=1e-6, device='cpu' , **kwargs): + def __init__(self, alpha=1, gamma=1, max_iteration=100, tolerance=0, device='cpu' , **kwargs): '''Creator of Total Generalised Variation Function :param alpha: regularisation parameter @@ -250,7 +250,7 @@ class FGP_dTV(RegulariserFunction): :type device: string, default 'cpu', can be 'gpu' if GPU is installed ''' def __init__(self, reference, alpha=1, max_iteration=100, - tolerance=1e-6, eta=0.01, isotropic=True, nonnegativity=True, device='cpu'): + tolerance=0, eta=0.01, isotropic=True, nonnegativity=True, device='cpu'): if isotropic == True: self.methodTV = 0 @@ -306,7 +306,7 @@ def check_input(self, input): class TNV(RegulariserFunction): - def __init__(self,alpha=1, max_iteration=100, tolerance=1e-6): + def __init__(self,alpha=1, max_iteration=100, tolerance=0): '''Creator of TNV Function :param alpha: regularisation parameter From 43cbeabcabba5bcd23baa7b034df71dd1a6a918e Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 21 Oct 2021 11:53:20 +0100 Subject: [PATCH 11/13] updates to the docstrings --- .../functions/regularisers.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py b/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py index 3e823d62e4..7831cb49c8 100644 --- a/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py +++ b/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py @@ -98,8 +98,8 @@ def __init__(self, alpha=1, max_iteration=100, tolerance=0, isotropic=True, nonn :type nonnegativity: boolean, default True :param max_iteration: max number of sub iterations. The algorithm will iterate up to this number of iteration or up to when the tolerance has been reached :type max_iteration: integer, default 100 - :param tolerance: minimum difference between previous iteration of the algorithm that determines the stop of the iteration earlier than num_iter - :type tolerance: float, default 1e-6 + :param tolerance: minimum difference between previous iteration of the algorithm that determines the stop of the iteration earlier than max_iteration. If set to 0 only the max_iteration will be used as stop criterion. 
+ :type tolerance: float, default 0 :param device: determines if the code runs on CPU or GPU :type device: string, default 'cpu', can be 'gpu' if GPU is installed ''' @@ -154,8 +154,8 @@ def __init__(self, alpha=1, gamma=1, max_iteration=100, tolerance=0, device='cpu :type gamma: number, default 1, can range between 1 and 2 :param max_iteration: max number of sub iterations. The algorithm will iterate up to this number of iteration or up to when the tolerance has been reached :type max_iteration: integer, default 100 - :param tolerance: minimum difference between previous iteration of the algorithm that determines the stop of the iteration earlier than num_iter - :type tolerance: float, default 1e-6 + :param tolerance: minimum difference between previous iteration of the algorithm that determines the stop of the iteration earlier than max_iteration. If set to 0 only the max_iteration will be used as stop criterion. + :type tolerance: float, default 0 :param device: determines if the code runs on CPU or GPU :type device: string, default 'cpu', can be 'gpu' if GPU is installed @@ -238,11 +238,11 @@ class FGP_dTV(RegulariserFunction): :type alpha: number, default 1 :param max_iteration: max number of sub iterations. The algorithm will iterate up to this number of iteration or up to when the tolerance has been reached :type max_iteration: integer, default 100 - :param tolerance: minimum difference between previous iteration of the algorithm that determines the stop of the iteration earlier than num_iter - :type tolerance: float, default 1e-6 + :param tolerance: minimum difference between previous iteration of the algorithm that determines the stop of the iteration earlier than max_iteration. If set to 0 only the max_iteration will be used as stop criterion. + :type tolerance: float, default 0 :param eta: smoothing constant to calculate gradient of the reference :type eta: number, default 0.01 - :param isotropic: Whether it uses L2 (isotropic) or L1 (unisotropic) norm + :param isotropic: Whether it uses L2 (isotropic) or L1 (anisotropic) norm :type isotropic: boolean, default True, can range between 1 and 2 :param nonnegativity: Whether to add the non-negativity constraint :type nonnegativity: boolean, default True @@ -313,8 +313,8 @@ def __init__(self,alpha=1, max_iteration=100, tolerance=0): :type alpha: number, default 1 :param max_iteration: max number of sub iterations. The algorithm will iterate up to this number of iteration or up to when the tolerance has been reached :type max_iteration: integer, default 100 - :param tolerance: minimum difference between previous iteration of the algorithm that determines the stop of the iteration earlier than num_iter - :type tolerance: float, default 1e-6 + :param tolerance: minimum difference between previous iteration of the algorithm that determines the stop of the iteration earlier than max_iteration. If set to 0 only the max_iteration will be used as stop criterion. + :type tolerance: float, default 0 ''' # set parameters self.alpha = alpha From 69b707707675e495f3bf3123fcc36f3b66d9faa8 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 21 Oct 2021 12:33:57 +0100 Subject: [PATCH 12/13] regularisation toolkit updates added to the CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b83c5f43b2..7f7102b335 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,5 @@ * 21.2.1 + - CCPi Regularisation plugin is refactored, only FGP_TV, FGP_dTV, TGV and TNV are exposed. 
Docstrings and functionality unit tests are added. Tests of the functions are meant to be in the CCPi-Regularisation toolkit itself.
  - Add dtype for ImageGeometry, AcquisitionGeometry, VectorGeometry, BlockGeometry
  - Fix GradientOperator to handle pseudo 2D CIL geometries
  - Created Reconstructor base class for simpler use of CIL methods

From b23417400357f7a5abd0995c0caf4d2fc5bb15fb Mon Sep 17 00:00:00 2001
From: Edoardo Pasca
Date: Thu, 21 Oct 2021 13:24:30 +0100
Subject: [PATCH 13/13] add doc on how the function proximal works on complex images

---
 .../plugins/ccpi_regularisation/functions/regularisers.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py b/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py
index 7831cb49c8..b0d39f6222 100644
--- a/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py
+++ b/Wrappers/Python/cil/plugins/ccpi_regularisation/functions/regularisers.py
@@ -40,7 +40,11 @@ def proximal(self, x, tau, out=None):
         :param tau:
         :type tau: Number
         :param out: a placeholder for the result
-        :type out: same as x: ImageData'''
+        :type out: same as x: ImageData
+
+        If the ImageData contains complex data (rather than the default float32), the regularisation
+        is run independently on the real and imaginary parts.
+        '''
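[Editorial note, not part of the patch series] Taken together, the series leaves four regularisers exposed (FGP_TV, TGV, FGP_dTV, TNV); each accepts `alpha`, `max_iteration` and `tolerance` (default 0, i.e. stop on max_iteration only), rescales `alpha` in place when multiplied by a scalar, validates the input dimensionality in `check_input`, and handles complex images by regularising the real and imaginary parts separately. A minimal usage sketch under those assumptions (the complex allocation assumes `allocate` accepts the dtype support added in 21.2.1 per the CHANGELOG entry above):

    # hedged usage sketch of the refactored plugin API (not taken verbatim from the patches)
    import numpy as np
    from cil.utilities import dataexample
    from cil.plugins.ccpi_regularisation.functions import FGP_TV

    data = dataexample.CAMERA.get(size=(256, 256))

    # scalar multiplication goes through __rmul__: it sets g.alpha = 0.05 and returns g itself
    g = 0.05 * FGP_TV(max_iteration=100, device='cpu')
    denoised = g.proximal(data, tau=1.0)

    # complex input: real and imaginary parts are regularised independently (PATCH 13)
    cmplx = data.geometry.allocate(0, dtype=np.complex64)   # assumes the dtype keyword is available
    cmplx.fill((data.as_array() + 1j * data.as_array()).astype(np.complex64))
    denoised_c = g.proximal(cmplx, tau=1.0)

Because `__rmul__` mutates the function in place and returns the same object, a second scalar multiplication rescales `alpha` again; the rmul unit tests above make this explicit by asserting `id(af) == id(f)`.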