From 85e9ae78eba9d7e044c8dd73f8d617f1544c1342 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mirjam=20Schr=C3=B6der?= Date: Tue, 24 Oct 2023 19:31:50 +0200 Subject: [PATCH 01/16] Post-release --- VERSION | 2 +- docs/changelog.rst | 8 +++++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/VERSION b/VERSION index 1690eb2..4e9ac56 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.5.0.dev11 +0.6.0.dev1 diff --git a/docs/changelog.rst b/docs/changelog.rst index a0368b2..b7501a8 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -4,11 +4,17 @@ Changelog This page contains a summary of changes between the official cwepr releases. Only the biggest changes are listed here. A complete and detailed log of all changes is available through the `GitHub Repository Browser `_. -Version 0.5.0 +Version 0.6.0 ============= Not yet released + +Version 0.5.0 +============= + +Released 2023-10-24 + New features ------------ From 48dcad2a59655b623edc051bca8c3c74a374dc8a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mirjam=20Schr=C3=B6der?= Date: Fri, 3 Nov 2023 11:33:01 +0100 Subject: [PATCH 02/16] Update Links in documentation. --- VERSION | 2 +- cwepr/processing.py | 10 +--------- 2 files changed, 2 insertions(+), 10 deletions(-) diff --git a/VERSION b/VERSION index 4e9ac56..f1ecb26 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.6.0.dev1 +0.6.0.dev2 diff --git a/cwepr/processing.py b/cwepr/processing.py index 61085bc..e8637c0 100644 --- a/cwepr/processing.py +++ b/cwepr/processing.py @@ -32,19 +32,11 @@ * :class:`FrequencyCorrection` * :class:`GAxisCreation` - * :class:`BaselineCorrectionWithPolynomial` - * :class:`NormalisationOfDerivativeToArea` * :class:`Normalisation` - * :class:`Integration` - - * :class:`Averaging2DDataset` - * :class:`SubtractVector` - Implemented but not working as they should: - * :class:`PhaseCorrection` * :class:`AutomaticPhaseCorrection` @@ -85,7 +77,7 @@ Correct baseline of dataset. -* :class:`Averaging` +* :class:`aspecd.processing.Averaging` Average data over given range along given axis. From d5bdc3ad156a1e070a3d2a80d98535091b58f9de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mirjam=20Schr=C3=B6der?= Date: Fri, 3 Nov 2023 14:36:41 +0100 Subject: [PATCH 03/16] Remove unnecessary dot --- VERSION | 2 +- docs/conf.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/VERSION b/VERSION index f1ecb26..a81d87d 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.6.0.dev2 +0.6.0.dev3 diff --git a/docs/conf.py b/docs/conf.py index 39ac17d..de8e741 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -27,7 +27,7 @@ release_ = version_file.read().strip() project = 'cwepr' -copyright = '2020- Mirjam Schröder, 2018/19 Pascal Kirchner, 2018– Till Biskup.' 
+copyright = '2020- Mirjam Schröder, 2018/19 Pascal Kirchner, 2018– Till Biskup' author = 'Mirjam Schröder, Pascal Kirchner, Till Biskup' # The short X.Y version From 1ef891e56a4a6c4532b442ab50182397782bbce5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mirjam=20Schr=C3=B6der?= Date: Wed, 22 Nov 2023 18:15:07 +0100 Subject: [PATCH 04/16] Small adaptions to be clearer --- VERSION | 2 +- cwepr/analysis.py | 2 +- cwepr/io/esp_winepr.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/VERSION b/VERSION index a81d87d..69ea26e 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.6.0.dev3 +0.6.0.dev4 diff --git a/cwepr/analysis.py b/cwepr/analysis.py index 88a79a0..8db41ab 100644 --- a/cwepr/analysis.py +++ b/cwepr/analysis.py @@ -868,7 +868,7 @@ class PtpVsModAmp(aspecd.analysis.SingleAnalysisStep): """Create calculated dataset for modulation sweep analysis. For a modulation sweep analysis, the first step is to get the peak to - peak amplitude and correlate it to the modulation amplitude, + peak linewidth and correlate it to the modulation amplitude, see :ref:`modulation_sweep_analysis` for further details. Examples diff --git a/cwepr/io/esp_winepr.py b/cwepr/io/esp_winepr.py index acd2227..0a2837a 100644 --- a/cwepr/io/esp_winepr.py +++ b/cwepr/io/esp_winepr.py @@ -217,7 +217,7 @@ def _ensure_common_units(self): par-file, some units are wrong and are corrected manually here. """ # microwave frequency - if self.dataset.metadata.bridge.mw_frequency.value > 50: + if self.dataset.metadata.bridge.mw_frequency.value > 500: self.dataset.metadata.bridge.mw_frequency.value /= 1e9 self.dataset.metadata.bridge.mw_frequency.unit = 'GHz' # microwave power From c028f68da31741d61f3f64e840301e12c1736dbe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mirjam=20Schr=C3=B6der?= Date: Sat, 13 Jan 2024 21:45:30 +0100 Subject: [PATCH 05/16] Update plotter according to ASpecD release 0.9, update requirements, fix typo in docs --- README.rst | 3 ++- VERSION | 2 +- cwepr/plotting.py | 20 ++++++++++---------- requirements.txt | 3 +-- setup.py | 2 +- 5 files changed, 15 insertions(+), 15 deletions(-) diff --git a/README.rst b/README.rst index 5375ec6..d42e7a4 100644 --- a/README.rst +++ b/README.rst @@ -38,7 +38,8 @@ What is even better: Actual data processing and analysis **no longer requires pr - first-dataset.pdf - second-dataset.pdf -For more general information on the cwepr package and for how to use it, see its `documentation `_. +For more general information on the cwepr package and for how to use it, see +its `documentation `_. Features diff --git a/VERSION b/VERSION index 69ea26e..fe4d756 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.6.0.dev4 +0.6.0.dev5 diff --git a/cwepr/plotting.py b/cwepr/plotting.py index 7e0ac56..70bf09d 100644 --- a/cwepr/plotting.py +++ b/cwepr/plotting.py @@ -289,7 +289,7 @@ class PowerSweepAnalysisPlotter(aspecd.plotting.MultiPlotter1D): microwave power, resulting in a power saturation curve. As long as the signal is not saturated, the graph shows a linear relationship. - As the class inherites from :class:`aspecd.plotting.MultiPlotter1D` + As the class inherits from :class:`aspecd.plotting.MultiPlotter1D` see there for additional details of the parameters that can be set. Attributes @@ -524,7 +524,7 @@ class for details. 
def _create_plot(self): super()._create_plot() - if self.parameters['g-axis'] and self.dataset.data.axes[0].unit == 'mT': + if self.parameters['g-axis'] and self.data.axes[0].unit == 'mT': self._create_g_axis(self.dataset.metadata.bridge.mw_frequency.value) @@ -537,7 +537,7 @@ class SinglePlotter2D(aspecd.plotting.SinglePlotter2D, PlotterExtensions): ASpecD documentation of the :class:`aspecd.plotting.SinglePlotter2D` class for details. - Furthermore, the class inhertis all functionality from + Furthermore, the class inherits all functionality from :class:`PlotterExtensions`. See there for additional details. @@ -630,7 +630,7 @@ class for details. def _create_plot(self): super()._create_plot() - if self.parameters['g-axis'] and self.dataset.data.axes[0].unit == 'mT': + if self.parameters['g-axis'] and self.data.axes[0].unit == 'mT': self._create_g_axis(self.dataset.metadata.bridge.mw_frequency.value) @@ -645,7 +645,7 @@ class SinglePlotter2DStacked(aspecd.plotting.SinglePlotter2DStacked, ASpecD documentation of the :class:`aspecd.plotting.SinglePlotter2DStacked` class for details. - Furthermore, the class inhertis all functionality from + Furthermore, the class inherits all functionality from :class:`PlotterExtensions`. See there for additional details. @@ -722,7 +722,7 @@ class for details. def _create_plot(self): super()._create_plot() - if self.parameters['g-axis'] and self.dataset.data.axes[0].unit == 'mT': + if self.parameters['g-axis'] and self.data.axes[0].unit == 'mT': self._create_g_axis(self.dataset.metadata.bridge.mw_frequency.value) @@ -735,7 +735,7 @@ class MultiPlotter1D(aspecd.plotting.MultiPlotter1D, PlotterExtensions): ASpecD documentation of the :class:`aspecd.plotting.MultiPlotter1D` class for details. - Furthermore, the class inhertis all functionality from + Furthermore, the class inherits all functionality from :class:`PlotterExtensions`. See there for additional details. @@ -805,7 +805,7 @@ class for details. def _create_plot(self): super()._create_plot() if self.parameters['g-axis'] \ - and self.datasets[0].data.axes[0].unit == 'mT': + and self.data[0].axes[0].unit == 'mT': self._create_g_axis( self.datasets[0].metadata.bridge.mw_frequency.value) @@ -820,7 +820,7 @@ class MultiPlotter1DStacked(aspecd.plotting.MultiPlotter1DStacked, ASpecD documentation of the :class:`aspecd.plotting.MultiPlotter1DStacked` class for details. - Furthermore, the class inhertis all functionality from + Furthermore, the class inherits all functionality from :class:`PlotterExtensions`. See there for additional details. Examples @@ -903,6 +903,6 @@ class for details. 
def _create_plot(self): super()._create_plot() if self.parameters['g-axis'] \ - and self.datasets[0].data.axes[0].unit == 'mT': + and self.data[0].axes[0].unit == 'mT': self._create_g_axis( self.datasets[0].metadata.bridge.mw_frequency.value) diff --git a/requirements.txt b/requirements.txt index 7970d57..14b9e00 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,5 @@ -aspecd>=0.2.1 +aspecd>=0.9.0 numpy scipy matplotlib -setuptools python-dateutil \ No newline at end of file diff --git a/setup.py b/setup.py index 32bd39b..5cafaa9 100644 --- a/setup.py +++ b/setup.py @@ -47,7 +47,7 @@ "Topic :: Scientific/Engineering", ], install_requires=[ - 'aspecd>=0.8.0', + 'aspecd>=0.9.0', 'numpy', 'scipy', 'matplotlib', From 5f7eb7be9557a47bce1b4a257db9376d7119fdde Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mirjam=20Schr=C3=B6der?= Date: Sat, 13 Jan 2024 22:08:04 +0100 Subject: [PATCH 06/16] Add makefile and black formatting --- Makefile | 33 +++++++++++++++++++++++++++++++ VERSION | 2 +- bin/formatPythonCode.sh | 43 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 77 insertions(+), 1 deletion(-) create mode 100644 Makefile create mode 100755 bin/formatPythonCode.sh diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..bee1e38 --- /dev/null +++ b/Makefile @@ -0,0 +1,33 @@ +# Minimal Makefile for automating recurring tasks during development +# +# Copyright (c) 2023, Till Biskup +# 2023-12-06 + +.PHONY: docs tests help +.DEFAULT_GOAL := help + +help: + @echo "This makefile automates different recurring tasks" + @echo "" + @echo "The following targets are available:" + @echo "" + @echo "docs - create documentation using Sphinx" + @echo "tests - run unittests" + @echo "check - check code using prospector" + @echo "black - format code using Black" + +docs: + @echo "Create documentation using Sphinx" + $(MAKE) -C docs html + +tests: + @echo "Run unittests" + cd tests/ && python -m unittest discover -s . -t . + +check: + @echo "Check code using prospector... this may take a while" + prospector + +black: + @echo "Automatically format code using Black" + black -l 78 . --extend-exclude templates diff --git a/VERSION b/VERSION index fe4d756..2525037 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.6.0.dev5 +0.6.0.dev6 diff --git a/bin/formatPythonCode.sh b/bin/formatPythonCode.sh new file mode 100755 index 0000000..f84b1e9 --- /dev/null +++ b/bin/formatPythonCode.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash +# +# Autoformat Python files currently in git staging area. +# +# Intended for using in pre-commit hook +# +# Formatter, formatter options and exclude pattern can be set. +# +# Note that due to calling the formatter explicitly for files, exclude patterns +# via formatter options are likely to not work. Hence the list of files to be +# reformatted needs to be filtered beforehand. +# +# Existence of the formatter is checked, and if it is not present, +# the script silently exits. +# +# Only Python files in the staging area are reformatted and afterwards +# re-added to the staging area. +# +# Copyright (c) 2023, Till Biskup +# 2023-12-06 + +FORMATTER="black" +FORMATTER_OPTIONS="-l 78" +EXCLUDE_PATTERN="templates" + +if ! 
command -v $FORMATTER &> /dev/null +then + exit +fi + +if \[ -n "$EXCLUDE_PATTERN" \]; +then + CHANGED_FILES=$(git diff --cached --name-only --diff-filter=ACMR -- '*.py' | grep -v $EXCLUDE_PATTERN) +else + CHANGED_FILES=$(git diff --cached --name-only --diff-filter=ACMR -- '*.py') +fi + +for file in $CHANGED_FILES +do + echo "Reformat '$file' using '$FORMATTER $FORMATTER_OPTIONS'" + $FORMATTER $FORMATTER_OPTIONS "$file" + git add "$file" +done From 7cd00161dece9ce2a1d9387bd48e569f59f851bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mirjam=20Schr=C3=B6der?= Date: Sat, 13 Jan 2024 22:09:38 +0100 Subject: [PATCH 07/16] Black code formatting. --- VERSION | 2 +- cwepr/analysis.py | 168 ++++++---- cwepr/dataset.py | 2 +- cwepr/exceptions.py | 18 +- cwepr/io/__init__.py | 8 +- cwepr/io/bes3t.py | 166 +++++----- cwepr/io/esp_winepr.py | 156 +++++---- cwepr/io/exporter.py | 20 +- cwepr/io/factory.py | 53 +-- cwepr/io/magnettech.py | 634 ++++++++++++++++++++---------------- cwepr/io/niehs.py | 170 ++++++---- cwepr/io/txt_file.py | 22 +- cwepr/metadata.py | 51 +-- cwepr/plotting.py | 141 ++++---- cwepr/processing.py | 148 +++++---- cwepr/report.py | 169 +++++----- cwepr/utils.py | 12 +- docs/conf.py | 123 +++---- docs/datasets2yaml.py | 9 +- setup.py | 42 +-- tests/io/test_bes3t.py | 66 ++-- tests/io/test_esp_winepr.py | 109 +++++-- tests/io/test_exporter.py | 24 +- tests/io/test_factory.py | 30 +- tests/io/test_magnettech.py | 270 ++++++++------- tests/io/test_niehs.py | 59 ++-- tests/io/test_txt_file.py | 51 +-- tests/test_analysis.py | 113 ++++--- tests/test_dataset.py | 6 +- tests/test_plotting.py | 185 ++++++----- tests/test_processing.py | 105 +++--- tests/test_report.py | 121 ++++--- tests/test_utils.py | 21 +- 33 files changed, 1872 insertions(+), 1402 deletions(-) diff --git a/VERSION b/VERSION index 2525037..42e4fe3 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.6.0.dev6 +0.6.0.dev7 diff --git a/cwepr/analysis.py b/cwepr/analysis.py index 8db41ab..bfde1ec 100644 --- a/cwepr/analysis.py +++ b/cwepr/analysis.py @@ -263,14 +263,14 @@ class FieldCalibration(aspecd.analysis.SingleAnalysisStep): def __init__(self): super().__init__() - self.parameters['standard'] = '' - self.parameters['g_value'] = None - self.parameters['mw_frequency'] = None + self.parameters["standard"] = "" + self.parameters["g_value"] = None + self.parameters["mw_frequency"] = None self.description = "Determine magnetic field offset from standard" # NOTE: keys need to be all-lowercase self.g_values = { - 'dpph': 2.0036, - 'lilif': 2.002293, + "dpph": 2.0036, + "lilif": 2.002293, } @staticmethod @@ -294,23 +294,27 @@ def applicable(dataset): return dataset.data.data.ndim == 1 def _sanitise_parameters(self): - if not self.parameters['mw_frequency'] and not \ - self.dataset.metadata.bridge.mw_frequency.value: - raise ValueError('No microwave frequency provided, aborting.') - if not self.parameters['standard'] and not self.parameters['g_value']: - raise ValueError('No standard or g value provided, aborting.') + if ( + not self.parameters["mw_frequency"] + and not self.dataset.metadata.bridge.mw_frequency.value + ): + raise ValueError("No microwave frequency provided, aborting.") + if not self.parameters["standard"] and not self.parameters["g_value"]: + raise ValueError("No standard or g value provided, aborting.") def _perform_task(self): self._assign_parameters() self.result = self._get_field_offset() def _assign_parameters(self): - if not self.parameters['mw_frequency']: - self.parameters['mw_frequency'] = \ - 
self.dataset.metadata.bridge.mw_frequency.value - if not self.parameters['g_value']: - self.parameters['g_value'] = \ - self.g_values[self.parameters['standard'].lower()] + if not self.parameters["mw_frequency"]: + self.parameters[ + "mw_frequency" + ] = self.dataset.metadata.bridge.mw_frequency.value + if not self.parameters["g_value"]: + self.parameters["g_value"] = self.g_values[ + self.parameters["standard"].lower() + ] def _get_field_offset(self): """Calculates a field correction value. @@ -328,13 +332,20 @@ def _get_field_offset(self): """ i_max = np.argmax(self.dataset.data.data) i_min = np.argmin(self.dataset.data.data) - zero_crossing_exp = (self.dataset.data.axes[0].values[i_min] + - self.dataset.data.axes[0].values[i_max]) / 2 - calculated_field = \ - scipy.constants.value('Planck constant') \ - * self.parameters['mw_frequency'] * 1e9 * 1e3 / \ - (self.parameters['g_value'] - * scipy.constants.value('Bohr magneton')) + zero_crossing_exp = ( + self.dataset.data.axes[0].values[i_min] + + self.dataset.data.axes[0].values[i_max] + ) / 2 + calculated_field = ( + scipy.constants.value("Planck constant") + * self.parameters["mw_frequency"] + * 1e9 + * 1e3 + / ( + self.parameters["g_value"] + * scipy.constants.value("Bohr magneton") + ) + ) delta_b0 = calculated_field - zero_crossing_exp return delta_b0 @@ -399,8 +410,10 @@ def get_peak_to_peak_linewidth(self): """ index_max = np.argmax(self.dataset.data.data) index_min = np.argmin(self.dataset.data.data) - linewidth = abs(self.dataset.data.axes[0].values[index_min] - - self.dataset.data.axes[0].values[index_max]) + linewidth = abs( + self.dataset.data.axes[0].values[index_min] + - self.dataset.data.axes[0].values[index_max] + ) return linewidth @@ -428,8 +441,9 @@ class LinewidthFWHM(aspecd.analysis.SingleAnalysisStep): def __init__(self): super().__init__() - self.description = \ + self.description = ( "Determine linewidth (full width at half max; FWHM)" + ) @staticmethod def applicable(dataset): @@ -472,7 +486,9 @@ def _get_fwhm_linewidth(self): left_zero_cross_index = np.argmin(spectral_data[:index_max]) right_zero_cross_index = np.argmin(spectral_data[index_max:]) b_field_left = self.dataset.data.axes[0].values[left_zero_cross_index] - b_field_right = self.dataset.data.axes[0].values[right_zero_cross_index] + b_field_right = self.dataset.data.axes[0].values[ + right_zero_cross_index + ] return b_field_right - b_field_left @@ -536,10 +552,15 @@ def _get_amplitude(data=None): return max(data) - min(data) def _get_noise(self): - number_of_points = math.ceil(len(self.dataset.data.data) * - self.parameters["percentage"] / 100.0) - noise_data = np.append(self.dataset.data.data[:number_of_points], - self.dataset.data.data[-number_of_points:]) + number_of_points = math.ceil( + len(self.dataset.data.data) + * self.parameters["percentage"] + / 100.0 + ) + noise_data = np.append( + self.dataset.data.data[:number_of_points], + self.dataset.data.data[-number_of_points:], + ) return noise_data @@ -574,10 +595,12 @@ def __init__(self): def _perform_task(self): if len(self.dataset.data.axes) == 2: self.result = max(self.dataset.data.data) - min( - self.dataset.data.data) + self.dataset.data.data + ) else: self.result = np.amax(self.dataset.data.data, axis=0) - np.amin( - self.dataset.data.data, axis=0) + self.dataset.data.data, axis=0 + ) class AmplitudeVsPower(aspecd.analysis.SingleAnalysisStep): @@ -657,8 +680,9 @@ class AmplitudeVsPower(aspecd.analysis.SingleAnalysisStep): def __init__(self): super().__init__() - self.description = \ + 
self.description = ( "Return calculated dataset for power sweep analysis." + ) self.result = aspecd.dataset.CalculatedDataset() # private properties self._analysis = None @@ -710,12 +734,12 @@ def _assign_data_to_result(self): self.result.data.axes[0].values = self._roots_of_mw_power def _assign_units_to_result(self): - if self.dataset.data.axes[1].unit == 'mW': - self.result.data.axes[0].unit = 'sqrt(mW)' - elif self.dataset.data.axes[1].unit == 'W': - self.result.data.axes[0].unit = 'sqrt(W)' - self.result.data.axes[0].quantity = 'square root of mw power' - self.result.data.axes[1].quantity = 'EPR amplitude' + if self.dataset.data.axes[1].unit == "mW": + self.result.data.axes[0].unit = "sqrt(mW)" + elif self.dataset.data.axes[1].unit == "W": + self.result.data.axes[0].unit = "sqrt(W)" + self.result.data.axes[0].quantity = "square root of mw power" + self.result.data.axes[1].quantity = "EPR amplitude" class FitOnData(aspecd.analysis.SingleAnalysisStep): @@ -792,12 +816,12 @@ def __init__(self): super().__init__() self.description = "Perform fit and return parameters." self.result = None - self.parameters['points'] = 3 - self.parameters['order'] = 1 - self.parameters['return_type'] = 'coefficients' - self.parameters['fixed_intercept'] = False - self.parameters['offset'] = 0 - self.parameters['coefficients'] = [] + self.parameters["points"] = 3 + self.parameters["order"] = 1 + self.parameters["return_type"] = "coefficients" + self.parameters["fixed_intercept"] = False + self.parameters["offset"] = 0 + self.parameters["coefficients"] = [] # private properties self._curve = None @@ -822,7 +846,7 @@ def applicable(dataset): return dataset.data.data.ndim == 1 def _perform_task(self): - if self.parameters['fixed_intercept']: + if self.parameters["fixed_intercept"]: self._linear_regression_with_fixed_intercept() self._get_curve() else: @@ -831,37 +855,42 @@ def _perform_task(self): self._assign_result() def _get_coefficients(self): - x_data_to_process = \ - self.dataset.data.axes[0].values[:self.parameters['points']] - y_data_to_process = self.dataset.data.data[:self.parameters['points']] - self.parameters['coefficients'] = \ - np.polyfit(x_data_to_process, y_data_to_process, self.parameters[ - 'order']) + x_data_to_process = self.dataset.data.axes[0].values[ + : self.parameters["points"] + ] + y_data_to_process = self.dataset.data.data[ + : self.parameters["points"] + ] + self.parameters["coefficients"] = np.polyfit( + x_data_to_process, y_data_to_process, self.parameters["order"] + ) def _get_curve(self): self._curve = self.create_dataset() - slope = np.polynomial.Polynomial(self.parameters['coefficients']) + slope = np.polynomial.Polynomial(self.parameters["coefficients"]) self._curve.data.data = slope(self.dataset.data.axes[0].values) self._curve.data.axes[0] = self.dataset.data.axes[0] def _assign_result(self): - if self.parameters['return_type'].lower() == 'dataset': + if self.parameters["return_type"].lower() == "dataset": self.result = self._curve else: - self.result = self.parameters['coefficients'] + self.result = self.parameters["coefficients"] def _linear_regression_with_fixed_intercept(self): _help = self.create_dataset() - _help.data.axes[0].values = \ - self.dataset.data.axes[0].values[:self.parameters['points']] - _help.data.data = self.dataset.data.data[:self.parameters['points']] + _help.data.axes[0].values = self.dataset.data.axes[0].values[ + : self.parameters["points"] + ] + _help.data.data = self.dataset.data.data[: self.parameters["points"]] analysis = 
aspecd.analysis.LinearRegressionWithFixedIntercept() - aspecd.utils.copy_values_between_dicts(source=self.parameters, - target=analysis.parameters) - analysis.parameters['polynomial_coefficients'] = True + aspecd.utils.copy_values_between_dicts( + source=self.parameters, target=analysis.parameters + ) + analysis.parameters["polynomial_coefficients"] = True analysis.dataset = _help result = _help.analyse(analysis) - self.parameters['coefficients'] = result.result + self.parameters["coefficients"] = result.result class PtpVsModAmp(aspecd.analysis.SingleAnalysisStep): @@ -885,8 +914,9 @@ class PtpVsModAmp(aspecd.analysis.SingleAnalysisStep): def __init__(self): super().__init__() - self.description = \ - 'Create dataset with ptp-linewidth vs modulation Amplitude.' + self.description = ( + "Create dataset with ptp-linewidth vs modulation Amplitude." + ) self.result = aspecd.dataset.CalculatedDataset() self.linewidths = np.ndarray([]) @@ -918,14 +948,16 @@ def applicable(dataset): def _get_linewidths(self): index_max = np.argmax(self.dataset.data.data, axis=0) index_min = np.argmin(self.dataset.data.data, axis=0) - self.linewidths = self.dataset.data.axes[0].values[index_min] - \ - self.dataset.data.axes[0].values[index_max] + self.linewidths = ( + self.dataset.data.axes[0].values[index_min] + - self.dataset.data.axes[0].values[index_max] + ) def _fill_dataset(self): self.result.data.data = self.linewidths self.result.data.axes[0] = self.dataset.data.axes[1] self.result.data.axes[1].unit = self.dataset.data.axes[0].unit - self.result.data.axes[1].quantity = 'peak to peak linewidth' + self.result.data.axes[1].quantity = "peak to peak linewidth" class AreaUnderCurve(aspecd.analysis.SingleAnalysisStep): diff --git a/cwepr/dataset.py b/cwepr/dataset.py index be17bd2..f57cae9 100644 --- a/cwepr/dataset.py +++ b/cwepr/dataset.py @@ -96,7 +96,7 @@ def __init__(self): self.importer_factory = cwepr.io.factory.DatasetImporterFactory() @staticmethod - def _create_dataset(source=''): + def _create_dataset(source=""): """Return cwepr dataset. 
Parameters diff --git a/cwepr/exceptions.py b/cwepr/exceptions.py index 5b0ca41..302043e 100644 --- a/cwepr/exceptions.py +++ b/cwepr/exceptions.py @@ -23,7 +23,7 @@ class UnsupportedDataFormatError(Error): """ - def __init__(self, message=''): + def __init__(self, message=""): super().__init__(message) self.message = message @@ -38,7 +38,7 @@ class MissingPathError(Error): """ - def __init__(self, message=''): + def __init__(self, message=""): super().__init__(message) self.message = message @@ -53,7 +53,7 @@ class MissingInfoFileError(Error): """ - def __init__(self, message=''): + def __init__(self, message=""): super().__init__(message) self.message = message @@ -72,7 +72,7 @@ class ExperimentTypeError(Error): """ - def __init__(self, message=''): + def __init__(self, message=""): super().__init__(message) self.message = message @@ -87,7 +87,7 @@ class DimensionError(Error): """ - def __init__(self, message=''): + def __init__(self, message=""): super().__init__() self.message = message @@ -95,7 +95,7 @@ def __init__(self, message=''): class MissingInformationError(Error): """Exception raised when not enough information is provided.""" - def __init__(self, message=''): + def __init__(self, message=""): super().__init__() self.message = message @@ -113,7 +113,7 @@ class UnequalUnitsError(Error): """ - def __init__(self, message=''): + def __init__(self, message=""): super().__init__() self.message = message @@ -128,6 +128,6 @@ class RecipeNotFoundError(Error): """ - def __init__(self, message=''): + def __init__(self, message=""): super().__init__() - self.message = message \ No newline at end of file + self.message = message diff --git a/cwepr/io/__init__.py b/cwepr/io/__init__.py index be2e778..38f2763 100644 --- a/cwepr/io/__init__.py +++ b/cwepr/io/__init__.py @@ -34,8 +34,12 @@ # The import statements below should *only* import the respective classes. 
from .factory import DatasetImporterFactory -from .magnettech import MagnettechXMLImporter, GoniometerSweepImporter, \ - AmplitudeSweepImporter, PowerSweepImporter +from .magnettech import ( + MagnettechXMLImporter, + GoniometerSweepImporter, + AmplitudeSweepImporter, + PowerSweepImporter, +) from .txt_file import CsvImporter, TxtImporter from .bes3t import BES3TImporter from .esp_winepr import ESPWinEPRImporter diff --git a/cwepr/io/bes3t.py b/cwepr/io/bes3t.py index 917fa1b..3306f6a 100644 --- a/cwepr/io/bes3t.py +++ b/cwepr/io/bes3t.py @@ -51,10 +51,10 @@ def __init__(self, source=None): # private properties self._infofile = aspecd.infofile.Infofile() self._dsc_dict = {} - self._mapper_filename = 'dsc_keys.yaml' + self._mapper_filename = "dsc_keys.yaml" self._is_two_dimensional = False self._dimensions = [] - self._file_encoding = '' + self._file_encoding = "" def _import(self): self._clean_filenames() @@ -87,44 +87,44 @@ def _import_data(self): self.dataset.data.data = raw_data def _set_dataset_dimension(self): - for key in ('YPTS', 'XPTS'): + for key in ("YPTS", "XPTS"): if key in self._dsc_dict.keys(): self._dimensions.append(int(self._dsc_dict[key])) if len(self._dimensions) == 2: self._is_two_dimensional = True def _get_file_encoding(self): - encodings = { - 'BIG': '>f8', - 'LIT': 'f8", "LIT": " 1: value = line[1] else: - value = '' - if re.match(r'^[+-]?[0-9.]+([eE][+-]?[0-9]*)?$', value): + value = "" + if re.match(r"^[+-]?[0-9.]+([eE][+-]?[0-9]*)?$", value): value = float(value) self._dsc_dict[key] = value @@ -134,10 +134,12 @@ def _map_dsc_into_dataset(self): yaml_file.read_from(os.path.join(rootpath, self._mapper_filename)) dsc_metadata_dict = {} dsc_metadata_dict = self._traverse(yaml_file.dict, dsc_metadata_dict) - aspecd.utils.copy_keys_between_dicts(dsc_metadata_dict, - self._metadata_dict) - aspecd.utils.copy_values_between_dicts(dsc_metadata_dict, - self._metadata_dict) + aspecd.utils.copy_keys_between_dicts( + dsc_metadata_dict, self._metadata_dict + ) + aspecd.utils.copy_values_between_dicts( + dsc_metadata_dict, self._metadata_dict + ) self.dataset.metadata.from_dict(self._metadata_dict) def _traverse(self, dict_, metadata_dict): @@ -147,21 +149,22 @@ def _traverse(self, dict_, metadata_dict): self._traverse(value, metadata_dict[key]) elif value in self._dsc_dict.keys(): metadata_dict[key] = self._dsc_dict[value] - elif key == 'specified_unit': - metadata_dict['unit'] = value + elif key == "specified_unit": + metadata_dict["unit"] = value return metadata_dict def _fill_axes(self): self._get_magnetic_field_axis() - self.dataset.data.axes[0].quantity = 'magnetic field' - self.dataset.data.axes[0].unit = self._dsc_dict['XUNI'] - self.dataset.data.axes[-1].quantity = 'intensity' + self.dataset.data.axes[0].quantity = "magnetic field" + self.dataset.data.axes[0].unit = self._dsc_dict["XUNI"] + self.dataset.data.axes[-1].quantity = "intensity" if self._is_two_dimensional: - self.dataset.data.axes[1].values = \ - np.fromfile(self.source + '.YGF', dtype=self._file_encoding) - self.dataset.data.axes[1].quantity = self._dsc_dict['YNAM'] - self.dataset.data.axes[1].unit = self._dsc_dict['YUNI'] + self.dataset.data.axes[1].values = np.fromfile( + self.source + ".YGF", dtype=self._file_encoding + ) + self.dataset.data.axes[1].quantity = self._dsc_dict["YNAM"] + self.dataset.data.axes[1].unit = self._dsc_dict["YUNI"] def _get_magnetic_field_axis(self): # Abbreviations: @@ -172,10 +175,12 @@ def _get_magnetic_field_axis(self): stop = start + sweep_width - (sweep_width / (points + 
1)) # Set axis magnetic_field_axis = np.linspace(start, stop, points) - assert len(magnetic_field_axis) == points, \ - 'Length of magnetic field and number of points differ' - assert len(magnetic_field_axis) == self.dataset.data.data.shape[0], \ - 'Length of magnetic field and size of data differ' + assert ( + len(magnetic_field_axis) == points + ), "Length of magnetic field and number of points differ" + assert ( + len(magnetic_field_axis) == self.dataset.data.data.shape[0] + ), "Length of magnetic field and size of data differ" # set more values in dataset self.dataset.metadata.magnetic_field.stop.value = stop self.dataset.data.axes[0].values = magnetic_field_axis @@ -187,11 +192,11 @@ def _load_infofile(self): self._infofile.parse() def _get_infofile_name(self): - return glob.glob(''.join([self.source.strip(), '.info'])) + return glob.glob("".join([self.source.strip(), ".info"])) def _assign_comment_as_annotation(self): comment = aspecd.annotation.Comment() - comment.comment = self._infofile.parameters['COMMENT'] + comment.comment = self._infofile.parameters["COMMENT"] self.dataset.annotate(comment) def _map_metadata(self, infofile_version): @@ -199,14 +204,15 @@ def _map_metadata(self, infofile_version): mapper = aspecd.metadata.MetadataMapper() mapper.version = infofile_version mapper.metadata = self._infofile.parameters - mapper.recipe_filename = 'cwepr@metadata_mapper_cwepr.yaml' + mapper.recipe_filename = "cwepr@metadata_mapper_cwepr.yaml" mapper.map() self._metadata_dict = aspecd.utils.convert_keys_to_variable_names( - mapper.metadata) + mapper.metadata + ) def _map_infofile(self): """Bring the metadata to a given format.""" - infofile_version = self._infofile.infofile_info['version'] + infofile_version = self._infofile.infofile_info["version"] self._map_metadata(infofile_version) self._assign_comment_as_annotation() @@ -217,53 +223,67 @@ def _ensure_common_units(self): DSC-file, some units are wrong and are corrected manually here. 
""" # microwave frequency - if self.dataset.metadata.bridge.mw_frequency.unit == 'Hz': + if self.dataset.metadata.bridge.mw_frequency.unit == "Hz": self.dataset.metadata.bridge.mw_frequency.value /= 1e9 - self.dataset.metadata.bridge.mw_frequency.unit = 'GHz' + self.dataset.metadata.bridge.mw_frequency.unit = "GHz" # microwave power - if self.dataset.metadata.bridge.power.unit == 'W': + if self.dataset.metadata.bridge.power.unit == "W": self.dataset.metadata.bridge.power.value *= 1e3 - self.dataset.metadata.bridge.power.unit = 'mW' + self.dataset.metadata.bridge.power.unit = "mW" # time objects - objects_ = ('conversion_time', 'time_constant') + objects_ = ("conversion_time", "time_constant") for object_ in objects_: time_object = getattr( - self.dataset.metadata.signal_channel, object_) - if time_object.unit == 's': + self.dataset.metadata.signal_channel, object_ + ) + if time_object.unit == "s": time_object.value *= 1e3 - time_object.unit = 'ms' - setattr(self.dataset.metadata.magnetic_field, object_, - time_object) + time_object.unit = "ms" + setattr( + self.dataset.metadata.magnetic_field, object_, time_object + ) # magnetic field objects - objects_ = ('start', 'stop', 'sweep_width') + objects_ = ("start", "stop", "sweep_width") for object_ in objects_: magnetic_field_object = getattr( - self.dataset.metadata.magnetic_field, object_) - if magnetic_field_object.unit == 'G': + self.dataset.metadata.magnetic_field, object_ + ) + if magnetic_field_object.unit == "G": magnetic_field_object.value /= 10 - magnetic_field_object.unit = 'mT' - setattr(self.dataset.metadata.magnetic_field, object_, - magnetic_field_object) + magnetic_field_object.unit = "mT" + setattr( + self.dataset.metadata.magnetic_field, + object_, + magnetic_field_object, + ) # axes - if self.dataset.data.axes[0].unit == 'G': + if self.dataset.data.axes[0].unit == "G": self.dataset.data.axes[0].values /= 10 - self.dataset.data.axes[0].unit = 'mT' + self.dataset.data.axes[0].unit = "mT" # modulation frequency - if self.dataset.metadata.signal_channel.modulation_frequency.unit ==\ - 'Hz': - self.dataset.metadata.signal_channel.modulation_frequency.value \ - /= 1e3 - self.dataset.metadata.signal_channel.modulation_frequency.unit = \ - 'kHz' - if self.dataset.metadata.signal_channel.modulation_amplitude.unit == \ - 'T': - self.dataset.metadata.signal_channel.modulation_amplitude.value \ - *= 1e3 - self.dataset.metadata.signal_channel.modulation_amplitude.unit = \ - 'mT' + if ( + self.dataset.metadata.signal_channel.modulation_frequency.unit + == "Hz" + ): + self.dataset.metadata.signal_channel.modulation_frequency.value /= ( + 1e3 + ) + self.dataset.metadata.signal_channel.modulation_frequency.unit = ( + "kHz" + ) + if ( + self.dataset.metadata.signal_channel.modulation_amplitude.unit + == "T" + ): + self.dataset.metadata.signal_channel.modulation_amplitude.value *= ( + 1e3 + ) + self.dataset.metadata.signal_channel.modulation_amplitude.unit = ( + "mT" + ) def _check_experiment(self): - if self._dsc_dict['EXPT'] != 'CW': + if self._dsc_dict["EXPT"] != "CW": raise cwepr.exceptions.ExperimentTypeError( - message='Experiment seems not to be a cw-Experiment.' + message="Experiment seems not to be a cw-Experiment." 
) diff --git a/cwepr/io/esp_winepr.py b/cwepr/io/esp_winepr.py index 0a2837a..f5cb509 100644 --- a/cwepr/io/esp_winepr.py +++ b/cwepr/io/esp_winepr.py @@ -55,9 +55,9 @@ def __init__(self, source=None): # private properties self._infofile = aspecd.infofile.Infofile() self._par_dict = {} - self._mapper_filename = 'par_keys.yaml' + self._mapper_filename = "par_keys.yaml" self._metadata_dict = OrderedDict() - self._file_encoding = '' + self._file_encoding = "" def _import(self): self._clean_filenames() @@ -82,13 +82,16 @@ def _clean_filenames(self): def _set_defaults(self): default_file = aspecd.utils.Yaml() - default_file.read_stream(aspecd.utils.get_package_data( - 'cwepr@io/par_defaults.yaml').encode()) + default_file.read_stream( + aspecd.utils.get_package_data( + "cwepr@io/par_defaults.yaml" + ).encode() + ) self._metadata_dict = default_file.dict def _read_parameter_file(self): - par_filename = self.source + '.par' - with open(par_filename, 'r', encoding='ascii') as file: + par_filename = self.source + ".par" + with open(par_filename, "r", encoding="ascii") as file: lines = file.read().splitlines() for line in lines: @@ -97,29 +100,32 @@ def _read_parameter_file(self): if len(line) > 1: value = line[1] else: - value = '' - if re.match(r'^[+-]?[0-9.]+([eE][+-]?[0-9]*)?$', value): + value = "" + if re.match(r"^[+-]?[0-9.]+([eE][+-]?[0-9]*)?$", value): value = float(value) self._par_dict[key] = value def _import_data(self): - complete_filename = self.source + '.spc' + complete_filename = self.source + ".spc" self._get_file_encoding() raw_data = np.fromfile(complete_filename, self._file_encoding) self.dataset.data.data = raw_data def _get_file_encoding(self): - if ('DOS', 'Format') in self._par_dict.items(): - self._file_encoding = ' 500: self.dataset.metadata.bridge.mw_frequency.value /= 1e9 - self.dataset.metadata.bridge.mw_frequency.unit = 'GHz' + self.dataset.metadata.bridge.mw_frequency.unit = "GHz" # microwave power if self.dataset.metadata.bridge.power.value < 0.001: self.dataset.metadata.bridge.power.value *= 1e3 - self.dataset.metadata.bridge.power.unit = 'mW' + self.dataset.metadata.bridge.power.unit = "mW" # magnetic field objects - objects_ = ('start', 'stop', 'sweep_width') + objects_ = ("start", "stop", "sweep_width") for object_ in objects_: magnetic_field_object = getattr( - self.dataset.metadata.magnetic_field, object_) - if magnetic_field_object.unit in ('G', ''): + self.dataset.metadata.magnetic_field, object_ + ) + if magnetic_field_object.unit in ("G", ""): magnetic_field_object.value /= 10 - magnetic_field_object.unit = 'mT' + magnetic_field_object.unit = "mT" setattr( - self.dataset.metadata.magnetic_field, object_, - magnetic_field_object) + self.dataset.metadata.magnetic_field, + object_, + magnetic_field_object, + ) if not self.dataset.metadata.temperature_control.temperature.unit: - self.dataset.metadata.temperature_control.temperature.unit = 'K' + self.dataset.metadata.temperature_control.temperature.unit = "K" def _fill_axes(self): self._get_magnetic_field_axis() - self.dataset.data.axes[0].quantity = 'magnetic field' - self.dataset.data.axes[0].unit = \ - self.dataset.metadata.magnetic_field.start.unit - self.dataset.data.axes[-1].quantity = 'intensity' + self.dataset.data.axes[0].quantity = "magnetic field" + self.dataset.data.axes[ + 0 + ].unit = self.dataset.metadata.magnetic_field.start.unit + self.dataset.data.axes[-1].quantity = "intensity" def _get_magnetic_field_axis(self): # Abbreviations: @@ -255,17 +277,21 @@ def _get_magnetic_field_axis(self): stop = 
start + sweep_width # Set axis magnetic_field_axis = np.linspace(start, stop, points) - assert len(magnetic_field_axis) == points, \ - 'Length of magnetic field and number of points differ' - assert len(magnetic_field_axis) == self.dataset.data.data.shape[0], \ - 'Length of magnetic field and size of data differ' + assert ( + len(magnetic_field_axis) == points + ), "Length of magnetic field and number of points differ" + assert ( + len(magnetic_field_axis) == self.dataset.data.data.shape[0] + ), "Length of magnetic field and size of data differ" # set more values in dataset self.dataset.metadata.magnetic_field.stop.value = stop - self.dataset.metadata.magnetic_field.stop.unit = \ + self.dataset.metadata.magnetic_field.stop.unit = ( self.dataset.metadata.magnetic_field.start.unit + ) self.dataset.data.axes[0].values = magnetic_field_axis def _get_number_of_points(self): self.dataset.metadata.magnetic_field.points = len( - self.dataset.data.data) + self.dataset.data.data + ) diff --git a/cwepr/io/exporter.py b/cwepr/io/exporter.py index ebb092a..5b8c4b0 100644 --- a/cwepr/io/exporter.py +++ b/cwepr/io/exporter.py @@ -119,20 +119,21 @@ class MetadataExporter(aspecd.io.DatasetExporter): def __init__(self): super().__init__() self.metadata_dict = collections.OrderedDict() - self.filename = '' + self.filename = "" def _export(self): if not self.filename: - timestamp = datetime.datetime.now().strftime('%Y%m%dT%H%M%S') - self.filename = 'metadata-' + timestamp + '.yaml' - if not self.filename.endswith(('.yaml', '.yml')): - self.filename = self.filename + '.yaml' + timestamp = datetime.datetime.now().strftime("%Y%m%dT%H%M%S") + self.filename = "metadata-" + timestamp + ".yaml" + if not self.filename.endswith((".yaml", ".yml")): + self.filename = self.filename + ".yaml" self._write_metadata() def _write_metadata(self): self.metadata_dict = self.dataset.metadata.to_dict() - self.metadata_dict = \ - self._remove_empty_items_recursively(dict_=self.metadata_dict) + self.metadata_dict = self._remove_empty_items_recursively( + dict_=self.metadata_dict + ) yaml_file = aspecd.utils.Yaml() yaml_file.dict = self.metadata_dict yaml_file.write_to(self.filename) @@ -141,10 +142,9 @@ def _remove_empty_items_recursively(self, dict_=None): tmp_dict = collections.OrderedDict() for key, value in dict_.items(): if isinstance(value, dict): - dict_[key] = \ - self._remove_empty_items_recursively(value) + dict_[key] = self._remove_empty_items_recursively(value) # if magnettech has not measured the q-value it is -1 - if key == 'q_value' and value == -1: + if key == "q_value" and value == -1: continue if dict_[key]: tmp_dict[key] = dict_[key] diff --git a/cwepr/io/factory.py b/cwepr/io/factory.py index 9e5854d..528091c 100644 --- a/cwepr/io/factory.py +++ b/cwepr/io/factory.py @@ -47,15 +47,16 @@ class DatasetImporterFactory(aspecd.io.DatasetImporterFactory): def __init__(self): super().__init__() - self.supported_formats = {"BES3T": [".DTA", ".DSC"], - "ESPWinEPR": [".spc", ".par"], - "MagnettechXML": [".xml"], - "NIEHSDat": [".dat"], - "NIEHSLmb": [".lmb"], - "NIEHSExp": [".exp"], - "Txt": [".txt"], - "Csv": [".csv"], - } + self.supported_formats = { + "BES3T": [".DTA", ".DSC"], + "ESPWinEPR": [".spc", ".par"], + "MagnettechXML": [".xml"], + "NIEHSDat": [".dat"], + "NIEHSLmb": [".lmb"], + "NIEHSExp": [".exp"], + "Txt": [".txt"], + "Csv": [".csv"], + } self.data_format = None def _get_importer(self): @@ -80,27 +81,31 @@ class created using :func:`aspecd.utils.object_from_class_name`. 
""" if os.path.isdir(self.source): if self._directory_contains_gon_data(): - self.data_format = 'GoniometerSweep' - importer = \ - object_from_class_name('cwepr.io.GoniometerSweepImporter') + self.data_format = "GoniometerSweep" + importer = object_from_class_name( + "cwepr.io.GoniometerSweepImporter" + ) importer.source = self.source return importer if self._directory_contains_amplitude_sweep_data(): - self.data_format = 'AmplitudeSweep' - importer = \ - object_from_class_name('cwepr.io.AmplitudeSweepImporter') + self.data_format = "AmplitudeSweep" + importer = object_from_class_name( + "cwepr.io.AmplitudeSweepImporter" + ) importer.source = self.source return importer if self._directory_contains_power_sweep_data(): - self.data_format = 'PowerSweep' - importer = \ - object_from_class_name('cwepr.io.PowerSweepImporter') + self.data_format = "PowerSweep" + importer = object_from_class_name( + "cwepr.io.PowerSweepImporter" + ) importer.source = self.source return importer self.data_format = self._find_format() if self.data_format: importer = object_from_class_name( - ".".join(["cwepr", "io", self.data_format + "Importer"])) + ".".join(["cwepr", "io", self.data_format + "Importer"]) + ) importer.source = self.source return importer @@ -117,7 +122,9 @@ def _find_format(self): detected_format = file_format elif not file_extension: for extension in extensions: - file_exists.append(os.path.isfile(self.source + extension)) + file_exists.append( + os.path.isfile(self.source + extension) + ) if all(file_exists): detected_format = file_format return detected_format @@ -127,7 +134,7 @@ def _directory_contains_gon_data(self): if not os.listdir(self.source): return False for element in os.listdir(self.source): - if 'gon' in element: + if "gon" in element: check_gon_filenames.append(True) else: check_gon_filenames.append(False) @@ -140,7 +147,7 @@ def _directory_contains_amplitude_sweep_data(self): if not os.listdir(self.source): return False for element in os.listdir(self.source): - if 'mod' in element: + if "mod" in element: check_modamp_filenames.append(True) else: check_modamp_filenames.append(False) @@ -153,7 +160,7 @@ def _directory_contains_power_sweep_data(self): if not os.listdir(self.source): return False for element in os.listdir(self.source): - if 'pow' in element: + if "pow" in element: check_powersweep_filenames.append(True) else: check_powersweep_filenames.append(False) diff --git a/cwepr/io/magnettech.py b/cwepr/io/magnettech.py index c7d1b7a..1d08f7a 100644 --- a/cwepr/io/magnettech.py +++ b/cwepr/io/magnettech.py @@ -83,15 +83,15 @@ class MagnettechXMLImporter(aspecd.io.DatasetImporter): """ - def __init__(self, source=''): + def __init__(self, source=""): super().__init__(source=source) # public properties self.root = None - self.full_filename = '' + self.full_filename = "" self.load_infofile = True self.xml_metadata = {} - self.parameters['data_curve_type'] = 'MW_Absorption' - self.parameters['axis_curve_type'] = 'BField' + self.parameters["data_curve_type"] = "MW_Absorption" + self.parameters["axis_curve_type"] = "BField" # private properties self._infofile = aspecd.infofile.Infofile() self._data_curve = None @@ -124,55 +124,60 @@ def _import(self): def _clean_up_filename(self): if self.source: - if self.source.endswith('.xml'): + if self.source.endswith(".xml"): self.full_filename = self.source self.source = self.source[:-4] else: - self.full_filename = self.source + '.xml' + self.full_filename = self.source + ".xml" def _get_xml_root_element(self): """Get the root object/name of 
the xml document.""" if not self.source: - raise cwepr.exceptions.MissingPathError('No path provided') + raise cwepr.exceptions.MissingPathError("No path provided") if not os.path.exists(self.full_filename): - raise FileNotFoundError('XML file not found.') + raise FileNotFoundError("XML file not found.") self.root = et.parse(self.full_filename).getroot() def _choose_data_source(self): - for curve in self.root[0][0][1]: - if self.parameters['data_curve_type'] == curve.attrib['YType']: + if self.parameters["data_curve_type"] == curve.attrib["YType"]: self._data_curve = curve - if self.parameters['axis_curve_type'] == curve.attrib['YType']: + if self.parameters["axis_curve_type"] == curve.attrib["YType"]: self._axis_curve = curve def _get_raw_data(self): - self._xvalues = \ - self._convert_base64string_to_np_array(self._axis_curve.text) - self._yvalues = \ - self._convert_base64string_to_np_array(self._data_curve.text) + self._xvalues = self._convert_base64string_to_np_array( + self._axis_curve.text + ) + self._yvalues = self._convert_base64string_to_np_array( + self._data_curve.text + ) @staticmethod def _convert_base64string_to_np_array(string): # Split string at "=" and add the delimiter afterwards again tmpdata = [x + "=" for x in string.split("=") if x] # Decode and unpack list of strings - data = [struct.unpack('d', base64.b64decode(x)) for x in tmpdata] + data = [struct.unpack("d", base64.b64decode(x)) for x in tmpdata] data = [i[0] for i in data] return np.asarray(data) def _create_x_axis(self): - b_field_x_offset = float(self._axis_curve.attrib['XOffset']) - b_field_x_slope = float(self._axis_curve.attrib['XSlope']) - mw_abs_x_offset = float(self._data_curve.attrib['XOffset']) - mw_abs_x_slope = float(self._data_curve.attrib['XSlope']) - - mw_x = mw_abs_x_offset + \ - np.linspace(0, len(self._yvalues) - 1, num=len(self._yvalues)) \ + b_field_x_offset = float(self._axis_curve.attrib["XOffset"]) + b_field_x_slope = float(self._axis_curve.attrib["XSlope"]) + mw_abs_x_offset = float(self._data_curve.attrib["XOffset"]) + mw_abs_x_slope = float(self._data_curve.attrib["XSlope"]) + + mw_x = ( + mw_abs_x_offset + + np.linspace(0, len(self._yvalues) - 1, num=len(self._yvalues)) * mw_abs_x_slope - b_field_x = b_field_x_offset + \ - np.linspace(0, len(self._xvalues) - 1, - num=len(self._xvalues)) * b_field_x_slope + ) + b_field_x = ( + b_field_x_offset + + np.linspace(0, len(self._xvalues) - 1, num=len(self._xvalues)) + * b_field_x_slope + ) self._xvalues = np.interp(mw_x, b_field_x, self._xvalues) def _extract_metadata_from_xml(self): @@ -180,11 +185,13 @@ def _extract_metadata_from_xml(self): xml_metadata = self.root[0][0][0].attrib xml_metadata.update(self.root[0][0].attrib) for childnode in self.root[0][0][0][0]: - if 'Unit' in childnode.attrib: - xml_metadata[childnode.attrib['Name']] = \ - {'value': childnode.text, 'unit': childnode.attrib['Unit']} + if "Unit" in childnode.attrib: + xml_metadata[childnode.attrib["Name"]] = { + "value": childnode.text, + "unit": childnode.attrib["Unit"], + } else: - xml_metadata[childnode.attrib['Name']] = childnode.text + xml_metadata[childnode.attrib["Name"]] = childnode.text self.xml_metadata = xml_metadata def _cut_data(self): @@ -195,28 +202,35 @@ def _cut_data(self): def _get_magnetic_field_range(self): """Get magnetic field range from preprocessed XML data.""" - if not isinstance(self.xml_metadata['Bfrom'], dict): - self.xml_metadata['Bfrom'] = \ - {'value': self.xml_metadata['Bfrom'], 'unit': 'mT'} - self.xml_metadata['Bto'] = \ - {'value': 
self.xml_metadata['Bto'], 'unit': 'mT'} - self._bfrom = float(self.xml_metadata['Bfrom']['value']) - self._bto = float(self.xml_metadata['Bto']['value']) + if not isinstance(self.xml_metadata["Bfrom"], dict): + self.xml_metadata["Bfrom"] = { + "value": self.xml_metadata["Bfrom"], + "unit": "mT", + } + self.xml_metadata["Bto"] = { + "value": self.xml_metadata["Bto"], + "unit": "mT", + } + self._bfrom = float(self.xml_metadata["Bfrom"]["value"]) + self._bto = float(self.xml_metadata["Bto"]["value"]) def _hand_data_to_dataset(self): self.dataset.data.data = self._yvalues self.dataset.data.axes[0].values = self._xvalues - self.dataset.data.axes[0].unit = 'mT' - self.dataset.data.axes[0].quantity = 'magnetic field' - self.dataset.data.axes[1].unit = 'mV' - self.dataset.data.axes[1].quantity = 'intensity' + self.dataset.data.axes[0].unit = "mT" + self.dataset.data.axes[0].quantity = "magnetic field" + self.dataset.data.axes[1].unit = "mV" + self.dataset.data.axes[1].quantity = "intensity" def _infofile_exists(self): if self._get_infofile_name() and os.path.exists( - self._get_infofile_name()[0]): + self._get_infofile_name()[0] + ): return True - print(f'No infofile found for dataset {os.path.split(self.source)[1]},' - f' import continued without infofile.') + print( + f"No infofile found for dataset {os.path.split(self.source)[1]}," + f" import continued without infofile." + ) return False def _load_infofile(self): @@ -226,11 +240,11 @@ def _load_infofile(self): self._infofile.parse() def _get_infofile_name(self): - return glob.glob(self.source + '.info') + return glob.glob(self.source + ".info") def _assign_comment_as_annotation(self): comment = aspecd.annotation.Comment() - comment.comment = self._infofile.parameters['COMMENT'] + comment.comment = self._infofile.parameters["COMMENT"] self.dataset.annotate(comment) def _map_metadata(self, infofile_version): @@ -238,91 +252,114 @@ def _map_metadata(self, infofile_version): mapper = aspecd.metadata.MetadataMapper() mapper.version = infofile_version mapper.metadata = self._infofile.parameters - mapper.recipe_filename = 'cwepr@metadata_mapper_cwepr.yaml' + mapper.recipe_filename = "cwepr@metadata_mapper_cwepr.yaml" mapper.map() self.dataset.metadata.from_dict(mapper.metadata) def _map_infofile(self): """Bring the metadata to a given format.""" - infofile_version = self._infofile.infofile_info['version'] + infofile_version = self._infofile.infofile_info["version"] self._map_metadata(infofile_version) self._assign_comment_as_annotation() def _map_metadata_from_xml(self): - self.dataset.metadata.temperature_control.temperature.value = \ - float(self.xml_metadata['Temperature']) + 273.15 - self.dataset.metadata.temperature_control.temperature.unit = 'K' - if self.xml_metadata['Type'] == 'single': - self.dataset.metadata.experiment.type = \ - self.xml_metadata['KineticMode'] + self.dataset.metadata.temperature_control.temperature.value = ( + float(self.xml_metadata["Temperature"]) + 273.15 + ) + self.dataset.metadata.temperature_control.temperature.unit = "K" + if self.xml_metadata["Type"] == "single": + self.dataset.metadata.experiment.type = self.xml_metadata[ + "KineticMode" + ] else: - self.dataset.metadata.experiment.type = self.xml_metadata['Type'] - self.dataset.metadata.signal_channel.accumulations = \ - self.xml_metadata['MeasurementCount'] - self.dataset.metadata.experiment.variable_parameter = \ - self.xml_metadata['XDatasource'] - self.dataset.metadata.spectrometer.from_dict({ - 'model': self.xml_metadata['Device'], - 'software': 
self.xml_metadata['SWV']}) + self.dataset.metadata.experiment.type = self.xml_metadata["Type"] + self.dataset.metadata.signal_channel.accumulations = ( + self.xml_metadata["MeasurementCount"] + ) + self.dataset.metadata.experiment.variable_parameter = ( + self.xml_metadata["XDatasource"] + ) + self.dataset.metadata.spectrometer.from_dict( + { + "model": self.xml_metadata["Device"], + "software": self.xml_metadata["SWV"], + } + ) self.dataset.metadata.magnetic_field.start.from_string( - self._dict_to_string(self.xml_metadata['Bfrom'])) + self._dict_to_string(self.xml_metadata["Bfrom"]) + ) self.dataset.metadata.magnetic_field.stop.from_string( - self._dict_to_string(self.xml_metadata['Bto'])) - self.dataset.metadata.magnetic_field.sweep_width.value = \ - float(self.xml_metadata['Bto']['value']) - \ - float(self.xml_metadata['Bfrom']['value']) - self.dataset.metadata.magnetic_field.sweep_width.unit = \ - self.xml_metadata['Bfrom']['unit'] - self.dataset.metadata.magnetic_field.points = \ - len(self.dataset.data.axes[0].values) - self.dataset.metadata.magnetic_field.field_probe_type = 'Hall' - self.dataset.metadata.magnetic_field.field_probe_model = 'builtin' + self._dict_to_string(self.xml_metadata["Bto"]) + ) + self.dataset.metadata.magnetic_field.sweep_width.value = float( + self.xml_metadata["Bto"]["value"] + ) - float(self.xml_metadata["Bfrom"]["value"]) + self.dataset.metadata.magnetic_field.sweep_width.unit = ( + self.xml_metadata["Bfrom"]["unit"] + ) + self.dataset.metadata.magnetic_field.points = len( + self.dataset.data.axes[0].values + ) + self.dataset.metadata.magnetic_field.field_probe_type = "Hall" + self.dataset.metadata.magnetic_field.field_probe_model = "builtin" if self._xvalues[-1] - self._xvalues[0] > 0: - self.dataset.metadata.magnetic_field.sequence = 'up' + self.dataset.metadata.magnetic_field.sequence = "up" else: - self.dataset.metadata.magnetic_field.sequence = 'down' - self.dataset.metadata.magnetic_field.controller = 'builtin' - self.dataset.metadata.magnetic_field.power_supply = 'builtin' - self.dataset.metadata.bridge.model = 'builtin' - self.dataset.metadata.bridge.controller = 'builtin' - self.dataset.metadata.bridge.power.from_string(self._dict_to_string( - self.xml_metadata['MicrowavePower'])) - self.dataset.metadata.bridge.detection = 'mixer' - self.dataset.metadata.bridge.frequency_counter = 'builtin' - self.dataset.metadata.bridge.mw_frequency.value = \ - float(self.xml_metadata['MwFreq']) - self.dataset.metadata.bridge.mw_frequency.unit = 'GHz' - self.dataset.metadata.bridge.q_value = \ - float(self.xml_metadata['QFactor']) - self.dataset.metadata.signal_channel.model = 'builtin' - self.dataset.metadata.signal_channel.modulation_amplifier = 'builtin' - self.dataset.metadata.signal_channel.accumulations = \ - int(self.xml_metadata['Accumulations']) + self.dataset.metadata.magnetic_field.sequence = "down" + self.dataset.metadata.magnetic_field.controller = "builtin" + self.dataset.metadata.magnetic_field.power_supply = "builtin" + self.dataset.metadata.bridge.model = "builtin" + self.dataset.metadata.bridge.controller = "builtin" + self.dataset.metadata.bridge.power.from_string( + self._dict_to_string(self.xml_metadata["MicrowavePower"]) + ) + self.dataset.metadata.bridge.detection = "mixer" + self.dataset.metadata.bridge.frequency_counter = "builtin" + self.dataset.metadata.bridge.mw_frequency.value = float( + self.xml_metadata["MwFreq"] + ) + self.dataset.metadata.bridge.mw_frequency.unit = "GHz" + self.dataset.metadata.bridge.q_value = float( + 
self.xml_metadata["QFactor"] + ) + self.dataset.metadata.signal_channel.model = "builtin" + self.dataset.metadata.signal_channel.modulation_amplifier = "builtin" + self.dataset.metadata.signal_channel.accumulations = int( + self.xml_metadata["Accumulations"] + ) self.dataset.metadata.signal_channel.modulation_frequency.from_string( - self._dict_to_string(self.xml_metadata['ModulationFreq'])) + self._dict_to_string(self.xml_metadata["ModulationFreq"]) + ) self.dataset.metadata.signal_channel.modulation_amplitude.from_string( - self._dict_to_string(self.xml_metadata['Modulation'])) - self.dataset.metadata.signal_channel.phase.value = \ - float(self.xml_metadata['Phase']) - self.dataset.metadata.probehead.model = 'builtin' - self.dataset.metadata.probehead.coupling = 'critical' - self.dataset.metadata.digital_filter.mode = \ - self.xml_metadata['FilterType'] + self._dict_to_string(self.xml_metadata["Modulation"]) + ) + self.dataset.metadata.signal_channel.phase.value = float( + self.xml_metadata["Phase"] + ) + self.dataset.metadata.probehead.model = "builtin" + self.dataset.metadata.probehead.coupling = "critical" + self.dataset.metadata.digital_filter.mode = self.xml_metadata[ + "FilterType" + ] self.dataset.metadata.digital_filter.parameter.from_string( - (self.xml_metadata['FilterPrm0'])) + (self.xml_metadata["FilterPrm0"]) + ) def _map_dates(self): self.dataset.metadata.measurement.start = dateutil.parser.parse( - self.xml_metadata['Timestamp']) - end = dateutil.parser.parse(self.root.attrib['Timestamp']) + self.xml_metadata["Timestamp"] + ) + end = dateutil.parser.parse(self.root.attrib["Timestamp"]) diff = self.dataset.metadata.measurement.start.tzinfo self.dataset.metadata.measurement.end = end.astimezone(diff) - assert (self.dataset.metadata.measurement.start < - self.dataset.metadata.measurement.end) + assert ( + self.dataset.metadata.measurement.start + < self.dataset.metadata.measurement.end + ) @staticmethod def _dict_to_string(dict_): - return dict_['value'] + ' ' + dict_['unit'] + return dict_["value"] + " " + dict_["unit"] class GoniometerSweepImporter(aspecd.io.DatasetImporter): @@ -339,7 +376,7 @@ class GoniometerSweepImporter(aspecd.io.DatasetImporter): """ - def __init__(self, source=''): + def __init__(self, source=""): super().__init__(source=source) self.dataset = cwepr.dataset.ExperimentalDataset() self.filenames = None @@ -361,13 +398,12 @@ def _import(self): def _get_filenames(self): if not os.path.exists(self.source): raise FileNotFoundError - self.filenames = glob.glob(os.path.join(self.source, '*[0-9]dg*.xml')) + self.filenames = glob.glob(os.path.join(self.source, "*[0-9]dg*.xml")) def _sort_filenames(self): - - def sort_key(string=''): - num = string.split('gon_')[1] - num = num.split('dg')[0] + def sort_key(string=""): + num = string.split("gon_")[1] + num = num.split("dg")[0] return int(num) self.filenames = sorted(self.filenames, key=sort_key) @@ -381,12 +417,13 @@ def _import_all_spectra_to_list(self): importer.load_infofile = False self._data.append(cwepr.dataset.ExperimentalDataset()) self._data[num].import_from(importer) - self._angles.append(float(importer.xml_metadata['GonAngle'])) + self._angles.append(float(importer.xml_metadata["GonAngle"])) # bring all measurements to the frequency of the first if num > 0: freq_correction = cwepr.processing.FrequencyCorrection() - freq_correction.parameters['frequency'] = \ - self._data[0].metadata.bridge.mw_frequency.value + freq_correction.parameters["frequency"] = self._data[ + 0 + 
].metadata.bridge.mw_frequency.value self._data[num].process(freq_correction) for idx, angle in enumerate(self._angles): @@ -399,7 +436,7 @@ def _bring_axes_to_same_values(self): extract_range.process() def _interpolation_to_same_number_of_points(self, interpolate, num): - interpolate.parameters['points'] = len(self._data[0].data.data) + interpolate.parameters["points"] = len(self._data[0].data.data) self._data[num].process(interpolate) def _hand_data_to_dataset(self): @@ -417,8 +454,8 @@ def _fill_field_axis(self): def _fill_angle_axis(self): self.dataset.data.axes[1].values = np.asarray(self._angles) - self.dataset.data.axes[1].unit = 'degree' - self.dataset.data.axes[1].quantity = 'goniometer angle' + self.dataset.data.axes[1].unit = "degree" + self.dataset.data.axes[1].quantity = "goniometer angle" def _get_metadata(self): """Import metadata from infofile. @@ -434,30 +471,33 @@ def _get_metadata(self): def _infofile_exists(self): if self._get_infofile_name() and os.path.exists( - self._get_infofile_name()[0]): + self._get_infofile_name()[0] + ): return True - print(f'No infofile found for dataset ' - f'{os.path.split(self.source)[1]}, import continued without ' - f'infofile.') + print( + f"No infofile found for dataset " + f"{os.path.split(self.source)[1]}, import continued without " + f"infofile." + ) return False def _load_infofile(self): """Import infofile and parse it.""" infofile_name = self._get_infofile_name() if not infofile_name: - raise FileNotFoundError('Infofile not found') + raise FileNotFoundError("Infofile not found") self._infofile.filename = infofile_name[0] self._infofile.parse() def _get_infofile_name(self): - if self.source.endswith('/'): + if self.source.endswith("/"): folder_path = os.path.split(self.source)[0] - return glob.glob(folder_path + '.info') - return glob.glob(self.source + '.info') + return glob.glob(folder_path + ".info") + return glob.glob(self.source + ".info") def _assign_comment_as_annotation(self): comment = aspecd.annotation.Comment() - comment.comment = self._infofile.parameters['COMMENT'] + comment.comment = self._infofile.parameters["COMMENT"] self.dataset.annotate(comment) def _map_metadata(self, infofile_version): @@ -465,30 +505,32 @@ def _map_metadata(self, infofile_version): mapper = aspecd.metadata.MetadataMapper() mapper.version = infofile_version mapper.metadata = self._infofile.parameters - mapper.recipe_filename = 'cwepr@metadata_mapper_cwepr.yaml' + mapper.recipe_filename = "cwepr@metadata_mapper_cwepr.yaml" mapper.map() self.dataset.metadata.from_dict(mapper.metadata) self._convert_values_to_strings() def _map_infofile(self): """Bring the metadata to a given format.""" - infofile_version = self._infofile.infofile_info['version'] + infofile_version = self._infofile.infofile_info["version"] self._map_metadata(infofile_version) self._assign_comment_as_annotation() def _convert_values_to_strings(self): def _convert_(value): if isinstance(value, str): - match = re.match(r'[\d+.]', value) + match = re.match(r"[\d+.]", value) if match: value = float(value) return value # ugly but works - self.dataset.metadata.signal_channel.accumulations = \ - _convert_(self.dataset.metadata.signal_channel.accumulations) - self.dataset.metadata.bridge.q_value = \ - _convert_(self.dataset.metadata.bridge.q_value) + self.dataset.metadata.signal_channel.accumulations = _convert_( + self.dataset.metadata.signal_channel.accumulations + ) + self.dataset.metadata.bridge.q_value = _convert_( + self.dataset.metadata.bridge.q_value + ) class 
AmplitudeSweepImporter(aspecd.io.DatasetImporter): @@ -522,7 +564,7 @@ class AmplitudeSweepImporter(aspecd.io.DatasetImporter): """ - def __init__(self, source=''): + def __init__(self, source=""): super().__init__(source=source) self.dataset = cwepr.dataset.ExperimentalDataset() self.filenames = None @@ -545,13 +587,12 @@ def _get_filenames(self): if not os.path.exists(self.source): raise FileNotFoundError if not self.filenames: - self.filenames = glob.glob(os.path.join(self.source, '*mod*.xml')) + self.filenames = glob.glob(os.path.join(self.source, "*mod*.xml")) def _sort_filenames(self): - - def sort_key(string=''): - num = string.split('mod_')[1] - num = num.split('mT')[0] + def sort_key(string=""): + num = string.split("mod_")[1] + num = num.split("mT")[0] return int(num) self.filenames = sorted(self.filenames, key=sort_key) @@ -565,13 +606,15 @@ def _import_all_spectra_to_list(self): self._data.append(cwepr.dataset.ExperimentalDataset()) self._data[num].import_from(importer) self._amplitudes.append( - self._data[num].metadata.signal_channel.modulation_amplitude) + self._data[num].metadata.signal_channel.modulation_amplitude + ) # bring all measurements to the frequency of the first if num > 0: freq_correction = cwepr.processing.FrequencyCorrection() - freq_correction.parameters['frequency'] = \ - self._data[0].metadata.bridge.mw_frequency.value + freq_correction.parameters["frequency"] = self._data[ + 0 + ].metadata.bridge.mw_frequency.value self._data[num].process(freq_correction) def _bring_axes_to_same_values(self): @@ -581,9 +624,9 @@ def _bring_axes_to_same_values(self): def _check_amplitudes_and_put_into_list_as_axis(self): for amplitude in self._amplitudes: - if amplitude.unit == 'G': + if amplitude.unit == "G": amplitude.value /= 10 - amplitude.unit = 'mT' + amplitude.unit = "mT" self._amplitude_list.append(amplitude.value) def _hand_data_to_dataset(self): @@ -601,8 +644,8 @@ def _fill_field_axis(self): def _fill_amplitude_axis(self): self.dataset.data.axes[1].values = np.asarray(self._amplitude_list) - self.dataset.data.axes[1].unit = 'mT' - self.dataset.data.axes[1].quantity = 'modulation amplitude' + self.dataset.data.axes[1].unit = "mT" + self.dataset.data.axes[1].quantity = "modulation amplitude" def _import_collected_metadata(self): self._import_fixed_metadata() @@ -611,68 +654,91 @@ def _import_collected_metadata(self): self._import_date_time_metadata() def _import_fixed_metadata(self): - self.dataset.metadata.experiment.type = \ - self._data[0].metadata.experiment.type - self.dataset.metadata.experiment.variable_parameter = 'Modulation ' \ - 'Amplitude' - self.dataset.metadata.signal_channel.accumulations = \ - self._data[0].metadata.experiment.runs - self.dataset.metadata.spectrometer = self._data[0].metadata.spectrometer + self.dataset.metadata.experiment.type = self._data[ + 0 + ].metadata.experiment.type + self.dataset.metadata.experiment.variable_parameter = ( + "Modulation " "Amplitude" + ) + self.dataset.metadata.signal_channel.accumulations = self._data[ + 0 + ].metadata.experiment.runs + self.dataset.metadata.spectrometer = self._data[ + 0 + ].metadata.spectrometer self.dataset.metadata.magnetic_field.start.from_string( - (f"{self.dataset.data.axes[0].values[0]:.4f} " + - self._data[0].metadata.magnetic_field.start.unit)) + ( + f"{self.dataset.data.axes[0].values[0]:.4f} " + + self._data[0].metadata.magnetic_field.start.unit + ) + ) self.dataset.metadata.magnetic_field.stop.from_string( - f"{self.dataset.data.axes[0].values[-1]:.4f} " + - 
self._data[0].metadata.magnetic_field.stop.unit) - self.dataset.metadata.magnetic_field.sweep_width.value = \ - self.dataset.metadata.magnetic_field.stop.value - \ - self.dataset.metadata.magnetic_field.start.value - self.dataset.metadata.magnetic_field.sweep_width.unit = \ + f"{self.dataset.data.axes[0].values[-1]:.4f} " + + self._data[0].metadata.magnetic_field.stop.unit + ) + self.dataset.metadata.magnetic_field.sweep_width.value = ( + self.dataset.metadata.magnetic_field.stop.value + - self.dataset.metadata.magnetic_field.start.value + ) + self.dataset.metadata.magnetic_field.sweep_width.unit = ( self.dataset.metadata.magnetic_field.stop.unit - self.dataset.metadata.magnetic_field.points = \ - len(self.dataset.data.axes[0].values) - self.dataset.metadata.magnetic_field.field_probe_type = 'Hall' - self.dataset.metadata.magnetic_field.field_probe_model = 'builtin' - if self.dataset.data.axes[0].values[-1] - \ - self.dataset.data.axes[0].values[0] > 0: - self.dataset.metadata.magnetic_field.sequence = 'up' + ) + self.dataset.metadata.magnetic_field.points = len( + self.dataset.data.axes[0].values + ) + self.dataset.metadata.magnetic_field.field_probe_type = "Hall" + self.dataset.metadata.magnetic_field.field_probe_model = "builtin" + if ( + self.dataset.data.axes[0].values[-1] + - self.dataset.data.axes[0].values[0] + > 0 + ): + self.dataset.metadata.magnetic_field.sequence = "up" else: - self.dataset.metadata.magnetic_field.sequence = 'down' - self.dataset.metadata.magnetic_field.controller = 'builtin' - self.dataset.metadata.magnetic_field.power_supply = 'builtin' - self.dataset.metadata.bridge.model = 'builtin' - self.dataset.metadata.bridge.controller = 'builtin' - self.dataset.metadata.bridge.power = self._data[0].metadata.bridge.power - self.dataset.metadata.bridge.detection = 'mixer' - self.dataset.metadata.bridge.frequency_counter = 'builtin' - self.dataset.metadata.bridge.mw_frequency = \ - self._data[0].metadata.bridge.mw_frequency - self.dataset.metadata.signal_channel.model = 'builtin' - self.dataset.metadata.signal_channel.modulation_amplifier = 'builtin' - self.dataset.metadata.signal_channel.accumulations = \ - self._data[0].metadata.signal_channel.accumulations - self.dataset.metadata.signal_channel.modulation_frequency = \ + self.dataset.metadata.magnetic_field.sequence = "down" + self.dataset.metadata.magnetic_field.controller = "builtin" + self.dataset.metadata.magnetic_field.power_supply = "builtin" + self.dataset.metadata.bridge.model = "builtin" + self.dataset.metadata.bridge.controller = "builtin" + self.dataset.metadata.bridge.power = self._data[ + 0 + ].metadata.bridge.power + self.dataset.metadata.bridge.detection = "mixer" + self.dataset.metadata.bridge.frequency_counter = "builtin" + self.dataset.metadata.bridge.mw_frequency = self._data[ + 0 + ].metadata.bridge.mw_frequency + self.dataset.metadata.signal_channel.model = "builtin" + self.dataset.metadata.signal_channel.modulation_amplifier = "builtin" + self.dataset.metadata.signal_channel.accumulations = self._data[ + 0 + ].metadata.signal_channel.accumulations + self.dataset.metadata.signal_channel.modulation_frequency = ( self._data[0].metadata.signal_channel.modulation_frequency - self.dataset.metadata.signal_channel.phase.value = \ - self._data[0].metadata.signal_channel.phase.value - self.dataset.metadata.probehead.model = 'builtin' - self.dataset.metadata.probehead.coupling = 'critical' + ) + self.dataset.metadata.signal_channel.phase.value = self._data[ + 0 + ].metadata.signal_channel.phase.value + 
self.dataset.metadata.probehead.model = "builtin" + self.dataset.metadata.probehead.coupling = "critical" def _import_variable_metadata(self): temperatures = [] qfactors = [] for _, dataset_ in enumerate(self._data): temperatures.append( - dataset_.metadata.temperature_control.temperature.value) - qfactors.append( - dataset_.metadata.bridge.q_value) + dataset_.metadata.temperature_control.temperature.value + ) + qfactors.append(dataset_.metadata.bridge.q_value) temperature = self._average_and_check_for_deviation(temperatures) qfactor = self._average_and_check_for_deviation(qfactors) - self.dataset.metadata.temperature_control.temperature.value = \ + self.dataset.metadata.temperature_control.temperature.value = ( temperature - self.dataset.metadata.temperature_control.temperature.unit = \ + ) + self.dataset.metadata.temperature_control.temperature.unit = ( self._data[0].metadata.temperature_control.temperature.unit + ) self.dataset.metadata.bridge.q_value = qfactor @staticmethod @@ -680,8 +746,10 @@ def _average_and_check_for_deviation(list_of_values, offset_range=0.1): value = np.average(list_of_values) value_range = max(list_of_values) - min(list_of_values) if value_range > offset_range * value: - logger.warning('Value deviation is more than 10 % of the value ' - 'itself. Please check the measurement conditions.') + logger.warning( + "Value deviation is more than 10 % of the value " + "itself. Please check the measurement conditions." + ) # TODO: Check that logging is working with ASpecD # TODO: implement offset range in parameters, make this method # non-static? @@ -691,15 +759,15 @@ def _import_date_time_metadata(self): starts = [] ends = [] for _, dataset_ in enumerate(self._data): - starts.append( - dataset_.metadata.measurement.start) - ends.append( - dataset_.metadata.measurement.end) + starts.append(dataset_.metadata.measurement.start) + ends.append(dataset_.metadata.measurement.end) - self.dataset.metadata.measurement.start = \ - min(starts).strftime("%Y-%m-%d %H:%M:%S") - self.dataset.metadata.measurement.end = \ - max(ends).strftime("%Y-%m-%d %H:%M:%S") + self.dataset.metadata.measurement.start = min(starts).strftime( + "%Y-%m-%d %H:%M:%S" + ) + self.dataset.metadata.measurement.end = max(ends).strftime( + "%Y-%m-%d %H:%M:%S" + ) class PowerSweepImporter(aspecd.io.DatasetImporter): @@ -735,7 +803,7 @@ class PowerSweepImporter(aspecd.io.DatasetImporter): """ - def __init__(self, source=''): + def __init__(self, source=""): super().__init__(source=source) self.dataset = cwepr.dataset.ExperimentalDataset() self.filenames = None @@ -758,13 +826,12 @@ def _get_filenames(self): if not os.path.exists(self.source): raise FileNotFoundError if not self.filenames: - self.filenames = glob.glob(os.path.join(self.source, '*pow*.xml')) + self.filenames = glob.glob(os.path.join(self.source, "*pow*.xml")) def _sort_filenames(self): - - def sort_key(string=''): - num = string.split('pow_')[1] - num = num.split('mW')[0] + def sort_key(string=""): + num = string.split("pow_")[1] + num = num.split("mW")[0] return int(num) self.filenames = sorted(self.filenames, key=sort_key) @@ -777,14 +844,14 @@ def _import_all_spectra_to_list(self): importer.load_infofile = False self._data.append(cwepr.dataset.ExperimentalDataset()) self._data[num].import_from(importer) - self._power.append( - self._data[num].metadata.bridge.power) + self._power.append(self._data[num].metadata.bridge.power) # bring all measurements to the frequency of the first if num > 0: freq_correction = cwepr.processing.FrequencyCorrection() 
- freq_correction.parameters['frequency'] = \ - self._data[0].metadata.bridge.mw_frequency.value + freq_correction.parameters["frequency"] = self._data[ + 0 + ].metadata.bridge.mw_frequency.value self._data[num].process(freq_correction) def _bring_axes_to_same_values(self): @@ -811,8 +878,8 @@ def _fill_field_axis(self): def _fill_power_axis(self): self.dataset.data.axes[1].values = np.asarray(self._power_list) - self.dataset.data.axes[1].unit = 'mW' - self.dataset.data.axes[1].quantity = 'microwave power' + self.dataset.data.axes[1].unit = "mW" + self.dataset.data.axes[1].quantity = "microwave power" def _import_collected_metadata(self): self._import_fixed_metadata() @@ -821,68 +888,89 @@ def _import_collected_metadata(self): self._import_date_time_metadata() def _import_fixed_metadata(self): - self.dataset.metadata.experiment.type = \ - self._data[0].metadata.experiment.type - self.dataset.metadata.experiment.variable_parameter = 'Modulation ' \ - 'Amplitude' - self.dataset.metadata.signal_channel.accumulations = \ - self._data[0].metadata.experiment.runs - self.dataset.metadata.spectrometer = self._data[0].metadata.spectrometer + self.dataset.metadata.experiment.type = self._data[ + 0 + ].metadata.experiment.type + self.dataset.metadata.experiment.variable_parameter = ( + "Modulation " "Amplitude" + ) + self.dataset.metadata.signal_channel.accumulations = self._data[ + 0 + ].metadata.experiment.runs + self.dataset.metadata.spectrometer = self._data[ + 0 + ].metadata.spectrometer self.dataset.metadata.magnetic_field.start.from_string( - f"{self.dataset.data.axes[0].values[0]:.4f} " + - self._data[0].metadata.magnetic_field.start.unit) + f"{self.dataset.data.axes[0].values[0]:.4f} " + + self._data[0].metadata.magnetic_field.start.unit + ) self.dataset.metadata.magnetic_field.stop.from_string( - f"{self.dataset.data.axes[0].values[-1]:.4f} " + - self._data[0].metadata.magnetic_field.stop.unit) - self.dataset.metadata.magnetic_field.sweep_width.value = \ - self.dataset.metadata.magnetic_field.stop.value - \ - self.dataset.metadata.magnetic_field.start.value - self.dataset.metadata.magnetic_field.sweep_width.unit = \ + f"{self.dataset.data.axes[0].values[-1]:.4f} " + + self._data[0].metadata.magnetic_field.stop.unit + ) + self.dataset.metadata.magnetic_field.sweep_width.value = ( + self.dataset.metadata.magnetic_field.stop.value + - self.dataset.metadata.magnetic_field.start.value + ) + self.dataset.metadata.magnetic_field.sweep_width.unit = ( self.dataset.metadata.magnetic_field.stop.unit - self.dataset.metadata.magnetic_field.points = \ - len(self.dataset.data.axes[0].values) - self.dataset.metadata.magnetic_field.field_probe_type = 'Hall' - self.dataset.metadata.magnetic_field.field_probe_model = 'builtin' - if self.dataset.data.axes[0].values[-1] - \ - self.dataset.data.axes[0].values[0] > 0: - self.dataset.metadata.magnetic_field.sequence = 'up' + ) + self.dataset.metadata.magnetic_field.points = len( + self.dataset.data.axes[0].values + ) + self.dataset.metadata.magnetic_field.field_probe_type = "Hall" + self.dataset.metadata.magnetic_field.field_probe_model = "builtin" + if ( + self.dataset.data.axes[0].values[-1] + - self.dataset.data.axes[0].values[0] + > 0 + ): + self.dataset.metadata.magnetic_field.sequence = "up" else: - self.dataset.metadata.magnetic_field.sequence = 'down' - self.dataset.metadata.magnetic_field.controller = 'builtin' - self.dataset.metadata.magnetic_field.power_supply = 'builtin' - self.dataset.metadata.bridge.model = 'builtin' - 
self.dataset.metadata.bridge.controller = 'builtin' - self.dataset.metadata.bridge.power = self._data[0].metadata.bridge.power - self.dataset.metadata.bridge.detection = 'mixer' - self.dataset.metadata.bridge.frequency_counter = 'builtin' - self.dataset.metadata.bridge.mw_frequency = \ - self._data[0].metadata.bridge.mw_frequency - self.dataset.metadata.signal_channel.model = 'builtin' - self.dataset.metadata.signal_channel.modulation_amplifier = 'builtin' - self.dataset.metadata.signal_channel.accumulations = \ - self._data[0].metadata.signal_channel.accumulations - self.dataset.metadata.signal_channel.modulation_frequency = \ + self.dataset.metadata.magnetic_field.sequence = "down" + self.dataset.metadata.magnetic_field.controller = "builtin" + self.dataset.metadata.magnetic_field.power_supply = "builtin" + self.dataset.metadata.bridge.model = "builtin" + self.dataset.metadata.bridge.controller = "builtin" + self.dataset.metadata.bridge.power = self._data[ + 0 + ].metadata.bridge.power + self.dataset.metadata.bridge.detection = "mixer" + self.dataset.metadata.bridge.frequency_counter = "builtin" + self.dataset.metadata.bridge.mw_frequency = self._data[ + 0 + ].metadata.bridge.mw_frequency + self.dataset.metadata.signal_channel.model = "builtin" + self.dataset.metadata.signal_channel.modulation_amplifier = "builtin" + self.dataset.metadata.signal_channel.accumulations = self._data[ + 0 + ].metadata.signal_channel.accumulations + self.dataset.metadata.signal_channel.modulation_frequency = ( self._data[0].metadata.signal_channel.modulation_frequency - self.dataset.metadata.signal_channel.phase.value = \ - self._data[0].metadata.signal_channel.phase.value - self.dataset.metadata.probehead.model = 'builtin' - self.dataset.metadata.probehead.coupling = 'critical' + ) + self.dataset.metadata.signal_channel.phase.value = self._data[ + 0 + ].metadata.signal_channel.phase.value + self.dataset.metadata.probehead.model = "builtin" + self.dataset.metadata.probehead.coupling = "critical" def _import_variable_metadata(self): temperatures = [] qfactors = [] for _, dataset_ in enumerate(self._data): temperatures.append( - dataset_.metadata.temperature_control.temperature.value) - qfactors.append( - dataset_.metadata.bridge.q_value) + dataset_.metadata.temperature_control.temperature.value + ) + qfactors.append(dataset_.metadata.bridge.q_value) temperature = self._average_and_check_for_deviation(temperatures) qfactor = self._average_and_check_for_deviation(qfactors) - self.dataset.metadata.temperature_control.temperature.value = \ + self.dataset.metadata.temperature_control.temperature.value = ( temperature - self.dataset.metadata.temperature_control.temperature.unit = \ + ) + self.dataset.metadata.temperature_control.temperature.unit = ( self._data[0].metadata.temperature_control.temperature.unit + ) self.dataset.metadata.bridge.q_value = qfactor @staticmethod @@ -890,8 +978,10 @@ def _average_and_check_for_deviation(list_of_values, offset_range=0.1): value = np.average(list_of_values) value_range = max(list_of_values) - min(list_of_values) if value_range > offset_range * value: - logger.warning('Value deviation is more than 10 % of the value ' - 'itself. Please check the measurement conditions.') + logger.warning( + "Value deviation is more than 10 % of the value " + "itself. Please check the measurement conditions." + ) # TODO: Check that logging is working with ASpecD # TODO: implement offset range in parameters, make this method # non-static? 
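
The deviation check above warns whenever the spread of the collected Q values or temperatures exceeds ten percent of their mean. A minimal, standalone sketch of that logic, assuming plain Python lists and the standard logging module (the package methods operate on the collected metadata objects instead)::

    import logging

    import numpy as np

    logger = logging.getLogger(__name__)


    def average_with_deviation_check(values, offset_range=0.1):
        """Average values, warning if their spread exceeds offset_range * mean."""
        mean = np.average(values)
        spread = max(values) - min(values)
        if spread > offset_range * mean:
            logger.warning(
                "Value deviation is more than %.0f %% of the value itself. "
                "Please check the measurement conditions.",
                offset_range * 100,
            )
        return mean


    # Q values recorded for the individual spectra of a sweep (made-up numbers)
    print(average_with_deviation_check([3400, 3450, 3420]))  # approx. 3423, no warning
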
@@ -901,12 +991,12 @@ def _import_date_time_metadata(self): starts = [] ends = [] for _, dataset_ in enumerate(self._data): - starts.append( - dataset_.metadata.measurement.start) - ends.append( - dataset_.metadata.measurement.end) - - self.dataset.metadata.measurement.start = \ - min(starts).strftime("%Y-%m-%d %H:%M:%S") - self.dataset.metadata.measurement.end = \ - max(ends).strftime("%Y-%m-%d %H:%M:%S") + starts.append(dataset_.metadata.measurement.start) + ends.append(dataset_.metadata.measurement.end) + + self.dataset.metadata.measurement.start = min(starts).strftime( + "%Y-%m-%d %H:%M:%S" + ) + self.dataset.metadata.measurement.end = max(ends).strftime( + "%Y-%m-%d %H:%M:%S" + ) diff --git a/cwepr/io/niehs.py b/cwepr/io/niehs.py index 6fd26ba..f716559 100644 --- a/cwepr/io/niehs.py +++ b/cwepr/io/niehs.py @@ -441,35 +441,44 @@ def _import_data(self): self.dataset.data.data = self._raw_data[3:] def _create_axis(self): - self._raw_metadata['center_field_mT'] = center_field = \ + self._raw_metadata["center_field_mT"] = center_field = ( self._raw_data[1] / 10 - self._raw_metadata['number_points'] = number_points = \ - int(self._raw_data[2]) - self._raw_metadata['sweep_width_mT'] = sweep_width = \ + ) + self._raw_metadata["number_points"] = number_points = int( + self._raw_data[2] + ) + self._raw_metadata["sweep_width_mT"] = sweep_width = ( self._raw_data[0] / 10 - self._raw_metadata['start'] = center_field - sweep_width / 2 - self._raw_metadata['stop'] = center_field + sweep_width / 2 - axis = np.linspace(center_field - sweep_width / 2, - center_field + sweep_width / 2, - num=number_points) + ) + self._raw_metadata["start"] = center_field - sweep_width / 2 + self._raw_metadata["stop"] = center_field + sweep_width / 2 + axis = np.linspace( + center_field - sweep_width / 2, + center_field + sweep_width / 2, + num=number_points, + ) assert axis[0] == center_field - sweep_width / 2 self.dataset.data.axes[0].values = axis def _assign_units(self): - self.dataset.data.axes[0].unit = 'mT' + self.dataset.data.axes[0].unit = "mT" def _assign_some_metadata(self): - self.dataset.metadata.magnetic_field.start.value = \ - self._raw_metadata['start'] - self.dataset.metadata.magnetic_field.stop.value = \ - self._raw_metadata['stop'] - self.dataset.metadata.magnetic_field.points = \ - self._raw_metadata['number_points'] - self.dataset.metadata.magnetic_field.sweep_width.value = \ - self._raw_metadata['sweep_width_mT'] - self.dataset.metadata.magnetic_field.sweep_width.unit = \ - self.dataset.metadata.magnetic_field.start.unit = \ - self.dataset.metadata.magnetic_field.stop.unit = 'mT' + self.dataset.metadata.magnetic_field.start.value = self._raw_metadata[ + "start" + ] + self.dataset.metadata.magnetic_field.stop.value = self._raw_metadata[ + "stop" + ] + self.dataset.metadata.magnetic_field.points = self._raw_metadata[ + "number_points" + ] + self.dataset.metadata.magnetic_field.sweep_width.value = ( + self._raw_metadata["sweep_width_mT"] + ) + self.dataset.metadata.magnetic_field.sweep_width.unit = ( + self.dataset.metadata.magnetic_field.start.unit + ) = self.dataset.metadata.magnetic_field.stop.unit = "mT" class NIEHSLmbImporter(aspecd.io.DatasetImporter): @@ -518,11 +527,11 @@ def _clean_filenames(self): def _get_raw_data(self): filename = self.source + ".lmb" - with open(filename, 'rb') as file: + with open(filename, "rb") as file: self._file_contents = file.read() def _detect_file_format(self): - self._file_format = self._file_contents[:4].decode('utf-8') + self._file_format = 
self._file_contents[:4].decode("utf-8") def _read_and_assign_parameters(self): parameters = [] @@ -531,9 +540,10 @@ def _read_and_assign_parameters(self): for _ in range(max_par): parameters.append( struct.unpack( - 'f', - self._file_contents[self._position: - self._position + 4])[0]) + "f", + self._file_contents[self._position : self._position + 4], + )[0] + ) self._position += 4 # Note: Only assign those parameters necessary in the given context self._parameters = { @@ -548,9 +558,10 @@ def _read_data(self): for _ in range(int(self._parameters["n_points"])): data.append( struct.unpack( - 'f', - self._file_contents[self._position: - self._position + 4])[0]) + "f", + self._file_contents[self._position : self._position + 4], + )[0] + ) self._position += 4 self.dataset.data.data = np.asarray(data) @@ -558,28 +569,39 @@ def _read_data(self): def _read_comments_and_metadata(self): comment_size = 60 self._comment = [ - self._file_contents[self._position: - self._position + comment_size - ].decode('utf-8').replace('\x00', '').strip()] + self._file_contents[ + self._position : self._position + comment_size + ] + .decode("utf-8") + .replace("\x00", "") + .strip() + ] self._position += comment_size str_num = 20 str_size = 12 strings = [] for _ in range(str_num): - strings.append(self._file_contents[ - self._position: - self._position + str_size].decode( - 'utf-8').replace('\x00', '').strip()) + strings.append( + self._file_contents[ + self._position : self._position + str_size + ] + .decode("utf-8") + .replace("\x00", "") + .strip() + ) self._position += str_size - if self._file_format == 'ESR2': + if self._file_format == "ESR2": for _ in range(2): self._comment.append( - self._file_contents[self._position: - self._position + comment_size - ].decode('utf-8').replace('\x00', - '').strip()) + self._file_contents[ + self._position : self._position + comment_size + ] + .decode("utf-8") + .replace("\x00", "") + .strip() + ) self._position += comment_size # Only those metadata of interest are mapped @@ -598,17 +620,21 @@ def _read_comments_and_metadata(self): } def _create_axis(self): - self._parameters['start'] = \ - self._parameters['center_field'] \ - - self._parameters['sweep_width'] / 2 - self._parameters['stop'] = \ - self._parameters['center_field'] \ - + self._parameters['sweep_width'] / 2 - axis = np.linspace(self._parameters['start'], - self._parameters['stop'], - num=self._parameters['n_points']) + self._parameters["start"] = ( + self._parameters["center_field"] + - self._parameters["sweep_width"] / 2 + ) + self._parameters["stop"] = ( + self._parameters["center_field"] + + self._parameters["sweep_width"] / 2 + ) + axis = np.linspace( + self._parameters["start"], + self._parameters["stop"], + num=self._parameters["n_points"], + ) self.dataset.data.axes[0].values = axis - self.dataset.data.axes[0].unit = 'mT' + self.dataset.data.axes[0].unit = "mT" def _assign_comment(self): comment_annotation = aspecd.annotation.Comment() @@ -616,17 +642,21 @@ def _assign_comment(self): self.dataset.annotate(comment_annotation) def _assign_metadata(self): - self.dataset.metadata.magnetic_field.start.value = \ + self.dataset.metadata.magnetic_field.start.value = ( self.dataset.data.axes[0].values[0] - self.dataset.metadata.magnetic_field.stop.value = \ + ) + self.dataset.metadata.magnetic_field.stop.value = ( self.dataset.data.axes[0].values[-1] - self.dataset.metadata.magnetic_field.sweep_width.value = \ + ) + self.dataset.metadata.magnetic_field.sweep_width.value = ( self._parameters["sweep_width"] - 
self.dataset.metadata.magnetic_field.start.unit = \ - self.dataset.metadata.magnetic_field.stop.unit = \ - self.dataset.metadata.magnetic_field.sweep_width.unit = "mT" - self.dataset.metadata.signal_channel.accumulations = \ - self._metadata["n_scans"] + ) + self.dataset.metadata.magnetic_field.start.unit = ( + self.dataset.metadata.magnetic_field.stop.unit + ) = self.dataset.metadata.magnetic_field.sweep_width.unit = "mT" + self.dataset.metadata.signal_channel.accumulations = self._metadata[ + "n_scans" + ] class NIEHSExpImporter(aspecd.io.DatasetImporter): @@ -677,37 +707,39 @@ def _clean_filenames(self): def _read_file_contents(self): filename = self.source + ".exp" - with open(filename, 'r', encoding='ascii') as file: + with open(filename, "r", encoding="ascii") as file: self._file_contents = file.read() self._lines = self._file_contents.splitlines() def _detect_file_format(self): - if self._lines[0].startswith('['): - self._file_format = 'with_blocks' + if self._lines[0].startswith("["): + self._file_format = "with_blocks" else: - self._file_format = 'DSV' + self._file_format = "DSV" def _import_data(self): - if self._file_format == 'with_blocks': + if self._file_format == "with_blocks": # noinspection PyTypeChecker self._raw_data = np.loadtxt( io.StringIO(self._file_contents), - skiprows=self._lines.index('[DATA]') + 1) - self._header = self._lines[:self._lines.index('[DATA]')] + skiprows=self._lines.index("[DATA]") + 1, + ) + self._header = self._lines[: self._lines.index("[DATA]")] else: # noinspection PyTypeChecker self._raw_data = np.loadtxt(io.StringIO(self._file_contents)) def _assign_data_and_axis(self): self.dataset.data.axes[0].values = self._raw_data[:, 0] / 10 - self.dataset.data.axes[0].unit = 'mT' + self.dataset.data.axes[0].unit = "mT" self.dataset.data.data = self._raw_data[:, 1] def _assign_comment(self): if self._header: # Remove empty lines and trailing spaces - self._header = [element.rstrip() for element in self._header - if element != ''] + self._header = [ + element.rstrip() for element in self._header if element != "" + ] comment_annotation = aspecd.annotation.Comment() comment_annotation.comment = self._header self.dataset.annotate(comment_annotation) diff --git a/cwepr/io/txt_file.py b/cwepr/io/txt_file.py index 3e48383..1702359 100644 --- a/cwepr/io/txt_file.py +++ b/cwepr/io/txt_file.py @@ -120,10 +120,10 @@ class TxtImporter(aspecd.io.TxtImporter): """ - def __init__(self, source=''): + def __init__(self, source=""): super().__init__(source=source) # public properties - self.extension = '.txt' + self.extension = ".txt" self.parameters["skiprows"] = 0 self.parameters["delimiter"] = None self.parameters["comments"] = "#" @@ -135,19 +135,19 @@ def _import(self): self._create_metadata() def _get_extension(self): - if '.' in self.source: - extension = self.source[self.source.rfind('.'):] + if "." 
in self.source: + extension = self.source[self.source.rfind(".") :] else: extension = None if extension: self.extension = extension - self.source = self.source[:self.source.rfind('.')] + self.source = self.source[: self.source.rfind(".")] self.source += self.extension def _create_metadata(self): - self.dataset.data.axes[0].unit = 'mT' - self.dataset.data.axes[0].quantity = 'magnetic field' - self.dataset.data.axes[1].quantity = 'intensity' + self.dataset.data.axes[0].unit = "mT" + self.dataset.data.axes[0].quantity = "magnetic field" + self.dataset.data.axes[1].quantity = "intensity" class CsvImporter(TxtImporter): @@ -174,8 +174,8 @@ class CsvImporter(TxtImporter): """ - def __init__(self, source=''): + def __init__(self, source=""): super().__init__(source=source) # public properties - self.extension = '.csv' - self.parameters["delimiter"] = ',' + self.extension = ".csv" + self.parameters["delimiter"] = "," diff --git a/cwepr/metadata.py b/cwepr/metadata.py index c232f12..f2c9d40 100644 --- a/cwepr/metadata.py +++ b/cwepr/metadata.py @@ -34,7 +34,9 @@ from cwepr.exceptions import UnequalUnitsError -class ExperimentalDatasetMetadata(aspecd.metadata.ExperimentalDatasetMetadata): +class ExperimentalDatasetMetadata( + aspecd.metadata.ExperimentalDatasetMetadata +): """Set of all metadata for a dataset object. Metadata as a unified structure of information coupled to the dataset are @@ -129,7 +131,7 @@ class Measurement(aspecd.metadata.Measurement): def __init__(self, dict_=None): # public properties - self.label = '' + self.label = "" super().__init__(dict_=dict_) @@ -163,10 +165,10 @@ class Sample(aspecd.metadata.Sample): def __init__(self, dict_=None): # public properties - self.description = '' - self.solvent = '' - self.preparation = '' - self.tube = '' + self.description = "" + self.solvent = "" + self.preparation = "" + self.tube = "" super().__init__(dict_=dict_) @@ -247,13 +249,13 @@ def can_calculate(self): sector_par = 0 step_def = False step_par = 0 - if self.start.value != 0.: + if self.start.value != 0.0: sector_def = True sector_par += 1 - if self.stop.value != 0.: + if self.stop.value != 0.0: sector_def = True sector_par += 1 - if self.sweep_width.value != 0.: + if self.sweep_width.value != 0.0: sector_par += 1 step_par += 1 if self.points != 0: @@ -290,19 +292,19 @@ def calculate_values(self): self._calc_step_data(units_error_message) def _calc_field_width(self, units_error_message): - if self.sweep_width.value == 0.: + if self.sweep_width.value == 0.0: if self.stop.unit != self.start.unit: raise UnequalUnitsError(units_error_message) self.sweep_width.value = self.stop.value - self.start.value self.sweep_width.unit = self.stop.unit def _calc_field_limits(self, units_error_message): - if self.stop.value == 0.: + if self.stop.value == 0.0: if self.sweep_width.unit != self.start.unit: raise UnequalUnitsError(units_error_message) self.stop.value = self.start.value + self.sweep_width.value self.stop.unit = self.start.unit - if self.start.value == 0.: + if self.start.value == 0.0: if self.stop.unit != self.sweep_width.unit: raise UnequalUnitsError(units_error_message) self.start.value = self.stop.value - self.sweep_width.value @@ -312,17 +314,24 @@ def _calc_step_data(self, units_error_message): if self.points == 0: if self.sweep_width.unit != self.step_width.unit: raise UnequalUnitsError(units_error_message) - self.points = int(round((self.sweep_width.value / - self.step_width.value), 0)) + 1 - if self.step_width.value == 0.: - self.step_width.value = \ - self.sweep_width.value / 
(self.points - 1) + self.points = ( + int( + round((self.sweep_width.value / self.step_width.value), 0) + ) + + 1 + ) + if self.step_width.value == 0.0: + self.step_width.value = self.sweep_width.value / (self.points - 1) self.step_width.unit = self.stop.unit def gauss_to_millitesla(self): """Transform magnetic field parameters from gauss to millitesla.""" - for quantity in [self.start, self.stop, - self.sweep_width, self.step_width]: + for quantity in [ + self.start, + self.stop, + self.sweep_width, + self.step_width, + ]: quantity.value /= 10 quantity.unit = "mT" @@ -643,6 +652,6 @@ class TemperatureControl(aspecd.metadata.TemperatureControl): def __init__(self, dict_=None): # public properties - self.cryostat = '' - self.cryogen = '' + self.cryostat = "" + self.cryogen = "" super().__init__(dict_=dict_) diff --git a/cwepr/plotting.py b/cwepr/plotting.py index 70bf09d..894b567 100644 --- a/cwepr/plotting.py +++ b/cwepr/plotting.py @@ -203,21 +203,29 @@ class GoniometerSweepPlotter(aspecd.plotting.SingleCompositePlotter): def __init__(self): super().__init__() - self.description = 'Plot for one goniometric dataset in different ' \ - 'representations.' + self.description = ( + "Plot for one goniometric dataset in different " + "representations." + ) self.grid_dimensions = [2, 2] self.subplot_locations = [[0, 0, 1, 1], [1, 0, 1, 1], [0, 1, 2, 1]] - self.plotter = [aspecd.plotting.SinglePlotter2D(), - aspecd.plotting.MultiPlotter1D(), - aspecd.plotting.SinglePlotter2DStacked()] - self.axes_positions = [[0, 0.15, 1, 1], [0, 0, 1, 1], - [0.25, 0, 0.9, 1.07]] + self.plotter = [ + aspecd.plotting.SinglePlotter2D(), + aspecd.plotting.MultiPlotter1D(), + aspecd.plotting.SinglePlotter2DStacked(), + ] + self.axes_positions = [ + [0, 0.15, 1, 1], + [0, 0, 1, 1], + [0.25, 0, 0.9, 1.07], + ] self.zero_deg_slice = None self.hundredeighty_deg_slice = None - self.parameters['show_zero_lines'] = False - self.__kind__ = 'singleplot' - self._exclude_from_to_dict.extend(['dataset', 'zero_deg_slice', - 'hundredeighty_deg_slice']) + self.parameters["show_zero_lines"] = False + self.__kind__ = "singleplot" + self._exclude_from_to_dict.extend( + ["dataset", "zero_deg_slice", "hundredeighty_deg_slice"] + ) def _create_plot(self): self._configure_traces_plotter() @@ -228,29 +236,27 @@ def _create_plot(self): def _configure_contour_plotter(self): upper_contour = self.plotter[0] - upper_contour.type = 'contourf' - upper_contour.parameters['show_contour_lines'] = True - upper_contour.properties.from_dict({ - 'axes': { - 'yticks': [0, 30, 60, 90, 120, 150, 180] - } - }) + upper_contour.type = "contourf" + upper_contour.parameters["show_contour_lines"] = True + upper_contour.properties.from_dict( + {"axes": {"yticks": [0, 30, 60, 90, 120, 150, 180]}} + ) self.plotter[0] = upper_contour def _extract_traces(self): slicing = aspecd.processing.SliceExtraction() - slicing.parameters['axis'] = axis_no = 1 + slicing.parameters["axis"] = axis_no = 1 zero_value = self._get_angle_closest_to_value(axis_no, 0) hundredeighty_value = self._get_angle_closest_to_value(axis_no, 180) - slicing.parameters['unit'] = 'axis' - slicing.parameters['position'] = zero_value + slicing.parameters["unit"] = "axis" + slicing.parameters["position"] = zero_value self.zero_deg_slice = copy.deepcopy(self.dataset) self.zero_deg_slice.process(slicing) - self.zero_deg_slice.label = f'{zero_value:.1f}°' - slicing.parameters['position'] = hundredeighty_value + self.zero_deg_slice.label = f"{zero_value:.1f}°" + slicing.parameters["position"] = 
hundredeighty_value self.hundredeighty_deg_slice = copy.deepcopy(self.dataset) self.hundredeighty_deg_slice.process(slicing) - self.hundredeighty_deg_slice.label = f'{hundredeighty_value:.1f}°' + self.hundredeighty_deg_slice.label = f"{hundredeighty_value:.1f}°" def _get_angle_closest_to_value(self, axis_no=0, value=None): axis = self.dataset.data.axes[axis_no].values @@ -258,24 +264,22 @@ def _get_angle_closest_to_value(self, axis_no=0, value=None): def _configure_comparison_plotter(self): comparison_plotter = self.plotter[1] - comparison_plotter.datasets = [self.zero_deg_slice, - self.hundredeighty_deg_slice] - comparison_plotter.properties.from_dict({ - 'drawings': [ - {'color': 'tab:blue'}, - {'color': 'tab:red'} - ], - 'axes': { - 'yticks': [], - 'ylabel': r'$EPR\ intensity$' + comparison_plotter.datasets = [ + self.zero_deg_slice, + self.hundredeighty_deg_slice, + ] + comparison_plotter.properties.from_dict( + { + "drawings": [{"color": "tab:blue"}, {"color": "tab:red"}], + "axes": {"yticks": [], "ylabel": r"$EPR\ intensity$"}, } - }) - comparison_plotter.parameters['show_legend'] = True + ) + comparison_plotter.parameters["show_legend"] = True self.plotter[1] = comparison_plotter def _configure_traces_plotter(self): - self.plotter[2].parameters['yticklabelformat'] = '%.1f' - self.plotter[2].parameters['ytickcount'] = 19 + self.plotter[2].parameters["yticklabelformat"] = "%.1f" + self.plotter[2].parameters["ytickcount"] = 19 class PowerSweepAnalysisPlotter(aspecd.plotting.MultiPlotter1D): @@ -371,12 +375,12 @@ class PowerSweepAnalysisPlotter(aspecd.plotting.MultiPlotter1D): def __init__(self): super().__init__() - self.parameters['mw-axis'] = True - self.parameters['tight_layout'] = True + self.parameters["mw-axis"] = True + self.parameters["tight_layout"] = True def _create_plot(self): super()._create_plot() - if self.parameters['mw-axis']: + if self.parameters["mw-axis"]: self._set_lower_xlim() self._create_power_axis() @@ -393,15 +397,17 @@ def _create_power_axis(self): Note that :func:`numpy.sqrt` returns NaN for negative values. Therefore, the lower axis limit is set to be >= 0 in this plot. """ + def forward(values): return np.power(values, 2) def backward(values): return np.sqrt(values) - power_axis = self.ax.secondary_xaxis('top', - functions=(backward, forward)) - power_axis.set_xlabel('$mw\\ power$') + power_axis = self.ax.secondary_xaxis( + "top", functions=(backward, forward) + ) + power_axis.set_xlabel("$mw\\ power$") power_axis.tick_params(labelrotation=90) @@ -442,7 +448,7 @@ class PlotterExtensions: """ def __init__(self): - self.parameters['g-axis'] = False + self.parameters["g-axis"] = False def _create_g_axis(self, mw_freq=None): """ @@ -459,14 +465,15 @@ def _create_g_axis(self, mw_freq=None): microwave frequency (**in GHz**) used to convert from mT to g """ + def forward(values): return utils.convert_mT2g(values, mw_freq=mw_freq) def backward(values): return utils.convert_g2mT(values, mw_freq=mw_freq) - gaxis = self.ax.secondary_xaxis('top', functions=(backward, forward)) - gaxis.set_xlabel(r'$g\ value$') + gaxis = self.ax.secondary_xaxis("top", functions=(backward, forward)) + gaxis.set_xlabel(r"$g\ value$") class SinglePlotter1D(aspecd.plotting.SinglePlotter1D, PlotterExtensions): @@ -524,8 +531,10 @@ class for details. 
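
The g-axis helper above follows a single pattern: a secondary x-axis on top of the field axis, defined by a pair of forward/inverse conversion functions (the package delegates these to cwepr.utils.convert_mT2g and convert_g2mT). A self-contained sketch of the same pattern, assuming bare matplotlib, an X-band frequency of 9.5 GHz and dummy data; constants and values are illustrative only::

    import matplotlib.pyplot as plt
    import numpy as np

    PLANCK = 6.62607015e-34  # J s
    BOHR_MAGNETON = 9.2740100783e-24  # J/T
    MW_FREQ_GHZ = 9.5  # assumed microwave frequency


    def mT2g(field_mt):
        # g = h * nu / (mu_B * B), with nu in Hz and B in T
        return PLANCK * MW_FREQ_GHZ * 1e9 / (BOHR_MAGNETON * field_mt * 1e-3)


    def g2mT(g_value):
        return PLANCK * MW_FREQ_GHZ * 1e9 / (BOHR_MAGNETON * g_value) * 1e3


    field = np.linspace(330, 345, 200)  # magnetic field / mT
    signal = np.gradient(np.exp(-((field - 337.5) ** 2) / 0.5))  # dummy derivative line

    fig, ax = plt.subplots()
    ax.plot(field, signal)
    ax.set_xlabel("magnetic field / mT")
    g_axis = ax.secondary_xaxis("top", functions=(mT2g, g2mT))
    g_axis.set_xlabel("g value")
    plt.show()
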
def _create_plot(self): super()._create_plot() - if self.parameters['g-axis'] and self.data.axes[0].unit == 'mT': - self._create_g_axis(self.dataset.metadata.bridge.mw_frequency.value) + if self.parameters["g-axis"] and self.data.axes[0].unit == "mT": + self._create_g_axis( + self.dataset.metadata.bridge.mw_frequency.value + ) class SinglePlotter2D(aspecd.plotting.SinglePlotter2D, PlotterExtensions): @@ -630,12 +639,15 @@ class for details. def _create_plot(self): super()._create_plot() - if self.parameters['g-axis'] and self.data.axes[0].unit == 'mT': - self._create_g_axis(self.dataset.metadata.bridge.mw_frequency.value) + if self.parameters["g-axis"] and self.data.axes[0].unit == "mT": + self._create_g_axis( + self.dataset.metadata.bridge.mw_frequency.value + ) -class SinglePlotter2DStacked(aspecd.plotting.SinglePlotter2DStacked, - PlotterExtensions): +class SinglePlotter2DStacked( + aspecd.plotting.SinglePlotter2DStacked, PlotterExtensions +): """Stacked plots of 2D data. A stackplot creates a series of lines stacked on top of each other from @@ -722,8 +734,10 @@ class for details. def _create_plot(self): super()._create_plot() - if self.parameters['g-axis'] and self.data.axes[0].unit == 'mT': - self._create_g_axis(self.dataset.metadata.bridge.mw_frequency.value) + if self.parameters["g-axis"] and self.data.axes[0].unit == "mT": + self._create_g_axis( + self.dataset.metadata.bridge.mw_frequency.value + ) class MultiPlotter1D(aspecd.plotting.MultiPlotter1D, PlotterExtensions): @@ -804,14 +818,15 @@ class for details. def _create_plot(self): super()._create_plot() - if self.parameters['g-axis'] \ - and self.data[0].axes[0].unit == 'mT': + if self.parameters["g-axis"] and self.data[0].axes[0].unit == "mT": self._create_g_axis( - self.datasets[0].metadata.bridge.mw_frequency.value) + self.datasets[0].metadata.bridge.mw_frequency.value + ) -class MultiPlotter1DStacked(aspecd.plotting.MultiPlotter1DStacked, - PlotterExtensions): +class MultiPlotter1DStacked( + aspecd.plotting.MultiPlotter1DStacked, PlotterExtensions +): """Stacked 1D plots of multiple datasets. Convenience class taking care of 1D plots of multiple datasets. @@ -902,7 +917,7 @@ class for details. def _create_plot(self): super()._create_plot() - if self.parameters['g-axis'] \ - and self.data[0].axes[0].unit == 'mT': + if self.parameters["g-axis"] and self.data[0].axes[0].unit == "mT": self._create_g_axis( - self.datasets[0].metadata.bridge.mw_frequency.value) + self.datasets[0].metadata.bridge.mw_frequency.value + ) diff --git a/cwepr/processing.py b/cwepr/processing.py index e8637c0..3198daf 100644 --- a/cwepr/processing.py +++ b/cwepr/processing.py @@ -468,12 +468,14 @@ def _perform_task(self): # TODO: Question: Better check for quantity rather than unit? 
# (Difficult if not filled) # if axis.quantity == 'magnetic field' - if axis.unit in ('mT', 'G'): + if axis.unit in ("mT", "G"): axis.values += self.parameters["offset"] - self.dataset.metadata.magnetic_field.start.value = \ + self.dataset.metadata.magnetic_field.start.value = ( axis.values[0] - self.dataset.metadata.magnetic_field.stop.value = \ - axis.values[-1] + ) + self.dataset.metadata.magnetic_field.stop.value = axis.values[ + -1 + ] class FrequencyCorrection(aspecd.processing.SingleProcessingStep): @@ -535,36 +537,38 @@ class FrequencyCorrection(aspecd.processing.SingleProcessingStep): def __init__(self): super().__init__() self.parameters["frequency"] = 9.5 - self.parameters['kind'] = 'proportional' + self.parameters["kind"] = "proportional" self.description = "Correct magnetic field axis for given frequency" @staticmethod def applicable(dataset): """Check applicability.""" if not dataset.metadata.bridge.mw_frequency.value: - message = 'No frequency given in dataset' + message = "No frequency given in dataset" warnings.warn(message=message) return False return True def _sanitise_parameters(self): - if isinstance(self.parameters['frequency'], int): - self.parameters['frequency'] = float(self.parameters['frequency']) + if isinstance(self.parameters["frequency"], int): + self.parameters["frequency"] = float(self.parameters["frequency"]) def _perform_task(self): """Perform the actual transformation / correction.""" - nu_target = self.parameters['frequency'] + nu_target = self.parameters["frequency"] for axis in self.dataset.data.axes: # TODO: Question: Better check for quantity rather than unit? # (Difficult if not filled) # if axis.quantity == 'magnetic field' - if axis.unit in ('mT', 'G'): - if 'proportional' in self.parameters['kind'].lower(): - axis.values = self._correct_proportionally(nu_target, - axis.values) - elif 'offset' in self.parameters['kind'].lower(): - axis.values = self._correct_with_offset(nu_target, - axis.values) + if axis.unit in ("mT", "G"): + if "proportional" in self.parameters["kind"].lower(): + axis.values = self._correct_proportionally( + nu_target, axis.values + ) + elif "offset" in self.parameters["kind"].lower(): + axis.values = self._correct_with_offset( + nu_target, axis.values + ) self._write_new_frequency() def _correct_proportionally(self, nu_target=None, b_initial=None): @@ -592,13 +596,16 @@ def _correct_proportionally(self, nu_target=None, b_initial=None): def _correct_with_offset(self, nu_target=None, axis=None): point_to_correct = axis[round(len(axis) / 2)] nu_initial = self.dataset.metadata.bridge.mw_frequency.value - offset = point_to_correct - (nu_target / nu_initial) * point_to_correct + offset = ( + point_to_correct - (nu_target / nu_initial) * point_to_correct + ) b_target = axis + offset return b_target def _write_new_frequency(self): - self.dataset.metadata.bridge.mw_frequency.value = \ - self.parameters['frequency'] + self.dataset.metadata.bridge.mw_frequency.value = self.parameters[ + "frequency" + ] class GAxisCreation(aspecd.processing.SingleProcessingStep): @@ -627,11 +634,11 @@ def __init__(self): def _perform_task(self): for axis in self.dataset.data.axes: - if axis.unit == 'mT': + if axis.unit == "mT": mw_freq = self.dataset.metadata.bridge.mw_frequency.value axis.values = utils.convert_mT2g(axis.values, mw_freq=mw_freq) - axis.unit = '' - axis.quantity = 'g value' + axis.unit = "" + axis.quantity = "g value" class AutomaticPhaseCorrection(aspecd.processing.SingleProcessingStep): @@ -652,9 +659,9 @@ def __init__(self): 
super().__init__() # Public properties self.description = "Automatic phase correction via Hilbert transform" - self.parameters['order'] = 1 - self.parameters['points_percentage'] = 10 - self.parameters['phase_angle'] = 0 + self.parameters["order"] = 1 + self.parameters["points_percentage"] = 10 + self.parameters["phase_angle"] = 0 # private properties self._analytic_signal = None self._points_per_side = None @@ -670,11 +677,12 @@ def _perform_task(self): def _find_initial_negative_area(self): """Get area/values below zero as indicator of the phase deviation.""" ft_sig_tmp = self._analytic_signal - if self.parameters['order'] > 0: + if self.parameters["order"] > 0: # pylint: disable=unused-variable - for j in range(self.parameters['order']): # integrate j times - ft_sig_tmp = scipy.integrate.cumtrapz(self._analytic_signal, - initial=0) + for j in range(self.parameters["order"]): # integrate j times + ft_sig_tmp = scipy.integrate.cumtrapz( + self._analytic_signal, initial=0 + ) ft_sig_tmp = self._baseline_correction(signal=np.real(ft_sig_tmp)) elements_inf_zero = [x for x in ft_sig_tmp if x < 0] self._area_under_curve = abs(np.trapz(elements_inf_zero)) @@ -684,14 +692,17 @@ def _baseline_correction(self, signal=None): signal_size = signal.size if len(signal.shape) > 1 and signal.shape[1] != 1: signal.transpose() - self._points_per_side = \ - int(np.ceil((self.parameters['points_percentage'] / 100) * - signal_size)) + self._points_per_side = int( + np.ceil( + (self.parameters["points_percentage"] / 100) * signal_size + ) + ) data_parts = self._extract_points(signal) x_axis_parts = self._extract_points(self.dataset.data.axes[0].values) - coefficients = np.polyfit(x_axis_parts, data_parts, - deg=self.parameters['order']) + coefficients = np.polyfit( + x_axis_parts, data_parts, deg=self.parameters["order"] + ) baseline = np.polyval(coefficients, self.dataset.data.axes[0].values) corrected_signal = signal - baseline @@ -701,8 +712,12 @@ def _baseline_correction(self, signal=None): def _extract_points(self, values): # pylint: disable=invalid-unary-operand-type - vector_parts = np.concatenate([values[:self._points_per_side], - values[-self._points_per_side:]]) + vector_parts = np.concatenate( + [ + values[: self._points_per_side], + values[-self._points_per_side :], + ] + ) return vector_parts def _find_best_phase(self): @@ -712,27 +727,31 @@ def _find_best_phase(self): angles = np.linspace(min_angle, max_angle, num=181) for angle in angles: rotated_signal = np.exp(1j * angle) * self._analytic_signal - if self.parameters['order'] > 0: + if self.parameters["order"] > 0: # pylint: disable=unused-variable - for j in range(self.parameters['order']): - rotated_signal = scipy.integrate.cumtrapz(rotated_signal, - initial=0) - rotated_signal = self._baseline_correction(signal=np.real( - rotated_signal)) + for j in range(self.parameters["order"]): + rotated_signal = scipy.integrate.cumtrapz( + rotated_signal, initial=0 + ) + rotated_signal = self._baseline_correction( + signal=np.real(rotated_signal) + ) elements_inf_zero = [x for x in rotated_signal if x < 0] area = abs(np.trapz(elements_inf_zero)) if area < self._area_under_curve: self._area_under_curve = area - self.parameters['phase_angle'] = angle + self.parameters["phase_angle"] = angle def _reconstruct_real_signal(self): - self.dataset.data.data = np.real(np.exp(1j * self.parameters[ - 'phase_angle']) * self._analytic_signal) + self.dataset.data.data = np.real( + np.exp(1j * self.parameters["phase_angle"]) + * self._analytic_signal + ) assert not 
np.iscomplex(self.dataset.data.data).all() def _print_results_to_command_line(self): - phi_degree = self.parameters['phase_angle'] * 180 / np.pi - print(f'Phase correction was done with phi = {phi_degree:.3f} degree') + phi_degree = self.parameters["phase_angle"] * 180 / np.pi + print(f"Phase correction was done with phi = {phi_degree:.3f} degree") class NormalisationOfDerivativeToArea(aspecd.processing.SingleProcessingStep): @@ -758,8 +777,9 @@ def _perform_task(self): self.dataset.data.data /= self._area def _integrate_spectrum(self): - integrated_spectrum = \ - scipy.integrate.cumtrapz(self.dataset.data.data, initial=0) + integrated_spectrum = scipy.integrate.cumtrapz( + self.dataset.data.data, initial=0 + ) self._area = np.trapz(integrated_spectrum) @@ -836,16 +856,19 @@ class Normalisation(aspecd.processing.Normalisation): """ def _perform_task(self): - if 'receiver' in self.parameters["kind"].lower(): + if "receiver" in self.parameters["kind"].lower(): self._normalise_for_receiver_gain() - elif 'scan_number' in self.parameters["kind"].lower(): - self.dataset.data.data \ - /= self.dataset.metadata.signal_channel.accumulations + elif "scan_number" in self.parameters["kind"].lower(): + self.dataset.data.data /= ( + self.dataset.metadata.signal_channel.accumulations + ) else: super()._perform_task() def _normalise_for_receiver_gain(self): - receiver_gain = self.dataset.metadata.signal_channel.receiver_gain.value + receiver_gain = ( + self.dataset.metadata.signal_channel.receiver_gain.value + ) receiver_gain_value = 10 ** (receiver_gain / 20) self.dataset.data.data /= receiver_gain_value @@ -865,29 +888,32 @@ class AxisInterpolation(aspecd.processing.SingleProcessingStep): def __init__(self): super().__init__() - self.description = 'Interpolate axis to get equidistant points.' - self.parameters['points'] = None + self.description = "Interpolate axis to get equidistant points." 
+ self.parameters["points"] = None def _perform_task(self): for num, axis in enumerate(self.dataset.data.axes): if not axis.equidistant: - if not self.parameters['points']: + if not self.parameters["points"]: self._get_axis_length(ax_nr=num) self._interpolate_axis(num) break def _interpolate_axis(self, ax_number=None): - points = self.parameters['points'] + points = self.parameters["points"] # Actual interpolation start = self.dataset.metadata.magnetic_field.start.value stop = self.dataset.metadata.magnetic_field.stop.value new_x_axis = np.linspace(start, stop, num=points) - self.dataset.data.data = np.interp(new_x_axis, self.dataset.data.axes[ - ax_number].values, self.dataset.data.data) + self.dataset.data.data = np.interp( + new_x_axis, + self.dataset.data.axes[ax_number].values, + self.dataset.data.data, + ) self.dataset.data.axes[ax_number].values = new_x_axis def _get_axis_length(self, ax_nr): - self.parameters['points'] = len(self.dataset.data.axes[ax_nr].values) + self.parameters["points"] = len(self.dataset.data.axes[ax_nr].values) class ScalarAlgebra(aspecd.processing.ScalarAlgebra): diff --git a/cwepr/report.py b/cwepr/report.py index 75b5cfd..3cde8c9 100644 --- a/cwepr/report.py +++ b/cwepr/report.py @@ -77,14 +77,14 @@ class ExperimentalDatasetLaTeXReporter(aspecd.report.LaTeXReporter): """Report implementation for cwepr module.""" - def __init__(self, template='', filename=''): + def __init__(self, template="", filename=""): super().__init__(template=template, filename=filename) self.dataset = cwepr.dataset.ExperimentalDataset() # private properties self._metadata = {} self._tasks = collections.OrderedDict() self._figure_name = {} - self._exclude_from_to_dict.extend(['dataset']) + self._exclude_from_to_dict.extend(["dataset"]) def create(self): """Perform all methods to generate a report.""" @@ -96,25 +96,26 @@ def create(self): super().create() def _prepare_metadata(self): - self._metadata = self.context['dataset']['metadata'] - self._metadata['parameter'] = collections.OrderedDict() + self._metadata = self.context["dataset"]["metadata"] + self._metadata["parameter"] = collections.OrderedDict() self._collect_experimental_parameters() def _collect_experimental_parameters(self): """Collect all the metadata keys.""" for key in self._metadata.keys(): - if key not in ['sample', 'measurement', 'parameter']: - self._metadata['parameter'][key] = \ - self._metadata[key] + if key not in ["sample", "measurement", "parameter"]: + self._metadata["parameter"][key] = self._metadata[key] def _get_tasks(self): for task in self.dataset.tasks: - if task['kind'] in ('analysis', 'processing'): - self._tasks[(getattr(task['task'], - task['kind']).description)] = { - 'Parameters': getattr(task['task'], - task['kind']).parameters, - 'Comment': getattr(task['task'], task['kind']).comment + if task["kind"] in ("analysis", "processing"): + self._tasks[ + (getattr(task["task"], task["kind"]).description) + ] = { + "Parameters": getattr( + task["task"], task["kind"] + ).parameters, + "Comment": getattr(task["task"], task["kind"]).comment, } def _get_tasks_recursively(self, dict_=None): @@ -136,31 +137,31 @@ def _get_tasks_recursively(self, dict_=None): """ for task in dict_.tasks: - if task['kind'] == 'annotation': + if task["kind"] == "annotation": continue - if task['kind'] == 'analysis': - if isinstance(task['task'].analysis.result, - aspecd.dataset.CalculatedDataset): - self._get_tasks_recursively(task['task'].analysis.result) - - self._tasks[(getattr(task['task'], - task['kind']).description)] = { 
- 'Parameters': getattr(task['task'], - task['kind']).parameters, - 'Comment': getattr(task['task'], task['kind']).comment, + if task["kind"] == "analysis": + if isinstance( + task["task"].analysis.result, + aspecd.dataset.CalculatedDataset, + ): + self._get_tasks_recursively(task["task"].analysis.result) + + self._tasks[(getattr(task["task"], task["kind"]).description)] = { + "Parameters": getattr(task["task"], task["kind"]).parameters, + "Comment": getattr(task["task"], task["kind"]).comment, } def _create_context(self): """Create a dictionary containing all data to write the report.""" - self.context['TASKS'] = self._tasks - self.context['METADATA'] = self._metadata - self.context['FIGURENAMES'] = self.includes + self.context["TASKS"] = self._tasks + self.context["METADATA"] = self._metadata + self.context["FIGURENAMES"] = self.includes def _sanitise_context(self, dict_=None): """Removes corresponding keys to empty values from context.""" tmp_dict = copy.deepcopy(dict_) for key, value in dict_.items(): - if key == 'dataset': + if key == "dataset": continue if isinstance(value, (collections.OrderedDict, dict)): tmp_dict[key] = self._sanitise_context(value) @@ -172,14 +173,20 @@ def _sanitise_context(self, dict_=None): def _get_figure_names(self): """Get the names of the figures used for the report.""" for i, _ in enumerate(self.dataset.representations): - if self.dataset.representations[i].plot.description \ - == '2D plot as scaled image.': - self._figure_name['Figure2D'] = \ - self.dataset.representations[i].plot.filename - elif self.dataset.representations[i].plot.description \ - == '1D line plot.': - self._figure_name['Figure1D'] = \ - self.dataset.representations[i].plot.filename + if ( + self.dataset.representations[i].plot.description + == "2D plot as scaled image." + ): + self._figure_name["Figure2D"] = self.dataset.representations[ + i + ].plot.filename + elif ( + self.dataset.representations[i].plot.description + == "1D line plot." + ): + self._figure_name["Figure1D"] = self.dataset.representations[ + i + ].plot.filename else: pass @@ -187,13 +194,13 @@ def _get_figure_names(self): class PowerSweepAnalysisReporter(aspecd.report.LaTeXReporter): """Create report for power sweep analysis.""" - def __init__(self, template='', filename=''): + def __init__(self, template="", filename=""): super().__init__(template=template, filename=filename) self.dataset = cwepr.dataset.ExperimentalDataset() # private properties self._metadata = {} self._tasks = {} - self._exclude_from_to_dict.extend(['dataset']) + self._exclude_from_to_dict.extend(["dataset"]) def create(self): """Perform all methods to generate a report. @@ -206,51 +213,50 @@ def create(self): """ self._prepare_metadata() - #self._get_tasks() + # self._get_tasks() # TODO: Put figurenames in a dict instead of a list? 
- #self._get_figure_names() + # self._get_figure_names() self._create_context() self.context = self._sanitise_context(self.context) super().create() def _prepare_metadata(self): - self._metadata = self.context['dataset']['metadata'] - self._metadata['parameter'] = collections.OrderedDict() + self._metadata = self.context["dataset"]["metadata"] + self._metadata["parameter"] = collections.OrderedDict() self._collect_experimental_parameters() def _collect_experimental_parameters(self): """Collect all the metadata keys.""" for key in self._metadata.keys(): - if key not in ['sample', 'measurement', 'parameter']: - self._metadata['parameter'][key] = \ - self._metadata[key] + if key not in ["sample", "measurement", "parameter"]: + self._metadata["parameter"][key] = self._metadata[key] def _create_context(self): """Create a dictionary containing all data to write the report.""" - self.context['TASKS'] = self._tasks - self.context['METADATA'] = self._metadata - self.context['FIGURENAMES'] = self.includes + self.context["TASKS"] = self._tasks + self.context["METADATA"] = self._metadata + self.context["FIGURENAMES"] = self.includes def _get_tasks(self): - for task, _ in enumerate(self.context['dataset']['tasks']): - task = self.context['dataset']['tasks'][task] - if task['kind'] in ('analysis', 'processing'): - self._tasks[task['task'][task['kind']]['description']] = { - 'Parameters': task['task'][task['kind']]['parameters'], - 'Comment': task['task'][task['kind']]['comment'] + for task, _ in enumerate(self.context["dataset"]["tasks"]): + task = self.context["dataset"]["tasks"][task] + if task["kind"] in ("analysis", "processing"): + self._tasks[task["task"][task["kind"]]["description"]] = { + "Parameters": task["task"][task["kind"]]["parameters"], + "Comment": task["task"][task["kind"]]["comment"], } - fit = self.context['FITTING'] - fit_coeffs = fit.metadata.calculation.parameters['coefficients'] - further_tasks = self.context['CALCDATA'].tasks - self._tasks[further_tasks[0]['task'].analysis.description] = { - 'Parameters': str(fit_coeffs), - 'Comment': further_tasks[0]['task'].analysis.comment + fit = self.context["FITTING"] + fit_coeffs = fit.metadata.calculation.parameters["coefficients"] + further_tasks = self.context["CALCDATA"].tasks + self._tasks[further_tasks[0]["task"].analysis.description] = { + "Parameters": str(fit_coeffs), + "Comment": further_tasks[0]["task"].analysis.comment, } def _sanitise_context(self, dict_=None): tmp_dict = copy.deepcopy(dict_) for key, value in dict_.items(): - if key == 'dataset': + if key == "dataset": continue if isinstance(value, (collections.OrderedDict, dict)): tmp_dict[key] = self._sanitise_context(value) @@ -269,16 +275,16 @@ class DokuwikiCaptionsReporter(aspecd.report.Reporter): has to be inserted. 
""" - def __init__(self, template='', filename=''): + def __init__(self, template="", filename=""): self.filename = filename - self.language = 'de' + self.language = "de" self.template = template if template else self._get_template() super().__init__(template=self.template, filename=self.filename) self.dataset = cwepr.dataset.ExperimentalDataset() # private properties self._metadata = {} self._figure_name = {} - self._exclude_from_to_dict.extend(['dataset']) + self._exclude_from_to_dict.extend(["dataset"]) def create(self): """Perform all methods to create captions.""" @@ -289,23 +295,26 @@ def create(self): def _get_template(self): language = self.language module_rootpath = os.path.split(os.path.abspath(__file__))[0] - return os.path.join(module_rootpath, 'templates', language, - 'DokuwikiCaption.txt.jinja') + return os.path.join( + module_rootpath, + "templates", + language, + "DokuwikiCaption.txt.jinja", + ) def _prepare_metadata(self): - self._metadata = self.context['dataset']['metadata'] - self._metadata['parameter'] = collections.OrderedDict() + self._metadata = self.context["dataset"]["metadata"] + self._metadata["parameter"] = collections.OrderedDict() self._collect_experimental_parameters() def _collect_experimental_parameters(self): """Collect all the metadata keys.""" for key in self._metadata.keys(): - if key not in ['sample', 'measurement', 'parameter']: - self._metadata['parameter'][key] = \ - self._metadata[key] + if key not in ["sample", "measurement", "parameter"]: + self._metadata["parameter"][key] = self._metadata[key] def _create_context(self): - self.context['METADATA'] = self._metadata + self.context["METADATA"] = self._metadata class InfofileReporter(DokuwikiCaptionsReporter): @@ -337,19 +346,21 @@ class InfofileReporter(DokuwikiCaptionsReporter): """ def __init__(self): - self.filename = '' - self.language = 'en' + self.filename = "" + self.language = "en" self.template = self._get_template() super().__init__(template=self.template, filename=self.filename) - self._exclude_from_to_dict.extend(['dataset']) + self._exclude_from_to_dict.extend(["dataset"]) def _get_template(self): language = self.language module_rootpath = os.path.split(os.path.abspath(__file__))[0] - return os.path.join(module_rootpath, 'templates', language, - 'Infofile.info.jinja') + return os.path.join( + module_rootpath, "templates", language, "Infofile.info.jinja" + ) def _create_context(self): super()._create_context() - self.context['DATASET_ID'] = \ - os.path.split(self.context['dataset']['id'])[-1] + self.context["DATASET_ID"] = os.path.split( + self.context["dataset"]["id"] + )[-1] diff --git a/cwepr/utils.py b/cwepr/utils.py index e244323..8bb1645 100644 --- a/cwepr/utils.py +++ b/cwepr/utils.py @@ -36,8 +36,8 @@ def convert_g2mT(values, mw_freq=None): # noqa converted values in millitesla (mT) """ - planck_constant = scipy.constants.value('Planck constant') - mu_b = scipy.constants.value('Bohr magneton') + planck_constant = scipy.constants.value("Planck constant") + mu_b = scipy.constants.value("Bohr magneton") values = np.asarray([not_zero(value) for value in values]) return (planck_constant * mw_freq * 1e9) / (mu_b * values * 1e-3) @@ -67,8 +67,8 @@ def convert_mT2g(values, mw_freq=None): # noqa converted values in *g* """ - planck_constant = scipy.constants.value('Planck constant') - mu_b = scipy.constants.value('Bohr magneton') + planck_constant = scipy.constants.value("Planck constant") + mu_b = scipy.constants.value("Bohr magneton") values = np.asarray([not_zero(value) for value in 
values]) return (planck_constant * mw_freq * 1e9) / (mu_b * values * 1e-3) @@ -101,4 +101,6 @@ def not_zero(value): Value guaranteed not to be zero """ - return np.copysign(max(abs(value), np.finfo(np.float64).resolution), value) + return np.copysign( + max(abs(value), np.finfo(np.float64).resolution), value + ) diff --git a/docs/conf.py b/docs/conf.py index de8e741..7d4b884 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -22,16 +22,19 @@ import os import subprocess -with open(os.path.join(os.path.dirname(__file__), '..', 'VERSION')) as \ - version_file: +with open( + os.path.join(os.path.dirname(__file__), "..", "VERSION") +) as version_file: release_ = version_file.read().strip() -project = 'cwepr' -copyright = '2020- Mirjam Schröder, 2018/19 Pascal Kirchner, 2018– Till Biskup' -author = 'Mirjam Schröder, Pascal Kirchner, Till Biskup' +project = "cwepr" +copyright = ( + "2020- Mirjam Schröder, 2018/19 Pascal Kirchner, 2018– Till Biskup" +) +author = "Mirjam Schröder, Pascal Kirchner, Till Biskup" # The short X.Y version -version = ".".join(release_.split('.')[0:2]) +version = ".".join(release_.split(".")[0:2]) # The full version, including alpha/beta/rc tags release = release_ @@ -46,64 +49,67 @@ # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.intersphinx', - 'sphinx.ext.todo', - 'sphinx.ext.coverage', - 'sphinx.ext.mathjax', + "sphinx.ext.autodoc", + "sphinx.ext.intersphinx", + "sphinx.ext.todo", + "sphinx.ext.coverage", + "sphinx.ext.mathjax", # 'sphinx.ext.viewcode', - 'sphinx.ext.napoleon', - 'sphinx_multiversion', - 'sphinxcontrib.bibtex' + "sphinx.ext.napoleon", + "sphinx_multiversion", + "sphinxcontrib.bibtex", ] # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] -source_suffix = '.rst' +source_suffix = ".rst" # The master toctree document. -master_doc = 'index' +master_doc = "index" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. -language = 'en' +language = "en" # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. -exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] +exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] # The name of the Pygments (syntax highlighting) style to use. 
-pygments_style = 'sphinx' +pygments_style = "sphinx" # Autodoc configuration autodoc_default_options = { - 'members': True, - 'member-order': 'bysource', - 'undoc-members': True, - 'private-members': False, - 'show-inheritance': True, + "members": True, + "member-order": "bysource", + "undoc-members": True, + "private-members": False, + "show-inheritance": True, } # Multiversion configuration -smv_branch_whitelist = r'^master.*$' -smv_tag_whitelist = r'^v\d+\.\d+$' -smv_released_pattern = r'^refs/tags/v\d+\.\d+$' - -tag = subprocess.run("git describe --tags `git rev-list --tags " - "--max-count=1`", shell=True, capture_output=True) +smv_branch_whitelist = r"^master.*$" +smv_tag_whitelist = r"^v\d+\.\d+$" +smv_released_pattern = r"^refs/tags/v\d+\.\d+$" + +tag = subprocess.run( + "git describe --tags `git rev-list --tags " "--max-count=1`", + shell=True, + capture_output=True, +) smv_latest_version = tag.stdout.decode().strip() # Bibtex configuration -bibtex_bibfiles = ['literature.bib'] +bibtex_bibfiles = ["literature.bib"] # -- Options for HTML output ------------------------------------------------- @@ -111,7 +117,7 @@ # a list of builtin themes. # # html_theme = 'alabaster' -html_theme = 'sphinx_rtd_theme' +html_theme = "sphinx_rtd_theme" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the @@ -120,30 +126,30 @@ # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] +# html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = None +# html_title = None # A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None +# html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. -html_logo = './cwepr-logo.png' +html_logo = "./cwepr-logo.png" # The name of an image file (relative to this directory) to use as a favicon of # the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. -html_favicon = './cwepr-favicon.ico' +html_favicon = "./cwepr-favicon.ico" -html_last_updated_fmt = '%Y-%m-%d' +html_last_updated_fmt = "%Y-%m-%d" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -#html_static_path = ['_static'] +# html_static_path = ['_static'] # Custom sidebar templates, must be a dictionary that maps document names # to template names. @@ -159,7 +165,7 @@ # -- Options for HTMLHelp output --------------------------------------------- # Output file base name for HTML help builder. -htmlhelp_basename = 'cweprdoc' +htmlhelp_basename = "cweprdoc" # -- Options for LaTeX output ------------------------------------------------ @@ -168,15 +174,12 @@ # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. # # 'preamble': '', - # Latex figure (float) alignment # # 'figure_align': 'htbp', @@ -186,8 +189,7 @@ # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). 
latex_documents = [ - (master_doc, 'cwepr.tex', 'cwepr Documentation', - author, 'manual'), + (master_doc, "cwepr.tex", "cwepr Documentation", author, "manual"), ] @@ -195,10 +197,7 @@ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, 'cwepr', 'cwepr Documentation', - [author], 1) -] +man_pages = [(master_doc, "cwepr", "cwepr Documentation", [author], 1)] # -- Options for Texinfo output ---------------------------------------------- @@ -207,9 +206,15 @@ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - (master_doc, 'cwepr', 'cwepr Documentation', - author, 'cwepr', 'One line description of project.', - 'Miscellaneous'), + ( + master_doc, + "cwepr", + "cwepr Documentation", + author, + "cwepr", + "One line description of project.", + "Miscellaneous", + ), ] @@ -228,7 +233,7 @@ # epub_uid = '' # A list of files that should not be packed into the epub file. -epub_exclude_files = ['search.html'] +epub_exclude_files = ["search.html"] # -- Extension configuration ------------------------------------------------- @@ -237,11 +242,11 @@ # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = { - 'python': ('https://docs.python.org/3', None), - 'numpy': ('https://numpy.org/doc/stable/', None), - 'scipy': ('https://docs.scipy.org/doc/scipy/', None), - 'matplotlib': ('https://matplotlib.org/stable/', None), - 'aspecd': ('https://docs.aspecd.de/', None), + "python": ("https://docs.python.org/3", None), + "numpy": ("https://numpy.org/doc/stable/", None), + "scipy": ("https://docs.scipy.org/doc/scipy/", None), + "matplotlib": ("https://matplotlib.org/stable/", None), + "aspecd": ("https://docs.aspecd.de/", None), } # -- Options for todo extension ---------------------------------------------- diff --git a/docs/datasets2yaml.py b/docs/datasets2yaml.py index 01b9185..a810687 100644 --- a/docs/datasets2yaml.py +++ b/docs/datasets2yaml.py @@ -1,12 +1,13 @@ import aspecd.utils -class_names = ['ExperimentalDataset', 'CalculatedDataset'] +class_names = ["ExperimentalDataset", "CalculatedDataset"] for class_name in class_names: yaml = aspecd.utils.Yaml() - ds = aspecd.utils.object_from_class_name(".".join(['cwepr.dataset', - class_name])) + ds = aspecd.utils.object_from_class_name( + ".".join(["cwepr.dataset", class_name]) + ) yaml.dict = ds.to_dict() yaml.serialise_numpy_arrays() - yaml.write_to(".".join([class_name, 'yaml'])) + yaml.write_to(".".join([class_name, "yaml"])) diff --git a/setup.py b/setup.py index 5cafaa9..1e12efa 100644 --- a/setup.py +++ b/setup.py @@ -1,30 +1,30 @@ import os import setuptools -with open(os.path.join(os.path.dirname(__file__), 'VERSION')) as version_file: +with open(os.path.join(os.path.dirname(__file__), "VERSION")) as version_file: version = version_file.read().strip() -with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as f: +with open(os.path.join(os.path.dirname(__file__), "README.rst")) as f: readme = f.read() setuptools.setup( - name='cwepr', + name="cwepr", version=version, - description='Package for handling cw-EPR data.', + description="Package for handling cw-EPR data.", long_description=readme, - long_description_content_type='text/x-rst', - author='Mirjam Schröder, Pascal Kirchner, Till Biskup', - author_email='till@till-biskup.de', - url='https://www.cwepr.de/', + long_description_content_type="text/x-rst", + author="Mirjam Schröder, Pascal Kirchner, 
Till Biskup", + author_email="till@till-biskup.de", + url="https://www.cwepr.de/", project_urls={ "Documentation": "https://docs.cwepr.de/", "Source": "https://github.com/tillbiskup/cwepr", - 'Bug Tracker': 'https://github.com/tillbiskup/cwepr/issues', + "Bug Tracker": "https://github.com/tillbiskup/cwepr/issues", }, - packages=setuptools.find_packages(exclude=('tests', 'docs')), + packages=setuptools.find_packages(exclude=("tests", "docs")), include_package_data=True, - license='BSD', + license="BSD", keywords=[ "EPR spectroscopy", "data processing and analysis", @@ -47,15 +47,19 @@ "Topic :: Scientific/Engineering", ], install_requires=[ - 'aspecd>=0.9.0', - 'numpy', - 'scipy', - 'matplotlib', + "aspecd>=0.9.0", + "numpy", + "scipy", + "matplotlib", ], extras_require={ - 'dev': ['prospector[with_pyroma]'], - 'docs': ['sphinx', 'sphinx-rtd-theme', 'sphinxcontrib-bibtex', - 'sphinx-multiversion'], + "dev": ["prospector[with_pyroma]"], + "docs": [ + "sphinx", + "sphinx-rtd-theme", + "sphinxcontrib-bibtex", + "sphinx-multiversion", + ], }, - python_requires='>=3.7', + python_requires=">=3.7", ) diff --git a/tests/io/test_bes3t.py b/tests/io/test_bes3t.py index 0db9f03..7a362fd 100644 --- a/tests/io/test_bes3t.py +++ b/tests/io/test_bes3t.py @@ -14,34 +14,41 @@ def setUp(self): self.dataset = cwepr.dataset.ExperimentalDataset() def test_import_with_1D_dataset(self): - source = os.path.join(ROOTPATH, 'testdata/test-bes3t-1D-fieldsweep.DSC') + source = os.path.join( + ROOTPATH, "testdata/test-bes3t-1D-fieldsweep.DSC" + ) importer = cwepr.io.bes3t.BES3TImporter(source=source) self.dataset.import_from(importer) self.assertTrue(self.dataset.data.axes[0].unit) self.assertFalse(self.dataset.data.axes[1].unit) def test_import_with_1D_dataset_dimensions(self): - source = os.path.join(ROOTPATH, 'testdata/test-bes3t-1D-fieldsweep.DSC') + source = os.path.join( + ROOTPATH, "testdata/test-bes3t-1D-fieldsweep.DSC" + ) importer = cwepr.io.bes3t.BES3TImporter(source=source) self.dataset.import_from(importer) self.assertEqual(2, len(self.dataset.data.axes)) self.assertEqual(1, self.dataset.data.data.ndim) def test_import_gives_correct_units(self): - source = os.path.join(ROOTPATH, 'testdata/BDPA-1DFieldSweep') + source = os.path.join(ROOTPATH, "testdata/BDPA-1DFieldSweep") importer = cwepr.io.bes3t.BES3TImporter(source=source) self.dataset.import_from(importer) - self.assertAlmostEqual(0.6325, - self.dataset.metadata.bridge.power.value) - self.assertEqual('mW', - self.dataset.metadata.bridge.power.unit) - self.assertEqual('mT', self.dataset.metadata.magnetic_field.start.unit) - self.assertEqual('mT', - self.dataset.metadata.magnetic_field.sweep_width.unit) + self.assertAlmostEqual( + 0.6325, self.dataset.metadata.bridge.power.value + ) + self.assertEqual("mW", self.dataset.metadata.bridge.power.unit) + self.assertEqual( + "mT", self.dataset.metadata.magnetic_field.start.unit + ) + self.assertEqual( + "mT", self.dataset.metadata.magnetic_field.sweep_width.unit + ) self.assertTrue(self.dataset.data.axes[0].values[0] < 1000) def test_import_with_2D_dataset_powersweep(self): - source = os.path.join(ROOTPATH, 'testdata/BDPA-2DFieldPower.DSC') + source = os.path.join(ROOTPATH, "testdata/BDPA-2DFieldPower.DSC") importer = cwepr.io.bes3t.BES3TImporter(source=source) self.dataset.import_from(importer) self.assertTrue(self.dataset.data.axes[0].unit) @@ -49,36 +56,36 @@ def test_import_with_2D_dataset_powersweep(self): self.assertTrue(self.dataset.data.axes[1].quantity) def test_import_with_2D_dataset_fielddelay(self): 
- source = os.path.join(ROOTPATH, 'testdata/BDPA-2DFieldDelay.DSC') + source = os.path.join(ROOTPATH, "testdata/BDPA-2DFieldDelay.DSC") importer = cwepr.io.bes3t.BES3TImporter(source=source) self.dataset.import_from(importer) self.assertTrue(self.dataset.data.axes[0].unit) self.assertTrue(self.dataset.data.axes[1].unit) def test_imports_infofile(self): - source = os.path.join(ROOTPATH, 'testdata/BDPA-2DFieldDelay.DSC') + source = os.path.join(ROOTPATH, "testdata/BDPA-2DFieldDelay.DSC") importer = cwepr.io.bes3t.BES3TImporter(source=source) self.dataset.import_from(importer) self.assertTrue(self.dataset.metadata.measurement.operator) self.assertTrue(self.dataset.metadata.measurement.purpose) self.assertTrue(self.dataset.metadata.experiment.type) self.assertTrue(self.dataset.metadata.probehead.type) - self.assertTrue(self.dataset.metadata.temperature_control.temperature - .value) + self.assertTrue( + self.dataset.metadata.temperature_control.temperature.value + ) def test_import_with_no_infofile_continues(self): - source = os.path.join(ROOTPATH, 'testdata/BDPA-2DFieldDelay') + source = os.path.join(ROOTPATH, "testdata/BDPA-2DFieldDelay") with tempfile.TemporaryDirectory() as testdir: - for extension in ('.DSC', '.DTA', '.YGF'): - new_source = os.path.join(testdir, 'test-wo-infofile') + for extension in (".DSC", ".DTA", ".YGF"): + new_source = os.path.join(testdir, "test-wo-infofile") shutil.copyfile(source + extension, new_source + extension) dataset = cwepr.dataset.ExperimentalDataset() - importer = cwepr.io.bes3t.BES3TImporter( - source=new_source) + importer = cwepr.io.bes3t.BES3TImporter(source=new_source) dataset.import_from(importer) def test_import_sets_correct_units_for_infofile(self): - source = os.path.join(ROOTPATH, 'testdata/BDPA-1DFieldSweep') + source = os.path.join(ROOTPATH, "testdata/BDPA-1DFieldSweep") importer = cwepr.io.bes3t.BES3TImporter(source=source) importer.dataset = cwepr.dataset.ExperimentalDataset() importer._clean_filenames() @@ -91,14 +98,17 @@ def test_import_sets_correct_units_for_infofile(self): importer._load_infofile() importer._map_infofile() - self.assertIn('ms', importer._metadata_dict['signal_channel'][ - 'time_constant']) + self.assertIn( + "ms", importer._metadata_dict["signal_channel"]["time_constant"] + ) def test_import_sets_correct_units_for_dsc_file(self): - source = os.path.join(ROOTPATH, 'testdata/BDPA-1DFieldSweep') + source = os.path.join(ROOTPATH, "testdata/BDPA-1DFieldSweep") importer = cwepr.io.bes3t.BES3TImporter(source=source) self.dataset.import_from(importer) - self.assertEqual(self.dataset.metadata.signal_channel.time_constant - .value, 2.56) - self.assertEqual(self.dataset.metadata.signal_channel.time_constant - .unit, 'ms') + self.assertEqual( + self.dataset.metadata.signal_channel.time_constant.value, 2.56 + ) + self.assertEqual( + self.dataset.metadata.signal_channel.time_constant.unit, "ms" + ) diff --git a/tests/io/test_esp_winepr.py b/tests/io/test_esp_winepr.py index 98c15bd..d6d40b2 100644 --- a/tests/io/test_esp_winepr.py +++ b/tests/io/test_esp_winepr.py @@ -11,86 +11,123 @@ class TestESPWinEPRImporter(unittest.TestCase): def setUp(self): self.dataset = cwepr.dataset.ExperimentalDataset() - self.sources = [os.path.join(ROOTPATH, path) for path in [ - 'testdata/ESP', 'testdata/EMX-winEPR.par', 'testdata/winepr.par']] - self.source = os.path.join(ROOTPATH, 'testdata/winepr.par') + self.sources = [ + os.path.join(ROOTPATH, path) + for path in [ + "testdata/ESP", + "testdata/EMX-winEPR.par", + "testdata/winepr.par", + ] + ] + 
self.source = os.path.join(ROOTPATH, "testdata/winepr.par") def test_imports_esp_data_correctly(self): - importer = cwepr.io.esp_winepr.ESPWinEPRImporter(source=self.sources[0]) + importer = cwepr.io.esp_winepr.ESPWinEPRImporter( + source=self.sources[0] + ) self.dataset.import_from(importer) - self.assertTrue(self.dataset.data.data[0] < 10 ** 12) + self.assertTrue(self.dataset.data.data[0] < 10**12) def test_imports_winepr_data_correctly(self): - importer = cwepr.io.esp_winepr.ESPWinEPRImporter(source=self.sources[1]) + importer = cwepr.io.esp_winepr.ESPWinEPRImporter( + source=self.sources[1] + ) self.dataset.import_from(importer) - self.assertTrue(self.dataset.data.data[0] < 10 ** 12) + self.assertTrue(self.dataset.data.data[0] < 10**12) def test_gets_parameter(self): - importer = cwepr.io.esp_winepr.ESPWinEPRImporter(source=self.sources[0]) + importer = cwepr.io.esp_winepr.ESPWinEPRImporter( + source=self.sources[0] + ) self.dataset.import_from(importer) self.assertTrue(len(importer._par_dict.keys()) > 1) def test_infofile_gets_imported(self): - importer = cwepr.io.esp_winepr.ESPWinEPRImporter(source=self.sources[0]) + importer = cwepr.io.esp_winepr.ESPWinEPRImporter( + source=self.sources[0] + ) self.dataset.import_from(importer) - self.assertTrue(isinstance( - self.dataset.metadata.bridge.mw_frequency.value, float)) + self.assertTrue( + isinstance(self.dataset.metadata.bridge.mw_frequency.value, float) + ) def test_map_par_parameters_correctly(self): - importer = cwepr.io.esp_winepr.ESPWinEPRImporter(source=self.sources[0]) + importer = cwepr.io.esp_winepr.ESPWinEPRImporter( + source=self.sources[0] + ) self.dataset.import_from(importer) - self.assertEqual(5.000000e+05, - self.dataset.metadata.signal_channel.receiver_gain - .value) - self.assertAlmostEqual(339.498, - self.dataset.metadata.magnetic_field.start.value, - 2) + self.assertEqual( + 5.000000e05, + self.dataset.metadata.signal_channel.receiver_gain.value, + ) + self.assertAlmostEqual( + 339.498, self.dataset.metadata.magnetic_field.start.value, 2 + ) def test_map_par_parameters_correctly_second_dataset(self): - importer = cwepr.io.esp_winepr.ESPWinEPRImporter(source=self.sources[1]) + importer = cwepr.io.esp_winepr.ESPWinEPRImporter( + source=self.sources[1] + ) self.dataset.import_from(importer) - self.assertNotIn('RRG', importer._par_dict.keys()) - self.assertAlmostEqual(350.5, - self.dataset.metadata.magnetic_field.start.value, - 2) + self.assertNotIn("RRG", importer._par_dict.keys()) + self.assertAlmostEqual( + 350.5, self.dataset.metadata.magnetic_field.start.value, 2 + ) def test_import_with_1D_dataset(self): importer = cwepr.io.esp_winepr.ESPWinEPRImporter(source=self.source) self.dataset.import_from(importer) - self.assertTrue(self.dataset.data.axes[0].unit in ('G', 'mT')) + self.assertTrue(self.dataset.data.axes[0].unit in ("G", "mT")) self.assertFalse(self.dataset.data.axes[1].unit) def test_winepr_sets_default_values(self): - importer = cwepr.io.esp_winepr.ESPWinEPRImporter(source=self.sources[2]) + importer = cwepr.io.esp_winepr.ESPWinEPRImporter( + source=self.sources[2] + ) self.dataset.import_from(importer) - self.assertEqual(100, self.dataset.metadata.signal_channel - .modulation_frequency.value) - self.assertEqual('kHz', self.dataset.metadata.signal_channel - .modulation_frequency.unit) + self.assertEqual( + 100, + self.dataset.metadata.signal_channel.modulation_frequency.value, + ) + self.assertEqual( + "kHz", + self.dataset.metadata.signal_channel.modulation_frequency.unit, + ) def 
test_time_gets_imported_correctly_from_par(self): - importer = cwepr.io.esp_winepr.ESPWinEPRImporter(source=self.sources[2]) + importer = cwepr.io.esp_winepr.ESPWinEPRImporter( + source=self.sources[2] + ) self.dataset.import_from(importer) date_time = datetime.datetime(2021, 10, 15, 10, 37) self.assertEqual(date_time, self.dataset.metadata.measurement.start) def test_frequency_gets_written(self): - importer = cwepr.io.esp_winepr.ESPWinEPRImporter(source=self.sources[2]) + importer = cwepr.io.esp_winepr.ESPWinEPRImporter( + source=self.sources[2] + ) self.dataset.import_from(importer) self.assertTrue(self.dataset.metadata.bridge.mw_frequency.value) def test_operator_is_written_from_infofile(self): - importer = cwepr.io.esp_winepr.ESPWinEPRImporter(source=self.sources[1]) + importer = cwepr.io.esp_winepr.ESPWinEPRImporter( + source=self.sources[1] + ) self.dataset.import_from(importer) self.assertTrue(self.dataset.metadata.measurement.operator) self.assertTrue(self.dataset.metadata.measurement.purpose) self.assertTrue(self.dataset.metadata.experiment.type) self.assertTrue(self.dataset.metadata.probehead.type) - self.assertTrue(self.dataset.metadata.temperature_control.temperature - .value) + self.assertTrue( + self.dataset.metadata.temperature_control.temperature.value + ) def test_mod_amp_has_unit(self): - importer = cwepr.io.esp_winepr.ESPWinEPRImporter(source=self.sources[1]) + importer = cwepr.io.esp_winepr.ESPWinEPRImporter( + source=self.sources[1] + ) self.dataset.import_from(importer) self.assertEqual( - self.dataset.metadata.signal_channel.modulation_amplitude.unit, 'G') + self.dataset.metadata.signal_channel.modulation_amplitude.unit, + "G", + ) diff --git a/tests/io/test_exporter.py b/tests/io/test_exporter.py index ce2ce6d..9929901 100644 --- a/tests/io/test_exporter.py +++ b/tests/io/test_exporter.py @@ -9,14 +9,16 @@ class TestMetadataExporter(unittest.TestCase): def setUp(self): self.export = cwepr.io.exporter.MetadataExporter() self.dataset = cwepr.dataset.ExperimentalDataset() - self.filename = '' + self.filename = "" def tearDown(self): if os.path.exists(self.export.filename): os.remove(self.export.filename) def test_metadata_dict_is_dict(self): - self.assertIsInstance(self.export.metadata_dict, collections.OrderedDict) + self.assertIsInstance( + self.export.metadata_dict, collections.OrderedDict + ) def test_file_exists_with_default_filename_after_export(self): self.export.dataset = self.dataset @@ -25,27 +27,27 @@ def test_file_exists_with_default_filename_after_export(self): def test_file_exists_with_custom_filename_after_export(self): self.export.dataset = self.dataset - self.export.filename = 'my_filename.yaml' + self.export.filename = "my_filename.yaml" self.dataset.export_to(self.export) self.assertTrue(os.path.exists(self.export.filename)) def test_file_exists_with_custom_filename_wo_extension_after_export(self): self.export.dataset = self.dataset - self.export.filename = 'my_filename' + self.export.filename = "my_filename" self.dataset.export_to(self.export) - self.assertTrue(os.path.exists('my_filename.yaml')) + self.assertTrue(os.path.exists("my_filename.yaml")) def test_metadata_get_cleaned_from_empty_values(self): self.dataset.metadata.bridge.mw_frequency.value = 9.5 - self.dataset.metadata.bridge.mw_frequency.unit = 'GHz' + self.dataset.metadata.bridge.mw_frequency.unit = "GHz" self.export.dataset = self.dataset - self.export.filename = 'my_filename' + self.export.filename = "my_filename" self.dataset.export_to(self.export) goal_dict = { - 'bridge': { - 
'mw_frequency': { - 'value': 9.5, - 'unit': 'GHz', + "bridge": { + "mw_frequency": { + "value": 9.5, + "unit": "GHz", } } } diff --git a/tests/io/test_factory.py b/tests/io/test_factory.py index 606a714..5893007 100644 --- a/tests/io/test_factory.py +++ b/tests/io/test_factory.py @@ -20,57 +20,57 @@ def test_instantiate_class(self): pass def test_goniometer_importer_gets_correct_files(self): - source = os.path.join(ROOTPATH, 'testdata', 'magnettech-goniometer/') + source = os.path.join(ROOTPATH, "testdata", "magnettech-goniometer/") importer = cwepr.dataset.DatasetFactory().importer_factory importer.get_importer(source=source) - self.assertEqual('GoniometerSweep', importer.data_format) + self.assertEqual("GoniometerSweep", importer.data_format) def test_powersweep_importer_gets_correct_files(self): - source = os.path.join(ROOTPATH, 'testdata', 'magnettech-power/') + source = os.path.join(ROOTPATH, "testdata", "magnettech-power/") importer = cwepr.dataset.DatasetFactory().importer_factory importer.get_importer(source=source) - self.assertEqual('PowerSweep', importer.data_format) + self.assertEqual("PowerSweep", importer.data_format) def test_goniometer_importer_does_not_import_inconsistent_data(self): - source = os.path.join(ROOTPATH, 'testdata', 'magnettech-goniometer/') + source = os.path.join(ROOTPATH, "testdata", "magnettech-goniometer/") with tempfile.TemporaryDirectory() as tmpdir: - new_source = os.path.join(tmpdir, 'new') + new_source = os.path.join(tmpdir, "new") importer = cwepr.dataset.DatasetFactory().importer_factory importer.get_importer(source=new_source) - self.assertNotEqual('MagnettechXML', importer.data_format) + self.assertNotEqual("MagnettechXML", importer.data_format) def test_factory_detects_extension(self): - source = os.path.join(ROOTPATH, 'testdata', 'test-magnettech.xml') + source = os.path.join(ROOTPATH, "testdata", "test-magnettech.xml") factory = cwepr.dataset.DatasetFactory() importer_factory = factory.importer_factory importer_factory.get_importer(source=source) root_source, _ = os.path.splitext(source) - self.assertEqual(importer_factory.data_format, 'MagnettechXML') + self.assertEqual(importer_factory.data_format, "MagnettechXML") def test_factory_without_extension_returns(self): - source = os.path.join(ROOTPATH, 'testdata', 'test-magnettech') + source = os.path.join(ROOTPATH, "testdata", "test-magnettech") factory = cwepr.dataset.DatasetFactory() importer_factory = factory.importer_factory importer_factory.get_importer(source=source) root_source, _ = os.path.splitext(source) - self.assertEqual(importer_factory.data_format, 'MagnettechXML') + self.assertEqual(importer_factory.data_format, "MagnettechXML") def test_with_adf_extension_returns_adf_importer(self): - source = 'test.adf' + source = "test.adf" importer = self.factory.get_importer(source=source) self.assertIsInstance(importer, aspecd.io.AdfImporter) def test_niehsdat_file_returns_correct_importer(self): - source = os.path.join(ROOTPATH, 'testdata', 'Pyrene.dat') + source = os.path.join(ROOTPATH, "testdata", "Pyrene.dat") importer = self.factory.get_importer(source=source) self.assertIsInstance(importer, cwepr.io.NIEHSDatImporter) def test_niehslmb_file_returns_correct_importer(self): - source = os.path.join(ROOTPATH, 'testdata', 'dmpo.lmb') + source = os.path.join(ROOTPATH, "testdata", "dmpo.lmb") importer = self.factory.get_importer(source=source) self.assertIsInstance(importer, cwepr.io.NIEHSLmbImporter) def test_niehsexp_file_returns_correct_importer(self): - source = os.path.join(ROOTPATH, 
'testdata', 'e1-05.exp') + source = os.path.join(ROOTPATH, "testdata", "e1-05.exp") importer = self.factory.get_importer(source=source) self.assertIsInstance(importer, cwepr.io.NIEHSExpImporter) diff --git a/tests/io/test_magnettech.py b/tests/io/test_magnettech.py index 9d98fbb..de9103e 100644 --- a/tests/io/test_magnettech.py +++ b/tests/io/test_magnettech.py @@ -18,25 +18,30 @@ class TestMagnettechXmlImporter(unittest.TestCase): def setUp(self): - source = os.path.join(ROOTPATH, 'testdata/test-magnettech') - self.importer = cwepr.io.magnettech.MagnettechXMLImporter(source=source) + source = os.path.join(ROOTPATH, "testdata/test-magnettech") + self.importer = cwepr.io.magnettech.MagnettechXMLImporter( + source=source + ) self.dataset = cwepr.dataset.ExperimentalDataset() def test_axis_dimensions_equals_one(self): converter = cwepr.io.magnettech.MagnettechXMLImporter() - testdata = 'CZzAKavudEA=5HabpLDudEA=' - self.assertEqual(1, converter._convert_base64string_to_np_array( - testdata).ndim) + testdata = "CZzAKavudEA=5HabpLDudEA=" + self.assertEqual( + 1, converter._convert_base64string_to_np_array(testdata).ndim + ) def test_specific_fields_are_filled(self): self.dataset.import_from(self.importer) # arbitrary attributes that must have been set - teststr = ['temperature_control.temperature.value', - 'magnetic_field.start.unit', - 'bridge.mw_frequency.value'] + teststr = [ + "temperature_control.temperature.value", + "magnetic_field.start.unit", + "bridge.mw_frequency.value", + ] for string_ in teststr: metadata_object = self.dataset.metadata - for element in string_.split('.'): + for element in string_.split("."): metadata_object = getattr(metadata_object, element) self.assertTrue(metadata_object) @@ -51,80 +56,88 @@ def test_import_with_no_file_raises(self): self.dataset.import_from(importer) def test_import_with_not_existing_file_raises(self): - source = 'foo.xml' + source = "foo.xml" importer = cwepr.io.magnettech.MagnettechXMLImporter(source=source) with self.assertRaises(FileNotFoundError): self.dataset.import_from(importer) def test_import_with_no_infofile_continues(self): - source = os.path.join(ROOTPATH, 'testdata/test-magnettech.xml') + source = os.path.join(ROOTPATH, "testdata/test-magnettech.xml") with tempfile.TemporaryDirectory() as testdir: - new_source = os.path.join(testdir, 'test-wo-infofile') - shutil.copyfile(source, new_source + '.xml') + new_source = os.path.join(testdir, "test-wo-infofile") + shutil.copyfile(source, new_source + ".xml") importer = cwepr.io.magnettech.MagnettechXMLImporter( - source=new_source) + source=new_source + ) self.dataset.import_from(importer) def test_import_with_incorrect_iso_datetime_writes_correct_datetime(self): - source = os.path.join(ROOTPATH, 'testdata/test-magnettech.xml') + source = os.path.join(ROOTPATH, "testdata/test-magnettech.xml") mytime = datetime.datetime.fromisoformat( - '2020-11-18T16:56:04.771146+01:00') + "2020-11-18T16:56:04.771146+01:00" + ) with tempfile.TemporaryDirectory() as testdir: - new_source = os.path.join(testdir, 'test-wo-infofile') - shutil.copyfile(source, new_source + '.xml') + new_source = os.path.join(testdir, "test-wo-infofile") + shutil.copyfile(source, new_source + ".xml") importer = cwepr.io.magnettech.MagnettechXMLImporter( - source=new_source) + source=new_source + ) self.dataset.import_from(importer) imported_start_time = self.dataset.metadata.measurement.start self.assertAlmostEqual(mytime, imported_start_time) def test_with_file_extension(self): - source = os.path.join(ROOTPATH, 
'testdata/test-magnettech.xml') + source = os.path.join(ROOTPATH, "testdata/test-magnettech.xml") importer = cwepr.io.magnettech.MagnettechXMLImporter(source=source) self.dataset.import_from(importer) def test_comment_gets_written(self): - source = os.path.join(ROOTPATH, 'testdata/test-magnettech.xml') + source = os.path.join(ROOTPATH, "testdata/test-magnettech.xml") importer = cwepr.io.magnettech.MagnettechXMLImporter(source=source) self.dataset.import_from(importer) self.assertTrue(self.dataset.annotations) def test_import_with_second_harmonic_imports_first(self): - source = os.path.join(ROOTPATH, 'testdata/magnettech-second-harmonic') + source = os.path.join(ROOTPATH, "testdata/magnettech-second-harmonic") self.importer.source = source self.dataset.import_from(self.importer) - self.assertEqual(self.importer._data_curve.attrib['Name'], - 'MWAbsorption (1st harm.)') + self.assertEqual( + self.importer._data_curve.attrib["Name"], + "MWAbsorption (1st harm.)", + ) def test_check_on_other_source_file_versions(self): - files = glob.glob('testdata/magnettech-various-formats/*.xml') + files = glob.glob("testdata/magnettech-various-formats/*.xml") for file in files: self.importer.source = file try: self.dataset.import_from(self.importer) except TypeError: - print(f'File {file} not imported') + print(f"File {file} not imported") continue self.assertIsInstance(self.dataset.data.data, np.ndarray) - self.assertTrue(self.dataset.metadata.temperature_control.temperature) + self.assertTrue( + self.dataset.metadata.temperature_control.temperature + ) class TestGoniometerSweepImporter(unittest.TestCase): def setUp(self): - source = os.path.join(ROOTPATH, 'testdata/magnettech-goniometer') - self.goniometer_importer = \ + source = os.path.join(ROOTPATH, "testdata/magnettech-goniometer") + self.goniometer_importer = ( cwepr.io.magnettech.GoniometerSweepImporter(source=source) + ) self.dataset = cwepr.dataset.ExperimentalDataset() def instantiate_class(self): pass def test_has_import_method(self): - self.assertTrue(hasattr(self.goniometer_importer, '_import')) + self.assertTrue(hasattr(self.goniometer_importer, "_import")) self.assertTrue(callable(self.goniometer_importer._import)) def test_source_path_doesnt_exist_raises(self): - source = 'foo/' + source = "foo/" importer = cwepr.io.magnettech.GoniometerSweepImporter(source=source) with self.assertRaises(FileNotFoundError): self.dataset.import_from(importer) @@ -135,21 +148,24 @@ def test_sort_filenames_returns_sorted_list(self): sorted_list = self.goniometer_importer.filenames nums = [] for filename in sorted_list: - num = filename.split('gon_')[1] - nums.append(num.split('dg')[0]) - for x in range(len(nums)-1): - self.assertGreater(int(nums[x+1])-int(nums[x]), 0) + num = filename.split("gon_")[1] + nums.append(num.split("dg")[0]) + for x in range(len(nums) - 1): + self.assertGreater(int(nums[x + 1]) - int(nums[x]), 0) def test_has_import_all_data_to_list_method(self): - self.assertTrue(hasattr(self.goniometer_importer, - '_import_all_spectra_to_list')) - self.assertTrue(callable( - self.goniometer_importer._import_all_spectra_to_list)) + self.assertTrue( + hasattr(self.goniometer_importer, "_import_all_spectra_to_list") + ) + self.assertTrue( + callable(self.goniometer_importer._import_all_spectra_to_list) + ) def test_angles_smaller_than_360_deg(self): self.dataset.import_from(self.goniometer_importer) - self.assertTrue(all([x < 359 for x in - self.goniometer_importer._angles])) + self.assertTrue( + all([x < 359 for x in 
self.goniometer_importer._angles]) + ) def test_import_data_fills_dataset(self): self.dataset.import_from(self.goniometer_importer) @@ -159,21 +175,23 @@ def test_data_and_filenames_have_same_lengths(self): # Check whether all data has been imported correctly and was moved # entirely to the final self.dataset. self.dataset.import_from(self.goniometer_importer) - self.assertEqual(len(self.goniometer_importer.filenames), - self.goniometer_importer.dataset.data.data.shape[1]) + self.assertEqual( + len(self.goniometer_importer.filenames), + self.goniometer_importer.dataset.data.data.shape[1], + ) def test_all_datasets_have_same_frequency(self): self.dataset.import_from(self.goniometer_importer) frequencies = np.array([]) for set_ in self.goniometer_importer._data: - frequencies = np.append(frequencies, - set_.metadata.bridge.mw_frequency.value) + frequencies = np.append( + frequencies, set_.metadata.bridge.mw_frequency.value + ) self.assertAlmostEqual(max(frequencies), min(frequencies)) def test_goniometer_imports_with_slash_at_source(self): - source = os.path.join(ROOTPATH, 'testdata/magnettech-goniometer/') - importer = cwepr.io.magnettech.GoniometerSweepImporter( - source=source) + source = os.path.join(ROOTPATH, "testdata/magnettech-goniometer/") + importer = cwepr.io.magnettech.GoniometerSweepImporter(source=source) self.dataset.import_from(importer) def test_q_value_is_float(self): @@ -182,31 +200,33 @@ def test_q_value_is_float(self): self.assertIsInstance(q_value, float) def test_import_with_no_infofile_continues(self): - source = os.path.join(ROOTPATH, 'testdata/magnettech-goniometer') + source = os.path.join(ROOTPATH, "testdata/magnettech-goniometer") with tempfile.TemporaryDirectory() as testdir: - new_source = os.path.join(testdir, 'test-wo-infofile') + new_source = os.path.join(testdir, "test-wo-infofile") shutil.copytree(source, new_source) importer = cwepr.io.magnettech.GoniometerSweepImporter( - source=new_source) + source=new_source + ) self.dataset.import_from(importer) class TestAmplitudeSweepImporter(unittest.TestCase): def setUp(self): - source = os.path.join(ROOTPATH, 'testdata/magnettech-amplitude') - self.amplitude_importer = \ - cwepr.io.magnettech.AmplitudeSweepImporter(source=source) + source = os.path.join(ROOTPATH, "testdata/magnettech-amplitude") + self.amplitude_importer = cwepr.io.magnettech.AmplitudeSweepImporter( + source=source + ) self.dataset = cwepr.dataset.ExperimentalDataset() def instantiate_class(self): pass def test_has_import_method(self): - self.assertTrue(hasattr(self.amplitude_importer, '_import')) + self.assertTrue(hasattr(self.amplitude_importer, "_import")) self.assertTrue(callable(self.amplitude_importer._import)) def test_source_path_doesnt_exist_raises(self): - source = 'foo/' + source = "foo/" importer = cwepr.io.magnettech.AmplitudeSweepImporter(source=source) with self.assertRaises(FileNotFoundError): self.dataset.import_from(importer) @@ -217,26 +237,34 @@ def test_sort_filenames_returns_sorted_list(self): sorted_list = self.amplitude_importer.filenames nums = [] for filename in sorted_list: - num = filename.split('mod_')[1] - nums.append(num.split('mT')[0]) - for x in range(len(nums)-1): - self.assertGreater(int(nums[x+1])-int(nums[x]), 0) + num = filename.split("mod_")[1] + nums.append(num.split("mT")[0]) + for x in range(len(nums) - 1): + self.assertGreater(int(nums[x + 1]) - int(nums[x]), 0) def test_has_import_all_data_to_list_method(self): - self.assertTrue(hasattr(self.amplitude_importer, - '_import_all_spectra_to_list')) - 
self.assertTrue(callable( - self.amplitude_importer._import_all_spectra_to_list)) + self.assertTrue( + hasattr(self.amplitude_importer, "_import_all_spectra_to_list") + ) + self.assertTrue( + callable(self.amplitude_importer._import_all_spectra_to_list) + ) def test_amplitudes_all_in_mT(self): self.dataset.import_from(self.amplitude_importer) for item in self.amplitude_importer._amplitudes: - self.assertTrue(item.unit == 'mT') + self.assertTrue(item.unit == "mT") def test_amplitude_list_exists_of_floats(self): self.dataset.import_from(self.amplitude_importer) - self.assertTrue(all([type(x) == float for x in - self.amplitude_importer._amplitude_list])) + self.assertTrue( + all( + [ + type(x) == float + for x in self.amplitude_importer._amplitude_list + ] + ) + ) def test_import_data_fills_dataset(self): self.dataset.import_from(self.amplitude_importer) @@ -256,35 +284,41 @@ def test_raw_data_have_same_length(self): self.amplitude_importer._sort_filenames() self.amplitude_importer._import_all_spectra_to_list() self.amplitude_importer._bring_axes_to_same_values() - self.assertTrue(np.array_equal( - self.amplitude_importer._data[0].data.axes[0].values, - self.amplitude_importer._data[-1].data.axes[0].values)) + self.assertTrue( + np.array_equal( + self.amplitude_importer._data[0].data.axes[0].values, + self.amplitude_importer._data[-1].data.axes[0].values, + ) + ) def test_data_and_filenames_have_same_lengths(self): # Check whether all data has been imported correctly and was moved # entirely to the final self.dataset. self.dataset.import_from(self.amplitude_importer) - self.assertEqual(len(self.amplitude_importer.filenames), - self.amplitude_importer.dataset.data.data.shape[1]) + self.assertEqual( + len(self.amplitude_importer.filenames), + self.amplitude_importer.dataset.data.data.shape[1], + ) def test_second_axis_has_correct_values(self): self.dataset.import_from(self.amplitude_importer) self.assertListEqual( list(self.amplitude_importer.dataset.data.axes[1].values), - self.amplitude_importer._amplitude_list) + self.amplitude_importer._amplitude_list, + ) def test_all_datasets_have_same_frequency(self): self.dataset.import_from(self.amplitude_importer) frequencies = np.array([]) for set_ in self.amplitude_importer._data: - frequencies = np.append(frequencies, - set_.metadata.bridge.mw_frequency.value) + frequencies = np.append( + frequencies, set_.metadata.bridge.mw_frequency.value + ) self.assertAlmostEqual(max(frequencies), min(frequencies)) def test_amplitude_imports_with_slash_at_source(self): - source = os.path.join(ROOTPATH, 'testdata/magnettech-amplitude/') - importer = cwepr.io.magnettech.AmplitudeSweepImporter( - source=source) + source = os.path.join(ROOTPATH, "testdata/magnettech-amplitude/") + importer = cwepr.io.magnettech.AmplitudeSweepImporter(source=source) self.dataset.import_from(importer) def test_fixed_values_are_imported_to_metadata(self): @@ -300,33 +334,38 @@ def test_time_start_is_imported_in_readable_format(self): self.dataset.import_from(self.amplitude_importer) start = self.dataset.metadata.measurement.start self.assertTrue(start) - rex = re.compile("[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}") + rex = re.compile( + "[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}" + ) self.assertTrue(rex.match(start)) def test_time_end_is_imported_in_readable_format(self): self.dataset.import_from(self.amplitude_importer) end = self.dataset.metadata.measurement.end self.assertTrue(end) - rex = re.compile("[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}") + 
rex = re.compile( + "[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}" + ) self.assertTrue(rex.match(end)) class TestPowerSweepImporter(unittest.TestCase): def setUp(self): - source = os.path.join(ROOTPATH, 'testdata/magnettech-power') - self.power_importer = \ - cwepr.io.magnettech.PowerSweepImporter(source=source) + source = os.path.join(ROOTPATH, "testdata/magnettech-power") + self.power_importer = cwepr.io.magnettech.PowerSweepImporter( + source=source + ) self.dataset = cwepr.dataset.ExperimentalDataset() def instantiate_class(self): pass def test_has_import_method(self): - self.assertTrue(hasattr(self.power_importer, '_import')) + self.assertTrue(hasattr(self.power_importer, "_import")) self.assertTrue(callable(self.power_importer._import)) def test_source_path_doesnt_exist_raises(self): - source = 'foo/' + source = "foo/" importer = cwepr.io.magnettech.PowerSweepImporter(source=source) with self.assertRaises(FileNotFoundError): self.dataset.import_from(importer) @@ -337,26 +376,29 @@ def test_sort_filenames_returns_sorted_list(self): sorted_list = self.power_importer.filenames nums = [] for filename in sorted_list: - num = filename.split('pow_')[1] - nums.append(num.split('mW')[0]) - for x in range(len(nums)-1): - self.assertGreater(int(nums[x+1])-int(nums[x]), 0) + num = filename.split("pow_")[1] + nums.append(num.split("mW")[0]) + for x in range(len(nums) - 1): + self.assertGreater(int(nums[x + 1]) - int(nums[x]), 0) def test_has_import_all_data_to_list_method(self): - self.assertTrue(hasattr(self.power_importer, - '_import_all_spectra_to_list')) - self.assertTrue(callable( - self.power_importer._import_all_spectra_to_list)) + self.assertTrue( + hasattr(self.power_importer, "_import_all_spectra_to_list") + ) + self.assertTrue( + callable(self.power_importer._import_all_spectra_to_list) + ) def test_power_all_in_mW(self): self.dataset.import_from(self.power_importer) for item in self.power_importer._power: - self.assertTrue(item.unit == 'mW') + self.assertTrue(item.unit == "mW") def test_power_list_exists_of_floats(self): self.dataset.import_from(self.power_importer) - self.assertTrue(all([type(x) == float for x in - self.power_importer._power_list])) + self.assertTrue( + all([type(x) == float for x in self.power_importer._power_list]) + ) def test_import_data_fills_dataset(self): self.dataset.import_from(self.power_importer) @@ -376,35 +418,41 @@ def test_raw_data_have_same_length(self): self.power_importer._sort_filenames() self.power_importer._import_all_spectra_to_list() self.power_importer._bring_axes_to_same_values() - self.assertTrue(np.array_equal( - self.power_importer._data[0].data.axes[0].values, - self.power_importer._data[-1].data.axes[0].values)) + self.assertTrue( + np.array_equal( + self.power_importer._data[0].data.axes[0].values, + self.power_importer._data[-1].data.axes[0].values, + ) + ) def test_data_and_filenames_have_same_lengths(self): # Check whether all data has been imported correctly and was moved # entirely to the final self.dataset. 
self.dataset.import_from(self.power_importer) - self.assertEqual(len(self.power_importer.filenames), - self.power_importer.dataset.data.data.shape[1]) + self.assertEqual( + len(self.power_importer.filenames), + self.power_importer.dataset.data.data.shape[1], + ) def test_second_axis_has_correct_values(self): self.dataset.import_from(self.power_importer) self.assertListEqual( list(self.power_importer.dataset.data.axes[1].values), - self.power_importer._power_list) + self.power_importer._power_list, + ) def test_all_datasets_have_same_frequency(self): self.dataset.import_from(self.power_importer) frequencies = np.array([]) for set_ in self.power_importer._data: - frequencies = np.append(frequencies, - set_.metadata.bridge.mw_frequency.value) + frequencies = np.append( + frequencies, set_.metadata.bridge.mw_frequency.value + ) self.assertAlmostEqual(max(frequencies), min(frequencies)) def test_power_imports_with_slash_at_source(self): - source = os.path.join(ROOTPATH, 'testdata/magnettech-power/') - importer = cwepr.io.magnettech.PowerSweepImporter( - source=source) + source = os.path.join(ROOTPATH, "testdata/magnettech-power/") + importer = cwepr.io.magnettech.PowerSweepImporter(source=source) self.dataset.import_from(importer) def test_fixed_values_are_imported_to_metadata(self): @@ -420,12 +468,16 @@ def test_time_start_is_imported_in_readable_format(self): self.dataset.import_from(self.power_importer) start = self.dataset.metadata.measurement.start self.assertTrue(start) - rex = re.compile("[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}") + rex = re.compile( + "[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}" + ) self.assertTrue(rex.match(start)) def test_time_end_is_imported_in_readable_format(self): self.dataset.import_from(self.power_importer) end = self.dataset.metadata.measurement.end self.assertTrue(end) - rex = re.compile("[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}") + rex = re.compile( + "[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}" + ) self.assertTrue(rex.match(end)) diff --git a/tests/io/test_niehs.py b/tests/io/test_niehs.py index 9689adb..320baee 100644 --- a/tests/io/test_niehs.py +++ b/tests/io/test_niehs.py @@ -9,9 +9,8 @@ class TestNIEHSDatImporter(unittest.TestCase): - def setUp(self): - source = os.path.join(ROOTPATH, 'testdata/Pyrene') + source = os.path.join(ROOTPATH, "testdata/Pyrene") self.importer = cwepr.io.niehs.NIEHSDatImporter(source=source) self.dataset = cwepr.dataset.ExperimentalDataset() @@ -28,35 +27,42 @@ def test_extract_data(self): def test_data_has_all_points(self): self.dataset.import_from(self.importer) - self.assertEqual(int(self.importer._raw_data[2]), - len(self.dataset.data.data)) + self.assertEqual( + int(self.importer._raw_data[2]), len(self.dataset.data.data) + ) def test_axis_exists_with_meaningful_values(self): self.dataset.import_from(self.importer) - self.assertEqual(int(self.importer._raw_data[2]), - len(self.dataset.data.axes[0].values)) + self.assertEqual( + int(self.importer._raw_data[2]), + len(self.dataset.data.axes[0].values), + ) self.assertNotEqual(0, self.dataset.data.axes[0].values[0]) def test_axis_has_unit(self): self.dataset.import_from(self.importer) - self.assertEqual('mT', self.dataset.data.axes[0].unit) + self.assertEqual("mT", self.dataset.data.axes[0].unit) def test_metadata_points(self): self.dataset.import_from(self.importer) - self.assertEqual(int, type(self.dataset.metadata.magnetic_field.points)) + self.assertEqual( + int, type(self.dataset.metadata.magnetic_field.points) + ) 
self.assertNotEqual(0, self.dataset.metadata.magnetic_field.points) def test_metadata_magnetic_field(self): self.dataset.import_from(self.importer) - self.assertEqual('mT', - self.dataset.metadata.magnetic_field.start.unit) - self.assertNotEqual(0, self.dataset.metadata.magnetic_field.start.value) + self.assertEqual( + "mT", self.dataset.metadata.magnetic_field.start.unit + ) + self.assertNotEqual( + 0, self.dataset.metadata.magnetic_field.start.value + ) class TestNIEHSLmbImporter(unittest.TestCase): - def setUp(self): - source = os.path.join(ROOTPATH, 'testdata/dmpo') + source = os.path.join(ROOTPATH, "testdata/dmpo") self.importer = cwepr.io.niehs.NIEHSLmbImporter(source=source) self.dataset = cwepr.dataset.ExperimentalDataset() @@ -74,17 +80,20 @@ def test_extract_data(self): def test_axis_exists_with_meaningful_values(self): self.dataset.import_from(self.importer) - self.assertEqual(len(self.dataset.data.data), - len(self.dataset.data.axes[0].values)) + self.assertEqual( + len(self.dataset.data.data), len(self.dataset.data.axes[0].values) + ) self.assertTrue(self.dataset.data.axes[0].values.all()) def test_axis_has_unit(self): self.dataset.import_from(self.importer) - self.assertEqual('mT', self.dataset.data.axes[0].unit) + self.assertEqual("mT", self.dataset.data.axes[0].unit) def test_assign_comment(self): self.dataset.import_from(self.importer) - self.assertTrue(self.dataset.annotations[0].annotation.content["comment"]) + self.assertTrue( + self.dataset.annotations[0].annotation.content["comment"] + ) def test_assign_basic_metadata(self): self.dataset.import_from(self.importer) @@ -99,10 +108,9 @@ def test_assign_basic_metadata(self): class TestNIEHSExpImporter(unittest.TestCase): - def setUp(self): - self.dsv_source = os.path.join(ROOTPATH, 'testdata/pow128') - self.block_source = os.path.join(ROOTPATH, 'testdata/e1-05') + self.dsv_source = os.path.join(ROOTPATH, "testdata/pow128") + self.block_source = os.path.join(ROOTPATH, "testdata/e1-05") self.importer = cwepr.io.niehs.NIEHSExpImporter() self.dataset = cwepr.dataset.ExperimentalDataset() @@ -132,16 +140,19 @@ def test_extract_data_with_block_file(self): def test_axis_exists_with_meaningful_values(self): self.importer.source = self.block_source self.dataset.import_from(self.importer) - self.assertEqual(len(self.dataset.data.data), - len(self.dataset.data.axes[0].values)) + self.assertEqual( + len(self.dataset.data.data), len(self.dataset.data.axes[0].values) + ) self.assertTrue(self.dataset.data.axes[0].values.all()) def test_axis_has_unit(self): self.importer.source = self.block_source self.dataset.import_from(self.importer) - self.assertEqual('mT', self.dataset.data.axes[0].unit) + self.assertEqual("mT", self.dataset.data.axes[0].unit) def test_assign_comment(self): self.importer.source = self.block_source self.dataset.import_from(self.importer) - self.assertTrue(self.dataset.annotations[0].annotation.content["comment"]) + self.assertTrue( + self.dataset.annotations[0].annotation.content["comment"] + ) diff --git a/tests/io/test_txt_file.py b/tests/io/test_txt_file.py index dcc039e..3266f55 100644 --- a/tests/io/test_txt_file.py +++ b/tests/io/test_txt_file.py @@ -10,9 +10,8 @@ class TestCsvImporter(unittest.TestCase): - def setUp(self): - self.filename = 'testdata.csv' + self.filename = "testdata.csv" self.data = np.random.random([5, 2]) def tearDown(self): @@ -20,7 +19,7 @@ def tearDown(self): os.remove(self.filename) def create_testdata(self): - with open(self.filename, 'w+', encoding="utf8") as file: + with 
open(self.filename, "w+", encoding="utf8") as file: for row in self.data: file.write(f"{row[0]}, {row[1]}\n") @@ -41,9 +40,8 @@ def test_import_metadata(self): class TestTxtImporter(unittest.TestCase): - def setUp(self): - self.filename = 'testdata.csv' + self.filename = "testdata.csv" self.data = np.random.random([5, 2]) def tearDown(self): @@ -51,10 +49,11 @@ def tearDown(self): os.remove(self.filename) def create_testdata(self, delimiter=" ", separator="."): - with open(self.filename, 'w+', encoding="utf8") as file: + with open(self.filename, "w+", encoding="utf8") as file: for row in self.data: - file.write(f"{row[0]}{delimiter}{row[1]}\n".replace('.', - separator)) + file.write( + f"{row[0]}{delimiter}{row[1]}\n".replace(".", separator) + ) def test_import(self): source = self.filename @@ -65,39 +64,42 @@ def test_import(self): def test_import_with_delimiters(self): source = self.filename - for delimiter in (',', '\t', ';', ' '): + for delimiter in (",", "\t", ";", " "): with self.subTest(delimiter=delimiter): self.create_testdata(delimiter=delimiter) importer = cwepr.io.TxtImporter(source=source) - importer.parameters['delimiter'] = delimiter + importer.parameters["delimiter"] = delimiter dataset = cwepr.dataset.ExperimentalDataset() dataset.import_from(importer) - np.testing.assert_array_equal(self.data[:, 1], - dataset.data.data) + np.testing.assert_array_equal( + self.data[:, 1], dataset.data.data + ) def test_import_with_separators(self): source = self.filename - for separator in (',', '.'): + for separator in (",", "."): with self.subTest(separator=separator): self.create_testdata(separator=separator) importer = cwepr.io.TxtImporter(source=source) - importer.parameters['separator'] = separator + importer.parameters["separator"] = separator dataset = cwepr.dataset.ExperimentalDataset() dataset.import_from(importer) - np.testing.assert_array_equal(self.data[:, 1], - dataset.data.data) + np.testing.assert_array_equal( + self.data[:, 1], dataset.data.data + ) def test_import_with_file_extensions(self): source = self.filename - for extension in ('.csv', '.txt', '.dat', '.xyz', '.d', '.data', ''): + for extension in (".csv", ".txt", ".dat", ".xyz", ".d", ".data", ""): with self.subTest(extension=extension): - self.filename = self.filename.replace('.csv', extension) + self.filename = self.filename.replace(".csv", extension) self.create_testdata() importer = cwepr.io.TxtImporter(source=source) dataset = cwepr.dataset.ExperimentalDataset() dataset.import_from(importer) - np.testing.assert_array_equal(self.data[:, 1], - dataset.data.data) + np.testing.assert_array_equal( + self.data[:, 1], dataset.data.data + ) def test_import_with_skip_rows(self): source = self.filename @@ -105,14 +107,15 @@ def test_import_with_skip_rows(self): with self.subTest(skiprows=skiprows): self.create_testdata() importer = cwepr.io.TxtImporter(source=source) - importer.parameters['skiprows'] = skiprows + importer.parameters["skiprows"] = skiprows dataset = cwepr.dataset.ExperimentalDataset() dataset.import_from(importer) - np.testing.assert_array_equal(self.data[skiprows:, 1], - dataset.data.data) + np.testing.assert_array_equal( + self.data[skiprows:, 1], dataset.data.data + ) def test_import_metadata(self): - source = os.path.join(ROOTPATH, 'testdata/noisy_data.txt') + source = os.path.join(ROOTPATH, "testdata/noisy_data.txt") importer = cwepr.io.TxtImporter(source=source) dataset = cwepr.dataset.ExperimentalDataset() dataset.import_from(importer) diff --git a/tests/test_analysis.py b/tests/test_analysis.py 
index 809a10d..d884552 100644 --- a/tests/test_analysis.py +++ b/tests/test_analysis.py @@ -16,17 +16,19 @@ class TestAnalysis(unittest.TestCase): def setUp(self): - source = os.path.join(ROOTPATH, 'io/testdata/test-bes3t-1D-fieldsweep') + source = os.path.join( + ROOTPATH, "io/testdata/test-bes3t-1D-fieldsweep" + ) importer = cwepr.io.bes3t.BES3TImporter(source=source) self.dataset = cwepr.dataset.ExperimentalDataset() self.dataset.import_from(importer) def test_field_correction_value(self): analysator = cwepr.analysis.FieldCalibration() - analysator.parameters['standard'] = 'dpph' + analysator.parameters["standard"] = "dpph" analysator = self.dataset.analyse(analysator) - self.assertTrue(analysator.parameters['mw_frequency']) - self.assertTrue(analysator.parameters['g_value'] == 2.0036) + self.assertTrue(analysator.parameters["mw_frequency"]) + self.assertTrue(analysator.parameters["g_value"] == 2.0036) self.assertEqual(np.float64, type(analysator.result)) def test_area_under_curve(self): @@ -56,15 +58,16 @@ def setUp(self): self.dataset = cwepr.dataset.ExperimentalDataset() self.data = np.sin(np.linspace(0, 2 * np.pi, num=500)) self.mw_frequency = 9.68 - self.standard = 'LiLiF' + self.standard = "LiLiF" self.center_field = 345.410996 # For LiLiF with given MW freq def test_instantiate_class(self): pass def test_has_appropriate_description(self): - self.assertIn('magnetic field offset', - self.analysis.description.lower()) + self.assertIn( + "magnetic field offset", self.analysis.description.lower() + ) def test_perform_without_frequency_raises(self): with self.assertRaisesRegex(ValueError, "microwave frequency"): @@ -80,8 +83,9 @@ def test_perform_sets_parameters(self): self.dataset.metadata.bridge.mw_frequency.value = self.mw_frequency self.analysis.parameters["standard"] = self.standard analysis = self.dataset.analyse(self.analysis) - self.assertEqual(self.mw_frequency, - analysis.parameters["mw_frequency"]) + self.assertEqual( + self.mw_frequency, analysis.parameters["mw_frequency"] + ) self.assertEqual(2.002293, analysis.parameters["g_value"]) def test_perform_returns_correct_value(self): @@ -115,7 +119,7 @@ def test_instantiate_class(self): cwepr.analysis.Amplitude() def test_has_appropriate_description(self): - self.assertIn('amplitude', self.analysator.description.lower()) + self.assertIn("amplitude", self.analysator.description.lower()) def test_get_amplitude_1d_dataset(self): self.dataset.data.data = self.data @@ -136,27 +140,33 @@ def setUp(self): self.dataset = cwepr.dataset.ExperimentalDataset() data = np.sin(np.linspace(0, 2 * np.pi, num=500)) self.dataset.data.data = np.transpose(np.tile(data, (4, 1))) - self.dataset.data.axes[1].values = np.array([10, 5, 2.5, 1.25, 0.6125, - 0.305]) - self.dataset.data.axes[1].unit = 'mW' + self.dataset.data.axes[1].values = np.array( + [10, 5, 2.5, 1.25, 0.6125, 0.305] + ) + self.dataset.data.axes[1].unit = "mW" def test_instantiate_class(self): cwepr.analysis.AmplitudeVsPower() def test_has_description(self): - self.assertNotIn('abstract', self.analysator.description.lower()) + self.assertNotIn("abstract", self.analysator.description.lower()) def test_calculate_dataset(self): analysis = self.dataset.analyse(self.analysator) self.assertEqual(2, len(analysis.result.data.axes)) - self.assertEqual(len(np.sqrt(self.dataset.data.axes[1].values)), - len(analysis.result.data.axes[0].values)) - self.assertEqual('sqrt(mW)', analysis.result.data.axes[0].unit) + self.assertEqual( + len(np.sqrt(self.dataset.data.axes[1].values)), + 
len(analysis.result.data.axes[0].values), + ) + self.assertEqual("sqrt(mW)", analysis.result.data.axes[0].unit) def test_returns_ascending_x_axis(self): analysis = self.dataset.analyse(self.analysator) - self.assertGreater(analysis.result.data.axes[0].values[1] - - analysis.result.data.axes[0].values[0], 0) + self.assertGreater( + analysis.result.data.axes[0].values[1] + - analysis.result.data.axes[0].values[0], + 0, + ) class TestFitOnData(unittest.TestCase): @@ -168,7 +178,7 @@ def test_instantiate_class(self): cwepr.analysis.FitOnData() def test_has_description(self): - self.assertNotIn('abstract', self.analysator.description.lower()) + self.assertNotIn("abstract", self.analysator.description.lower()) def test_fit_returns_coefficients(self): self.dataset.data.data = np.linspace(1, 21) @@ -177,8 +187,9 @@ def test_fit_returns_coefficients(self): self.assertTrue(all(analysis.result)) def test_fit_only_takes_first_points(self): - self.dataset.data.data = np.concatenate([np.linspace(1, 5, num=5), - np.linspace(5.1, 6.1, num=5)]) + self.dataset.data.data = np.concatenate( + [np.linspace(1, 5, num=5), np.linspace(5.1, 6.1, num=5)] + ) self.dataset.data.axes[0].values = np.linspace(1, 10, num=10) analysis = self.dataset.analyse(self.analysator) self.assertAlmostEqual(1, analysis.result[0]) @@ -186,14 +197,14 @@ def test_fit_only_takes_first_points(self): def test_fit_does_second_order(self): self.dataset.data.axes[0].values = np.linspace(1, 10, num=10) self.dataset.data.data = 4 * self.dataset.data.axes[0].values ** 2 - self.analysator.parameters['order'] = 2 + self.analysator.parameters["order"] = 2 analysis = self.dataset.analyse(self.analysator) self.assertAlmostEqual(4, analysis.result[0]) def test_fixed_offset_with_offset_zero(self): self.dataset.data.data = np.asarray([0.45, 1, 2, 3, 4, 5]) self.dataset.data.axes[0].values = np.asarray([0.5, 1, 2, 3, 4, 5]) - self.analysator.parameters['fixed_intercept'] = True + self.analysator.parameters["fixed_intercept"] = True res = self.dataset.analyse(self.analysator) self.assertAlmostEqual(res.result[1], 1, 2) self.assertIsInstance(res.result, list) @@ -202,8 +213,8 @@ def test_fixed_offset_with_offset_zero(self): def test_fixed_offset_with_offset_non_zero(self): self.dataset.data.data = np.asarray([1.4, 2, 3, 4, 5, 6]) self.dataset.data.axes[0].values = np.asarray([0.5, 1, 2, 3, 4, 5]) - self.analysator.parameters['fixed_intercept'] = True - self.analysator.parameters['offset'] = 1 + self.analysator.parameters["fixed_intercept"] = True + self.analysator.parameters["offset"] = 1 res = self.dataset.analyse(self.analysator) self.assertAlmostEqual(res.result[1], 1, 1) self.assertEqual(res.result[0], 1) @@ -211,43 +222,50 @@ def test_fixed_offset_with_offset_non_zero(self): def test_fixed_offset_with_offset_zero_return_pol_coeffs(self): self.dataset.data.data = np.asarray([0.45, 1, 2, 3, 4, 5]) self.dataset.data.axes[0].values = np.asarray([0.5, 1, 2, 3, 4, 5]) - self.analysator.parameters['fixed_intercept'] = True - self.analysator.parameters['polynomial_coefficients'] = True + self.analysator.parameters["fixed_intercept"] = True + self.analysator.parameters["polynomial_coefficients"] = True res = self.dataset.analyse(self.analysator) self.assertIsInstance(res.result, list) def test_fit_returns_calculated_dataset_of_linear_slope(self): self.dataset.data.data = np.asarray([0.45, 1, 2, 3, 4, 5]) self.dataset.data.axes[0].values = np.asarray([0.5, 1, 2, 3, 4, 5]) - self.analysator.parameters['fixed_intercept'] = True - 
self.analysator.parameters['return_type'] = 'dataset' + self.analysator.parameters["fixed_intercept"] = True + self.analysator.parameters["return_type"] = "dataset" analysis = self.dataset.analyse(self.analysator) - self.assertEqual(aspecd.dataset.CalculatedDataset, - type(analysis.result)) - self.assertEqual(len(self.dataset.data.axes[0].values), - analysis.result.data.data.shape[0]) + self.assertEqual( + aspecd.dataset.CalculatedDataset, type(analysis.result) + ) + self.assertEqual( + len(self.dataset.data.axes[0].values), + analysis.result.data.data.shape[0], + ) def test_fit_returns_calculated_dataset_higher_order(self): - self.dataset.data.data = np.concatenate([np.linspace(1, 5, num=5), - np.linspace(5.1, 6.1, num=5)]) + self.dataset.data.data = np.concatenate( + [np.linspace(1, 5, num=5), np.linspace(5.1, 6.1, num=5)] + ) self.dataset.data.axes[0].values = np.linspace(1, 10, num=10) - self.analysator.parameters['return_type'] = 'dataset' + self.analysator.parameters["return_type"] = "dataset" analysis = self.dataset.analyse(self.analysator) - self.assertEqual(aspecd.dataset.CalculatedDataset, - type(analysis.result)) - self.assertEqual(len(self.dataset.data.axes[0].values), - analysis.result.data.data.shape[0]) + self.assertEqual( + aspecd.dataset.CalculatedDataset, type(analysis.result) + ) + self.assertEqual( + len(self.dataset.data.axes[0].values), + analysis.result.data.data.shape[0], + ) class TestPtpVsModAmp(TestCase): - def setUp(self): self.analysator = cwepr.analysis.PtpVsModAmp() self.dataset = cwepr.dataset.ExperimentalDataset() data = np.array([]) for sigma in np.linspace(5, 110): - data = np.append(data, np.gradient(scipy.signal.windows.gaussian( - 1000, sigma))) + data = np.append( + data, np.gradient(scipy.signal.windows.gaussian(1000, sigma)) + ) data = data.reshape(50, 1000).T self.dataset.data.data = data self.dataset.data.axes[1].values = np.linspace(0, 5, num=50) @@ -256,12 +274,13 @@ def test_instantiate_class(self): cwepr.analysis.PtpVsModAmp() def test_has_description(self): - self.assertNotIn('abstract', self.analysator.description.lower()) + self.assertNotIn("abstract", self.analysator.description.lower()) def test_perform_task(self): analysis = self.dataset.analyse(self.analysator) - self.assertEqual(aspecd.dataset.CalculatedDataset, - type(analysis.result)) + self.assertEqual( + aspecd.dataset.CalculatedDataset, type(analysis.result) + ) def test_result_has_no_values_in_last_axis(self): analysis = self.dataset.analyse(self.analysator) diff --git a/tests/test_dataset.py b/tests/test_dataset.py index 075dd06..4cd0b2f 100644 --- a/tests/test_dataset.py +++ b/tests/test_dataset.py @@ -25,7 +25,9 @@ def setUp(self): pass def test_dataset_factory(self): - source = os.path.join(ROOTPATH, "io/testdata/test-bes3t-1D-fieldsweep") + source = os.path.join( + ROOTPATH, "io/testdata/test-bes3t-1D-fieldsweep" + ) factory = dataset.DatasetFactory() ds = factory.get_dataset(source=source) - assert(type(ds) == dataset.ExperimentalDataset) + assert type(ds) == dataset.ExperimentalDataset diff --git a/tests/test_plotting.py b/tests/test_plotting.py index 9780c0d..b39ab50 100644 --- a/tests/test_plotting.py +++ b/tests/test_plotting.py @@ -15,10 +15,11 @@ class TestGoniometerSweepPlotter(unittest.TestCase): def setUp(self): - self.filename = 'goniometertest.pdf' - source = os.path.join(ROOTPATH, 'io/testdata/magnettech-goniometer/') + self.filename = "goniometertest.pdf" + source = os.path.join(ROOTPATH, "io/testdata/magnettech-goniometer/") self.importer = 
cwepr.io.magnettech.GoniometerSweepImporter( - source=source) + source=source + ) self.dataset = cwepr.dataset.ExperimentalDataset() self.dataset.import_from(self.importer) @@ -37,179 +38,209 @@ def test_plotter_does_not_fail(self): class TestSinglePlotter1D(unittest.TestCase): - def setUp(self): self.plotter = cwepr.plotting.SinglePlotter1D() self.dataset = cwepr.dataset.ExperimentalDataset() self.dataset.data.data = np.random.random(5) - self.dataset.data.axes[0].quantity = 'magnetic field' - self.dataset.data.axes[0].unit = 'mT' - self.dataset.data.axes[1].quantity = 'intensity' - self.dataset.data.axes[1].unit = 'V' + self.dataset.data.axes[0].quantity = "magnetic field" + self.dataset.data.axes[0].unit = "mT" + self.dataset.data.axes[1].quantity = "intensity" + self.dataset.data.axes[1].unit = "V" self.plotter.dataset = self.dataset def test_has_g_axis_parameter(self): - self.assertTrue('g-axis' in self.plotter.parameters) + self.assertTrue("g-axis" in self.plotter.parameters) def test_g_axis_adds_secondary_axis(self): - self.plotter.parameters['g-axis'] = True + self.plotter.parameters["g-axis"] = True self.plotter.plot() secondary_axes = [ - child for child in self.plotter.ax.get_children() - if isinstance(child, matplotlib.axes._secondary_axes.SecondaryAxis) + child + for child in self.plotter.ax.get_children() + if isinstance( + child, matplotlib.axes._secondary_axes.SecondaryAxis + ) ] self.assertTrue(secondary_axes) def test_g_axis_has_correct_label(self): - self.plotter.parameters['g-axis'] = True + self.plotter.parameters["g-axis"] = True self.plotter.plot() secondary_axes = [ - child for child in self.plotter.ax.get_children() - if isinstance(child, matplotlib.axes._secondary_axes.SecondaryAxis) + child + for child in self.plotter.ax.get_children() + if isinstance( + child, matplotlib.axes._secondary_axes.SecondaryAxis + ) ] - self.assertIn('g\\ value', - secondary_axes[0].get_xaxis().get_label().get_text()) + self.assertIn( + "g\\ value", secondary_axes[0].get_xaxis().get_label().get_text() + ) class TestSinglePlotter2D(unittest.TestCase): - def setUp(self): self.plotter = cwepr.plotting.SinglePlotter2D() self.dataset = cwepr.dataset.ExperimentalDataset() self.dataset.data.data = np.random.random([5, 5]) - self.dataset.data.axes[0].quantity = 'magnetic field' - self.dataset.data.axes[0].unit = 'mT' - self.dataset.data.axes[1].quantity = 'time' - self.dataset.data.axes[1].unit = 's' - self.dataset.data.axes[2].quantity = 'intensity' - self.dataset.data.axes[2].unit = 'V' + self.dataset.data.axes[0].quantity = "magnetic field" + self.dataset.data.axes[0].unit = "mT" + self.dataset.data.axes[1].quantity = "time" + self.dataset.data.axes[1].unit = "s" + self.dataset.data.axes[2].quantity = "intensity" + self.dataset.data.axes[2].unit = "V" self.plotter.dataset = self.dataset def test_has_g_axis_parameter(self): - self.assertTrue('g-axis' in self.plotter.parameters) + self.assertTrue("g-axis" in self.plotter.parameters) def test_g_axis_adds_secondary_axis(self): - self.plotter.parameters['g-axis'] = True + self.plotter.parameters["g-axis"] = True self.plotter.plot() secondary_axes = [ - child for child in self.plotter.ax.get_children() - if isinstance(child, matplotlib.axes._secondary_axes.SecondaryAxis) + child + for child in self.plotter.ax.get_children() + if isinstance( + child, matplotlib.axes._secondary_axes.SecondaryAxis + ) ] self.assertTrue(secondary_axes) def test_g_axis_has_correct_label(self): - self.plotter.parameters['g-axis'] = True + 
self.plotter.parameters["g-axis"] = True self.plotter.plot() secondary_axes = [ - child for child in self.plotter.ax.get_children() - if isinstance(child, matplotlib.axes._secondary_axes.SecondaryAxis) + child + for child in self.plotter.ax.get_children() + if isinstance( + child, matplotlib.axes._secondary_axes.SecondaryAxis + ) ] - self.assertIn('g\\ value', - secondary_axes[0].get_xaxis().get_label().get_text()) + self.assertIn( + "g\\ value", secondary_axes[0].get_xaxis().get_label().get_text() + ) class TestSinglePlotter2DStacked(unittest.TestCase): - def setUp(self): self.plotter = cwepr.plotting.SinglePlotter2DStacked() self.dataset = cwepr.dataset.ExperimentalDataset() self.dataset.data.data = np.random.random([5, 5]) - self.dataset.data.axes[0].quantity = 'magnetic field' - self.dataset.data.axes[0].unit = 'mT' - self.dataset.data.axes[1].quantity = 'time' - self.dataset.data.axes[1].unit = 's' - self.dataset.data.axes[2].quantity = 'intensity' - self.dataset.data.axes[2].unit = 'V' + self.dataset.data.axes[0].quantity = "magnetic field" + self.dataset.data.axes[0].unit = "mT" + self.dataset.data.axes[1].quantity = "time" + self.dataset.data.axes[1].unit = "s" + self.dataset.data.axes[2].quantity = "intensity" + self.dataset.data.axes[2].unit = "V" self.plotter.dataset = self.dataset def test_has_g_axis_parameter(self): - self.assertTrue('g-axis' in self.plotter.parameters) + self.assertTrue("g-axis" in self.plotter.parameters) def test_g_axis_adds_secondary_axis(self): - self.plotter.parameters['g-axis'] = True + self.plotter.parameters["g-axis"] = True self.plotter.plot() secondary_axes = [ - child for child in self.plotter.ax.get_children() - if isinstance(child, matplotlib.axes._secondary_axes.SecondaryAxis) + child + for child in self.plotter.ax.get_children() + if isinstance( + child, matplotlib.axes._secondary_axes.SecondaryAxis + ) ] self.assertTrue(secondary_axes) def test_g_axis_has_correct_label(self): - self.plotter.parameters['g-axis'] = True + self.plotter.parameters["g-axis"] = True self.plotter.plot() secondary_axes = [ - child for child in self.plotter.ax.get_children() - if isinstance(child, matplotlib.axes._secondary_axes.SecondaryAxis) + child + for child in self.plotter.ax.get_children() + if isinstance( + child, matplotlib.axes._secondary_axes.SecondaryAxis + ) ] - self.assertIn('g\\ value', - secondary_axes[0].get_xaxis().get_label().get_text()) + self.assertIn( + "g\\ value", secondary_axes[0].get_xaxis().get_label().get_text() + ) class TestMultiPlotter1D(unittest.TestCase): - def setUp(self): self.plotter = cwepr.plotting.MultiPlotter1D() self.dataset = cwepr.dataset.ExperimentalDataset() self.dataset.data.data = np.random.random(5) - self.dataset.data.axes[0].quantity = 'magnetic field' - self.dataset.data.axes[0].unit = 'mT' - self.dataset.data.axes[1].quantity = 'intensity' - self.dataset.data.axes[1].unit = 'V' + self.dataset.data.axes[0].quantity = "magnetic field" + self.dataset.data.axes[0].unit = "mT" + self.dataset.data.axes[1].quantity = "intensity" + self.dataset.data.axes[1].unit = "V" self.plotter.datasets = [self.dataset] def test_has_g_axis_parameter(self): - self.assertTrue('g-axis' in self.plotter.parameters) + self.assertTrue("g-axis" in self.plotter.parameters) def test_g_axis_adds_secondary_axis(self): - self.plotter.parameters['g-axis'] = True + self.plotter.parameters["g-axis"] = True self.plotter.plot() secondary_axes = [ - child for child in self.plotter.ax.get_children() - if isinstance(child, 
matplotlib.axes._secondary_axes.SecondaryAxis) + child + for child in self.plotter.ax.get_children() + if isinstance( + child, matplotlib.axes._secondary_axes.SecondaryAxis + ) ] self.assertTrue(secondary_axes) def test_g_axis_has_correct_label(self): - self.plotter.parameters['g-axis'] = True + self.plotter.parameters["g-axis"] = True self.plotter.plot() secondary_axes = [ - child for child in self.plotter.ax.get_children() - if isinstance(child, matplotlib.axes._secondary_axes.SecondaryAxis) + child + for child in self.plotter.ax.get_children() + if isinstance( + child, matplotlib.axes._secondary_axes.SecondaryAxis + ) ] - self.assertIn('g\\ value', - secondary_axes[0].get_xaxis().get_label().get_text()) + self.assertIn( + "g\\ value", secondary_axes[0].get_xaxis().get_label().get_text() + ) class TestMultiPlotter1DStacked(unittest.TestCase): - def setUp(self): self.plotter = cwepr.plotting.MultiPlotter1DStacked() self.dataset = cwepr.dataset.ExperimentalDataset() self.dataset.data.data = np.random.random(5) - self.dataset.data.axes[0].quantity = 'magnetic field' - self.dataset.data.axes[0].unit = 'mT' - self.dataset.data.axes[1].quantity = 'intensity' - self.dataset.data.axes[1].unit = 'V' + self.dataset.data.axes[0].quantity = "magnetic field" + self.dataset.data.axes[0].unit = "mT" + self.dataset.data.axes[1].quantity = "intensity" + self.dataset.data.axes[1].unit = "V" self.plotter.datasets = [self.dataset] def test_has_g_axis_parameter(self): - self.assertTrue('g-axis' in self.plotter.parameters) + self.assertTrue("g-axis" in self.plotter.parameters) def test_g_axis_adds_secondary_axis(self): - self.plotter.parameters['g-axis'] = True + self.plotter.parameters["g-axis"] = True self.plotter.plot() secondary_axes = [ - child for child in self.plotter.ax.get_children() - if isinstance(child, matplotlib.axes._secondary_axes.SecondaryAxis) + child + for child in self.plotter.ax.get_children() + if isinstance( + child, matplotlib.axes._secondary_axes.SecondaryAxis + ) ] self.assertTrue(secondary_axes) def test_g_axis_has_correct_label(self): - self.plotter.parameters['g-axis'] = True + self.plotter.parameters["g-axis"] = True self.plotter.plot() secondary_axes = [ - child for child in self.plotter.ax.get_children() - if isinstance(child, matplotlib.axes._secondary_axes.SecondaryAxis) + child + for child in self.plotter.ax.get_children() + if isinstance( + child, matplotlib.axes._secondary_axes.SecondaryAxis + ) ] - self.assertIn('g\\ value', - secondary_axes[0].get_xaxis().get_label().get_text()) + self.assertIn( + "g\\ value", secondary_axes[0].get_xaxis().get_label().get_text() + ) diff --git a/tests/test_processing.py b/tests/test_processing.py index 911ecac..e225791 100644 --- a/tests/test_processing.py +++ b/tests/test_processing.py @@ -17,14 +17,14 @@ class TestFieldCorrection(unittest.TestCase): def setUp(self): - source = os.path.join(ROOTPATH, 'io/testdata/test-magnettech') + source = os.path.join(ROOTPATH, "io/testdata/test-magnettech") importer = cwepr.io.magnettech.MagnettechXMLImporter(source=source) self.dataset = cwepr.dataset.ExperimentalDataset() self.dataset.import_from(importer) def test_axis_is_updated(self): fc = cwepr.processing.FieldCorrection() - fc.parameters['offset'] = 10 + fc.parameters["offset"] = 10 axis_before = copy.deepcopy(self.dataset.data.axes[0].values) self.dataset.process(fc) axis_after = self.dataset.data.axes[0].values @@ -32,43 +32,49 @@ def test_axis_is_updated(self): def test_metadata_is_updated(self): fc = cwepr.processing.FieldCorrection() - 
fc.parameters['offset'] = 10 - start = copy.deepcopy(self.dataset.metadata.magnetic_field.start.value) + fc.parameters["offset"] = 10 + start = copy.deepcopy( + self.dataset.metadata.magnetic_field.start.value + ) stop = copy.deepcopy(self.dataset.metadata.magnetic_field.stop.value) self.dataset.process(fc) - self.assertGreater(self.dataset.metadata.magnetic_field.start.value, - start) - self.assertGreater(self.dataset.metadata.magnetic_field.stop.value, - stop) + self.assertGreater( + self.dataset.metadata.magnetic_field.start.value, start + ) + self.assertGreater( + self.dataset.metadata.magnetic_field.stop.value, stop + ) class TestAutomaticPhaseCorrection(unittest.TestCase): def setUp(self): - source = os.path.join(ROOTPATH, 'io/testdata/phase-45_noise0.000.txt') + source = os.path.join(ROOTPATH, "io/testdata/phase-45_noise0.000.txt") importer = cwepr.io.txt_file.TxtImporter(source=source) - importer.parameters['delimiter'] = '\t' + importer.parameters["delimiter"] = "\t" self.dataset = cwepr.dataset.ExperimentalDataset() self.dataset.import_from(importer) def test_analytic_signal_is_complex(self): apc = cwepr.processing.AutomaticPhaseCorrection() - apc.parameters['order'] = 1 - apc.parameters['points_percentage'] = 5 + apc.parameters["order"] = 1 + apc.parameters["points_percentage"] = 5 self.dataset.process(apc) self.assertTrue(np.iscomplex(apc._analytic_signal).all) def test_signal_before_and_after_differ(self): apc = cwepr.processing.AutomaticPhaseCorrection() - apc.parameters['order'] = 1 - apc.parameters['points_percentage'] = 20 + apc.parameters["order"] = 1 + apc.parameters["points_percentage"] = 20 dataset_old = copy.deepcopy(self.dataset) self.dataset.process(apc) - self.assertTrue((dataset_old.data.data != self.dataset.data.data).all()) + self.assertTrue( + (dataset_old.data.data != self.dataset.data.data).all() + ) class TestSubtraction(unittest.TestCase): def setUp(self): - source = os.path.join(ROOTPATH, 'io/testdata/test-magnettech') + source = os.path.join(ROOTPATH, "io/testdata/test-magnettech") importer = cwepr.io.magnettech.MagnettechXMLImporter(source=source) self.dataset = cwepr.dataset.ExperimentalDataset() self.dataset.import_from(importer) @@ -76,7 +82,7 @@ def setUp(self): class TestFrequencyCorrection(unittest.TestCase): def setUp(self): - source = os.path.join(ROOTPATH, 'io/testdata/test-magnettech') + source = os.path.join(ROOTPATH, "io/testdata/test-magnettech") importer = cwepr.io.magnettech.MagnettechXMLImporter(source=source) self.dataset = cwepr.dataset.ExperimentalDataset() self.dataset.import_from(importer) @@ -84,28 +90,28 @@ def setUp(self): def test_frequency_before_is_different_from_after(self): old_freq = copy.deepcopy(self.dataset.metadata.bridge.mw_frequency) - self.corrector.parameters['frequency'] = 9.5 + self.corrector.parameters["frequency"] = 9.5 self.dataset.process(self.corrector) new_freq = self.dataset.metadata.bridge.mw_frequency self.assertNotEqual(new_freq.value, old_freq.value) def test_frequency_given_as_int(self): old_freq = copy.deepcopy(self.dataset.metadata.bridge.mw_frequency) - self.corrector.parameters['frequency'] = 9 + self.corrector.parameters["frequency"] = 9 self.dataset.process(self.corrector) new_freq = self.dataset.metadata.bridge.mw_frequency self.assertNotEqual(new_freq.value, old_freq.value) def test_no_frequency_given(self): - self.dataset.metadata.bridge.mw_frequency = \ + self.dataset.metadata.bridge.mw_frequency = ( aspecd.metadata.PhysicalQuantity() + ) with 
self.assertRaises(aspecd.exceptions.NotApplicableToDatasetError): self.dataset.process(self.corrector) def test_magnetic_field_axis_is_different(self): - old_field_axis = copy.deepcopy( - self.dataset.data.axes[0].values) - self.corrector.parameters['frequency'] = 8. + old_field_axis = copy.deepcopy(self.dataset.data.axes[0].values) + self.corrector.parameters["frequency"] = 8.0 self.dataset.process(self.corrector) new_field_axis = self.dataset.data.axes[0].values diffs = old_field_axis - new_field_axis @@ -113,19 +119,17 @@ def test_magnetic_field_axis_is_different(self): self.assertFalse(all(conditions)) def test_magnetic_field_points_have_non_constant_offset(self): - old_field_axis = copy.deepcopy( - self.dataset.data.axes[0].values) - self.corrector.parameters['frequency'] = 8. + old_field_axis = copy.deepcopy(self.dataset.data.axes[0].values) + self.corrector.parameters["frequency"] = 8.0 self.dataset.process(self.corrector) new_field_axis = self.dataset.data.axes[0].values diffs = old_field_axis - new_field_axis self.assertTrue(diffs[0] != diffs[-1]) def test_correct_with_offset_writes_new_magnetic_field_values(self): - old_field_axis = copy.deepcopy( - self.dataset.data.axes[0].values) - self.corrector.parameters['frequency'] = 8.5 - self.corrector.parameters['kind'] = 'offset' + old_field_axis = copy.deepcopy(self.dataset.data.axes[0].values) + self.corrector.parameters["frequency"] = 8.5 + self.corrector.parameters["kind"] = "offset" self.dataset.process(self.corrector) new_field_axis = self.dataset.data.axes[0].values diffs = old_field_axis - new_field_axis @@ -133,10 +137,9 @@ def test_correct_with_offset_writes_new_magnetic_field_values(self): self.assertFalse(all(conditions)) def test_correct_with_offset_center_point_is_lower_than_before(self): - old_field_axis = copy.deepcopy( - self.dataset.data.axes[0].values) - self.corrector.parameters['frequency'] = 8.5 - self.corrector.parameters['kind'] = 'offset' + old_field_axis = copy.deepcopy(self.dataset.data.axes[0].values) + self.corrector.parameters["frequency"] = 8.5 + self.corrector.parameters["kind"] = "offset" self.dataset.process(self.corrector) new_field_axis = self.dataset.data.axes[0].values idx = round(len(old_field_axis) / 2) @@ -146,17 +149,16 @@ def test_correct_with_offset_center_point_is_lower_than_before(self): def test_correct_with_offset_writes_new_frequency(self): old_freq = copy.deepcopy(self.dataset.metadata.bridge.mw_frequency) - self.corrector.parameters['kind'] = 'offset' - self.corrector.parameters['frequency'] = 8.5 + self.corrector.parameters["kind"] = "offset" + self.corrector.parameters["frequency"] = 8.5 self.dataset.process(self.corrector) new_freq = self.dataset.metadata.bridge.mw_frequency self.assertNotEqual(new_freq.value, old_freq.value) def test_correct_with_offset_field_points_have_constant_offset(self): - old_field_axis = copy.deepcopy( - self.dataset.data.axes[0].values) - self.corrector.parameters['kind'] = 'offset' - self.corrector.parameters['frequency'] = 8.5 + old_field_axis = copy.deepcopy(self.dataset.data.axes[0].values) + self.corrector.parameters["kind"] = "offset" + self.corrector.parameters["frequency"] = 8.5 self.dataset.process(self.corrector) new_field_axis = self.dataset.data.axes[0].values diffs = old_field_axis - new_field_axis @@ -169,8 +171,8 @@ def setUp(self): self.dataset = cwepr.dataset.ExperimentalDataset() self.dataset.data.data = np.random.random(100) self.dataset.data.axes[0].values = np.linspace(300, 400, num=100) - self.dataset.data.axes[0].unit = 'mT' - 
self.dataset.data.axes[0].quantity = 'magnetic field' + self.dataset.data.axes[0].unit = "mT" + self.dataset.data.axes[0].quantity = "magnetic field" self.dataset.metadata.bridge.mw_frequency.value = 9.5 def test_instantiate_class(self): @@ -179,7 +181,7 @@ def test_instantiate_class(self): def test_description_is_appropriate(self): self.assertTrue(self.proc.description) - self.assertIn('magnetic field axis to g axis', self.proc.description) + self.assertIn("magnetic field axis to g axis", self.proc.description) def test_axis_values_differs_after(self): values_before = np.copy(self.dataset.data.axes[0].values) @@ -215,7 +217,7 @@ def test_instantiate_class(self): pass def test_interpolate_returns_new_axis(self): - source = os.path.join(ROOTPATH, 'io/testdata/test-magnettech') + source = os.path.join(ROOTPATH, "io/testdata/test-magnettech") importer = cwepr.io.magnettech.MagnettechXMLImporter(source=source) dataset = cwepr.dataset.ExperimentalDataset() dataset.import_from(importer) @@ -228,7 +230,7 @@ def test_interpolate_returns_new_axis(self): class TestNormalisationOfDerivativeToArea(unittest.TestCase): def setUp(self): - source = os.path.join(ROOTPATH, 'io/testdata/not_noisy_data') + source = os.path.join(ROOTPATH, "io/testdata/not_noisy_data") importer = cwepr.io.txt_file.TxtImporter(source=source) self.dataset = cwepr.dataset.ExperimentalDataset() self.dataset.import_from(importer) @@ -242,23 +244,24 @@ def test_normalisation_of_derivative_to_area(self): class TestNormalisation(unittest.TestCase): def setUp(self): - source = os.path.join(ROOTPATH, 'io/testdata/BDPA-1DFieldSweep') + source = os.path.join(ROOTPATH, "io/testdata/BDPA-1DFieldSweep") importer = cwepr.dataset.DatasetFactory() self.dataset = importer.get_dataset(source=source) def test_normalisation_to_receiver_gain(self): correction = cwepr.processing.Normalisation() - correction.parameters['kind'] = 'receiver_gain' + correction.parameters["kind"] = "receiver_gain" before = max(self.dataset.data.data) rg = 10 ** ( - self.dataset.metadata.signal_channel.receiver_gain.value / 20) + self.dataset.metadata.signal_channel.receiver_gain.value / 20 + ) self.dataset.process(correction) after = max(self.dataset.data.data) self.assertEqual(before / rg, after) def test_normalisation_to_scan_number(self): correction = cwepr.processing.Normalisation() - correction.parameters['kind'] = 'scan_number' + correction.parameters["kind"] = "scan_number" before = max(self.dataset.data.data) scans = self.dataset.metadata.signal_channel.accumulations self.dataset.process(correction) @@ -267,7 +270,7 @@ def test_normalisation_to_scan_number(self): def test_normalisation_to_maximum(self): correction = cwepr.processing.Normalisation() - correction.parameters['kind'] = 'max' + correction.parameters["kind"] = "max" before = np.max(self.dataset.data.data) max_ = max(self.dataset.data.data) self.dataset.process(correction) @@ -276,7 +279,7 @@ def test_normalisation_to_maximum(self): def test_normalisation_to_minimum(self): correction = cwepr.processing.Normalisation() - correction.parameters['kind'] = 'min' + correction.parameters["kind"] = "min" before = min(self.dataset.data.data) min_ = min(self.dataset.data.data) self.dataset.process(correction) diff --git a/tests/test_report.py b/tests/test_report.py index ca2e015..a9f76e8 100644 --- a/tests/test_report.py +++ b/tests/test_report.py @@ -9,66 +9,73 @@ import cwepr.report TEST_ROOTPATH = os.path.split(os.path.abspath(__file__))[0] -MODULE_ROOTPATH = 
os.path.split(os.path.split(os.path.abspath(__file__))[0])[0] +MODULE_ROOTPATH = os.path.split(os.path.split(os.path.abspath(__file__))[0])[ + 0 +] class TestExperimentalDatasetLaTeXReporter(unittest.TestCase): def setUp(self): - source = os.path.join(TEST_ROOTPATH, - "io/testdata/test-bes3t-1D-fieldsweep") + source = os.path.join( + TEST_ROOTPATH, "io/testdata/test-bes3t-1D-fieldsweep" + ) factory = cwepr.dataset.DatasetFactory() self.dataset = factory.get_dataset(source=source) analysator = cwepr.analysis.Amplitude() self.dataset.analyse(analysator) algebra = aspecd.processing.ScalarAlgebra() - algebra.parameters['kind'] = '+' - algebra.parameters['value'] = 10 - algebra.comment = 'Does this show up in the report?' + algebra.parameters["kind"] = "+" + algebra.parameters["value"] = 10 + algebra.comment = "Does this show up in the report?" self.dataset.process(algebra) - self.filename = 'test.tex' + self.filename = "test.tex" template_ = os.path.join( - MODULE_ROOTPATH, 'cwepr', 'templates', 'de', 'report.tex.jinja') - self.reporter = \ - cwepr.report.ExperimentalDatasetLaTeXReporter(template=template_, - filename=self.filename) - self.reporter.context['dataset'] = self.dataset.to_dict() + MODULE_ROOTPATH, "cwepr", "templates", "de", "report.tex.jinja" + ) + self.reporter = cwepr.report.ExperimentalDatasetLaTeXReporter( + template=template_, filename=self.filename + ) + self.reporter.context["dataset"] = self.dataset.to_dict() self.reporter.dataset = self.dataset def tearDown(self): if os.path.exists(self.filename): os.remove(self.filename) - if os.path.exists(self.filename.replace('tex', 'pdf')): - os.remove(self.filename.replace('tex', 'pdf')) + if os.path.exists(self.filename.replace("tex", "pdf")): + os.remove(self.filename.replace("tex", "pdf")) def test_get_tasks(self): self.reporter._get_tasks_recursively(self.dataset) def test_reporter(self): self.reporter.create() - #self.reporter.compile() + # self.reporter.compile() def test_to_dict_does_not_contain_dataset(self): dict_ = self.reporter.to_dict() - self.assertNotIn('dataset', dict_) + self.assertNotIn("dataset", dict_) class TestPowerSweepAnalysisReport(unittest.TestCase): def setUp(self): - self.recipe_filename = \ - os.path.join(TEST_ROOTPATH, 'io/testdata/power-sweep-analysis.yaml') - self.filename = 'PowerSweepReport.tex' - self.filename2 = 'PowerSweepAnalysis.pdf' + self.recipe_filename = os.path.join( + TEST_ROOTPATH, "io/testdata/power-sweep-analysis.yaml" + ) + self.filename = "PowerSweepReport.tex" + self.filename2 = "PowerSweepAnalysis.pdf" self.chef = aspecd.tasks.ChefDeService() def tearDown(self): if os.path.exists(self.filename): os.remove(self.filename) - for path in glob.glob(self.recipe_filename.replace('.yaml', '-*.yaml')): + for path in glob.glob( + self.recipe_filename.replace(".yaml", "-*.yaml") + ): os.remove(path) if os.path.exists(self.filename2): os.remove(self.filename2) - if os.path.exists(self.filename.replace('tex', 'pdf')): - os.remove(self.filename.replace('tex', 'pdf')) + if os.path.exists(self.filename.replace("tex", "pdf")): + os.remove(self.filename.replace("tex", "pdf")) def test_reporter(self): self.chef.serve(recipe_filename=self.recipe_filename) @@ -76,27 +83,29 @@ def test_reporter(self): def test_to_dict_does_not_contain_dataset(self): reporter = cwepr.report.PowerSweepAnalysisReporter() dict_ = reporter.to_dict() - self.assertNotIn('dataset', dict_) + self.assertNotIn("dataset", dict_) class TestModulationAmplitudeSweepAnalysisReport(unittest.TestCase): def setUp(self): - 
self.recipe_filename = \ - os.path.join(TEST_ROOTPATH, - 'io/testdata/modulation-amplitude-analysis.yaml') - self.tex_filename = 'ModAmpSweepReport.tex' - self.plot_filename = 'ModAmpSweepAnalysis.pdf' + self.recipe_filename = os.path.join( + TEST_ROOTPATH, "io/testdata/modulation-amplitude-analysis.yaml" + ) + self.tex_filename = "ModAmpSweepReport.tex" + self.plot_filename = "ModAmpSweepAnalysis.pdf" self.chef = aspecd.tasks.ChefDeService() def tearDown(self): if os.path.exists(self.tex_filename): os.remove(self.tex_filename) - for path in glob.glob(self.recipe_filename.replace('.yaml', '-*.yaml')): + for path in glob.glob( + self.recipe_filename.replace(".yaml", "-*.yaml") + ): os.remove(path) if os.path.exists(self.plot_filename): os.remove(self.plot_filename) - if os.path.exists(self.tex_filename.replace('tex', 'pdf')): - os.remove(self.tex_filename.replace('tex', 'pdf')) + if os.path.exists(self.tex_filename.replace("tex", "pdf")): + os.remove(self.tex_filename.replace("tex", "pdf")) def test_reporter(self): self.chef.serve(recipe_filename=self.recipe_filename) @@ -104,17 +113,20 @@ def test_reporter(self): class TestDokuwikiCaptionsReporter(unittest.TestCase): def setUp(self): - self.filename = 'Dokuwiki-caption.txt' + self.filename = "Dokuwiki-caption.txt" self.template_ = os.path.join( - MODULE_ROOTPATH, 'cwepr', 'templates', 'en', - 'DokuwikiCaption.txt.jinja') + MODULE_ROOTPATH, + "cwepr", + "templates", + "en", + "DokuwikiCaption.txt.jinja", + ) self.reporter = cwepr.report.DokuwikiCaptionsReporter() self.dataset = cwepr.dataset.ExperimentalDataset() - source = \ - os.path.join(TEST_ROOTPATH, "io/testdata/test-magnettech") + source = os.path.join(TEST_ROOTPATH, "io/testdata/test-magnettech") factory = cwepr.dataset.DatasetFactory() self.dataset = factory.get_dataset(source=source) - self.reporter.context['dataset'] = self.dataset.to_dict() + self.reporter.context["dataset"] = self.dataset.to_dict() def tearDown(self): if os.path.exists(self.filename): @@ -133,21 +145,23 @@ def test_reporter_without_template_filename(self): def test_to_dict_does_not_contain_dataset(self): dict_ = self.reporter.to_dict() - self.assertNotIn('dataset', dict_) + self.assertNotIn("dataset", dict_) class InfofileReporterBES3T(unittest.TestCase): def setUp(self): - self.filename = 'MyInfofile.info' + self.filename = "MyInfofile.info" self.template_ = os.path.join( - MODULE_ROOTPATH, 'cwepr', 'templates', 'en', 'Infofile.info.jinja') + MODULE_ROOTPATH, "cwepr", "templates", "en", "Infofile.info.jinja" + ) self.reporter = cwepr.report.InfofileReporter() self.dataset = cwepr.dataset.ExperimentalDataset() - source = \ - os.path.join(TEST_ROOTPATH, "io/testdata/test-bes3t-1D-fieldsweep") + source = os.path.join( + TEST_ROOTPATH, "io/testdata/test-bes3t-1D-fieldsweep" + ) factory = cwepr.dataset.DatasetFactory() self.dataset = factory.get_dataset(source=source) - self.reporter.context['dataset'] = self.dataset.to_dict() + self.reporter.context["dataset"] = self.dataset.to_dict() def tearDown(self): if os.path.exists(self.filename): @@ -161,21 +175,24 @@ def test_reporter(self): def test_to_dict_does_not_contain_dataset(self): dict_ = self.reporter.to_dict() - self.assertNotIn('dataset', dict_) + self.assertNotIn("dataset", dict_) class InfofileReporterMagnettech(unittest.TestCase): def setUp(self): - self.filename = 'MyInfofile.info' + self.filename = "MyInfofile.info" self.template_ = os.path.join( - MODULE_ROOTPATH, 'cwepr', 'templates', 'en', 'Infofile.info.jinja') + MODULE_ROOTPATH, "cwepr", "templates", 
"en", "Infofile.info.jinja" + ) self.reporter = cwepr.report.InfofileReporter() self.dataset = cwepr.dataset.ExperimentalDataset() - self.source = \ - os.path.join(TEST_ROOTPATH, "io/testdata/test-magnettech") + self.source = os.path.join( + TEST_ROOTPATH, "io/testdata/test-magnettech" + ) self.factory = cwepr.dataset.DatasetFactory() - self.source2 = \ - os.path.join(TEST_ROOTPATH, "io/testdata/magnettech-goniometer/") + self.source2 = os.path.join( + TEST_ROOTPATH, "io/testdata/magnettech-goniometer/" + ) def tearDown(self): if os.path.exists(self.filename): @@ -185,7 +202,7 @@ def test_reporter(self): for source in (self.source, self.source2): with self.subTest(source=source): self.dataset = self.factory.get_dataset(source=source) - self.reporter.context['dataset'] = self.dataset.to_dict() + self.reporter.context["dataset"] = self.dataset.to_dict() self.reporter.filename = self.filename self.reporter.template = self.template_ self.reporter.create() diff --git a/tests/test_utils.py b/tests/test_utils.py index 1a3e97f..f25765b 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -6,7 +6,6 @@ class TestConvertmT2g(unittest.TestCase): - def test_values_are_positive(self): values = np.linspace(340, 350, 100) mw_freq = 9.5 @@ -15,13 +14,13 @@ def test_values_are_positive(self): def test_values_have_correct_range(self): values = np.linspace(340, 350, 100) mw_freq = 9.5 - condition = \ - (np.floor(np.log10(utils.convert_mT2g(values, mw_freq))) == 0) + condition = ( + np.floor(np.log10(utils.convert_mT2g(values, mw_freq))) == 0 + ) self.assertTrue(all(condition)) class TestConvertg2mT(unittest.TestCase): - def test_values_are_positive(self): values = np.linspace(1.8, 4, 100) mw_freq = 9.5 @@ -30,19 +29,18 @@ def test_values_are_positive(self): def test_values_have_correct_range(self): values = np.linspace(1.8, 4, 100) mw_freq = 9.5 - condition = \ - (np.floor(np.log10(utils.convert_g2mT(values, mw_freq))) == 2) + condition = ( + np.floor(np.log10(utils.convert_g2mT(values, mw_freq))) == 2 + ) self.assertTrue(all(condition)) class TestNotZero(unittest.TestCase): - def test_not_zero_of_zero_returns_nonzero_value(self): self.assertGreater(utils.not_zero(0), 0) def test_not_zero_of_zero_returns_np_float_resolution(self): - self.assertEqual(np.finfo(np.float64).resolution, - utils.not_zero(0)) + self.assertEqual(np.finfo(np.float64).resolution, utils.not_zero(0)) def test_not_zero_of_positive_value_preserves_sign(self): self.assertGreater(utils.not_zero(1e-20), 0) @@ -51,5 +49,6 @@ def test_not_zero_of_negative_value_preserves_sign(self): self.assertLess(utils.not_zero(-1e-20), 0) def test_not_zero_of_negative_value_closer_than_limit_returns_limit(self): - self.assertEqual(-np.finfo(np.float64).resolution, - utils.not_zero(-1e-20)) + self.assertEqual( + -np.finfo(np.float64).resolution, utils.not_zero(-1e-20) + ) From f0c6d2739b79f778e0ec6bbcc857d58e2cea2f05 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mirjam=20Schr=C3=B6der?= Date: Sat, 13 Jan 2024 22:33:14 +0100 Subject: [PATCH 08/16] Ignore black run --- .git-blame-ignore-revs | 2 ++ VERSION | 2 +- setup.py | 5 ++++- 3 files changed, 7 insertions(+), 2 deletions(-) create mode 100644 .git-blame-ignore-revs diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 0000000..9dedb14 --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,2 @@ +# First black run on all code +7cd00161dece9ce2a1d9387bd48e569f59f851bf diff --git a/VERSION b/VERSION index 42e4fe3..befc43f 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ 
-0.6.0.dev7 +0.6.0.dev8 diff --git a/setup.py b/setup.py index 1e12efa..c521ead 100644 --- a/setup.py +++ b/setup.py @@ -53,7 +53,10 @@ "matplotlib", ], extras_require={ - "dev": ["prospector[with_pyroma]"], + "dev": [ + "prospector[with_pyroma]", + "black", + ], "docs": [ "sphinx", "sphinx-rtd-theme", From 146c804bcd3725e9e3cf3b85b8dc9d255ee24c1b Mon Sep 17 00:00:00 2001 From: Till Biskup Date: Sun, 14 Jan 2024 12:23:32 +0100 Subject: [PATCH 09/16] Update prospector config to comply with Black code formatting --- .prospector.yaml | 7 +++++++ VERSION | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/.prospector.yaml b/.prospector.yaml index b322cfd..4492276 100644 --- a/.prospector.yaml +++ b/.prospector.yaml @@ -21,6 +21,13 @@ pylint: max-attributes: 12 max-module-lines: 3000 +pycodestyle: + disable: + - E203 + - W503 + enable: + - W504 + pyroma: run: true diff --git a/VERSION b/VERSION index befc43f..6ab3068 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.6.0.dev8 +0.6.0.dev9 From d992b362f34a04bc550465e4a1671a3e8a4af780 Mon Sep 17 00:00:00 2001 From: Till Biskup Date: Sun, 18 Feb 2024 20:58:50 +0100 Subject: [PATCH 10/16] ESPWinEPRImporter: Additional condition for WinEPR files --- VERSION | 2 +- cwepr/io/esp_winepr.py | 7 ++++++- docs/changelog.rst | 12 ++++++++++++ 3 files changed, 19 insertions(+), 2 deletions(-) diff --git a/VERSION b/VERSION index 6ab3068..cf6e9e7 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.6.0.dev9 +0.6.0.dev10 diff --git a/cwepr/io/esp_winepr.py b/cwepr/io/esp_winepr.py index f5cb509..ada9a5a 100644 --- a/cwepr/io/esp_winepr.py +++ b/cwepr/io/esp_winepr.py @@ -47,6 +47,9 @@ class ESPWinEPRImporter(aspecd.io.DatasetImporter): .. versionadded:: 0.2 + .. versionchanged:: 0.5.1 + Additional condition for WinEPR files + """ def __init__(self, source=None): @@ -112,7 +115,9 @@ def _import_data(self): self.dataset.data.data = raw_data def _get_file_encoding(self): - if ("DOS", "Format") in self._par_dict.items(): + if (("DOS", "Format") in self._par_dict.items() + or ("ASCII", "Format") in self._par_dict.items() + ): self._file_encoding = " Date: Sun, 18 Feb 2024 22:42:28 +0100 Subject: [PATCH 11/16] Add format parameter to ESPWinEPRImporter. The official format specification does not allow for discriminating the two formats, hence we always rely on informed guessing. If that fails, users can explicitly provide the format by themselves. --- VERSION | 2 +- cwepr/io/esp_winepr.py | 121 ++++++++++++++++++++++++++++++++++-- docs/changelog.rst | 1 + tests/io/test_esp_winepr.py | 30 +++++++++ 4 files changed, 148 insertions(+), 6 deletions(-) diff --git a/VERSION b/VERSION index cf6e9e7..3a776c1 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.6.0.dev10 +0.6.0.dev11 diff --git a/cwepr/io/esp_winepr.py b/cwepr/io/esp_winepr.py index ada9a5a..2cac360 100644 --- a/cwepr/io/esp_winepr.py +++ b/cwepr/io/esp_winepr.py @@ -27,7 +27,9 @@ class ESPWinEPRImporter(aspecd.io.DatasetImporter): - """Importer for the Bruker ESP and EMX formats. + # noinspection PyUnresolvedReferences + """ + Importer for the Bruker ESP and EMX formats. The Bruker EMX and ESP formats consist of two files, a data file with extension "spc" and a parameter file with extension "par". The @@ -45,15 +47,117 @@ class ESPWinEPRImporter(aspecd.io.DatasetImporter): values specified in the parameter file. + Attributes + ---------- + parameters : :class:`dict` + Additional parameters to control import options. + + format : :class:`str` + Identifier of the file format. 
+
+            Possible values are ``WinEPR``, ``ESP``, ``auto``
+
+            Note: When setting the values explicitly before importing data,
+            they are case-insensitive. But they will always be set to
+            either of the two values shown above upon data import.
+
+            There are two file formats in use with Bruker spectrometers
+            that have (nearly) the same parameter files, but entirely
+            different binary data files: The ESP and WinEPR (old EMX)
+            formats, named after the respective spectrometer series.
+
+            The importer does its best to automatically detect the format
+            for you. However, as the official file format specification
+            does not allow for such discrimination, these are necessarily
+            informed guesses. Sometimes, you need to help the importer by
+            explicitly stating which format you have at hand. Use this
+            parameter in such cases.
+
+            The parameter will be set after importing the data file,
+            hence in retrospect you can always figure out which format has
+            been detected.
+
+
+    Examples
+    --------
+    Usually, you will use the importer implicitly when cooking a recipe.
+    And in most cases, both the overall file format as well as the
+    special format (WinEPR or ESP) should be detected automatically for
+    you. In such a case, implicitly using the importer means just
+    importing datasets:
+
+    .. code-block:: yaml
+
+        datasets:
+          - winepr
+
+
+    However, if you happen to have a dataset where the importer
+    unfortunately fails at auto-detecting and discriminating between
+    WinEPR and ESP formats, you may explicitly provide the format to use:
+
+    .. code-block:: yaml
+
+        datasets:
+          - source: winepr
+            importer: ESPWinEPRImporter
+            importer_parameters:
+              format: WinEPR
+
+    For convenience, the format specifier is case-insensitive when set as
+    parameter here. Note, however, that in the resulting history of the
+    recipe, it is always written in the way specified above,
+    see :attr:`parameters` for details.
+
+    .. note::
+
+        How do you know if the importer failed? Quite simple: Your
+        resulting "spectrum" looks like garbage, but does not resemble an
+        EPR spectrum at all. Typically, you will have sort of a step
+        function with random oscillation between rather discrete values.
+
+    If you want to explicitly tell the importer to auto-detect the format,
+    *i.e.* discriminate between WinEPR and ESP formats -- that is the
+    default behaviour anyway -- you may do something like this:
+
+    .. code-block:: yaml
+
+        datasets:
+          - source: winepr
+            importer: ESPWinEPRImporter
+            importer_parameters:
+              format: auto
+
+    Just to mention: All examples so far have omitted the file extension.
+    This is fine, as long as you do *not* have additional ASCII exports
+    with the same file basename in the same directory, as in this case,
+    the :class:`cwepr.io.factory.DatasetImporterFactory` will get confused.
+    In such cases, you need to provide at least one of the two possible
+    file extensions (``par``, ``spc``) explicitly:
+
+    .. code-block:: yaml
+
+        datasets:
+          - winepr.par
+
+    This would be equivalent to:
+
+    .. code-block:: yaml
+
+        datasets:
+          - winepr.spc
+
+
     .. versionadded:: 0.2

     ..
versionchanged:: 0.5.1 - Additional condition for WinEPR files + Additional condition for WinEPR files; additional parameter ``format`` """ def __init__(self, source=None): super().__init__(source=source) + self.parameters["format"] = "auto" self.load_infofile = True # private properties self._infofile = aspecd.infofile.Infofile() @@ -115,12 +219,19 @@ def _import_data(self): self.dataset.data.data = raw_data def _get_file_encoding(self): - if (("DOS", "Format") in self._par_dict.items() - or ("ASCII", "Format") in self._par_dict.items() - ): + if self.parameters["format"].lower() == "auto": + if (("DOS", "Format") in self._par_dict.items() + or ("ASCII", "Format") in self._par_dict.items() + ): + self.parameters["format"] = "WinEPR" + else: + self.parameters["format"] = "ESP" + if self.parameters["format"].lower() == "winepr": self._file_encoding = " Date: Sun, 18 Feb 2024 22:53:14 +0100 Subject: [PATCH 12/16] Add todos to documentation --- VERSION | 2 +- cwepr/io/esp_winepr.py | 22 ++++++++++++++++++++-- 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/VERSION b/VERSION index 3a776c1..92bf080 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.6.0.dev11 +0.6.0.dev12 diff --git a/cwepr/io/esp_winepr.py b/cwepr/io/esp_winepr.py index 2cac360..379fa2e 100644 --- a/cwepr/io/esp_winepr.py +++ b/cwepr/io/esp_winepr.py @@ -6,9 +6,27 @@ A bit of a problem with these two formats is that they are quite similar, but not the same. Namely the format of the file containing the data in -binary representation is completely different. The way to tell those +binary representation is completely different. One way to tell those two formats apart is to import the ``.par`` file and look, if it contains -``DOS Format`` in the first line. +``DOS Format`` (or alternatively, ``ASCII Format``) in the first line. +However, this is just a workaround, as the official format specification +does *not* allow for any clear discrimination between the two. + +.. todo:: + There might be a way, though, to unequivocally discriminate between + the two formats: The number of field points seems to get written to + the parameter ``RES``. And after importing the binary data, we know + how many field points we have. They should differ for the two types of + binaries (by a factor of two). Just be aware that the ``RES`` + parameter may not be present in the par file if it has not been + changed from the "default value". Hence, we need to *first* read the + default parameters, overwrite those that are contained in the par + file, and only afterwards check for the correct length of the + resulting data vector. + +.. todo:: + Add a bit more details on the file format and all its peculiarities, + essentially documenting the specification. """ import glob From df4e9e0693db2279077ca275b8b8ab2e66257465 Mon Sep 17 00:00:00 2001 From: Till Biskup Date: Wed, 21 Feb 2024 19:28:35 +0100 Subject: [PATCH 13/16] Update documentation --- VERSION | 2 +- cwepr/io/__init__.py | 140 ++++++++++++++++++++++++++++++++++++++++-- docs/api/cwepr.io.rst | 1 + 3 files changed, 137 insertions(+), 6 deletions(-) diff --git a/VERSION b/VERSION index 92bf080..784616c 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.6.0.dev12 +0.6.0.dev13 diff --git a/cwepr/io/__init__.py b/cwepr/io/__init__.py index 38f2763..c5c92bd 100644 --- a/cwepr/io/__init__.py +++ b/cwepr/io/__init__.py @@ -1,8 +1,11 @@ """ -Subpackage for IO. +Input and output (IO) of information from and to the persistence layer. 
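As a complement to the recipe-based examples in the ``ESPWinEPRImporter`` docstring above, a minimal sketch of the equivalent direct Python calls might look as follows. It simply mirrors the import pattern used throughout the test suite; the source name ``winepr`` is merely a placeholder for the basename of a real ``.par``/``.spc`` pair:

.. code-block:: python

    import cwepr.dataset
    import cwepr.io.esp_winepr

    # Explicitly select the binary flavour; "auto" is the default.
    importer = cwepr.io.esp_winepr.ESPWinEPRImporter(source="winepr")
    importer.parameters["format"] = "WinEPR"  # or "ESP"

    dataset = cwepr.dataset.ExperimentalDataset()
    dataset.import_from(importer)

Setting ``format`` explicitly is only needed when the informed guessing described above fails; with ``auto``, the detected format can be read back from ``importer.parameters["format"]`` after the import.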
-Generally, for each file format (or class of formats), the importer resides -in a separate module. + +.. _sec-supported_file_formats: + +Supported file formats +====================== Currently, a number of more or less well-known and widely used EPR file formats are supported: @@ -18,14 +21,141 @@ Generic text file txt :mod:`cwepr.io.txt_file` =========================== ============= ========================== +For further details, you may have a look at the module documentation of the +individual importer. + + +General usage +============= + +A normal user of the cwepr package will not need to interact explicitly and +directly with the importers, as they are using recipe-driven data analysis. +See the :doc:`use cases section <../usecases>` for an introduction. Here, +importing data is a matter of specifying a list of datasets: + +.. code-block:: yaml + + datasets: + - /path/to/my/first/dataset + - /path/to/my/second/dataset + +The cwepr package will take care of finding the correct importer for you, +thanks to using the :cwepr.io.factory.DatasetImporterFactory` class. + +Nevertheless, sometimes you would like to have a bit more control over the +imported datasets, directly setting IDs for referring to the individual +datasets from within the recipe and a label that would show up, *i.a.*, +in the legend of plots: + +.. code-block:: yaml + + datasets: + - source: /path/to/my/first/dataset + label: cool dataset + id: data + - source: /path/to/my/second/dataset + label: resonator background + id: background + +It should be quite obvious what happens here. Nevertheless, a bit of an +explanation: + +* The list of paths got transformed into a list of dictionaries, if you + like, *i.e.*, key--value pairs, for each dataset. + +* The path to the file needs to be given as ``source`` attribute. + +* The ``label`` attribute allows to set a label used, *i.a.*, in figure + legends. + +* The ``id`` allows to set a short, memorable name for the dataset you can + use to refer to it from within the recipe. This is quite useful + particularly with longer and more complicated paths. + + +In rare cases, you may have the need of explicitly controlling the importer +used to import your data, and perhaps provide some additional parameters to +the importer as well. Typical use cases would be importing text files: + +.. code-block:: yaml + + datasets: + - source: eprdata.txt + importer_parameters: + delimiter: '\t' + separator: ',' + skiprows: 3 + comments: '%' + +Here, the delimiter between columns is the tabulator, the decimal +separator the comma, the first three lines are skipped by default as well +as every line starting with a percent character, as this is interpreted as +comment. + +A frequent use case is importing simulations that were carried out with +EasySpin. A MATLAB excerpt for saving the simulated spectrum might look +as follows: + + +.. code-block:: matlab + + [B_sim_iso, Spc_sim_iso] = garlic(Sys, Exp); + + data = [B_sim_iso', Spc_sim_iso']; + writematrix(data, 'Simulated-spectrum') + + +Read in the simulated spectrum with: + +.. code-block:: yaml + + datasets: + - source: Simulated-spectrum.txt + id: simulation + importer: TxtImporter + importer_parameters: + delimiter: ',' + + +Have a look at the documentation of the individual importer modules, +as well as the documentation of the ASpecD module: :mod:`aspecd.io`. + + +Metadata +======== + +Data without context, read: metadata, are usually useless. Hence, +it is strongly recommended to provide appropriate metadata for your EPR +data. 
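To round off the usage section above: in the rare case that the importers
are used programmatically rather than from a recipe, the import might look
roughly as follows. This is a sketch only -- apart from
:class:`cwepr.io.factory.DatasetImporterFactory`, the class and method names
(``ExperimentalDataset``, ``get_importer``, ``import_from``) follow the
usual ASpecD conventions and are assumptions to be checked against the
actual API:

.. code-block:: python

    import cwepr.dataset
    from cwepr.io.factory import DatasetImporterFactory

    # Let the factory pick the appropriate importer for the given file.
    importer = DatasetImporterFactory().get_importer(
        source="/path/to/my/first/dataset"
    )

    dataset = cwepr.dataset.ExperimentalDataset()
    dataset.import_from(importer)  # data and metadata end up in the dataset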
Have a look at the :doc:`section on metadata <../metadata>` in the +general user documentation of the cwepr package. + + +.. important:: + + All importers implemented in the cwepr package should automatically read + Infofiles if they are present. Hence, this is a comparably easy and + straight-forward way to collect metadata during data acquisition in a + machine-readable way and to have those metadata accessible from within + the cwepr package. + + +Organisation +============ + +Generally, for each file format (or class of formats), the importer resides +in a separate module. This is due to the rather complicated nature of some +importers (or, more exactly, the underlying file format). For details of +the available importers, have a look at the :ref:`supported file formats +section ` above. + In addition to modules for the individual data file formats, there are a series of more general modules: -* factory +* :mod:`cwepr.io.factory` Factory classes, currently the DatasetImporterFactory -* exporter +* :mod:`cwepr.io.exporter` Exporters, currently only an ASCII exporter diff --git a/docs/api/cwepr.io.rst b/docs/api/cwepr.io.rst index 1edec6a..03cee6a 100644 --- a/docs/api/cwepr.io.rst +++ b/docs/api/cwepr.io.rst @@ -8,6 +8,7 @@ cwepr.io package .. toctree:: :maxdepth: 1 + :hidden: cwepr.io.factory cwepr.io.bes3t From d3c260e9fb64fab2c4b9cb7d58e05f7d8ca0424e Mon Sep 17 00:00:00 2001 From: Till Biskup Date: Sat, 24 Feb 2024 21:34:11 +0100 Subject: [PATCH 14/16] Black run --- VERSION | 2 +- cwepr/analysis.py | 6 +++--- cwepr/io/esp_winepr.py | 14 ++++++++------ cwepr/io/exporter.py | 1 + cwepr/io/factory.py | 1 + cwepr/io/magnettech.py | 1 + cwepr/io/niehs.py | 1 + cwepr/metadata.py | 1 + cwepr/plotting.py | 1 + cwepr/processing.py | 1 + 10 files changed, 19 insertions(+), 10 deletions(-) diff --git a/VERSION b/VERSION index 784616c..8714de3 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.6.0.dev13 +0.6.0.dev14 diff --git a/cwepr/analysis.py b/cwepr/analysis.py index bfde1ec..98522c7 100644 --- a/cwepr/analysis.py +++ b/cwepr/analysis.py @@ -308,9 +308,9 @@ def _perform_task(self): def _assign_parameters(self): if not self.parameters["mw_frequency"]: - self.parameters[ - "mw_frequency" - ] = self.dataset.metadata.bridge.mw_frequency.value + self.parameters["mw_frequency"] = ( + self.dataset.metadata.bridge.mw_frequency.value + ) if not self.parameters["g_value"]: self.parameters["g_value"] = self.g_values[ self.parameters["standard"].lower() diff --git a/cwepr/io/esp_winepr.py b/cwepr/io/esp_winepr.py index 379fa2e..3368fc1 100644 --- a/cwepr/io/esp_winepr.py +++ b/cwepr/io/esp_winepr.py @@ -29,6 +29,7 @@ essentially documenting the specification. 
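For context on the ``mw_frequency`` and ``g_value`` parameters handled in
the analysis step above: both are tied together by the resonance condition
h·ν = g·µ_B·B. A short numerical sketch, independent of the cwepr classes
(the function name ``g_value`` is chosen here for illustration only):

.. code-block:: python

    import scipy.constants as const

    def g_value(mw_frequency, magnetic_field):
        """g value from microwave frequency (Hz) and magnetic field (T)."""
        bohr_magneton = const.value("Bohr magneton")
        return const.h * mw_frequency / (bohr_magneton * magnetic_field)

    # Roughly X-band conditions: 9.5 GHz at 340 mT yields a g value
    # close to that of the free electron.
    print(round(g_value(9.5e9, 0.340), 3))  # 1.996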
""" + import glob import os import re @@ -238,9 +239,10 @@ def _import_data(self): def _get_file_encoding(self): if self.parameters["format"].lower() == "auto": - if (("DOS", "Format") in self._par_dict.items() - or ("ASCII", "Format") in self._par_dict.items() - ): + if ("DOS", "Format") in self._par_dict.items() or ( + "ASCII", + "Format", + ) in self._par_dict.items(): self.parameters["format"] = "WinEPR" else: self.parameters["format"] = "ESP" @@ -396,9 +398,9 @@ def _ensure_common_units(self): def _fill_axes(self): self._get_magnetic_field_axis() self.dataset.data.axes[0].quantity = "magnetic field" - self.dataset.data.axes[ - 0 - ].unit = self.dataset.metadata.magnetic_field.start.unit + self.dataset.data.axes[0].unit = ( + self.dataset.metadata.magnetic_field.start.unit + ) self.dataset.data.axes[-1].quantity = "intensity" def _get_magnetic_field_axis(self): diff --git a/cwepr/io/exporter.py b/cwepr/io/exporter.py index 5b8c4b0..fd65b3d 100644 --- a/cwepr/io/exporter.py +++ b/cwepr/io/exporter.py @@ -21,6 +21,7 @@ If you want to export data to a txt-file, you might want to use the :class:`aspecd.io.TxtExporter`. """ + import collections import datetime diff --git a/cwepr/io/factory.py b/cwepr/io/factory.py index 528091c..036abcd 100644 --- a/cwepr/io/factory.py +++ b/cwepr/io/factory.py @@ -12,6 +12,7 @@ need to explicitly provide either the file format or an importer. """ + import os.path import aspecd.io diff --git a/cwepr/io/magnettech.py b/cwepr/io/magnettech.py index 1d08f7a..c7b2f4b 100644 --- a/cwepr/io/magnettech.py +++ b/cwepr/io/magnettech.py @@ -31,6 +31,7 @@ importers for other types of two-dimensional datasets is planned for the future. """ + import base64 import glob import logging diff --git a/cwepr/io/niehs.py b/cwepr/io/niehs.py index f716559..c1f083b 100644 --- a/cwepr/io/niehs.py +++ b/cwepr/io/niehs.py @@ -385,6 +385,7 @@ ==================== """ + import io import struct diff --git a/cwepr/metadata.py b/cwepr/metadata.py index f2c9d40..be8aa29 100644 --- a/cwepr/metadata.py +++ b/cwepr/metadata.py @@ -28,6 +28,7 @@ ==================== """ + import aspecd.metadata import aspecd.utils diff --git a/cwepr/plotting.py b/cwepr/plotting.py index 894b567..64ffcba 100644 --- a/cwepr/plotting.py +++ b/cwepr/plotting.py @@ -153,6 +153,7 @@ class :class:`PlotterExtensions` that can be used as a mixin class for other ==================== """ + import copy import numpy as np diff --git a/cwepr/processing.py b/cwepr/processing.py index 3198daf..de2ef7a 100644 --- a/cwepr/processing.py +++ b/cwepr/processing.py @@ -422,6 +422,7 @@ What follows is the API documentation of each class implemented in this module. 
""" + import warnings import numpy as np From dc70354a87a1da8e315b5cab55e68a7b8077615f Mon Sep 17 00:00:00 2001 From: Till Biskup Date: Sat, 24 Feb 2024 21:35:27 +0100 Subject: [PATCH 15/16] Ignore black run --- .git-blame-ignore-revs | 2 ++ VERSION | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index 9dedb14..cc71be9 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -1,2 +1,4 @@ # First black run on all code 7cd00161dece9ce2a1d9387bd48e569f59f851bf +# Second black run +d3c260e9fb64fab2c4b9cb7d58e05f7d8ca0424e \ No newline at end of file diff --git a/VERSION b/VERSION index 8714de3..9a1dcb3 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.6.0.dev14 +0.6.0.dev15 From 4daea9a87d846f03dec6fb48cfb35b01518b2494 Mon Sep 17 00:00:00 2001 From: Till Biskup Date: Sat, 24 Feb 2024 22:27:25 +0100 Subject: [PATCH 16/16] Update format description and roadmap --- VERSION | 2 +- cwepr/io/esp_winepr.py | 172 +++++++++++++++++++++++++++++++++++------ docs/roadmap.rst | 4 +- 3 files changed, 153 insertions(+), 25 deletions(-) diff --git a/VERSION b/VERSION index 9a1dcb3..36dd850 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.6.0.dev15 +0.6.0.dev16 diff --git a/cwepr/io/esp_winepr.py b/cwepr/io/esp_winepr.py index 3368fc1..5c11f20 100644 --- a/cwepr/io/esp_winepr.py +++ b/cwepr/io/esp_winepr.py @@ -2,31 +2,159 @@ Importer for the Bruker EMX and ESP format. The Bruker EMX and ESP formats are used by older Bruker EPR spectrometers, -namely old EMX spectrometers running WinEPR and the ESP line of spectrometers. +namely old EMX spectrometers running WinEPR and the ESP (and ECS) series of +spectrometers. A bit of a problem with these two formats is that they are quite similar, but not the same. Namely the format of the file containing the data in -binary representation is completely different. One way to tell those -two formats apart is to import the ``.par`` file and look, if it contains -``DOS Format`` (or alternatively, ``ASCII Format``) in the first line. -However, this is just a workaround, as the official format specification -does *not* allow for any clear discrimination between the two. - -.. todo:: - There might be a way, though, to unequivocally discriminate between - the two formats: The number of field points seems to get written to - the parameter ``RES``. And after importing the binary data, we know - how many field points we have. They should differ for the two types of - binaries (by a factor of two). Just be aware that the ``RES`` - parameter may not be present in the par file if it has not been - changed from the "default value". Hence, we need to *first* read the - default parameters, overwrite those that are contained in the par - file, and only afterwards check for the correct length of the - resulting data vector. - -.. todo:: - Add a bit more details on the file format and all its peculiarities, - essentially documenting the specification. +binary representation is completely different: + +===================== =============================== +Spectrometer Binary encoding +===================== =============================== +Bruker EMX / WinEPR 4 byte floating point +Bruker ESP (and ECS) 4 byte integer Motorola format +===================== =============================== + +One way to tell those two formats apart is to import the ``.par`` file and +look, if it contains ``DOS Format`` (or alternatively, ``ASCII Format``) in +the first line. 
However, this is just a workaround, as the official format +specification does *not* allow for any clear discrimination between the two. + +Eventually, the proof of the pudding is the eating: graphical representation +of the imported data will immediately tell whether the importer chose the +correct format: Either way, the wrong interpretation of the binary data will +produce "garbage" a human can easily tell apart from an EPR spectrum (and +even if it only contained spectrometer noise). Hence, if something goes +wrong, you can explicitly provide the (correct) format to use. See the +documentation of the :class:`ESPWinEPRImporter` class for details. + + +Format documentation +==================== + +Generally, the format consists of two files per each measurement, a binary +spectrum file with ``spc`` extension (and different binary encoding, +as mentioned above) and an ASCII parameter file with ``par`` extension that +should be the same for both binary formats, technically speaking. However, +EMX spectrometers operated by WinEPR tend to add parameters to the parameter +file that are *not* described in the format specification, but *may* be used +to discriminate between EMX/WinEPR and ESP formats. + +Just to make life simpler, a parameter file usually contains *only* those +parameters that deviate from what Bruker defined as "default" value. Those +default values are tabulated in the specification of the file format. A +particularly lovely quote from the specification, regarding defining the *x* +axis of your data: + + Definition of the x-axis can be very tricky because of instrument + offsets, *etc.* To make sure that the x-axis is represented correctly, + you should always use the parameters GST (start value) and GSI (sweep + size) and do not use HCF (center field) and HSW (sweep width). + +Following is the complete list of parameters, together with their default +values and their meaning, as given in the official specification: + +======= ================ ========================================================= +Keyword Default Value Definition +======= ================ ========================================================= +JSS 0 spectrum status word +JON operator name +JRE resonator name +JDA date of acquisition +JTM time of acquisition +JCO comment +JUN Gauss units (Gauss/Mhz/sec/...) 
+JNS 1 Scans to do +JSD 0 Scans done +JEX EPR Type of experiment +JAR ADD Mode (Add/Replace) +GST 3.455000e+03 left border of display +GSI 5.000000e+01 width of display +TE -l.000000e+00 temperature (-1 means not set by software) +HCF 3.480006e+03 ER032M center field +HSW 5.000000e+01 ER032M sweep width +NGA -1 ER035M gaussmeter address (-1 means not connected) +NOF 0.000000e+00 ER035M gaussmeter field offset +MF -1.000000e+00 Microwave frequency (-1 means no input made) +MP -1.000000e+00 Microwave power +MCA -1 Microwave counter address (-1 means no counter connected) +RMA 1.000000e+00 ER023M modulation amplitude [Gauss] +RRG 2.000000e+04 ER023M receiver gain +RPH 0 ER023M phase +ROF 0 ER023M offset +RCT 5.120000e+00 ER023M conversion time +RTC 1.280000e+00 ER023M time constant +RMF 1.000000e+02 ER023M modulation frequency [kHz] +RHA 1 ER023M harmonic +RRE 1 ER023M resonator +RES 1024 resolution of ER023M spectra +DTM 4.096000e+00 digitizer sweep time [sec] +DSD 0.000000e+00 digitizer sweep delay [sec] +DCT 1000 digitizer conversion time [νsec] +DTR 1000 digitizer trigger rate +DCA ON channel A +DCB OFF channel B +DDM OFF DUAL mode +DRS 4096 digitizer resolution in x-axis +PPL OFF parameter plot +PFP 2 frame pen +PSP 1 spectra pen +POF 0 plot offset +PFR ON frame On/OFF +EMF 3.352100e+03 ENMR field +ESF 2.000000e+01 ENMR start frequency [MHz] +ESW 1.000000e+01 ENMR sweep width [MHz] +EFD 9.977000e+0l FM-modulation [kHz] +EPF 1.000000e+01 ENMR pump frequency [MHz] +ESP 20 ENMR RF attenuator [dB] +EPP 63 ENMR pump power attenuator [dB] +EOP 0 ENMR total power attenuator [dB] +EPH 0 ENMR phase +FME filter method +FWI filter width +FOP 2 filter order of polynomial +FER 2.000000e+00 filter value 'alpha' +======= ================ ========================================================= + +Note that usually, only a rather small subset of all these possible values +are of relevance, particularly in case of the EMX spectrometer series only +capable of performing conventional cw-EPR spectroscopy. The ESP +spectrometer series, in contrast, could be equipped with both, pulsed and +ENDOR capabilities and is the predecessor of the modern ELEXSYS series. + +Regarding the first parameter, ``JSS``, there is a bit more information +available that should be documented here for completeness as well: + +.. code-block:: text + + JSS is a number indicating the status of the spectrum. It is a decimal + number. The following describes what the numbers mean in hex: + + /* CSPS: current spectrum status */ + #define s_DUAL 0x00000001L /* current status : DUAL can change */ + #define s_2D 0x00000002L /* manipulated spectrum is 2D-spec. */ + #define s_FT 0x00000004L /* Fourier Transformation was done */ + #define s_MAN0 0x00000008L /* 'soft' manipulation were done */ + #define s_MAN1 0x00000010L /* 'hard' manipulation were done */ + /* 'soft' manipulation: baseline correction, addition of constant values */ + /* or spectrum fits, multiplication with constant values, phase correction */ + /* 'hard' manipulation: add. and mult. with extern data (other spectra..) */ + /* zero function, smoothing op., expansion (only when fixed); */ + /* org. information lost!!! */ + #define s_PROT 0x00000020L /* Protection flag: manip. not allowed */ + #define s_VEPR 0x00000040L /* VEPR spectrum */ + #define s_POW 0x00000080L /* power spectrum */ + #define s_ABS 0x00000100L /* absolute value spectrum */ + #define s_FTX 0x00000200L /* Fourier Trans. in x-dir. of 2D-spectrum */ + #define s_FTY 0x00000400L /* FT in y-dir. 
of 2D-spectrum */ + /* s_FTX and s_FTY : FT on all slices off the spectrum; single slice: s_FT */ + #define s_POW2 0x00000800L /* 2D power spectrum */ + #define s_ABS2 0x00001000L /* 2D absolute value spectrum */ + + +Module documentation +==================== """ diff --git a/docs/roadmap.rst b/docs/roadmap.rst index 1f10a8f..9d9b7ac 100644 --- a/docs/roadmap.rst +++ b/docs/roadmap.rst @@ -7,7 +7,7 @@ Roadmap A few ideas how to develop the project further, currently a list as a reminder for the main developers themselves, in no particular order, though with a tendency to list more important aspects first: -For version 0.5 +For version 0.6 =============== * Implement derived importers for Magnettech files @@ -37,7 +37,7 @@ For version 0.5 * Keep step parameter of magnetic field or not? - * Fix docs in tyt-file importer module. + * Fix docs in txt-file importer module. * Check Offset method of Frequency correction. Might be incorrect.
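Finally, coming back to the ``JSS`` status word quoted in the format
documentation above: it is a plain bit field, so decoding it is
straightforward. A small sketch using the flag values taken verbatim from
the specification excerpt (only the flag names are shortened):

.. code-block:: python

    JSS_FLAGS = {
        0x00000001: "DUAL",
        0x00000002: "2D",
        0x00000004: "FT",
        0x00000008: "MAN0 (soft manipulations)",
        0x00000010: "MAN1 (hard manipulations)",
        0x00000020: "PROT",
        0x00000040: "VEPR",
        0x00000080: "POW",
        0x00000100: "ABS",
        0x00000200: "FTX",
        0x00000400: "FTY",
        0x00000800: "POW2",
        0x00001000: "ABS2",
    }

    def decode_jss(jss):
        """Return the names of all status flags set in a (decimal) JSS value."""
        return [name for bit, name in JSS_FLAGS.items() if int(jss) & bit]

    print(decode_jss(0))  # [] -- the default value, nothing set
    print(decode_jss(9))  # ['DUAL', 'MAN0 (soft manipulations)']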