From b477960b94ec7ac0fd5a79e394755c947b1abd62 Mon Sep 17 00:00:00 2001
From: Nabil Freij
Date: Fri, 29 Dec 2023 22:34:57 -0800
Subject: [PATCH] Precommit

---
 .pre-commit-config.yaml             |  4 +-
 sunraster/_dev/scm_version.py       |  2 +-
 sunraster/extern/meta.py            | 42 +++++++------------
 sunraster/extern/tests/test_meta.py | 19 ++++-----
 sunraster/instr/spice.py            | 41 +++++++++++-------
 sunraster/instr/tests/test_spice.py |  4 +-
 sunraster/spectrogram.py            | 65 +++++++++++++----------------
 sunraster/spectrogram_sequence.py   |  4 +-
 8 files changed, 84 insertions(+), 97 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 3ab9a4ed..9d65a7e2 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -6,12 +6,12 @@ repos:
         args: ['--in-place', '--remove-all-unused-imports', '--remove-unused-variable']
         exclude: ".*(.fits|.fts|.fit|.txt|tca.*|extern.*|.rst|.md|__init__.py|docs/conf.py)$"
   - repo: https://github.com/psf/black
-    rev: 23.11.0
+    rev: 23.12.1
     hooks:
       - id: black
         exclude: ".*(.fits|.fts|.fit|.txt|.csv)$"
   - repo: https://github.com/PyCQA/isort
-    rev: 5.12.0
+    rev: 5.13.2
     hooks:
       - id: isort
         exclude: ".*(.fits|.fts|.fit|.txt|.csv)$"
diff --git a/sunraster/_dev/scm_version.py b/sunraster/_dev/scm_version.py
index 42209891..572f8663 100644
--- a/sunraster/_dev/scm_version.py
+++ b/sunraster/_dev/scm_version.py
@@ -7,4 +7,4 @@

     version = get_version(root=pth.join("..", ".."), relative_to=__file__)
 except Exception as e:
-    raise ImportError(f"setuptools_scm broken or not installed, {e}")
+    raise ImportError(f"setuptools_scm broken or not installed, {e}") from e
diff --git a/sunraster/extern/meta.py b/sunraster/extern/meta.py
index 562e7297..abb89fac 100644
--- a/sunraster/extern/meta.py
+++ b/sunraster/extern/meta.py
@@ -43,16 +43,13 @@ def __init__(self, header=None, comments=None, axes=None, data_shape=None):
         self.original_header = header

         # Sanitize metadata values and instantiate class.
-        if header is None:
-            header = {}
-        else:
-            header = dict(header)
+        header = {} if header is None else dict(header)
         super().__init__(header.items())
         header_keys = header.keys()

         # Generate dictionary for comments.
         if comments is None:
-            self._comments = dict()
+            self._comments = {}
         else:
             comments = dict(comments)
             if not set(comments.keys()).issubset(set(header_keys)):
@@ -67,12 +64,12 @@ def __init__(self, header=None, comments=None, axes=None, data_shape=None):

         # Generate dictionary for axes.
         if axes is None:
-            self._axes = dict()
+            self._axes = {}
         else:
             # Verify data_shape is set if axes is set.
             if not (
                 isinstance(data_shape, collections.abc.Iterable)
-                and all([isinstance(i, numbers.Integral) for i in data_shape])
+                and all(isinstance(i, numbers.Integral) for i in data_shape)
             ):
                 raise TypeError(
                     "If axes is set, data_shape must be an iterable giving "
@@ -91,7 +88,7 @@ def _sanitize_axis_value(self, axis, value, key):
         # Verify each entry in axes is an iterable of ints.
         if isinstance(axis, numbers.Integral):
             axis = (axis,)
-        if not (isinstance(axis, collections.abc.Iterable) and all([isinstance(i, numbers.Integral) for i in axis])):
+        if not isinstance(axis, collections.abc.Iterable) or not all(isinstance(i, numbers.Integral) for i in axis):
             raise TypeError(
                 "Values in axes must be an int or tuple of ints giving "
                 "the data axis/axes associated with the metadata."
@@ -101,13 +98,11 @@ def _sanitize_axis_value(self, axis, value, key):
             )
         # Confirm each axis-associated piece of metadata has the same shape
         # as its associated axes.
         shape_error_msg = f"{key} must have shape {tuple(self.shape[axis])} as it is associated with axes {axis}"
-        if len(axis) == 1:
-            if not hasattr(value, "__len__"):
-                raise TypeError(shape_error_msg)
+        if (len(axis) == 1 and not hasattr(value, "__len__")) or (len(axis) != 1 and not hasattr(value, "shape")):
+            raise TypeError(shape_error_msg)
+        elif len(axis) == 1:
             meta_shape = (len(value),)
         else:
-            if not hasattr(value, "shape"):
-                raise TypeError(shape_error_msg)
             meta_shape = value.shape
         if not all(meta_shape == self.shape[axis]):
             raise ValueError(shape_error_msg)
@@ -172,19 +167,18 @@ def __setitem__(self, key, val):
         if axis is not None:
             recommendation = "We recommend using the 'add' method to set values."
             if len(axis) == 1:
-                if not (hasattr(val, "__len__") and len(val) == self.shape[axis[0]]):
+                if not hasattr(val, "__len__") or len(val) != self.shape[axis[0]]:
                     raise TypeError(
                         f"{key} must have same length as associated axis, "
                         f"i.e. axis {axis[0]}: {self.shape[axis[0]]}\n"
                         f"{recommendation}"
                     )
-            else:
-                if not (hasattr(val, "shape") and all(val.shape == self.shape[axis])):
-                    raise TypeError(
-                        f"{key} must have same shape as associated axes, "
-                        f"i.e axes {axis}: {self.shape[axis]}\n"
-                        f"{recommendation}"
-                    )
+            elif not hasattr(val, "shape") or not all(val.shape == self.shape[axis]):
+                raise TypeError(
+                    f"{key} must have same shape as associated axes, "
+                    f"i.e axes {axis}: {self.shape[axis]}\n"
+                    f"{recommendation}"
+                )
         super().__setitem__(key, val)

     def __getitem__(self, item):
@@ -195,7 +189,6 @@ def __getitem__(self, item):
         # If item is single string, slicing is simple.
         if isinstance(item, str):
             return super().__getitem__(item)
-        # Else, the item is assumed to be a typical slicing item.
         elif self.shape is None:
             raise TypeError("Meta object does not have a shape and so cannot be sliced.")
         else:
@@ -235,10 +228,7 @@ def __getitem__(self, item):
                 axis = self.axes.get(key, None)
                 if axis is not None:
                     new_item = tuple(item[axis])
-                    if len(new_item) == 1:
-                        new_value = value[new_item[0]]
-                    else:
-                        new_value = value[new_item]
+                    new_value = value[new_item[0]] if len(new_item) == 1 else value[new_item]
                     new_axis = np.array([-1 if isinstance(i, numbers.Integral) else a for i, a in zip(new_item, axis)])
                     new_axis -= cumul_dropped_axes[axis]
                     new_axis = new_axis[new_axis >= 0]
diff --git a/sunraster/extern/tests/test_meta.py b/sunraster/extern/tests/test_meta.py
index a816223b..c6054af0 100644
--- a/sunraster/extern/tests/test_meta.py
+++ b/sunraster/extern/tests/test_meta.py
@@ -11,7 +11,6 @@ def assert_metas_equal(test_input, expected_output):
         raise AssertionError(
             "input and expected are of different type. " f"input: {type(test_input)}; expected: {type(expected_output)}"
         )
-    multi_element_msg = "more than one element is ambiguous"
     if isinstance(test_input, Meta) and isinstance(expected_output, Meta):
         # Check keys are the same.
         assert test_input.keys() == expected_output.keys()
@@ -22,6 +21,7 @@ def assert_metas_equal(test_input, expected_output):
         else:
             assert np.allclose(test_input.shape, expected_output.shape)

+        multi_element_msg = "more than one element is ambiguous"
         # Check values and axes are the same.
         for test_value, expected_value in zip(test_input.values(), expected_output.values()):
             try:
@@ -32,11 +32,10 @@ def assert_metas_equal(test_input, expected_output):
         # Check axes are the same.
         for test_axis, expected_axis in zip(test_input.axes.values(), expected_output.axes.values()):
             assert (test_axis is None and expected_axis is None) or all(test_axis == expected_axis)
-    else:
-        if not (test_input is None and expected_output is None):
-            assert test_input.keys() == expected_output.keys()
-            for key in list(test_input.keys()):
-                assert test_input[key] == expected_output[key]
+    elif test_input is not None or expected_output is not None:
+        assert test_input.keys() == expected_output.keys()
+        for key in list(test_input.keys()):
+            assert test_input[key] == expected_output[key]


 @pytest.fixture
@@ -102,10 +101,10 @@ def test_slice_away_independent_axis(basic_meta):
     item = 0
     output = meta[item]
     # Build expected result.
-    values = dict([(key, value) for key, value in meta.items()])
+    values = dict(list(meta.items()))
     values["b"] = values["b"][0]
     comments = meta.comments
-    axes = dict([(key, axis) for key, axis in meta.axes.items()])
+    axes = dict(list(meta.axes.items()))
     del axes["b"]
     axes["c"] -= 1
     axes["d"] -= 1
@@ -123,11 +122,11 @@ def test_slice_dependent_axes(basic_meta):
     output = meta[:, 1:3, 1]
     print(meta["a"])
     # Build expected result.
-    values = dict([(key, value) for key, value in meta.items()])
+    values = dict(list(meta.items()))
     values["c"] = values["c"][1:3, 1]
     values["d"] = values["d"][1]
     comments = meta.comments
-    axes = dict([(key, axis) for key, axis in meta.axes.items()])
+    axes = dict(list(meta.axes.items()))
     axes["c"] = 1
     del axes["d"]
     shape = np.array([2, 2, 5])
diff --git a/sunraster/instr/spice.py b/sunraster/instr/spice.py
index 4b748136..dd82baae 100644
--- a/sunraster/instr/spice.py
+++ b/sunraster/instr/spice.py
@@ -85,16 +85,13 @@ def read_spice_l2_fits(filenames, windows=None, memmap=True, read_dumbbells=Fals
                 "All files must correspond to same observing campaign/SPICE OBS ID. "
                 f"First file SPICE OBS ID: {first_obs_id}; "
                 f"{i+1}th file SPICE OBS ID: {this_obs_id}"
-            )
+            ) from err
     # Depending on type of file, combine data from different files into
     # SpectrogramSequences and RasterSequences.
     is_raster = "ras" in first_meta.get("FILENAME") and not any(
-        [window[1].meta.contains_dumbbell for window in cube_lists.values()]
+        window[1].meta.contains_dumbbell for window in cube_lists.values()
     )
-    if is_raster:
-        sequence_class = RasterSequence
-    else:
-        sequence_class = SpectrogramSequence
+    sequence_class = RasterSequence if is_raster else SpectrogramSequence
     window_sequences = [
         (key, sequence_class([v[0] for v in value], common_axis=-1)) for key, value in cube_lists.items()
     ]
@@ -107,7 +104,7 @@ def read_spice_l2_fits(filenames, windows=None, memmap=True, read_dumbbells=Fals
     # same spectral window, e.g. because they are dumbbell windows.
         first_sequence = window_sequences[0][1]
         first_spectral_window = first_sequence[0].meta.spectral_window
-        if all([window[1][0].meta.spectral_window == first_spectral_window for window in window_sequences]):
+        if all(window[1][0].meta.spectral_window == first_spectral_window for window in window_sequences):
             aligned_axes = tuple(range(len(first_sequence.dimensions)))
         else:
             aligned_axes = tuple(
@@ -179,14 +176,30 @@ def _read_single_spice_l2_fits(
         windows = [
             hdu.header["EXTNAME"]
             for hdu in hdulist
-            if (isinstance(hdu, fits.hdu.image.PrimaryHDU) or isinstance(hdu, fits.hdu.image.ImageHDU))
+            if (
+                isinstance(
+                    hdu,
+                    (
+                        fits.hdu.image.PrimaryHDU,
+                        fits.hdu.image.ImageHDU,
+                    ),
+                )
+            )
             and dumbbell_label in hdu.header["EXTNAME"]
         ]
     else:
         windows = [
             hdu.header["EXTNAME"]
             for hdu in hdulist
-            if (isinstance(hdu, fits.hdu.image.PrimaryHDU) or isinstance(hdu, fits.hdu.image.ImageHDU))
+            if (
+                isinstance(
+                    hdu,
+                    (
+                        fits.hdu.image.PrimaryHDU,
+                        fits.hdu.image.ImageHDU,
+                    ),
+                )
+            )
             and dumbbell_label not in hdu.header["EXTNAME"]
             and hdu.header["EXTNAME"] not in excluded_labels
         ]
@@ -194,7 +207,7 @@ def _read_single_spice_l2_fits(
     if any(dumbbells_requested) and not all(dumbbells_requested):
         raise ValueError("Cannot read dumbbell and other window types simultaneously.")
     # Retrieve window names from FITS file.
-    for i, hdu in enumerate(hdulist):
+    for hdu in hdulist:
         if hdu.header["EXTNAME"] in windows:
             # Define metadata object.
             meta = SPICEMeta(
@@ -226,10 +239,7 @@ def _read_single_spice_l2_fits(
                 window_cubes.append((window_name, spectrogram))
             else:
                 output[window_name].append(spectrogram)
-    if output is None:
-        return dict(window_cubes)
-    else:
-        return output
+    return dict(window_cubes) if output is None else output


 def _convert_fits_comments_to_key_value_pairs(fits_header):
@@ -241,8 +251,7 @@ def _convert_fits_comments_to_key_value_pairs(fits_header):
 class SPICEMeta(Meta, metaclass=SlitSpectrographMetaABC):
     # ---------- SPICE-specific convenience methods ----------
     def _get_unit(self, key):
-        comment = self.comments.get(key)
-        if comment:
+        if comment := self.comments.get(key):
             try:
                 return [s.split("]") for s in comment.split("[")[1:]][0][:-1][0]
             except IndexError:
diff --git a/sunraster/instr/tests/test_spice.py b/sunraster/instr/tests/test_spice.py
index cc9bb192..67ad5ab7 100644
--- a/sunraster/instr/tests/test_spice.py
+++ b/sunraster/instr/tests/test_spice.py
@@ -300,8 +300,8 @@ def test_read_spice_l2_fits_multiple_rasters_multiple_windows(spice_rasdb_filena
     assert isinstance(result, READ_SPICE_L2_FITS_RETURN_TYPE)
     assert set(result.aligned_axes.values()) == {(0, 2, 3)}
     assert len(result) == 2
-    assert all([window.dimensions[0].value == len(filenames) for window in result.values()])
-    assert all([isinstance(window, RasterSequence) for window in result.values()])
+    assert all(window.dimensions[0].value == len(filenames) for window in result.values())
+    assert all(isinstance(window, RasterSequence) for window in result.values())


 def test_read_spice_l2_fits_multiple_rasters_single_window(spice_rasdb_filename):
diff --git a/sunraster/spectrogram.py b/sunraster/spectrogram.py
index f967446f..18a654e2 100644
--- a/sunraster/spectrogram.py
+++ b/sunraster/spectrogram.py
@@ -279,11 +279,10 @@ def __str__(self):
             lon_range = u.Quantity([lon.min(), lon.max()])
             lat_range = u.Quantity([lat.min(), lat.max()])
         except ValueError as err:
-            if AXIS_NOT_FOUND_ERROR in err.args[0]:
-                lon_range = None
-                lat_range = None
-            else:
+            if AXIS_NOT_FOUND_ERROR not in err.args[0]:
                 raise err
+            lon_range = None
+            lat_range = None
         try:
             if self.spectral_axis.isscalar:
                 spectral_range = self.spectral_axis
@@ -355,7 +354,7 @@ def spectral_axis(self):
             self.meta,
         )
         if not self._spectral_name:
-            raise ValueError("Spectral" + AXIS_NOT_FOUND_ERROR + f"{SUPPORTED_SPECTRAL_NAMES}")
+            raise ValueError(f"Spectral{AXIS_NOT_FOUND_ERROR}{SUPPORTED_SPECTRAL_NAMES}")
         return self._get_axis_coord(self._spectral_name, self._spectral_loc)

     @property
@@ -367,8 +366,8 @@ def time(self):
             self.extra_coords,
             self.meta,
         )
-        if not self._time_name:
-            raise ValueError(f"Time {AXIS_NOT_FOUND_ERROR} {SUPPORTED_TIME_NAMES}")
+        if not self._time_name:
+            raise ValueError(f"Time {AXIS_NOT_FOUND_ERROR} {SUPPORTED_TIME_NAMES}")
         return Time(self._get_axis_coord(self._time_name, self._time_loc))

     @property
@@ -380,8 +379,8 @@ def exposure_time(self):
             self.extra_coords,
             self.meta,
         )
-        if not self._exposure_time_name:
-            raise ValueError(f"Exposure time {AXIS_NOT_FOUND_ERROR} {SUPPORTED_EXPOSURE_NAMES}")
+        if not self._exposure_time_name:
+            raise ValueError(f"Exposure time {AXIS_NOT_FOUND_ERROR} {SUPPORTED_EXPOSURE_NAMES}")
         return self._get_axis_coord(self._exposure_time_name, self._exposure_time_loc)

     @property
@@ -547,23 +546,19 @@ def _calculate_exposure_time_correction(data, uncertainty, unit, exposure_time,
     new_unit: `astropy.unit.Unit`
         Unit of new_data array after exposure time correction.
     """
-    # If force is not set to True and unit already includes inverse time,
-    # raise error as exposure time correction has probably already been
-    # applied and should not be applied again.
     if force is not True and u.s in unit.decompose().bases:
         raise ValueError(APPLY_EXPOSURE_TIME_ERROR)
+    # Else, either unit does not include inverse time and so
+    # exposure does need to be applied, or
+    # user has set force=True and wants the correction applied
+    # regardless of the unit.
+    new_data = data / exposure_time
+    if uncertainty:
+        uncertainty_unit = uncertainty.unit / u.s if uncertainty.unit else uncertainty.unit
+        new_uncertainty = uncertainty.__class__(uncertainty.array / exposure_time, unit=uncertainty_unit)
     else:
-        # Else, either unit does not include inverse time and so
-        # exposure does need to be applied, or
-        # user has set force=True and wants the correction applied
-        # regardless of the unit.
-        new_data = data / exposure_time
-        if uncertainty:
-            uncertainty_unit = uncertainty.unit / u.s if uncertainty.unit else uncertainty.unit
-            new_uncertainty = uncertainty.__class__(uncertainty.array / exposure_time, unit=uncertainty_unit)
-        else:
-            new_uncertainty = uncertainty
-        new_unit = unit / u.s
+        new_uncertainty = uncertainty
+    new_unit = unit / u.s
     return new_data, new_uncertainty, new_unit


@@ -591,21 +586,17 @@ def _uncalculate_exposure_time_correction(data, uncertainty, unit, exposure_time
     new_unit: `astropy.unit.Unit`
         Unit of new_data array after exposure time correction.
     """
-    # If force is not set to True and unit does not include inverse time,
-    # raise error as exposure time correction has probably already been
-    # undone and should not be undone again.
     if force is not True and u.s in (unit * u.s).decompose().bases:
         raise ValueError(UNDO_EXPOSURE_TIME_ERROR)
+    # Else, either unit does include inverse time and so
+    # exposure does need to be removed, or
+    # user has set force=True and wants the correction removed
+    # regardless of the unit.
+    new_data = data * exposure_time
+    if uncertainty:
+        uncertainty_unit = uncertainty.unit * u.s if uncertainty.unit else uncertainty.unit
+        new_uncertainty = uncertainty.__class__(uncertainty.array * exposure_time, unit=uncertainty_unit)
     else:
-        # Else, either unit does include inverse time and so
-        # exposure does need to be removed, or
-        # user has set force=True and wants the correction removed
-        # regardless of the unit.
-        new_data = data * exposure_time
-        if uncertainty:
-            uncertainty_unit = uncertainty.unit * u.s if uncertainty.unit else uncertainty.unit
-            new_uncertainty = uncertainty.__class__(uncertainty.array * exposure_time, unit=uncertainty_unit)
-        else:
-            new_uncertainty = uncertainty
-        new_unit = unit * u.s
+        new_uncertainty = uncertainty
+    new_unit = unit * u.s
     return new_data, new_uncertainty, new_unit
diff --git a/sunraster/spectrogram_sequence.py b/sunraster/spectrogram_sequence.py
index 3d2a0a2c..0514a5c6 100644
--- a/sunraster/spectrogram_sequence.py
+++ b/sunraster/spectrogram_sequence.py
@@ -108,9 +108,7 @@ def apply_exposure_time_correction(self, undo=False, copy=False, force=False):
             If copy=True, a new SpectrogramSequence is returned with the correction
             applied (undone).
         """
-        converted_data_list = []
-        for cube in self.data:
-            converted_data_list.append(cube.apply_exposure_time_correction(undo=undo, force=force))
+        converted_data_list = [cube.apply_exposure_time_correction(undo=undo, force=force) for cube in self.data]
         if copy is True:
             return self.__class__(converted_data_list, meta=self.meta, common_axis=self._common_axis)
         else:
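
The `_sanitize_axis_value` hunk folds two nested `if` statements into a single test of the form `(A and B) or (C and D)`; since `and` binds tighter than `or`, the grouping holds even without the explicit parentheses. A minimal standalone sketch of the same validation shape (the names are illustrative, not sunraster's API):

    class Grid:
        shape = (2, 3)

    def sanitize(axis, value):
        # Reject: (single axis and no __len__) or (multiple axes and no .shape).
        if (len(axis) == 1 and not hasattr(value, "__len__")) or (len(axis) != 1 and not hasattr(value, "shape")):
            raise TypeError("value does not match its associated axes")
        return (len(value),) if len(axis) == 1 else value.shape

    print(sanitize((0,), [1, 2, 3]))  # (3,)
    print(sanitize((0, 1), Grid()))   # (2, 3)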
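The `raise ... from e` additions in `scm_version.py` and `read_spice_l2_fits` chain the caught exception as `__cause__` instead of discarding it, so the traceback shows the original failure alongside the re-raised one. A small sketch of the pattern (the header dict and message are stand-ins, not the real SPICE metadata handling):

    def get_obs_id(header):
        try:
            return header["SPIOBSID"]
        except KeyError as err:
            # The KeyError survives as exc.__cause__ in the traceback.
            raise ValueError("file carries no SPICE OBS ID") from err

    try:
        get_obs_id({})
    except ValueError as exc:
        print(type(exc.__cause__).__name__)  # KeyError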
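The exposure-time hunks in `spectrogram.py` replace `if ...: raise ... else: <body>` with an early raise and a dedented body — the guard-clause pattern — dropping one indentation level without changing behavior. A trimmed sketch of the corrected flow, assuming a bare data value with its unit tracked separately and skipping the uncertainty handling the real function carries:

    import astropy.units as u

    APPLY_EXPOSURE_TIME_ERROR = "unit already includes inverse time; pass force=True to reapply"

    def apply_exposure_time_correction(data, unit, exposure_time, force=False):
        # Guard clause: refuse to divide twice unless explicitly forced.
        if force is not True and u.s in unit.decompose().bases:
            raise ValueError(APPLY_EXPOSURE_TIME_ERROR)
        # Main path sits at function level; no else: block needed.
        return data / exposure_time, unit / u.s

    data, unit = apply_exposure_time_correction(100.0, u.adu, exposure_time=2.0)
    print(data, unit)  # 50.0 adu / s

Calling it again on the returned unit raises, which is exactly the double-correction the check guards against.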
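Several hunks also swap `all([...])` for `all(...)` with a bare generator and an append loop for a list comprehension; generators let `all`/`any` short-circuit without building an intermediate list. The `SPICEMeta._get_unit` hunk applies the walrus operator (Python 3.8+) in the same spirit, binding and testing in one expression. A compact illustration (the comments dict and bracket convention are made up for the demo):

    comments = {"EXPTIME": "exposure time [s] per image"}

    def get_unit(key):
        # Bind and test in one expression; the body is skipped when the key is absent.
        if comment := comments.get(key):
            try:
                # Grab the text between the first "[" and the next "]".
                return comment.split("[")[1].split("]")[0]
            except IndexError:
                return None
        return None

    print(get_unit("EXPTIME"))               # s
    print(all(v > 0 for v in [1, 2, 0, 4]))  # False -- stops at the 0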