diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index da25ae816..2110878d0 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -41,9 +41,5 @@ repos:
     rev: 'v1.10.0'
     hooks:
       - id: mypy
-        pass_filenames: false
-        args: [ ., --strict, --explicit-package-bases,
-                --disable-error-code, import-untyped,
-                --disable-error-code, import-not-found,
-                --disable-error-code, no-untyped-call,
-                --disable-error-code, type-arg ]
+        exclude: .*(tests|docs).*
+        additional_dependencies: [ numpy==1.26.4 ]
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 9d0f21b0c..d63b5dada 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -113,6 +113,9 @@
     (r"py:.*", r".*InitVar*"),
     (r"py:.*", r".*.glows.utils.constants.TimeTuple.*"),
     (r"py:.*", r".*glows.utils.constants.DirectEvent.*"),
+    (r"py:.*", r".*numpy.int.*"),
+    (r"py:.*", r".*np.ndarray.*"),
+    (r"py:.*", r".*numpy._typing._array_like._ScalarType_co.*"),
 ]

 # Ignore the inherited members from the APID IntEnum class
diff --git a/imap_processing/cdf/utils.py b/imap_processing/cdf/utils.py
index b4813dc71..5bdf3f37f 100644
--- a/imap_processing/cdf/utils.py
+++ b/imap_processing/cdf/utils.py
@@ -3,7 +3,6 @@
 import logging
 import re
 from pathlib import Path
-from typing import Optional

 import imap_data_access
 import numpy as np
@@ -25,8 +24,8 @@


 def met_to_j2000ns(
     met: np.typing.ArrayLike,
-    reference_epoch: Optional[np.datetime64] = IMAP_EPOCH,
-) -> np.typing.ArrayLike:
+    reference_epoch: np.datetime64 = IMAP_EPOCH,
+) -> np.typing.NDArray[np.int64]:
     """
     Convert mission elapsed time (MET) to nanoseconds from J2000.
@@ -56,10 +55,10 @@
     # to 32bit and overflow due to the nanosecond multiplication
     time_array = (np.asarray(met, dtype=float) * 1e9).astype(np.int64)
     # Calculate the time difference between our reference system and J2000
-    j2000_offset = (
-        (reference_epoch - J2000_EPOCH).astype("timedelta64[ns]").astype(np.int64)
-    )
-    return j2000_offset + time_array
+    j2000_offset: np.typing.NDArray[np.datetime64] = (
+        reference_epoch - J2000_EPOCH
+    ).astype("datetime64[ns]")
+    return j2000_offset.astype(np.int64) + time_array


 def load_cdf(
diff --git a/imap_processing/codice/codice_l1a.py b/imap_processing/codice/codice_l1a.py
index 9beafa9a5..94760fa6d 100644
--- a/imap_processing/codice/codice_l1a.py
+++ b/imap_processing/codice/codice_l1a.py
@@ -412,7 +412,9 @@ def process_codice_l1a(file_path: Path, data_version: str) -> xr.Dataset:
         apid = CODICEAPID.COD_HI_SECT_SPECIES_COUNTS
         table_id, plan_id, plan_step, view_id = (1, 0, 0, 6)

-    met0 = (np.datetime64("2024-04-29T00:00") - IMAP_EPOCH).astype("timedelta64[s]")
+    met0: np.timedelta64 = (np.datetime64("2024-04-29T00:00") - IMAP_EPOCH).astype(
+        "timedelta64[s]"
+    )
     met0 = met0.astype(np.int64)
     met = [met0, met0 + 1]  # Using this to match the other data products
     science_values = ""  # Currently don't have simulated data for this
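Note on the met_to_j2000ns change: the function now always returns an int64 array of nanoseconds past J2000. A minimal standalone sketch of the conversion, with illustrative epoch constants (the real values live in imap_processing.cdf.utils; the sketch also keeps a timedelta64 intermediate for clarity):

    import numpy as np
    import numpy.typing as npt

    # Illustrative epochs; the package defines its own constants.
    IMAP_EPOCH = np.datetime64("2010-01-01T00:00:00", "ns")
    J2000_EPOCH = np.datetime64("2000-01-01T11:58:55.816", "ns")

    def met_to_j2000ns_sketch(
        met: npt.ArrayLike, reference_epoch: np.datetime64 = IMAP_EPOCH
    ) -> npt.NDArray[np.int64]:
        # Scale to nanoseconds in float so 32-bit MET inputs cannot
        # overflow during the multiplication, then truncate to int64.
        time_array = (np.asarray(met, dtype=float) * 1e9).astype(np.int64)
        # Offset between the mission reference epoch and J2000, in ns.
        offset = (reference_epoch - J2000_EPOCH).astype("timedelta64[ns]")
        return offset.astype(np.int64) + time_array

    met_to_j2000ns_sketch([0.0, 1.5])  # -> int64 nanoseconds past J2000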
diff --git a/imap_processing/glows/l1b/glows_l1b_data.py b/imap_processing/glows/l1b/glows_l1b_data.py
index 8cb2850b6..938b290ab 100644
--- a/imap_processing/glows/l1b/glows_l1b_data.py
+++ b/imap_processing/glows/l1b/glows_l1b_data.py
@@ -5,6 +5,7 @@
 import json
 from dataclasses import InitVar, dataclass, field
 from pathlib import Path
+from typing import Optional

 import numpy as np

@@ -250,11 +251,11 @@ class DirectEventL1B:
     # l1a_file_name: str  # TODO: Add once L1A questions are answered
     # ancillary_data_files: np.ndarray  # TODO: Add once L1A questions are answered
     # The following variables are created from the InitVar data
-    de_flags: np.ndarray = field(init=False, default=None)
+    de_flags: Optional[np.ndarray] = field(init=False, default=None)
     # TODO: First two values of DE are sec/subsec
-    direct_event_glows_times: np.ndarray = field(init=False, default=None)
+    direct_event_glows_times: Optional[np.ndarray] = field(init=False, default=None)
     # 3rd value is pulse length
-    direct_event_pulse_lengths: np.ndarray = field(init=False, default=None)
+    direct_event_pulse_lengths: Optional[np.ndarray] = field(init=False, default=None)
     # TODO: where does the multi-event flag go?

     def __post_init__(
diff --git a/imap_processing/idex/l1/idex_l1.py b/imap_processing/idex/l1/idex_l1.py
index cde3f1a40..d664b66ee 100644
--- a/imap_processing/idex/l1/idex_l1.py
+++ b/imap_processing/idex/l1/idex_l1.py
@@ -9,6 +9,7 @@
 from enum import IntEnum

 import numpy as np
+import numpy.typing as npt
 import space_packet_parser
 import xarray as xr

@@ -421,7 +422,7 @@ def _parse_low_sample_waveform(self, waveform_raw: str) -> list[int]:
         ]
         return ints

-    def _calc_low_sample_resolution(self, num_samples: int) -> np.ndarray:
+    def _calc_low_sample_resolution(self, num_samples: int) -> npt.NDArray:
         """
         Calculate the resolution of the low samples.

@@ -447,7 +448,7 @@
         )
         return time_low_sr_data

-    def _calc_high_sample_resolution(self, num_samples: int) -> np.ndarray:
+    def _calc_high_sample_resolution(self, num_samples: int) -> npt.NDArray:
         """
         Calculate the resolution of high samples.

diff --git a/imap_processing/lo/l0/data_classes/science_counts.py b/imap_processing/lo/l0/data_classes/science_counts.py
index 9b1b60cbc..4ce678896 100644
--- a/imap_processing/lo/l0/data_classes/science_counts.py
+++ b/imap_processing/lo/l0/data_classes/science_counts.py
@@ -3,6 +3,7 @@
 from dataclasses import dataclass

 import numpy as np
+import numpy.typing as npt
 import space_packet_parser

 from imap_processing.ccsds.ccsds_data import CcsdsData
@@ -277,7 +278,7 @@ def _parse_section(
         binary_string: BinaryString,
         decompression: Decompress,
         data_shape: tuple[int, int],
-    ) -> np.array:
+    ) -> npt.NDArray:
         """
         Parse a single section of data in the science counts data binary.

@@ -322,7 +323,7 @@ def _extract_binary(
         section_length: int,
         bit_length: int,
         decompression: Decompress,
-    ) -> np.ndarray:
+    ) -> npt.NDArray:
         """
         Extract and decompress science count binary data section.

diff --git a/imap_processing/lo/l0/data_classes/science_direct_events.py b/imap_processing/lo/l0/data_classes/science_direct_events.py
index 538415e4f..bf470e401 100644
--- a/imap_processing/lo/l0/data_classes/science_direct_events.py
+++ b/imap_processing/lo/l0/data_classes/science_direct_events.py
@@ -179,7 +179,9 @@ def _decompress_data(self) -> None:

             # Case decoder indicates which parts of the data
             # are transmitted for each case.
-            case_decoder = CASE_DECODER[(case_number, self.MODE[de_idx])]
+            case_decoder = CASE_DECODER[(case_number, self.MODE[de_idx])]  # type: ignore[index]
+            # TODO: Mypy error: Invalid index type "tuple[int, ndarray[Any, Any]]" for
+            # "dict[tuple[int, int], TOFFields]"; expected type "tuple[int, int]"

             # Check the case decoder to see if the TOF field was
             # transmitted for this case. Then grab the bits from
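The Optional[np.ndarray] defaults above are the standard pattern for dataclass fields that start as None and are filled in during __post_init__; under strict mypy a plain np.ndarray annotation with default=None no longer type-checks. A sketch with illustrative names (not the actual GLOWS schema):

    from dataclasses import InitVar, dataclass, field
    from typing import Optional

    import numpy as np

    @dataclass
    class DirectEventsSketch:
        raw: InitVar[np.ndarray]
        # None until __post_init__ fills it in, hence Optional.
        flags: Optional[np.ndarray] = field(init=False, default=None)

        def __post_init__(self, raw: np.ndarray) -> None:
            self.flags = raw[:, 0]  # first column stands in for flag bits

    DirectEventsSketch(np.zeros((4, 3)))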
diff --git a/imap_processing/lo/l0/data_classes/star_sensor.py b/imap_processing/lo/l0/data_classes/star_sensor.py
index fc2652fd6..952623e53 100644
--- a/imap_processing/lo/l0/data_classes/star_sensor.py
+++ b/imap_processing/lo/l0/data_classes/star_sensor.py
@@ -54,7 +54,7 @@ class StarSensor(LoBase):
     SHCOARSE: int
     COUNT: int
     DATA_COMPRESSED: str
-    DATA: np.array
+    DATA: np.ndarray

     # TODO: Because test data does not currently exist, the init function contents
     # must be commented out for the unit tests to run properly
diff --git a/imap_processing/mag/l0/mag_l0_data.py b/imap_processing/mag/l0/mag_l0_data.py
index 0f18518b9..c1569d132 100644
--- a/imap_processing/mag/l0/mag_l0_data.py
+++ b/imap_processing/mag/l0/mag_l0_data.py
@@ -109,7 +109,9 @@ def __post_init__(self) -> None:
             # Convert string output from space_packet_parser to numpy array of
             # big-endian bytes
             self.VECTORS = np.frombuffer(
-                int(self.VECTORS, 2).to_bytes(len(self.VECTORS) // 8, "big"),
+                int(self.VECTORS, 2).to_bytes(len(self.VECTORS) // 8, "big"),  # type: ignore[arg-type]
+                # TODO: Check mypy error: Argument 1 to "int" has incompatible type
+                # "Union[ndarray[Any, Any], str]"; expected "Union[str, bytes, bytearray]"
                 dtype=np.dtype(">b"),
             )
diff --git a/imap_processing/mag/l1a/mag_l1a_data.py b/imap_processing/mag/l1a/mag_l1a_data.py
index 10162293d..58130b9d6 100644
--- a/imap_processing/mag/l1a/mag_l1a_data.py
+++ b/imap_processing/mag/l1a/mag_l1a_data.py
@@ -6,6 +6,7 @@
 from math import floor

 import numpy as np
+import numpy.typing as npt

 from imap_processing.cdf.utils import J2000_EPOCH, met_to_j2000ns

@@ -190,7 +191,7 @@ class MagL1a:
     is_mago: bool
     is_active: int
     shcoarse: int
-    vectors: np.array
+    vectors: np.ndarray
     starting_packet: InitVar[MagL1aPacketProperties]
     packet_definitions: dict[np.datetime64, MagL1aPacketProperties] = field(init=False)
     most_recent_sequence: int = field(init=False)
@@ -216,14 +217,14 @@ def __post_init__(self, starting_packet: MagL1aPacketProperties) -> None:
         self.most_recent_sequence = starting_packet.src_seq_ctr

     def append_vectors(
-        self, additional_vectors: np.array, packet_properties: MagL1aPacketProperties
+        self, additional_vectors: np.ndarray, packet_properties: MagL1aPacketProperties
     ) -> None:
         """
         Append additional vectors to the current vectors array.

         Parameters
         ----------
-        additional_vectors : numpy.array
+        additional_vectors : numpy.ndarray
             New vectors to append.
         packet_properties : MagL1aPacketProperties
             Additional vector definition to add to the l0_packets dictionary.
@@ -244,7 +245,7 @@

     @staticmethod
     def calculate_vector_time(
         vectors: np.ndarray, vectors_per_sec: int, start_time: TimeTuple
-    ) -> np.array:
+    ) -> npt.NDArray:
         """
         Add timestamps to the vector list, turning the shape from (n, 4) to (n, 5).
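For context on the ignore in mag_l0_data.py: VECTORS arrives from space_packet_parser as a string of bits, and the conversion packs it into big-endian signed bytes. The same idea in isolation, using an illustrative 16-bit input rather than real MAG data:

    import numpy as np

    bits = "0000000111111111"  # two bytes: 0x01, 0xFF
    raw = int(bits, 2).to_bytes(len(bits) // 8, "big")
    vectors = np.frombuffer(raw, dtype=np.dtype(">b"))
    # vectors -> array([ 1, -1], dtype=int8); 0xFF is -1 when read as signed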
diff --git a/imap_processing/swapi/l1/swapi_l1.py b/imap_processing/swapi/l1/swapi_l1.py
index cd3c29f62..d82796c22 100644
--- a/imap_processing/swapi/l1/swapi_l1.py
+++ b/imap_processing/swapi/l1/swapi_l1.py
@@ -3,6 +3,7 @@
 import copy

 import numpy as np
+import numpy.typing as npt
 import xarray as xr

 from imap_processing import imap_module_directory
@@ -11,7 +12,7 @@
 from imap_processing.utils import packet_file_to_datasets


-def filter_good_data(full_sweep_sci: xr.Dataset) -> np.ndarray:
+def filter_good_data(full_sweep_sci: xr.Dataset) -> npt.NDArray:
     """
     Filter out bad data sweep indices.

@@ -29,7 +30,7 @@ def filter_good_data(full_sweep_sci: xr.Dataset) -> np.ndarray:

     Returns
     -------
-    numpy.ndarray
+    good_data_indices : numpy.ndarray
         Good data sweep indices.
     """
     # PLAN_ID for current sweep should all be one value and
@@ -70,7 +71,7 @@ def filter_good_data(full_sweep_sci: xr.Dataset) -> np.ndarray:


 def decompress_count(
     count_data: np.ndarray, compression_flag: np.ndarray
-) -> np.ndarray:
+) -> npt.NDArray:
     """
     Will decompress counts based on compression indicators.
@@ -99,7 +100,7 @@

     Returns
     -------
-    numpy.ndarray
+    new_count : numpy.ndarray
         Array with decompressed counts.
     """
     # Decompress counts based on compression indicators
@@ -120,7 +121,7 @@
     return new_count


-def find_sweep_starts(packets: xr.Dataset) -> np.ndarray:
+def find_sweep_starts(packets: xr.Dataset) -> npt.NDArray:
     """
     Find index of where new cycle started.

@@ -138,7 +139,7 @@

     Returns
     -------
-    numpy.ndarray
+    indices_start : numpy.ndarray
         Array of indices of start cycle.
     """
     if packets["epoch"].size < 12:
@@ -175,7 +176,7 @@
     return np.where(valid)[0]


-def get_indices_of_full_sweep(packets: xr.Dataset) -> np.ndarray:
+def get_indices_of_full_sweep(packets: xr.Dataset) -> npt.NDArray:
     """
     Get indices of full cycles.

@@ -195,7 +196,7 @@

     Returns
     -------
-    numpy.ndarray
+    full_cycle_indices : numpy.ndarray
         1D array with indices of full cycle data.
     """
     indices_of_start = find_sweep_starts(packets)
diff --git a/imap_processing/swe/l1a/swe_science.py b/imap_processing/swe/l1a/swe_science.py
index b7ae35518..034226d94 100644
--- a/imap_processing/swe/l1a/swe_science.py
+++ b/imap_processing/swe/l1a/swe_science.py
@@ -112,7 +112,7 @@ def swe_science(decom_data: list, data_version: str) -> xr.Dataset:

     science_array = []
     raw_science_array = []

-    metadata_arrays: np.array = collections.defaultdict(list)
+    metadata_arrays: dict[str, list] = collections.defaultdict(list)

     # We know we can only have 8 bit numbers input, so iterate over all
     # possibilities once up front
@@ -133,7 +133,7 @@
         # where 1260 = 180 x 7 CEMs
         # Take the "raw_counts" indices/counts mapping from
         # decompression_table and then reshape the return
-        uncompress_data = np.take(decompression_table, raw_counts).reshape(180, 7)
+        uncompress_data = np.take(decompression_table, raw_counts).reshape(180, 7)  # type: ignore[attr-defined]

         # Save raw counts data as well
         raw_counts = raw_counts.reshape(180, 7)
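On the metadata_arrays annotation in swe_science.py: collections.defaultdict(list) maps a field name to a growing list, so a two-parameter dict annotation describes it accurately (the old np.array annotation was simply wrong). Sketch:

    import collections

    # One list per packet field; appending creates the key on first use.
    metadata_arrays: dict[str, list] = collections.defaultdict(list)
    metadata_arrays["shcoarse"].append(123)
    metadata_arrays["shcoarse"].append(124)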
diff --git a/imap_processing/swe/l1b/swe_l1b_science.py b/imap_processing/swe/l1b/swe_l1b_science.py
index 648fb94bc..f4fb25133 100644
--- a/imap_processing/swe/l1b/swe_l1b_science.py
+++ b/imap_processing/swe/l1b/swe_l1b_science.py
@@ -4,6 +4,7 @@
 from typing import Any

 import numpy as np
+import numpy.typing as npt
 import pandas as pd
 import xarray as xr

@@ -70,7 +71,7 @@
     raise ValueError("Error: Invalid table index value")


-def deadtime_correction(counts: np.ndarray, acq_duration: int) -> np.ndarray:
+def deadtime_correction(counts: np.ndarray, acq_duration: int) -> npt.NDArray:
     """
     Calculate deadtime correction.

@@ -118,7 +119,7 @@
     return corrected_count


-def convert_counts_to_rate(data: np.ndarray, acq_duration: int) -> np.ndarray:
+def convert_counts_to_rate(data: np.ndarray, acq_duration: int) -> npt.NDArray:
     """
     Convert counts to rate using sampling time.

@@ -206,7 +207,7 @@ def apply_in_flight_calibration(data: np.ndarray) -> None:


 def populate_full_cycle_data(
     l1a_data: xr.Dataset, packet_index: int, esa_table_num: int
-) -> np.ndarray:
+) -> npt.NDArray:
     """
     Populate full cycle data array using esa lookup table and l1a_data.
@@ -277,7 +278,7 @@
     return full_cycle_data


-def find_cycle_starts(cycles: np.ndarray) -> np.ndarray:
+def find_cycle_starts(cycles: np.ndarray) -> npt.NDArray:
     """
     Find index of where new cycle started.

@@ -312,7 +313,7 @@
     return np.where(valid)[0]


-def get_indices_of_full_cycles(quarter_cycle: np.ndarray) -> np.ndarray:
+def get_indices_of_full_cycles(quarter_cycle: np.ndarray) -> npt.NDArray:
     """
     Get indices of full cycles.
diff --git a/imap_processing/ultra/l0/decom_tools.py b/imap_processing/ultra/l0/decom_tools.py
index 81eda0a59..f099c4d16 100644
--- a/imap_processing/ultra/l0/decom_tools.py
+++ b/imap_processing/ultra/l0/decom_tools.py
@@ -1,6 +1,7 @@
 """Ultra Decompression Tools."""

 import numpy as np
+import numpy.typing as npt
 import space_packet_parser

 from imap_processing.ultra.l0.ultra_utils import (
@@ -154,7 +155,7 @@ def decompress_image(
     binary_data: str,
     width_bit: int,
     mantissa_bit_length: int,
-) -> np.ndarray:
+) -> npt.NDArray:
     """
     Will decompress a binary string representing an image into a matrix of pixel values.
diff --git a/imap_processing/ultra/l0/decom_ultra.py b/imap_processing/ultra/l0/decom_ultra.py
index a8d51f908..f01f793b8 100644
--- a/imap_processing/ultra/l0/decom_ultra.py
+++ b/imap_processing/ultra/l0/decom_ultra.py
@@ -31,7 +31,7 @@
 def append_tof_params(
     decom_data: dict,
     packet: Packet,
-    decompressed_data: list,
+    decompressed_data: np.ndarray,
     data_dict: dict,
     stacked_dict: dict,
 ) -> None:
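npt.NDArray, used for the return annotations above, is effectively np.ndarray with an unconstrained dtype, so it satisfies strict mode without committing every function to a specific element type. A sketch in the spirit of find_cycle_starts (the real quarter-cycle logic is more involved):

    import numpy as np
    import numpy.typing as npt

    def find_starts_sketch(cycles: np.ndarray) -> npt.NDArray:
        # Indices where the value increments by exactly one.
        valid = np.diff(cycles) == 1
        return np.where(valid)[0]

    find_starts_sketch(np.array([0, 1, 2, 5, 6]))  # -> array([0, 1, 3])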
diff --git a/imap_processing/ultra/l1b/lookup_utils.py b/imap_processing/ultra/l1b/lookup_utils.py
index 505fabb6c..d490a2c2b 100644
--- a/imap_processing/ultra/l1b/lookup_utils.py
+++ b/imap_processing/ultra/l1b/lookup_utils.py
@@ -1,6 +1,7 @@
 """Contains tools for lookup tables for l1b."""

 import numpy as np
+import numpy.typing as npt
 import pandas as pd

 from imap_processing import imap_module_directory
@@ -24,7 +25,7 @@
 _IMAGE_PARAMS_DF = pd.read_csv(BASE_PATH / "FM45_Startup1_ULTRA_IMGPARAMS_20240719.csv")


-def get_y_adjust(dy_lut: np.ndarray) -> np.ndarray:
+def get_y_adjust(dy_lut: np.ndarray) -> npt.NDArray:
     """
     Adjust the front yf position based on the particle's trajectory.

@@ -45,7 +46,7 @@
     return _YADJUST_DF["dYAdj"].values[dy_lut]


-def get_norm(dn: np.ndarray, key: str, file_label: str) -> np.ndarray:
+def get_norm(dn: np.ndarray, key: str, file_label: str) -> npt.NDArray:
     """
     Correct mismatches between the stop Time to Digital Converters (TDCs).

@@ -80,7 +81,7 @@
     return tdc_norm_df[key].values[dn]


-def get_back_position(back_index: np.ndarray, key: str, file_label: str) -> np.ndarray:
+def get_back_position(back_index: np.ndarray, key: str, file_label: str) -> npt.NDArray:
     """
     Convert normalized TDC values using lookup tables.

@@ -113,7 +114,7 @@ def get_back_position(back_index: np.ndarray, key: str, file_label: str) -> np.n
     return back_pos_df[key].values[back_index]


-def get_energy_norm(ssd: np.ndarray, composite_energy: np.ndarray) -> np.ndarray:
+def get_energy_norm(ssd: np.ndarray, composite_energy: np.ndarray) -> npt.NDArray:
     """
     Normalize composite energy per SSD using a lookup table.

@@ -157,4 +158,5 @@
     value : np.float64
         Image parameter value from the CSV file.
     """
-    return _IMAGE_PARAMS_DF[image].values[0]
+    value: np.float64 = _IMAGE_PARAMS_DF[image].values[0]
+    return value
diff --git a/imap_processing/utils.py b/imap_processing/utils.py
index 01abd6c3b..680cc3f2d 100644
--- a/imap_processing/utils.py
+++ b/imap_processing/utils.py
@@ -6,6 +6,7 @@
 from typing import Optional, Union

 import numpy as np
+import numpy.typing as npt
 import pandas as pd
 import xarray as xr
 from space_packet_parser import parser, xtcedef
@@ -279,7 +280,7 @@ def _get_minimum_numpy_datatype(  # noqa: PLR0912 - Too many branches
     return datatype


-def _create_minimum_dtype_array(values: list, dtype: str) -> np.ndarray:
+def _create_minimum_dtype_array(values: list, dtype: str) -> npt.NDArray:
     """
     Create an array with the minimum datatype.

diff --git a/pyproject.toml b/pyproject.toml
index 24fddad61..df0ef2a95 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -118,6 +118,13 @@ checks = ["all", #report on all checks, except the following
 exclude = ['__init__' ]  # don't report on objects that match any of these regex

 [tool.mypy]
-follow_imports = 'skip'
-ignore_missing_imports = 'true'
-exclude = ["tests", "docs"]
+disable_error_code = ['import-not-found',  # Unable to locate module or package specified
+                      'import-untyped',  # The import lacks type information
+                      'no-untyped-call',  # Disallow calling untyped functions from typed code
+                      'type-arg']  # Require explicit type arguments for list[...] and dict[...]
+strict = true
+explicit_package_bases = true
+follow_imports = 'skip'  # may want to remove
+exclude = ["tests"]
+packages = ["imap_processing", "tools"]
+plugins = 'numpy.typing.mypy_plugin'
diff --git a/tools/spice/spice_examples.py b/tools/spice/spice_examples.py
index 8bf7b6d4e..dcf15cbcc 100644
--- a/tools/spice/spice_examples.py
+++ b/tools/spice/spice_examples.py
@@ -8,7 +8,7 @@

 import logging

-import numpy as np
+import numpy.typing as npt
 import spiceypy as spice

 # Logger setup
@@ -68,7 +68,7 @@ def get_attitude_timerange(ck_kernel: str, id: int) -> tuple:


 def _get_particle_velocity(
     direct_events: dict,
-) -> np.ndarray:
+) -> npt.NDArray:
     """
     Get the particle velocity in the heliosphere frame.
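The two-line change in get_image_params above shows the common workaround for no-any-return under strict mode: pandas .values is typed as Any, so an intermediate annotated variable pins the return type. A sketch against a hypothetical one-column table:

    import numpy as np
    import pandas as pd

    df = pd.DataFrame({"IMG_PARAM": [2.5]})  # hypothetical lookup table

    def get_param(column: str) -> np.float64:
        # .values[0] is Any; the annotation makes the return checkable.
        value: np.float64 = df[column].values[0]
        return value

    get_param("IMG_PARAM")  # -> 2.5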