diff --git a/imap_processing/idex/l1/idex_l1.py b/imap_processing/idex/l1/idex_l1.py
index d664b66ee..0865a5e3b 100644
--- a/imap_processing/idex/l1/idex_l1.py
+++ b/imap_processing/idex/l1/idex_l1.py
@@ -1,12 +1,14 @@
 """
-Perform IDEX l1 Processing.
+Perform IDEX L1 Processing.
 
-This module processes decommutated IDEX packets and creates l1 data products.
+This module processes decommutated IDEX packets and creates L1 data products.
 """
 
 import logging
 from collections import namedtuple
 from enum import IntEnum
+from pathlib import Path
+from typing import Union
 
 import numpy as np
 import numpy.typing as npt
@@ -19,9 +21,64 @@
 
 logger = logging.getLogger(__name__)
 
+# TODO: Generate quicklook plots
+# TODO: Currently the code assumes that one L0 file will produce one L1 file.
+#       Is this a valid assumption?
+
+# Create a large dictionary of values from the FPGA header that need to be
+# captured into the CDF file. They are lumped together because they share
+# similar attributes.
+# Notes about the variables are set here, acting as comments and will also be
+# placed into the CDF in the VAR_NOTES attribute.
+TRIGGER_DESCRIPTION = namedtuple(
+    "TRIGGER_DESCRIPTION",
+    ["name", "packet_name"],
+)
+TRIGGER_DESCRIPTION_DICT = {
+    trigger.name: trigger
+    for trigger in [
+        TRIGGER_DESCRIPTION("event_number", "IDX__TXHDREVTNUM"),
+        TRIGGER_DESCRIPTION("tof_high_trigger_level", "IDX__TXHDRHGTRIGLVL"),
+        TRIGGER_DESCRIPTION("tof_high_trigger_num_max_1_2", "IDX__TXHDRHGTRIGNMAX12"),
+        TRIGGER_DESCRIPTION("tof_high_trigger_num_min_1_2", "IDX__TXHDRHGTRIGNMIN12"),
+        TRIGGER_DESCRIPTION("tof_high_trigger_num_min_1", "IDX__TXHDRHGTRIGNMIN1"),
+        TRIGGER_DESCRIPTION("tof_high_trigger_num_max_1", "IDX__TXHDRHGTRIGNMAX1"),
+        TRIGGER_DESCRIPTION("tof_high_trigger_num_min_2", "IDX__TXHDRHGTRIGNMIN2"),
+        TRIGGER_DESCRIPTION("tof_high_trigger_num_max_2", "IDX__TXHDRHGTRIGNMAX2"),
+        TRIGGER_DESCRIPTION("tof_low_trigger_level", "IDX__TXHDRLGTRIGLVL"),
+        TRIGGER_DESCRIPTION("tof_low_trigger_num_max_1_2", "IDX__TXHDRLGTRIGNMAX12"),
+        TRIGGER_DESCRIPTION("tof_low_trigger_num_min_1_2", "IDX__TXHDRLGTRIGNMIN12"),
+        TRIGGER_DESCRIPTION("tof_low_trigger_num_min_1", "IDX__TXHDRLGTRIGNMIN1"),
+        TRIGGER_DESCRIPTION("tof_low_trigger_num_max_1", "IDX__TXHDRLGTRIGNMAX1"),
+        TRIGGER_DESCRIPTION("tof_low_trigger_num_min_2", "IDX__TXHDRLGTRIGNMIN2"),
+        TRIGGER_DESCRIPTION("tof_low_trigger_num_max_2", "IDX__TXHDRLGTRIGNMAX2"),
+        TRIGGER_DESCRIPTION("tof_mid_trigger_level", "IDX__TXHDRMGTRIGLVL"),
+        TRIGGER_DESCRIPTION("tof_mid_trigger_num_max_1_2", "IDX__TXHDRMGTRIGNMAX12"),
+        TRIGGER_DESCRIPTION("tof_mid_trigger_num_min_1_2", "IDX__TXHDRMGTRIGNMIN12"),
+        TRIGGER_DESCRIPTION("tof_mid_trigger_num_min_1", "IDX__TXHDRMGTRIGNMIN1"),
+        TRIGGER_DESCRIPTION("tof_mid_trigger_num_max_1", "IDX__TXHDRMGTRIGNMAX1"),
+        TRIGGER_DESCRIPTION("tof_mid_trigger_num_min_2", "IDX__TXHDRMGTRIGNMIN2"),
+        TRIGGER_DESCRIPTION("tof_mid_trigger_num_max_2", "IDX__TXHDRMGTRIGNMAX2"),
+        TRIGGER_DESCRIPTION("low_sample_coincidence_mode_blocks", "IDX__TXHDRLSTRIGCMBLOCKS"),  # noqa
+        TRIGGER_DESCRIPTION("low_sample_trigger_polarity", "IDX__TXHDRLSTRIGPOL"),
+        TRIGGER_DESCRIPTION("low_sample_trigger_level", "IDX__TXHDRLSTRIGLVL"),
+        TRIGGER_DESCRIPTION("low_sample_trigger_num_min", "IDX__TXHDRLSTRIGNMIN"),
+        TRIGGER_DESCRIPTION("low_sample_trigger_mode", "IDX__TXHDRLSTRIGMODE"),
+        TRIGGER_DESCRIPTION("tof_low_trigger_mode", "IDX__TXHDRLSTRIGMODE"),
+        TRIGGER_DESCRIPTION("tof_mid_trigger_mode", "IDX__TXHDRMGTRIGMODE"),
+        TRIGGER_DESCRIPTION("tof_high_trigger_mode", "IDX__TXHDRHGTRIGMODE"),
+        TRIGGER_DESCRIPTION("detector_voltage", "IDX__TXHDRHVPSHKCH0"),
+        TRIGGER_DESCRIPTION("sensor_voltage", "IDX__TXHDRHVPSHKCH1"),
+        TRIGGER_DESCRIPTION("target_voltage", "IDX__TXHDRHVPSHKCH2"),
+        TRIGGER_DESCRIPTION("reflectron_voltage", "IDX__TXHDRHVPSHKCH3"),
+        TRIGGER_DESCRIPTION("rejection_voltage", "IDX__TXHDRHVPSHKCH4"),
+        TRIGGER_DESCRIPTION("detector_current", "IDX__TXHDRHVPSHKCH5"),
+    ]
+}  # fmt: skip
+
 
 class Scitype(IntEnum):
-    """IDEX Science Type."""
+    """Define parameters for IDEX Science Type."""
 
     FIRST_PACKET = 1
     TOF_HIGH = 2
@@ -32,89 +89,13 @@ class Scitype(IntEnum):
     ION_GRID = 64
 
 
-"""
-Creates a large dictionary of values from the FPGA header
-that need to be captured into the CDF file. They are lumped together because
-they share similar attributes.
-
-Notes about the variables are set here, acting as comments and will also be
-placed into the CDF in the VAR_NOTES attribute.
-"""
-TriggerDescription = namedtuple(
-    "TriggerDescription",
-    ["name", "packet_name"],
-)
-trigger_description_dict = {
-    trigger.name: trigger
-    for trigger in [
-        TriggerDescription("event_number", "IDX__TXHDREVTNUM"),
-        TriggerDescription("tof_high_trigger_level", "IDX__TXHDRHGTRIGLVL"),
-        TriggerDescription("tof_high_trigger_num_max_1_2", "IDX__TXHDRHGTRIGNMAX12"),
-        TriggerDescription("tof_high_trigger_num_min_1_2", "IDX__TXHDRHGTRIGNMIN12"),
-        TriggerDescription("tof_high_trigger_num_min_1", "IDX__TXHDRHGTRIGNMIN1"),
-        TriggerDescription("tof_high_trigger_num_max_1", "IDX__TXHDRHGTRIGNMAX1"),
-        TriggerDescription("tof_high_trigger_num_min_2", "IDX__TXHDRHGTRIGNMIN2"),
-        TriggerDescription("tof_high_trigger_num_max_2", "IDX__TXHDRHGTRIGNMAX2"),
-        TriggerDescription("tof_low_trigger_level", "IDX__TXHDRLGTRIGLVL"),
-        TriggerDescription("tof_low_trigger_num_max_1_2", "IDX__TXHDRLGTRIGNMAX12"),
-        TriggerDescription("tof_low_trigger_num_min_1_2", "IDX__TXHDRLGTRIGNMIN12"),
-        TriggerDescription("tof_low_trigger_num_min_1", "IDX__TXHDRLGTRIGNMIN1"),
-        TriggerDescription("tof_low_trigger_num_max_1", "IDX__TXHDRLGTRIGNMAX1"),
-        TriggerDescription("tof_low_trigger_num_min_2", "IDX__TXHDRLGTRIGNMIN2"),
-        TriggerDescription("tof_low_trigger_num_max_2", "IDX__TXHDRLGTRIGNMAX2"),
-        TriggerDescription("tof_mid_trigger_level", "IDX__TXHDRMGTRIGLVL"),
-        TriggerDescription("tof_mid_trigger_num_max_1_2", "IDX__TXHDRMGTRIGNMAX12"),
-        TriggerDescription("tof_mid_trigger_num_min_1_2", "IDX__TXHDRMGTRIGNMIN12"),
-        TriggerDescription("tof_mid_trigger_num_min_1", "IDX__TXHDRMGTRIGNMIN1"),
-        TriggerDescription("tof_mid_trigger_num_max_1", "IDX__TXHDRMGTRIGNMAX1"),
-        TriggerDescription("tof_mid_trigger_num_min_2", "IDX__TXHDRMGTRIGNMIN2"),
-        TriggerDescription("tof_mid_trigger_num_max_2", "IDX__TXHDRMGTRIGNMAX2"),
-        TriggerDescription("low_sample_coincidence_mode_blocks", "IDX__TXHDRLSTRIGCMBLOCKS"),  # noqa
-        TriggerDescription("low_sample_trigger_polarity", "IDX__TXHDRLSTRIGPOL"),
-        TriggerDescription("low_sample_trigger_level", "IDX__TXHDRLSTRIGLVL"),
-        TriggerDescription("low_sample_trigger_num_min", "IDX__TXHDRLSTRIGNMIN"),
-        TriggerDescription("low_sample_trigger_mode", "IDX__TXHDRLSTRIGMODE"),
-        TriggerDescription("tof_low_trigger_mode", "IDX__TXHDRLSTRIGMODE"),
-        TriggerDescription("tof_mid_trigger_mode", "IDX__TXHDRMGTRIGMODE"),
-        TriggerDescription("tof_high_trigger_mode", "IDX__TXHDRHGTRIGMODE"),
-        TriggerDescription("detector_voltage", "IDX__TXHDRHVPSHKCH0"),
-        TriggerDescription("sensor_voltage", "IDX__TXHDRHVPSHKCH1"),
-        TriggerDescription("target_voltage", "IDX__TXHDRHVPSHKCH2"),
TriggerDescription("reflectron_voltage", "IDX__TXHDRHVPSHKCH3"), - TriggerDescription("rejection_voltage", "IDX__TXHDRHVPSHKCH4"), - TriggerDescription("detector_current", "IDX__TXHDRHVPSHKCH5"), - ] -} # fmt: skip - - -def get_idex_attrs(data_version: str) -> ImapCdfAttributes: - """ - Load in CDF attributes for IDEX instrument. - - Parameters - ---------- - data_version : str - Data version for CDF filename, in the format "vXXX". - - Returns - ------- - idex_attrs : ImapCdfAttributes - Imap object with l1a attribute files loaded in. - """ - idex_attrs = ImapCdfAttributes() - idex_attrs.add_instrument_global_attrs("idex") - idex_attrs.add_instrument_variable_attrs("idex", "l1") - idex_attrs.add_global_attribute("Data_version", data_version) - return idex_attrs - - class PacketParser: """ IDEX packet parsing class. Encapsulates the decom work needed to decom a daily file of IDEX data - received from the POC. The class is instantiated with a reference to a L0 file as - it exists on the local file system. + received from the POC. The class is instantiated with a reference to a L0 + file as it exists on the local file system. Parameters ---------- @@ -123,34 +104,30 @@ class PacketParser: data_version : str The version of the data product being created. - Methods - ------- - TODO : Add method to generate quicklook plots - Examples -------- .. code-block:: python - from imap_processing.idex.idex_packet_parser import PacketParser + from imap_processing.idex.idex_l1 import PacketParser l0_file = "imap_processing/tests/idex/imap_idex_l0_sci_20230725_v001.pkts" l1_data = PacketParser(l0_file, data_version) l1_data.write_l1_cdf() """ - def __init__(self, packet_file: str, data_version: str) -> None: + def __init__(self, packet_file: Union[str, Path], data_version: str) -> None: """ - Read a l0 pkts file and perform all of the decom work. + Read a L0 pkts file and perform all of the decom work. Parameters ---------- - packet_file : str + packet_file : pathlib.Path | str The path and filename to the L0 file to read. data_version : str The version of the data product being created. Notes ----- - Currently assumes one L0 file will generate exactly one l1a file. + Currently assumes one L0 file will generate exactly one L1 file. """ decom_packet_list = decom_packets(packet_file) @@ -170,7 +147,7 @@ def __init__(self, packet_file: str, data_version: str) -> None: ) else: # Populate the IDEXRawDustEvent with 1's and 0's - dust_events[event_number].parse_packet(packet) + dust_events[event_number]._populate_bit_strings(packet) else: logger.warning(f"Unhandled packet received: {packet}") @@ -187,15 +164,15 @@ class RawDustEvent: """ Encapsulate IDEX Raw Dust Event. - Encapsulates the work needed to convert a single dust event into a - processed XArray Dateset object. + Encapsulates the work needed to convert a single dust event into a processed + ``xarray`` ``dateset`` object. Parameters ---------- header_packet : space_packet_parser.parser.Packet The FPGA metadata event header. data_version : str - The version of the data product being created. + The version of the data product being created. Attributes ---------- @@ -210,14 +187,24 @@ class RawDustEvent: Methods ------- + _append_raw_data(scitype, bits) + Append data to the appropriate bit string. _set_impact_time(packet) + Calculate the datetime64 from the FPGA header information. _set_sample_trigger_times(packet) + Calculate the actual sample trigger time. _parse_high_sample_waveform(waveform_raw) + Will process the high sample waveform. 
     _parse_low_sample_waveform(waveform_raw)
+        Will process the low sample waveform.
     _calc_low_sample_resolution(num_samples)
+        Calculate the resolution of the low samples.
     _calc_high_sample_resolution(num_samples)
-    parse_packet(packet)
+        Calculate the resolution of high samples.
+    _populate_bit_strings(packet)
+        Parse IDEX data packets to populate bit strings.
     process()
+        Will process the raw data into a xarray.Dataset.
     """
 
     # Constants
@@ -248,7 +235,7 @@ def __init__(
         header_packet : space_packet_parser.parser.Packet
             The FPGA metadata event header.
         data_version : str
-            Data version for CDF filename, in the format "vXXX".
+            Data version for CDF filename, in the format ``vXXX``.
         """
         # Calculate the impact time in seconds since epoch
         self.impact_time = 0
@@ -259,10 +246,11 @@ def __init__(
         self.low_sample_trigger_time = 0
         self.high_sample_trigger_time = 0
         self._set_sample_trigger_times(header_packet)
+
+        # Iterate through the trigger description dictionary and pull out the values
         self.trigger_values = {
             trigger.name: header_packet.data[trigger.packet_name].raw_value
-            for trigger in trigger_description_dict.values()
+            for trigger in TRIGGER_DESCRIPTION_DICT.values()
         }
         logger.debug(
             f"trigger_values:\n{self.trigger_values}"
@@ -278,11 +266,41 @@ def __init__(
 
         self.cdf_attrs = get_idex_attrs(data_version)
 
+    def _append_raw_data(self, scitype: Scitype, bits: str) -> None:
+        """
+        Append data to the appropriate bit string.
+
+        This function determines which variable to append the bits to, given a
+        specific scitype.
+
+        Parameters
+        ----------
+        scitype : Scitype
+            The science type of the data.
+        bits : str
+            The binary data to append.
+        """
+        if scitype == Scitype.TOF_HIGH:
+            self.TOF_High_bits += bits
+        elif scitype == Scitype.TOF_LOW:
+            self.TOF_Low_bits += bits
+        elif scitype == Scitype.TOF_MID:
+            self.TOF_Mid_bits += bits
+        elif scitype == Scitype.TARGET_LOW:
+            self.Target_Low_bits += bits
+        elif scitype == Scitype.TARGET_HIGH:
+            self.Target_High_bits += bits
+        elif scitype == Scitype.ION_GRID:
+            self.Ion_Grid_bits += bits
+        else:
+            logger.warning("Unknown science type received: [%s]", scitype)
+
     def _set_impact_time(self, packet: space_packet_parser.parser.Packet) -> None:
         """
-        Calculate the datetime64 from the FPGA header information.
+        Calculate the impact time from the FPGA header information.
 
-        We are given the MET seconds, we need convert it to UTC.
+        We are given the MET seconds, which we need to convert to UTC in type
+        ``np.datetime64``.
 
         Parameters
         ----------
@@ -291,9 +309,9 @@ def _set_impact_time(self, packet: space_packet_parser.parser.Packet) -> None:
 
         Notes
        -----
-        Todo
-        This conversion is temporary for now, and will need SPICE in the future.
-        IDEX has set the time launch to Jan 1 2012 for calibration testing.
+        TODO: This conversion is temporary for now, and will need SPICE in the
+        future. IDEX has set the time launch to Jan 1 2012 for calibration
+        testing.
        """
        # Number of seconds since epoch (nominally the launch time)
        seconds_since_launch = packet.data["SHCOARSE"].derived_value
@@ -324,24 +342,24 @@ def _set_sample_trigger_times(
 
         Notes
         -----
-        A "sample" is one single data point.
+        A "sample" is one single data point.
 
-        A "block" is ~1.969 microseconds of data collection (8/4.0625).
-        The only time that a block of data matters is in this function.
+        A "block" is ~1.969 microseconds of data collection (8/4.0625). The only
+        time that a block of data matters is in this function.
-        Because the low sample data are taken every 1/4.0625 microseconds,
-        there are 8 samples in one block of data.
+        Because the low sample data are taken every 1/4.0625 microseconds, there
+        are 8 samples in one block of data.
 
-        Because the high sample data are taken every 1/260 microseconds,
-        there are 512 samples in one block of High Sample data.
+        Because the high sample data are taken every 1/260 microseconds, there
+        are 512 samples in one block of High Sample data.
 
-        The header has information about the number of blocks before triggering,
-        rather than the number of samples before triggering.
+        The header has information about the number of blocks before triggering,
+        rather than the number of samples before triggering.
         """
         # Retrieve the number of samples of high gain delay
         high_gain_delay = packet.data["IDX__TXHDRADC0IDELAY"].raw_value
 
-        # Retrieve number of low/high sample pretrigger blocks
+        # Retrieve number of low/high sample pre-trigger blocks
         num_low_sample_pretrigger_blocks = packet.data[
             "IDX__TXHDRLSPREBLOCKS"
         ].derived_value
@@ -474,7 +492,7 @@ def _calc_high_sample_resolution(self, num_samples: int) -> npt.NDArray:
         )
         return time_high_sr_data
 
-    def parse_packet(self, packet: space_packet_parser.parser.Packet) -> None:
+    def _populate_bit_strings(self, packet: space_packet_parser.parser.Packet) -> None:
         """
         Parse IDEX data packets to populate bit strings.
 
@@ -488,46 +506,17 @@ def parse_packet(self, packet: space_packet_parser.parser.Packet) -> None:
         raw_science_bits = packet.data["IDX__SCI0RAW"].raw_value
         self._append_raw_data(scitype, raw_science_bits)
 
-    def _append_raw_data(self, scitype: Scitype, bits: str) -> None:
-        """
-        Append data to the appropriate bit string.
-
-        This function determines which variable to append the bits
-        to, given a specific scitype.
-
-        Parameters
-        ----------
-        scitype : Scitype
-            The science type of the data.
-        bits : str
-            The binary data to append.
-        """
-        if scitype == Scitype.TOF_HIGH:
-            self.TOF_High_bits += bits
-        elif scitype == Scitype.TOF_LOW:
-            self.TOF_Low_bits += bits
-        elif scitype == Scitype.TOF_MID:
-            self.TOF_Mid_bits += bits
-        elif scitype == Scitype.TARGET_LOW:
-            self.Target_Low_bits += bits
-        elif scitype == Scitype.TARGET_HIGH:
-            self.Target_High_bits += bits
-        elif scitype == Scitype.ION_GRID:
-            self.Ion_Grid_bits += bits
-        else:
-            logger.warning("Unknown science type received: [%s]", scitype)
-
     def process(self) -> xr.Dataset:
         """
-        Will process the raw data into a xarray.Dataset.
+        Will process the raw data into a ``xarray.Dataset``.
 
-        To be called after all packets for the IDEX event have been parsed
-        Parses the binary data into numpy integer arrays, and combines them
-        into a xarray.Dataset object.
+        To be called after all packets for the IDEX event have been parsed.
+        Parses the binary data into numpy integer arrays, and combines them into
+        a ``xarray.Dataset`` object.
 
         Returns
         -------
-        xarray.Dataset
+        dataset : xarray.Dataset
             A Dataset object containing the data from a single impact.
""" # Create object for CDF attrs @@ -536,7 +525,7 @@ def process(self) -> xr.Dataset: # Gather the huge number of trigger info metadata trigger_vars = {} for var, value in self.trigger_values.items(): - trigger_description = trigger_description_dict[var] + trigger_description = TRIGGER_DESCRIPTION_DICT[var] trigger_vars[var] = xr.DataArray( name=var, data=[value], @@ -549,42 +538,36 @@ def process(self) -> xr.Dataset: name="TOF_High", data=[self._parse_high_sample_waveform(self.TOF_High_bits)], dims=("epoch", "time_high_ssr_dim"), - # attrs=idex_cdf_attrs.tof_high_attrs.output(), attrs=idex_attrs.get_variable_attributes("tof_high_attrs"), ) tof_low_xr = xr.DataArray( name="TOF_Low", data=[self._parse_high_sample_waveform(self.TOF_Low_bits)], dims=("epoch", "time_high_sr"), - # attrs=idex_cdf_attrs.tof_low_attrs.output(), attrs=idex_attrs.get_variable_attributes("tof_low_attrs"), ) tof_mid_xr = xr.DataArray( name="TOF_Mid", data=[self._parse_high_sample_waveform(self.TOF_Mid_bits)], dims=("epoch", "time_high_sr"), - # attrs=idex_cdf_attrs.tof_mid_attrs.output(), attrs=idex_attrs.get_variable_attributes("tof_mid_attrs"), ) target_high_xr = xr.DataArray( name="Target_High", data=[self._parse_low_sample_waveform(self.Target_High_bits)], dims=("epoch", "time_low_sr"), - # attrs=idex_cdf_attrs.target_high_attrs.output(), attrs=idex_attrs.get_variable_attributes("target_high_attrs"), ) target_low_xr = xr.DataArray( name="Target_Low", data=[self._parse_low_sample_waveform(self.Target_Low_bits)], dims=("epoch", "time_low_sr"), - # attrs=idex_cdf_attrs.target_low_attrs.output(), attrs=idex_attrs.get_variable_attributes("target_low_attrs"), ) ion_grid_xr = xr.DataArray( name="Ion_Grid", data=[self._parse_low_sample_waveform(self.Ion_Grid_bits)], dims=("epoch", "time_low_sr"), - # attrs=idex_cdf_attrs.ion_grid_attrs.output(), attrs=idex_attrs.get_variable_attributes("ion_grid_attrs"), ) @@ -593,7 +576,6 @@ def process(self) -> xr.Dataset: name="epoch", data=[self.impact_time], dims=("epoch"), - # attrs=ConstantCoordinates.EPOCH, attrs=idex_attrs.get_variable_attributes("epoch"), ) @@ -601,7 +583,6 @@ def process(self) -> xr.Dataset: name="time_low_sr", data=[self._calc_low_sample_resolution(len(target_low_xr[0]))], dims=("epoch", "time_low_sr_dim"), - # attrs=idex_cdf_attrs.low_sr_attrs.output(), attrs=idex_attrs.get_variable_attributes("low_sr_attrs"), ) @@ -609,12 +590,11 @@ def process(self) -> xr.Dataset: name="time_high_sr", data=[self._calc_high_sample_resolution(len(tof_low_xr[0]))], dims=("epoch", "time_high_sr_dim"), - # attrs=idex_cdf_attrs.high_sr_attrs.output(), attrs=idex_attrs.get_variable_attributes("high_sr_attrs"), ) # Combine to return a dataset object - return xr.Dataset( + dataset = xr.Dataset( data_vars={ "TOF_Low": tof_low_xr, "TOF_High": tof_high_xr, @@ -630,3 +610,26 @@ def process(self) -> xr.Dataset: "time_high_sr": time_high_sr_xr, }, ) + + return dataset + + +def get_idex_attrs(data_version: str) -> ImapCdfAttributes: + """ + Load in CDF attributes for IDEX instrument. + + Parameters + ---------- + data_version : str + Data version for CDF filename, in the format "vXXX". + + Returns + ------- + idex_attrs : ImapCdfAttributes + Imap object with l1a attribute files loaded in. 
+ """ + idex_attrs = ImapCdfAttributes() + idex_attrs.add_instrument_global_attrs("idex") + idex_attrs.add_instrument_variable_attrs("idex", "l1") + idex_attrs.add_global_attribute("Data_version", data_version) + return idex_attrs diff --git a/imap_processing/tests/idex/test_idex_l0.py b/imap_processing/tests/idex/test_idex_l0.py index 3309be7e8..131640619 100644 --- a/imap_processing/tests/idex/test_idex_l0.py +++ b/imap_processing/tests/idex/test_idex_l0.py @@ -4,34 +4,61 @@ import numpy as np import pytest +import xarray as xr from imap_processing import imap_module_directory from imap_processing.idex.l1.idex_l1 import PacketParser @pytest.fixture(scope="session") -def decom_test_data(): +def decom_test_data() -> xr.Dataset: + """Return a ``xarray`` dataset containing test data. + + Returns + ------- + dataset : xarray.Dataset + A ``xarray`` dataset containing the test data + """ test_file = Path( f"{imap_module_directory}/tests/idex/imap_idex_l0_raw_20230725_v001.pkts" ) - return PacketParser(test_file, "001") + return PacketParser(test_file, "001").data + + +def test_idex_decom_length(decom_test_data: xr.Dataset): + """Verify that there are 6 data variables in the output. + + Parameters + ---------- + decom_test_data : xarray.Dataset + The dataset to test with + """ + assert len(decom_test_data) == 42 + +def test_idex_decom_event_num(decom_test_data: xr.Dataset): + """Verify that 19 impacts were gathered by the test data. -def test_idex_decom_length(decom_test_data): - # Verify that there are 6 data variables in the output - assert len(decom_test_data.data) == 42 + Parameters + ---------- + decom_test_data : xarray.Dataset + The dataset to test with + """ + for var in decom_test_data: + assert len(decom_test_data[var]) == 19 -def test_idex_decom_event_num(decom_test_data): - # Verify that 19 impacts were gathered by the test data - for var in decom_test_data.data: - assert len(decom_test_data.data[var]) == 19 +def test_idex_tof_high_data(decom_test_data: xr.Dataset): + """Verify that a sample of the data is correct. + ``impact_14_tof_high_data.txt`` has been verified correct by the IDEX team -def test_idex_tof_high_data(decom_test_data): - # Verify that a sample of the data is correct - # impact_14_tof_high_data.txt has been verified correct by the IDEX team + Parameters + ---------- + decom_test_data : xarray.Dataset + The dataset to test with + """ with open(f"{imap_module_directory}/tests/idex/impact_14_tof_high_data.txt") as f: data = np.array([int(line.rstrip("\n")) for line in f]) - assert (decom_test_data.data["TOF_High"][13].data == data).all() + assert (decom_test_data["TOF_High"][13].data == data).all() diff --git a/imap_processing/tests/idex/test_idex_l1.py b/imap_processing/tests/idex/test_idex_l1.py index 0a7463a56..d7ca35e15 100644 --- a/imap_processing/tests/idex/test_idex_l1.py +++ b/imap_processing/tests/idex/test_idex_l1.py @@ -13,15 +13,28 @@ @pytest.fixture() -def decom_test_data(): +def decom_test_data() -> xr.Dataset: + """Return a ``xarray`` dataset containing test data. + + Returns + ------- + dataset : xarray.Dataset + A ``xarray`` dataset containing the test data + """ test_file = Path( f"{imap_module_directory}/tests/idex/imap_idex_l0_raw_20230725_v001.pkts" ) return PacketParser(test_file, "v001").data -def test_idex_cdf_file(decom_test_data): - # Verify that a CDF file can be created with no errors thrown by xarray_to_cdf +def test_idex_cdf_file(decom_test_data: xr.Dataset): + """Verify the CDF file can be created with no errors. 
+
+    Parameters
+    ----------
+    decom_test_data : xarray.Dataset
+        The dataset to test with.
+    """
     file_name = write_cdf(decom_test_data)
 
@@ -29,16 +42,28 @@ def test_idex_cdf_file(decom_test_data):
     assert file_name.name == "imap_idex_l1_sci_20250724_v001.cdf"
 
 
-def test_bad_cdf_attributes(decom_test_data):
-    # Deliberately mess up the attributes to verify that an ISTPError is raised
+def test_bad_cdf_attributes(decom_test_data: xr.Dataset):
+    """Ensure an ``ISTPError`` is raised when using bad CDF attributes.
+
+    Parameters
+    ----------
+    decom_test_data : xarray.Dataset
+        The dataset to test with.
+    """
     del decom_test_data["TOF_High"].attrs["DEPEND_1"]
     with pytest.raises(ISTPError):
         write_cdf(decom_test_data)
 
 
-def test_bad_cdf_file_data(decom_test_data):
-    # Deliberately mess up the data to verify that an ISTPError is raised
+def test_bad_cdf_file_data(decom_test_data: xr.Dataset):
+    """Ensure an ``ISTPError`` is raised when using bad data.
+
+    Parameters
+    ----------
+    decom_test_data : xarray.Dataset
+        The dataset to test with.
+    """
     bad_data_attrs = {
         "CATDESC": "Bad_Data",
         "DEPEND_0": "epoch",
@@ -66,9 +91,16 @@ def test_bad_cdf_file_data(decom_test_data):
         write_cdf(decom_test_data)
 
 
-def test_idex_tof_high_data_from_cdf(decom_test_data):
-    # Verify that a sample of the data is correct inside the CDF file
-    # impact_14_tof_high_data.txt has been verified correct by the IDEX team
+def test_idex_tof_high_data_from_cdf(decom_test_data: xr.Dataset):
+    """Verify that a sample of the data is correct inside the CDF file.
+
+    ``impact_14_tof_high_data.txt`` has been verified correct by the IDEX team.
+
+    Parameters
+    ----------
+    decom_test_data : xarray.Dataset
+        The dataset to test with.
+    """
     with open(f"{imap_module_directory}/tests/idex/impact_14_tof_high_data.txt") as f:
         data = np.array([int(line.rstrip()) for line in f])
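Usage sketch (not part of the patch): a minimal end-to-end run of the reworked L1 flow, pieced together from the code this diff touches. It uses PacketParser and its .data attribute exactly as the tests do; the import location of write_cdf is an assumption (the tests call it directly without showing the import), so adjust that path to wherever it lives in this repository.

.. code-block:: python

    from pathlib import Path

    from imap_processing import imap_module_directory
    from imap_processing.cdf.utils import write_cdf  # assumed import location
    from imap_processing.idex.l1.idex_l1 import PacketParser

    # L0 test file shipped with the repository (same file the fixtures above use)
    l0_file = Path(
        f"{imap_module_directory}/tests/idex/imap_idex_l0_raw_20230725_v001.pkts"
    )

    # Decom every packet in the L0 file; the parsed dust events are combined
    # into a single xarray.Dataset exposed on the .data attribute
    l1_dataset = PacketParser(l0_file, data_version="v001").data

    # Write the L1 CDF; per test_idex_cdf_file this should produce
    # imap_idex_l1_sci_20250724_v001.cdf
    cdf_path = write_cdf(l1_dataset)
    print(cdf_path.name)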