From 940348a0dc8a5ad382e8fe385ea0d28fba2b7dd7 Mon Sep 17 00:00:00 2001 From: Matthew Bourque Date: Tue, 3 Sep 2024 11:46:16 -0600 Subject: [PATCH] Addressed review comments --- imap_processing/codice/codice_l1a.py | 34 ++++----- .../tests/codice/test_codice_l1a.py | 74 +++++++++---------- 2 files changed, 50 insertions(+), 58 deletions(-) diff --git a/imap_processing/codice/codice_l1a.py b/imap_processing/codice/codice_l1a.py index 78a5a626f..f1776a4fa 100644 --- a/imap_processing/codice/codice_l1a.py +++ b/imap_processing/codice/codice_l1a.py @@ -22,7 +22,6 @@ from imap_processing import imap_module_directory from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes -from imap_processing.cdf.utils import met_to_j2000ns from imap_processing.codice import constants from imap_processing.codice.decompress import decompress from imap_processing.codice.utils import CODICEAPID @@ -100,7 +99,9 @@ def configure_data_products(self, apid: int) -> None: self.dataset_name = config["dataset_name"] self.instrument = config["instrument"] - def create_science_dataset(self, met: np.int64, data_version: str) -> xr.Dataset: + def create_science_dataset( + self, packet: xr.Dataset, data_version: str + ) -> xr.Dataset: """ Create an ``xarray`` dataset for the unpacked science data. @@ -108,8 +109,8 @@ def create_science_dataset(self, met: np.int64, data_version: str) -> xr.Dataset Parameters ---------- - met : numpy.int64 - The mission elapsed time of the packet, used to determine epoch data. + packet : xarray.Dataset + The packet to process. data_version : str Version of the data product being created. 
@@ -126,7 +127,7 @@ def create_science_dataset(self, met: np.int64, data_version: str) -> xr.Dataset # Define coordinates epoch = xr.DataArray( - [met_to_j2000ns(met)], + packet.epoch, name="epoch", dims=["epoch"], attrs=cdf_attrs.get_variable_attributes("epoch"), @@ -371,9 +372,6 @@ def create_event_dataset( elif apid == CODICEAPID.COD_HI_PHA: dataset_name = "imap_codice_l1a_hi_pha" - # Determine the start time of the packet - met = packet.acq_start_seconds.data[0] - # Extract the data # event_data = packet.event_data.data (Currently turned off, see TODO) @@ -384,7 +382,7 @@ def create_event_dataset( # Define coordinates epoch = xr.DataArray( - met_to_j2000ns([met]), + packet.epoch, name="epoch", dims=["epoch"], attrs=cdf_attrs.get_variable_attributes("epoch"), @@ -426,10 +424,7 @@ def create_hskp_dataset( cdf_attrs.add_global_attribute("Data_version", data_version) epoch = xr.DataArray( - met_to_j2000ns( - packet.shcoarse.data, - reference_epoch=np.datetime64("2010-01-01T00:01:06.184", "ns"), - ), + packet.epoch, name="epoch", dims=["epoch"], attrs=cdf_attrs.get_variable_attributes("epoch"), @@ -493,10 +488,10 @@ def get_params(packet: xr.Dataset) -> tuple[int, int, int, int]: view_id : int Provides information about how data was collapsed and/or compressed. 
""" - table_id = packet.table_id.data[0] - plan_id = packet.plan_id.data[0] - plan_step = packet.plan_step.data[0] - view_id = packet.view_id.data[0] + table_id = int(packet.table_id.data) + plan_id = int(packet.plan_id.data) + plan_step = int(packet.plan_step.data) + view_id = int(packet.view_id.data) return table_id, plan_id, plan_step, view_id @@ -534,9 +529,6 @@ def process_codice_l1a(file_path: Path, data_version: str) -> xr.Dataset: dataset = create_event_dataset(apid, packet, data_version) elif apid in constants.APIDS_FOR_SCIENCE_PROCESSING: - # Determine the start time of the packet - met = packet.acq_start_seconds.data[0] - # Extract the data science_values = packet.data.data[0] @@ -547,7 +539,7 @@ def process_codice_l1a(file_path: Path, data_version: str) -> xr.Dataset: pipeline = CoDICEL1aPipeline(table_id, plan_id, plan_step, view_id) pipeline.configure_data_products(apid) pipeline.unpack_science_data(science_values) - dataset = pipeline.create_science_dataset(met, data_version) + dataset = pipeline.create_science_dataset(packet, data_version) logger.info(f"\nFinal data product:\n{dataset}\n") diff --git a/imap_processing/tests/codice/test_codice_l1a.py b/imap_processing/tests/codice/test_codice_l1a.py index ff5302b48..1d897d3ec 100644 --- a/imap_processing/tests/codice/test_codice_l1a.py +++ b/imap_processing/tests/codice/test_codice_l1a.py @@ -34,23 +34,6 @@ (1, 1, 1, 128), # lo-nsw-species (1, 128), # lo-pha ] -EXPECTED_ARRAY_SIZES = [ - 129, # hskp - 1, # hi-counters-aggregated - 3, # hi-counters-singles - 8, # hi-omni - 4, # hi-sectored - 0, # hi-pha - 3, # lo-counters-aggregated - 3, # lo-counters-singles - 6, # lo-sw-angular - 3, # lo-nsw-angular - 7, # lo-sw-priority - 4, # lo-nsw-priority - 18, # lo-sw-species - 10, # lo-nsw-species - 0, # lo-pha -] EXPECTED_LOGICAL_SOURCE = [ "imap_codice_l1a_hskp", "imap_codice_l1a_hi-counters-aggregated", @@ -68,6 +51,23 @@ "imap_codice_l1a_lo-nsw-species", "imap_codice_l1a_lo-pha", ] +EXPECTED_NUM_VARIABLES 
= [ + 129, # hskp + 1, # hi-counters-aggregated + 3, # hi-counters-singles + 8, # hi-omni + 4, # hi-sectored + 0, # hi-pha + 3, # lo-counters-aggregated + 3, # lo-counters-singles + 6, # lo-sw-angular + 3, # lo-nsw-angular + 7, # lo-sw-priority + 4, # lo-nsw-priority + 18, # lo-sw-species + 10, # lo-nsw-species + 0, # lo-pha +] @pytest.fixture(params=TEST_PACKETS) @@ -134,26 +134,6 @@ def test_l1a_data_array_shape(test_l1a_data: xr.Dataset, expected_shape: tuple): assert dataset[variable].data.shape == expected_shape -@pytest.mark.parametrize( - "test_l1a_data, expected_size", - list(zip(TEST_PACKETS, EXPECTED_ARRAY_SIZES)), - indirect=["test_l1a_data"], -) -def test_l1a_data_array_size(test_l1a_data: xr.Dataset, expected_size: int): - """Tests that the data arrays in the generated CDFs have the expected size. - - Parameters - ---------- - test_l1a_data : xarray.Dataset - A ``xarray`` dataset containing the test data - expected_size : int - The expected size of the data array - """ - - dataset = test_l1a_data - assert len(dataset) == expected_size - - @pytest.mark.skip("Awaiting validation data") @pytest.mark.parametrize( - "test_l1a_data, validation_data", @@ -185,3 +165,23 @@ def test_l1a_data_array_values(test_l1a_data: xr.Dataset, validation_data: Path) np.testing.assert_array_equal( validation_data[variable].data, generated_dataset[variable].data[0] ) + + +@pytest.mark.parametrize( + "test_l1a_data, expected_num_variables", + list(zip(TEST_PACKETS, EXPECTED_NUM_VARIABLES)), + indirect=["test_l1a_data"], +) +def test_l1a_num_variables(test_l1a_data: xr.Dataset, expected_num_variables: int): + """Tests that the generated CDFs contain the expected number of data variables. + + Parameters + ---------- + test_l1a_data : xarray.Dataset + A ``xarray`` dataset containing the test data + expected_num_variables : int + The expected number of data variables in the CDF + """ + + dataset = test_l1a_data + assert len(dataset) == expected_num_variables