Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Added new tof test data #807

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view

This file was deleted.

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions imap_processing/tests/ultra/unit/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -173,8 +173,8 @@ def decom_test_data(request, xtce_path):
def events_fsw_comparison_theta_0():
"""FSW test data."""
filename = (
"FM45_40P_Phi28p5_BeamCal_LinearScan_phi28.50_theta-0.00"
"_Ultra_Image_Raw_Event_20240207T102746_withFSWcalcs.csv"
"ultra45_raw_sc_ultrarawimg_withFSWcalcs_FM45_40P_Phi28p5_"
"BeamCal_LinearScan_phi2850_theta-000_20240207T102740.csv"
)
return (
Path(sys.modules[__name__.split(".")[0]].__file__).parent
Expand Down
91 changes: 84 additions & 7 deletions imap_processing/tests/ultra/unit/test_ultra_l1b_extended.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,11 @@
import pytest

from imap_processing.ultra.l1b.ultra_l1b_extended import (
CoinType,
StartType,
StopType,
calculate_etof_xc,
get_coincidence_positions,
get_front_x_position,
get_front_y_position,
get_path_length,
Expand All @@ -30,12 +33,11 @@ def yf_fixture(de_dataset, events_fsw_comparison_theta_0):

def test_get_front_x_position(
de_dataset,
events_fsw_comparison_theta_0,
yf_fixture,
):
"""Tests get_front_x_position function."""

df = pd.read_csv(events_fsw_comparison_theta_0)
df_filt = df[df["StartType"] != -1]
df_filt, _, _ = yf_fixture

xf = get_front_x_position(
de_dataset["START_TYPE"].data,
Expand All @@ -50,6 +52,7 @@ def test_get_front_y_position(yf_fixture):
df_filt, d, yf = yf_fixture

assert yf == pytest.approx(df_filt["Yf"].astype("float"), abs=1e-5)
assert d == pytest.approx(df_filt["d"].astype("float"), abs=1e-5)


def test_get_path_length(de_dataset, yf_fixture):
Expand All @@ -68,14 +71,13 @@ def test_get_path_length(de_dataset, yf_fixture):

def test_get_ph_tof_and_back_positions(
de_dataset,
events_fsw_comparison_theta_0,
yf_fixture,
):
"""Tests get_ph_tof_and_back_positions function."""

df = pd.read_csv(events_fsw_comparison_theta_0)
df_filt = df[df["StartType"] != -1]
df_filt, _, _ = yf_fixture

_, _, ph_xb, ph_yb = get_ph_tof_and_back_positions(
ph_tof, _, ph_xb, ph_yb = get_ph_tof_and_back_positions(
de_dataset, df_filt.Xf.astype("float").values, "ultra45"
)

Expand All @@ -87,6 +89,9 @@ def test_get_ph_tof_and_back_positions(

np.testing.assert_array_equal(ph_xb, selected_rows["Xb"].astype("float"))
np.testing.assert_array_equal(ph_yb, selected_rows["Yb"].astype("float"))
np.testing.assert_allclose(
ph_tof, selected_rows["TOF"].astype("float"), atol=1e-5, rtol=0
)


def test_get_ssd_back_position_and_tof_offset(
Expand Down Expand Up @@ -127,3 +132,75 @@ def test_get_ssd_back_position_and_tof_offset(
assert np.all(ssd_number_rt >= 0), "Values in ssd_number_rt out of range."

assert np.all(ssd_number_rt <= 7), "Values in ssd_number_rt out of range."


def test_get_coincidence_positions(de_dataset, yf_fixture):
    """Tests get_coincidence_positions function."""
    df_filt, _, _ = yf_fixture

    # Particle time of flight from start to stop (t2).
    _, particle_tof, _, _ = get_ph_tof_and_back_positions(
        de_dataset, df_filt.Xf.astype("float").values, "ultra45"
    )

    # Keep only events whose stop type is Top or Bottom.
    stop_mask = np.isin(
        de_dataset["STOP_TYPE"], [StopType.Top.value, StopType.Bottom.value]
    )
    stop_indices = np.nonzero(stop_mask)[0]
    de_filtered = de_dataset.isel(epoch=stop_indices)
    expected = df_filt.iloc[stop_indices]

    # Coincidence position and electron time of flight.
    etof, xc = get_coincidence_positions(de_filtered, particle_tof, "ultra45")

    np.testing.assert_allclose(xc, expected["Xc"].astype("float"), atol=1e-4, rtol=0)
    np.testing.assert_allclose(
        etof, expected["eTOF"].astype("float").values, rtol=0, atol=1e-06
    )


def test_calculate_etof_xc(de_dataset, yf_fixture):
    """Tests calculate_etof_xc function."""
    df_filt, _, _ = yf_fixture

    # Particle time of flight from start to stop (t2).
    _, particle_tof, _, _ = get_ph_tof_and_back_positions(
        de_dataset, df_filt.Xf.astype("float").values, "ultra45"
    )

    # Keep only events whose STOP_TYPE is Top or Bottom.
    stop_indices = np.nonzero(
        np.isin(de_dataset["STOP_TYPE"], [StopType.Top.value, StopType.Bottom.value])
    )[0]
    de_filtered = de_dataset.isel(epoch=stop_indices)
    df_filtered = df_filt.iloc[stop_indices]

    # Split the filtered events by coincidence type.
    index_top = np.nonzero(np.isin(de_filtered["COIN_TYPE"], CoinType.Top.value))[0]
    index_bottom = np.nonzero(
        np.isin(de_filtered["COIN_TYPE"], CoinType.Bottom.value)
    )[0]

    de_top = de_filtered.isel(epoch=index_top)
    df_top = df_filtered.iloc[index_top]
    de_bottom = de_filtered.isel(epoch=index_bottom)
    df_bottom = df_filtered.iloc[index_bottom]

    # Compute etof/xc separately for each anode location.
    etof_top, xc_top = calculate_etof_xc(
        de_top, particle_tof[index_top], "ultra45", "TP"
    )
    etof_bottom, xc_bottom = calculate_etof_xc(
        de_bottom, particle_tof[index_bottom], "ultra45", "BT"
    )

    # Top anode: xc is mm; reference table stores hundredths of mm, hence *100.
    np.testing.assert_allclose(
        xc_top * 100, df_top["Xc"].astype("float"), atol=1e-4, rtol=0
    )
    np.testing.assert_allclose(
        etof_top, df_top["eTOF"].astype("float").values, atol=1e-06, rtol=0
    )

    # Bottom anode.
    np.testing.assert_allclose(
        xc_bottom * 100, df_bottom["Xc"].astype("float"), atol=1e-4, rtol=0
    )
    np.testing.assert_allclose(
        etof_bottom, df_bottom["eTOF"].astype("float").values, atol=1e-06, rtol=0
    )
3 changes: 2 additions & 1 deletion imap_processing/ultra/l1b/lookup_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
import numpy as np
import numpy.typing as npt
import pandas as pd
import xarray as xr

from imap_processing import imap_module_directory

Expand Down Expand Up @@ -46,7 +47,7 @@ def get_y_adjust(dy_lut: np.ndarray) -> npt.NDArray:
return _YADJUST_DF["dYAdj"].iloc[dy_lut].values


def get_norm(dn: np.ndarray, key: str, file_label: str) -> npt.NDArray:
def get_norm(dn: xr.DataArray, key: str, file_label: str) -> npt.NDArray:
"""
Correct mismatches between the stop Time to Digital Converters (TDCs).

Expand Down
119 changes: 115 additions & 4 deletions imap_processing/ultra/l1b/ultra_l1b_extended.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,13 @@ class StopType(Enum):
SSD: ClassVar[list[int]] = [8, 9, 10, 11, 12, 13, 14, 15]


class CoinType(Enum):
    """
    Coincidence type of a direct event: 1=Top, 2=Bottom.

    Selects which coincidence-anode calibration location ('TP' or 'BT')
    applies when computing etof/xc.
    """

    # Coincidence registered on the top anode.
    Top = 1
    # Coincidence registered on the bottom anode.
    Bottom = 2


def get_front_x_position(start_type: ndarray, start_position_tdc: ndarray) -> ndarray:
"""
Calculate the front xf position.
Expand Down Expand Up @@ -161,7 +168,7 @@ def get_ph_tof_and_back_positions(
Returns
-------
tof : np.array
Time of flight (tenths of a nanosecond).
Time of flight (nanoseconds).
t2 : np.array
Particle time of flight from start to stop (tenths of a nanosecond).
xb : np.array
Expand Down Expand Up @@ -236,9 +243,6 @@ def get_ph_tof_and_back_positions(
stop_type_bottom
] * get_image_params("XFTTOF")

# Multiply by 100 to get tenths of a nanosecond.
tof = tof * 100

return tof, t2, xb, yb


Expand Down Expand Up @@ -315,3 +319,110 @@ def get_ssd_back_position_and_tof_offset(
] = get_image_params(f"TOFSSDRTOFF{i}")

return yb, tof_offset, ssd_number


def calculate_etof_xc(
    de_subset: xarray.Dataset, particle_tof: np.ndarray, sensor: str, location: str
) -> tuple[np.ndarray, np.ndarray]:
    """
    Calculate the etof and xc values for the given subset.

    Parameters
    ----------
    de_subset : xarray.Dataset
        Subset of the dataset for a specific COIN_TYPE.
    particle_tof : np.ndarray
        Particle time of flight (i.e. from start to stop).
    sensor : str
        Sensor name.
    location : str
        Location indicator, either 'TP' (Top) or 'BT' (Bottom).

    Returns
    -------
    etof : np.ndarray
        Time for the electrons to travel back to the coincidence
        anode (tenths of a nanosecond).
    xc : np.ndarray
        X coincidence position (millimeters).
    """
    # Normalize the north/south coincidence TDCs via lookup tables (CoinNNorm
    # and CoinSNorm).
    north_norm = get_norm(de_subset["COIN_NORTH_TDC"], "CoinN", sensor)
    south_norm = get_norm(de_subset["COIN_SOUTH_TDC"], "CoinS", sensor)

    # X coincidence position from the normalized TDC difference (millimeter).
    xc = (
        get_image_params(f"XCOIN{location}SC") * (south_norm - north_norm)
        + get_image_params(f"XCOIN{location}OFF")
    )

    # Electron travel time back to the coincidence anode, from the TDC sum.
    travel_time = get_image_params("ETOFSC") * (
        north_norm + south_norm
    ) + get_image_params(f"ETOF{location}OFF")

    # Multiply by 10 to convert to tenths of a nanosecond, then subtract the
    # particle's start-to-stop tof.
    etof = travel_time * 10 - particle_tof

    return etof, xc


def get_coincidence_positions(
    de_dataset: xarray.Dataset, particle_tof: np.ndarray, sensor: str
) -> tuple[np.ndarray, np.ndarray]:
    """
    Calculate coincidence positions.

    Calculate time for electrons to travel back to
    the coincidence anode (etof) and the x coincidence position (xc).

    The tof measured by the coincidence anode consists of the particle
    tof from start to stop, plus the time for the electrons to travel
    back to the coincidence anode.

    Further description is available on pages 34-35 of
    IMAP-Ultra Flight Software Specification document
    (7523-9009_Rev_-.pdf).

    Parameters
    ----------
    de_dataset : xarray.Dataset
        Data in xarray format.
    particle_tof : np.ndarray
        Particle time of flight (i.e. from start to stop)
        (tenths of a nanosecond).
    sensor : str
        Sensor name.

    Returns
    -------
    etof : np.ndarray
        Time for the electrons to travel back to
        coincidence anode (tenths of a nanosecond).
    xc : np.ndarray
        X coincidence position (hundredths of a millimeter).
    """
    # Split events by coincidence anode type (1=Top, 2=Bottom).
    index_top = np.nonzero(np.isin(de_dataset["COIN_TYPE"], CoinType.Top.value))[0]
    de_top = de_dataset.isel(epoch=index_top)

    index_bottom = np.nonzero(np.isin(de_dataset["COIN_TYPE"], CoinType.Bottom.value))[
        0
    ]
    de_bottom = de_dataset.isel(epoch=index_bottom)

    # Output arrays aligned with the input event order; entries whose
    # COIN_TYPE is neither Top nor Bottom remain zero.
    etof = np.zeros(len(de_dataset["COIN_TYPE"]), dtype=np.float64)
    xc_array = np.zeros(len(de_dataset["COIN_TYPE"]), dtype=np.float64)

    # Normalized TDCs
    # For the stop anode, there are mismatches between the coincidence TDCs,
    # i.e., CoinN and CoinS. They must be normalized via lookup tables.
    etof_top, xc_top = calculate_etof_xc(de_top, particle_tof[index_top], sensor, "TP")
    etof[index_top] = etof_top
    xc_array[index_top] = xc_top

    etof_bottom, xc_bottom = calculate_etof_xc(
        de_bottom, particle_tof[index_bottom], sensor, "BT"
    )
    etof[index_bottom] = etof_bottom
    xc_array[index_bottom] = xc_bottom

    # Convert to hundredths of a millimeter by multiplying times 100
    return etof, xc_array * 100
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
SHCOARSE,XFTSC,XFTLTOFF,XFTRTOFF,TOFSC,TOFTPOFF,TOFBTOFF,XFTTOF,XCOINTPSC,XCOINTPOFF,XCOINBTSC,XCOINBTOFF,ETOFSC,ETOFTPOFF,ETOFBTOFF,TOFDIFFTPMIN,TOFDIFFTPMAX,TOFDIFFBTMIN,TOFDIFFBTMAX,ETOFMIN,ETOFMAX,ETOFSLOPE1,ETOFOFF1,ETOFSLOPE2,ETOFOFF2,SPTPPHOFF,SPBTPHOFF,YBKSSD0,YBKSSD1,YBKSSD2,YBKSSD3,YBKSSD4,YBKSSD5,YBKSSD6,YBKSSD7,TOFSSDSC,TOFSSDLTOFF0,TOFSSDLTOFF1,TOFSSDLTOFF2,TOFSSDLTOFF3,TOFSSDLTOFF4,TOFSSDLTOFF5,TOFSSDLTOFF6,TOFSSDLTOFF7,TOFSSDRTOFF0,TOFSSDRTOFF1,TOFSSDRTOFF2,TOFSSDRTOFF3,TOFSSDRTOFF4,TOFSSDRTOFF5,TOFSSDRTOFF6,TOFSSDRTOFF7,TOFSSDTOTOFF,PATHSTEEPTHRESH,PATHMEDIUMTHRESH
,0.172998047,49.3,48.25,0.5,-528,-525,0.001831055,0.067929688,41.75,0.067929688,-39.79492188,0.1,-44.5,-44.5,,,,,,,,,,,,,29.3,37.3,7.1,15.1,-15.1,-7.1,-37.3,-29.3,0.196484375,-6,-7.3,-3.8,-4.2,-3.8,-3.7,-6.3,-5,-5,-6.3,-3.7,-3.8,-4,-4.2,-7.3,-6,5.9,,
,0.172998047,49.3,48.25,0.5,-528,-525,0.001831055,0.067929688,41.75,0.067929688,-40.75,0.1,-44.5,-44.5,,,,,,,,,,,,,29.3,37.3,7.1,15.1,-15.1,-7.1,-37.3,-29.3,0.196484375,-6,-7.3,-3.8,-4.2,-3.8,-3.7,-6.3,-5,-5,-6.3,-3.7,-3.8,-4,-4.2,-7.3,-6,5.9,,
Loading