Commit: swe update
daralynnrhode committed Jul 30, 2024
1 parent e2270dc commit 0cecfa5
Showing 2 changed files with 14 additions and 8 deletions.
5 changes: 3 additions & 2 deletions imap_processing/swe/l1a/swe_science.py
@@ -112,7 +112,7 @@ def swe_science(decom_data: list, data_version: str) -> xr.Dataset:
science_array = []
raw_science_array = []

-metadata_arrays: np.array = collections.defaultdict(list)
+metadata_arrays: dict = collections.defaultdict(list)

# We know we can only have 8 bit numbers input, so iterate over all
# possibilities once up front
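
Aside: the comment above refers to precomputing the decompression lookup for every possible 8-bit value so that each packet only needs array indexing. A minimal sketch of that idea, not part of this commit, with a placeholder rule standing in for the instrument's real decompression table:

import numpy as np

def decompress_byte(value: int) -> int:
    # Placeholder for SWE's actual 8-bit decompression rule; it is here only
    # to show the table being built once up front.
    return value

# All 256 possible 8-bit inputs are decompressed a single time and cached.
decompression_table = np.array([decompress_byte(v) for v in range(256)], dtype=np.int64)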
@@ -133,7 +133,8 @@ def swe_science(decom_data: list, data_version: str) -> xr.Dataset:
# where 1260 = 180 x 7 CEMs
# Take the "raw_counts" indices/counts mapping from
# decompression_table and then reshape the return
-uncompress_data = np.take(decompression_table, raw_counts).reshape(180, 7)
+uncompress_data_ndarray: np.ndarray = np.take(decompression_table, raw_counts)
+uncompress_data = uncompress_data_ndarray.reshape(180, 7)
# Save raw counts data as well
raw_counts = raw_counts.reshape(180, 7)
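
For reference, the two new statements above are equivalent to the original chained call: np.take uses each raw 8-bit count as an index into the 256-entry decompression table, and the flat 1260-element result is reshaped to 180 angle bins x 7 CEMs. A self-contained sketch with made-up inputs, not part of this commit:

import numpy as np

rng = np.random.default_rng(0)

# Stand-ins: an identity lookup table and one packet's 1260 raw 8-bit counts
# (1260 = 180 x 7 CEMs, as in the comment above).
decompression_table = np.arange(256, dtype=np.int64)
raw_counts = rng.integers(0, 256, size=1260, dtype=np.uint8)

# Each raw count indexes into the table, then the result becomes (180, 7).
uncompress_data_ndarray: np.ndarray = np.take(decompression_table, raw_counts)
uncompress_data = uncompress_data_ndarray.reshape(180, 7)
assert uncompress_data.shape == (180, 7)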

17 changes: 11 additions & 6 deletions imap_processing/swe/l1b/swe_l1b_science.py
@@ -114,7 +114,7 @@ def deadtime_correction(counts: np.ndarray, acq_duration: int) -> np.ndarray:
deadtime = 1.5e-6
correct = 1.0 - (deadtime * counts / acq_duration)
correct = np.maximum(0.1, correct)
-corrected_count = np.divide(counts, correct)
+corrected_count: np.ndarray = np.divide(counts, correct)
return corrected_count
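
A small numeric sketch of the correction above, not part of this commit and using made-up values, showing that the 0.1 floor keeps the scaling bounded even when the estimated live fraction would go negative:

import numpy as np

deadtime = 1.5e-6
counts = np.array([10.0, 1.0e5, 1.0e7])  # made-up count values
acq_duration = 1                         # made-up duration, same units as the function expects

correct = np.maximum(0.1, 1.0 - (deadtime * counts / acq_duration))
corrected_count: np.ndarray = np.divide(counts, correct)
print(correct)          # [0.999985 0.85 0.1]
print(corrected_count)  # low counts barely change; saturated counts scale by at most 10x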


@@ -139,7 +139,8 @@ def convert_counts_to_rate(data: np.ndarray, acq_duration: int) -> np.ndarray:
# convert milliseconds to seconds
# Todo: check with SWE team about int or float types.
acq_duration = int(acq_duration / 1000.0)
-return data / acq_duration
+count_rates: np.ndarray = data / acq_duration
+return count_rates
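
Note on the Todo above: int() truncates sub-second durations, which changes the resulting rates. An illustration with a made-up 1500 ms duration, not part of this commit:

import numpy as np

counts = np.array([100.0, 200.0])
acq_duration_ms = 1500  # made-up value, in milliseconds

rate_truncated = counts / int(acq_duration_ms / 1000.0)  # divides by 1 s
rate_float = counts / (acq_duration_ms / 1000.0)         # divides by 1.5 s
print(rate_truncated)  # [100. 200.]
print(rate_float)      # approximately [66.7, 133.3]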


def calculate_calibration_factor(time: int) -> None:
@@ -231,7 +232,7 @@ def populate_full_cycle_data(
# in odd column every six steps.
if esa_table_num == 0:
# create new full cycle data array
-full_cycle_data = np.zeros((24, 30, 7))
+full_cycle_data: np.ndarray = np.zeros((24, 30, 7))

# Initialize esa_step_number and column_index.
# esa_step_number goes from 0 to 719 range where
@@ -294,7 +295,8 @@ def find_cycle_starts(cycles: np.ndarray) -> np.ndarray:
Array of indices of start cycle.
"""
if cycles.size < 4:
-return np.array([], np.int64)
+start_cycle_: np.ndarray = np.array([], np.int64)
+return start_cycle_

# calculate difference between consecutive cycles
diff = cycles[1:] - cycles[:-1]
@@ -309,7 +311,8 @@ def find_cycle_starts(cycles: np.ndarray) -> np.ndarray:
# [0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0] # And all?
ione = diff == 1
valid = (cycles == 0)[:-3] & ione[:-2] & ione[1:-1] & ione[2:]
-return np.where(valid)[0]
+start_cycle: np.ndarray = np.where(valid)[0]
+return start_cycle
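
Taken together with the collapsed lines above, the mask marks an index as a cycle start only when it holds quarter-cycle 0 and the next three differences are all +1. A standalone sketch of the same logic on a made-up sequence, not part of this commit:

import numpy as np

def find_cycle_starts_sketch(cycles: np.ndarray) -> np.ndarray:
    # Mirrors the logic above: a start must hold quarter-cycle 0 and be
    # followed by 1, 2, 3 (three consecutive differences of +1).
    if cycles.size < 4:
        return np.array([], np.int64)
    diff = cycles[1:] - cycles[:-1]
    ione = diff == 1
    valid = (cycles == 0)[:-3] & ione[:-2] & ione[1:-1] & ione[2:]
    return np.where(valid)[0]

# Made-up quarter-cycle sequence; the middle cycle is broken (missing the 2).
print(find_cycle_starts_sketch(np.array([0, 1, 2, 3, 0, 1, 3, 0, 1, 2, 3])))  # [0 7]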


def get_indices_of_full_cycles(quarter_cycle: np.ndarray) -> np.ndarray:
@@ -333,7 +336,9 @@ def get_indices_of_full_cycles(quarter_cycle: np.ndarray) -> np.ndarray:
# Eg. [[0, 1, 2, 3]]
# then we add both of them together to get an array of shape(n, 4)
# Eg. [[3, 4, 5, 6], [8, 9, 10, 11]]
-full_cycles_indices = indices_of_start[..., None] + np.arange(4)[None, ...]
+full_cycles_indices: np.ndarray = (
+    indices_of_start[..., None] + np.arange(4)[None, ...]
+)
return full_cycles_indices.reshape(-1)
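
The broadcast above can be traced with the example values from the comment itself: start indices 3 and 8 become rows [3, 4, 5, 6] and [8, 9, 10, 11], which are then flattened. Not part of this commit:

import numpy as np

indices_of_start = np.array([3, 8])  # example start indices from the comment above

# (n, 1) column of starts + (1, 4) row of offsets broadcasts to (n, 4).
full_cycles_indices: np.ndarray = (
    indices_of_start[..., None] + np.arange(4)[None, ...]
)
print(full_cycles_indices)              # [[ 3  4  5  6]
                                        #  [ 8  9 10 11]]
print(full_cycles_indices.reshape(-1))  # [ 3  4  5  6  8  9 10 11]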


