Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Mag l1a compression #801

Merged
Merged
Show file tree
Hide file tree
Changes from 28 commits
Commits
Show all changes
33 commits
Select commit Hold shift + click to select a range
e61ac01
Setting up compressed vector processing
maxinelasp Aug 7, 2024
0b866d3
First pass at compressed processing, and tests
maxinelasp Aug 20, 2024
40b589c
First pass at compression
maxinelasp Aug 23, 2024
7eab568
some tidying up
maxinelasp Aug 23, 2024
aa27b8f
Updating to include range data section processing/tests
maxinelasp Aug 27, 2024
3eee52c
Attempting to add uncompression steps
maxinelasp Aug 27, 2024
ed0db75
got it working
maxinelasp Aug 29, 2024
23e6ab4
Finished compression algorithm
maxinelasp Aug 29, 2024
75240fd
Merge branch 'upstream-dev' into mag_l1a_compression
maxinelasp Aug 29, 2024
add62a4
Updating ruff and mypy errors
maxinelasp Aug 30, 2024
4763b02
removing unneeded changes
maxinelasp Aug 30, 2024
5ea3a9f
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Aug 30, 2024
0eee19c
Updating docs
maxinelasp Aug 30, 2024
7bb2cc4
Merge branch 'mag_l1a_compression' of github.com:maxinelasp/imap_proc…
maxinelasp Aug 30, 2024
4827866
Fixing error with update from MAG team
maxinelasp Sep 3, 2024
dd1c889
Fixing compressed data errors with range
maxinelasp Sep 4, 2024
fd11ccf
PR comments
maxinelasp Sep 4, 2024
5af91bd
fixing mypy version and error
maxinelasp Sep 5, 2024
c466d97
Mypy change
maxinelasp Sep 5, 2024
ffe575c
PR updates
maxinelasp Sep 5, 2024
611b462
Fixing issues now that we have real data - temporary update
maxinelasp Sep 6, 2024
f5e8ba4
Fixing tests
maxinelasp Sep 11, 2024
12fdcdc
Adding new tests
maxinelasp Sep 12, 2024
84c91bc
Pulling out duplicate code
maxinelasp Sep 13, 2024
306769a
PR updates
maxinelasp Sep 16, 2024
e6b0cd3
Merge branch 'upstream-dev' into mag_l1a_compression
maxinelasp Sep 16, 2024
9e05ce8
correcting filler byte in tests
maxinelasp Sep 16, 2024
b6c7883
Correcting pre-commit
maxinelasp Sep 16, 2024
44c3f52
Addressing comments
maxinelasp Sep 19, 2024
b8061b3
Merge branch 'upstream-dev' into mag_l1a_compression
maxinelasp Sep 19, 2024
0f64957
fixing poetry file
maxinelasp Sep 19, 2024
eaa6d8e
fixing tests + pre commit
maxinelasp Sep 19, 2024
cfc9722
Fix tests plus docs
maxinelasp Sep 19, 2024
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ repos:
- id: numpydoc-validation
exclude: '^imap_processing/tests/|.*test.*'
- repo: https://github.com/pre-commit/mirrors-mypy
rev: 'v1.10.0'
rev: 'v1.10.1'
hooks:
- id: mypy
exclude: .*(tests|docs).*
Expand Down
3 changes: 2 additions & 1 deletion examples/Dockerfile.processing
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,8 @@ FROM public.ecr.aws/docker/library/python:3.10-slim
# TODO: delete this section once imap_processing is released
ARG DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install -y git
RUN pip install git+https://github.com/IMAP-Science-Operations-Center/imap_processing.git@dev
#RUN pip install git+https://github.com/IMAP-Science-Operations-Center/imap_processing.git@dev
RUN pip install git+https://github.com/maxinelasp/imap_processing.git@mag_l1a_compression

# Uncomment this once imap_processing is released
# RUN pip install imap_processing
Expand Down
50 changes: 50 additions & 0 deletions imap_processing/mag/constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,8 @@

from enum import Enum

import numpy as np


class DataMode(Enum):
"""
Expand Down Expand Up @@ -55,3 +57,51 @@ class PrimarySensor(Enum):

MAGO = 0
MAGI = 1


# First 40 Fibonacci numbers starting from 1, 2 (i.e. 1, 2, 3, 5, 8, ...).
# Added alongside the MAG L1a compression support in this PR — presumably the
# lookup table for Fibonacci (de)coding of compressed vectors; confirm against
# the decompression routine.
FIBONACCI_SEQUENCE = [1, 2]
while len(FIBONACCI_SEQUENCE) < 40:
    FIBONACCI_SEQUENCE.append(FIBONACCI_SEQUENCE[-1] + FIBONACCI_SEQUENCE[-2])

# Largest value representable by a 16-bit unsigned fine-time field (65535).
MAX_FINE_TIME = np.iinfo(np.uint16).max
# Number of vector components per sample (three axes).
AXIS_COUNT = 3
# Bit width of the per-vector range field.
RANGE_BIT_WIDTH = 2
20 changes: 7 additions & 13 deletions imap_processing/mag/l0/mag_l0_data.py
Original file line number Diff line number Diff line change
Expand Up @@ -106,19 +106,13 @@ def __post_init__(self) -> None:
Also convert encoded "VECSEC" (vectors per second) into proper vectors per
second values.
"""
# Convert string output from space_packet_parser to numpy array of
# big-endian bytes
self.VECTORS = np.frombuffer(
int(self.VECTORS, 2).to_bytes(len(self.VECTORS) // 8, "big"), # type: ignore[arg-type]
# TODO Check MYPY Error: Argument 1 to "int" has incompatible type
# "Union[ndarray[Any, Any], str]"; expected "Union[str, bytes, bytearray]"
dtype=np.dtype(">b"),
)

# Remove buffer from end of vectors. Vector data needs to be in 50 bit chunks,
# and may have an extra byte at the end from CCSDS padding.
if len(self.VECTORS) % 2:
self.VECTORS = self.VECTORS[:-1]
if isinstance(self.VECTORS, str):
# Convert string output from space_packet_parser to numpy array of
# big-endian bytes
self.VECTORS = np.frombuffer(
int(self.VECTORS, 2).to_bytes(len(self.VECTORS) // 8, "big"),
dtype=np.dtype(">B"),
)

self.PRI_VECSEC = 2**self.PRI_VECSEC
self.SEC_VECSEC = 2**self.SEC_VECSEC
10 changes: 2 additions & 8 deletions imap_processing/mag/l1a/mag_l1a.py
Original file line number Diff line number Diff line change
Expand Up @@ -143,9 +143,6 @@ def process_packets(
mago = {}

for mag_l0 in mag_l0_list:
if mag_l0.COMPRESSION:
raise NotImplementedError("Unable to process compressed data")

primary_start_time = TimeTuple(mag_l0.PRI_COARSETM, mag_l0.PRI_FNTM)
secondary_start_time = TimeTuple(mag_l0.SEC_COARSETM, mag_l0.SEC_FNTM)

Expand Down Expand Up @@ -181,14 +178,11 @@ def process_packets(
# now we know the number of secs of data in the packet, and the data rates of
# each sensor, we can calculate how much data is in this packet and where the
# byte boundaries are.

primary_vectors, secondary_vectors = MagL1a.process_vector_data(
mag_l0.VECTORS.astype(dtype=np.int32), # type: ignore[union-attr]
# TODO Maybe Change, Item "str" of "Union[Any, str]"
# has no attribute "astype"
# this is because mypy expects both to have the attributes
mag_l0.VECTORS, # type: ignore
primary_packet_data.total_vectors,
secondary_packet_data.total_vectors,
mag_l0.COMPRESSION,
)

primary_timestamped_vectors = MagL1a.calculate_vector_time(
Expand Down
Loading
Loading